diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8c2271d5..367e8e14 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -133,7 +133,12 @@ jobs: - name: Test run: | cargo test --all-features --workspace - cargo test --package cuprate-blockchain --no-default-features --features redb --features service + cargo test --package cuprate-blockchain --no-default-features --features redb + + - name: Hack Check + run: | + cargo install cargo-hack --locked + cargo hack --workspace check --feature-powerset --no-dev-deps # TODO: upload binaries with `actions/upload-artifact@v3` - name: Build diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1b66a58e..2d990601 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -120,12 +120,15 @@ Before pushing your code, please run the following at the root of the repository After that, ensure all other CI passes by running: -| Command | Does what | -|------------------------------------------------------------------------|-----------| -| `RUSTDOCFLAGS='-D warnings' cargo doc --workspace --all-features` | Checks documentation is OK -| `cargo clippy --workspace --all-features --all-targets -- -D warnings` | Checks clippy lints are satisfied -| `cargo test --all-features --workspace` | Runs all tests -| `cargo build --all-features --all-targets --workspace` | Builds all code +| Command | Does what | +|------------------------------------------------------------------------|-------------------------------------------------------------------------| +| `RUSTDOCFLAGS='-D warnings' cargo doc --workspace --all-features` | Checks documentation is OK | +| `cargo clippy --workspace --all-features --all-targets -- -D warnings` | Checks clippy lints are satisfied | +| `cargo test --all-features --workspace` | Runs all tests | +| `cargo build --all-features --all-targets --workspace` | Builds all code | +| `cargo hack --workspace check --feature-powerset --no-dev-deps` | Uses `cargo hack` to check our crates 
build with different features set | + +`cargo hack` can be installed with `cargo` from: https://github.com/taiki-e/cargo-hack. **Note: in order for some tests to work, you will need to place a [`monerod`](https://www.getmonero.org/downloads/) binary at the root of the repository.** diff --git a/Cargo.lock b/Cargo.lock index 76ad13f0..fbac80ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -56,6 +56,18 @@ version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + [[package]] name = "async-stream" version = "0.3.5" @@ -238,6 +250,19 @@ dependencies = [ "digest", ] +[[package]] +name = "blake3" +version = "1.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d82033247fd8e890df8f740e407ad4d038debb9eb1f40533fffb32e7d17dc6f7" +dependencies = [ + "arrayref", + "arrayvec", + "cc", + "cfg-if", + "constant_time_eq", +] + [[package]] name = "block-buffer" version = "0.10.4" @@ -403,6 +428,12 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "constant_time_eq" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" + [[package]] name = "core-foundation" version = "0.9.4" @@ -575,6 +606,7 @@ name = "cuprate-consensus" version = "0.1.0" dependencies = [ "cfg-if", + "cuprate-consensus-context", "cuprate-consensus-rules", "cuprate-helper", "cuprate-test-utils", @@ -587,12 +619,30 @@ dependencies = [ "proptest", "proptest-derive", "rand", - 
"randomx-rs", "rayon", "thiserror", "thread_local", "tokio", "tokio-test", + "tower 0.5.1", + "tracing", +] + +[[package]] +name = "cuprate-consensus-context" +version = "0.1.0" +dependencies = [ + "cuprate-consensus-rules", + "cuprate-helper", + "cuprate-types", + "futures", + "hex", + "monero-serai", + "randomx-rs", + "rayon", + "thiserror", + "thread_local", + "tokio", "tokio-util", "tower 0.5.1", "tracing", @@ -704,6 +754,7 @@ dependencies = [ "clap", "cuprate-blockchain", "cuprate-consensus", + "cuprate-consensus-context", "cuprate-consensus-rules", "cuprate-helper", "cuprate-types", @@ -814,6 +865,7 @@ dependencies = [ "cuprate-helper", "cuprate-pruning", "cuprate-test-utils", + "cuprate-types", "cuprate-wire", "futures", "hex", @@ -899,6 +951,7 @@ name = "cuprate-txpool" version = "0.0.0" dependencies = [ "bitflags 2.6.0", + "blake3", "bytemuck", "cuprate-database", "cuprate-database-service", @@ -972,6 +1025,7 @@ dependencies = [ "cuprate-async-buffer", "cuprate-blockchain", "cuprate-consensus", + "cuprate-consensus-context", "cuprate-consensus-rules", "cuprate-constants", "cuprate-cryptonight", diff --git a/Cargo.toml b/Cargo.toml index 6c322fbd..d5aca71e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,7 @@ members = [ "binaries/cuprated", "constants", "consensus", + "consensus/context", "consensus/fast-sync", "consensus/rules", "cryptonight", @@ -49,9 +50,39 @@ opt-level = 1 opt-level = 3 [workspace.dependencies] +# Cuprate members +cuprate-fast-sync = { path = "consensus/fast-sync" ,default-features = false} +cuprate-consensus-rules = { path = "consensus/rules" ,default-features = false} +cuprate-constants = { path = "constants" ,default-features = false} +cuprate-consensus = { path = "consensus" ,default-features = false} +cuprate-consensus-context = { path = "consensus/context" ,default-features = false} +cuprate-cryptonight = { path = "cryptonight" ,default-features = false} +cuprate-helper = { path = "helper" ,default-features = false} 
+cuprate-epee-encoding = { path = "net/epee-encoding" ,default-features = false} +cuprate-fixed-bytes = { path = "net/fixed-bytes" ,default-features = false} +cuprate-levin = { path = "net/levin" ,default-features = false} +cuprate-wire = { path = "net/wire" ,default-features = false} +cuprate-p2p = { path = "p2p/p2p" ,default-features = false} +cuprate-p2p-core = { path = "p2p/p2p-core" ,default-features = false} +cuprate-dandelion-tower = { path = "p2p/dandelion-tower" ,default-features = false} +cuprate-async-buffer = { path = "p2p/async-buffer" ,default-features = false} +cuprate-address-book = { path = "p2p/address-book" ,default-features = false} +cuprate-blockchain = { path = "storage/blockchain" ,default-features = false} +cuprate-database = { path = "storage/database" ,default-features = false} +cuprate-database-service = { path = "storage/service" ,default-features = false} +cuprate-txpool = { path = "storage/txpool" ,default-features = false} +cuprate-pruning = { path = "pruning" ,default-features = false} +cuprate-test-utils = { path = "test-utils" ,default-features = false} +cuprate-types = { path = "types" ,default-features = false} +cuprate-json-rpc = { path = "rpc/json-rpc" ,default-features = false} +cuprate-rpc-types = { path = "rpc/types" ,default-features = false} +cuprate-rpc-interface = { path = "rpc/interface" ,default-features = false} + +# External dependencies anyhow = { version = "1.0.89", default-features = false } async-trait = { version = "0.1.82", default-features = false } bitflags = { version = "2.6.0", default-features = false } +blake3 = { version = "1", default-features = false } borsh = { version = "1.5.1", default-features = false } bytemuck = { version = "1.18.0", default-features = false } bytes = { version = "1.7.2", default-features = false } @@ -322,4 +353,4 @@ non_camel_case_types = "deny" # unused_results = "deny" # non_exhaustive_omitted_patterns = "deny" # missing_docs = "deny" -# missing_copy_implementations = "deny" 
\ No newline at end of file +# missing_copy_implementations = "deny" diff --git a/binaries/cuprated/Cargo.toml b/binaries/cuprated/Cargo.toml index dc504d8d..b2700681 100644 --- a/binaries/cuprated/Cargo.toml +++ b/binaries/cuprated/Cargo.toml @@ -9,31 +9,32 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated" [dependencies] # TODO: after v1.0.0, remove unneeded dependencies. -cuprate-consensus = { path = "../../consensus" } -cuprate-fast-sync = { path = "../../consensus/fast-sync" } -cuprate-consensus-rules = { path = "../../consensus/rules" } -cuprate-constants = { path = "../../constants", features = ["build", "rpc"] } -cuprate-cryptonight = { path = "../../cryptonight" } -cuprate-helper = { path = "../../helper" } -cuprate-epee-encoding = { path = "../../net/epee-encoding" } -cuprate-fixed-bytes = { path = "../../net/fixed-bytes" } -cuprate-levin = { path = "../../net/levin" } -cuprate-wire = { path = "../../net/wire" } -cuprate-p2p = { path = "../../p2p/p2p" } -cuprate-p2p-core = { path = "../../p2p/p2p-core" } -cuprate-dandelion-tower = { path = "../../p2p/dandelion-tower" } -cuprate-async-buffer = { path = "../../p2p/async-buffer" } -cuprate-address-book = { path = "../../p2p/address-book" } -cuprate-blockchain = { path = "../../storage/blockchain", features = ["service"] } -cuprate-database-service = { path = "../../storage/service" } -cuprate-txpool = { path = "../../storage/txpool" } -cuprate-database = { path = "../../storage/database" } -cuprate-pruning = { path = "../../pruning" } -cuprate-test-utils = { path = "../../test-utils" } -cuprate-types = { path = "../../types" } -cuprate-json-rpc = { path = "../../rpc/json-rpc" } -cuprate-rpc-interface = { path = "../../rpc/interface" } -cuprate-rpc-types = { path = "../../rpc/types" } +cuprate-consensus = { workspace = true } +cuprate-fast-sync = { workspace = true } +cuprate-consensus-context = { workspace = true } +cuprate-consensus-rules = { workspace = true } 
+cuprate-constants = { workspace = true, features = ["build", "rpc"] } +cuprate-cryptonight = { workspace = true } +cuprate-helper = { workspace = true } +cuprate-epee-encoding = { workspace = true } +cuprate-fixed-bytes = { workspace = true } +cuprate-levin = { workspace = true } +cuprate-wire = { workspace = true } +cuprate-p2p = { workspace = true } +cuprate-p2p-core = { workspace = true } +cuprate-dandelion-tower = { workspace = true, features = ["txpool"] } +cuprate-async-buffer = { workspace = true } +cuprate-address-book = { workspace = true } +cuprate-blockchain = { workspace = true } +cuprate-database-service = { workspace = true } +cuprate-txpool = { workspace = true } +cuprate-database = { workspace = true } +cuprate-pruning = { workspace = true } +cuprate-test-utils = { workspace = true } +cuprate-types = { workspace = true } +cuprate-json-rpc = { workspace = true } +cuprate-rpc-interface = { workspace = true } +cuprate-rpc-types = { workspace = true } # TODO: after v1.0.0, remove unneeded dependencies. 
anyhow = { workspace = true } diff --git a/binaries/cuprated/src/blockchain.rs b/binaries/cuprated/src/blockchain.rs index a06f3fa7..c4b75e4e 100644 --- a/binaries/cuprated/src/blockchain.rs +++ b/binaries/cuprated/src/blockchain.rs @@ -25,7 +25,7 @@ mod manager; mod syncer; mod types; -use types::{ +pub use types::{ ConcreteBlockVerifierService, ConcreteTxVerifierService, ConsensusBlockchainReadHandle, }; diff --git a/binaries/cuprated/src/blockchain/interface.rs b/binaries/cuprated/src/blockchain/interface.rs index 985e60d8..2482784f 100644 --- a/binaries/cuprated/src/blockchain/interface.rs +++ b/binaries/cuprated/src/blockchain/interface.rs @@ -8,17 +8,16 @@ use std::{ }; use monero_serai::{block::Block, transaction::Transaction}; -use rayon::prelude::*; use tokio::sync::{mpsc, oneshot}; use tower::{Service, ServiceExt}; use cuprate_blockchain::service::BlockchainReadHandle; use cuprate_consensus::transactions::new_tx_verification_data; -use cuprate_helper::cast::usize_to_u64; -use cuprate_types::{ - blockchain::{BlockchainReadRequest, BlockchainResponse}, - Chain, +use cuprate_txpool::service::{ + interface::{TxpoolReadRequest, TxpoolReadResponse}, + TxpoolReadHandle, }; +use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse}; use crate::{ blockchain::manager::{BlockchainManagerCommand, IncomingBlockOk}, @@ -38,7 +37,7 @@ pub enum IncomingBlockError { /// /// The inner values are the block hash and the indexes of the missing txs in the block. #[error("Unknown transactions in block.")] - UnknownTransactions([u8; 32], Vec), + UnknownTransactions([u8; 32], Vec), /// We are missing the block's parent. 
#[error("The block has an unknown parent.")] Orphan, @@ -59,8 +58,9 @@ pub enum IncomingBlockError { /// - the block's parent is unknown pub async fn handle_incoming_block( block: Block, - given_txs: Vec, + mut given_txs: HashMap<[u8; 32], Transaction>, blockchain_read_handle: &mut BlockchainReadHandle, + txpool_read_handle: &mut TxpoolReadHandle, ) -> Result { /// A [`HashSet`] of block hashes that the blockchain manager is currently handling. /// @@ -72,7 +72,12 @@ pub async fn handle_incoming_block( /// which are also more expensive than `Mutex`s. static BLOCKS_BEING_HANDLED: LazyLock>> = LazyLock::new(|| Mutex::new(HashSet::new())); - // FIXME: we should look in the tx-pool for txs when that is ready. + + if given_txs.len() > block.transactions.len() { + return Err(IncomingBlockError::InvalidBlock(anyhow::anyhow!( + "Too many transactions given for block" + ))); + } if !block_exists(block.header.previous, blockchain_read_handle) .await @@ -90,23 +95,36 @@ pub async fn handle_incoming_block( return Ok(IncomingBlockOk::AlreadyHave); } - // TODO: remove this when we have a working tx-pool. - if given_txs.len() != block.transactions.len() { - return Err(IncomingBlockError::UnknownTransactions( - block_hash, - (0..usize_to_u64(block.transactions.len())).collect(), - )); - } + let TxpoolReadResponse::TxsForBlock { mut txs, missing } = txpool_read_handle + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(TxpoolReadRequest::TxsForBlock(block.transactions.clone())) + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + else { + unreachable!() + }; - // TODO: check we actually got given the right txs. 
- let prepped_txs = given_txs - .into_par_iter() - .map(|tx| { - let tx = new_tx_verification_data(tx)?; - Ok((tx.tx_hash, tx)) - }) - .collect::>() - .map_err(IncomingBlockError::InvalidBlock)?; + if !missing.is_empty() { + let needed_hashes = missing.iter().map(|index| block.transactions[*index]); + + for needed_hash in needed_hashes { + let Some(tx) = given_txs.remove(&needed_hash) else { + // We return back the indexes of all txs missing from our pool, not taking into account the txs + // that were given with the block, as these txs will be dropped. It is not worth it to try to add + // these txs to the pool as this will only happen with a misbehaving peer or if the txpool reaches + // the size limit. + return Err(IncomingBlockError::UnknownTransactions(block_hash, missing)); + }; + + txs.insert( + needed_hash, + new_tx_verification_data(tx) + .map_err(|e| IncomingBlockError::InvalidBlock(e.into()))?, + ); + } + } let Some(incoming_block_tx) = COMMAND_TX.get() else { // We could still be starting up the blockchain manager. @@ -119,28 +137,37 @@ pub async fn handle_incoming_block( return Ok(IncomingBlockOk::AlreadyHave); } - // From this point on we MUST not early return without removing the block hash from `BLOCKS_BEING_HANDLED`. + // We must remove the block hash from `BLOCKS_BEING_HANDLED`. 
+ let _guard = { + struct RemoveFromBlocksBeingHandled { + block_hash: [u8; 32], + } + impl Drop for RemoveFromBlocksBeingHandled { + fn drop(&mut self) { + BLOCKS_BEING_HANDLED + .lock() + .unwrap() + .remove(&self.block_hash); + } + } + RemoveFromBlocksBeingHandled { block_hash } + }; let (response_tx, response_rx) = oneshot::channel(); incoming_block_tx .send(BlockchainManagerCommand::AddBlock { block, - prepped_txs, + prepped_txs: txs, response_tx, }) .await .expect("TODO: don't actually panic here, an err means we are shutting down"); - let res = response_rx + response_rx .await .expect("The blockchain manager will always respond") - .map_err(IncomingBlockError::InvalidBlock); - - // Remove the block hash from the blocks being handled. - BLOCKS_BEING_HANDLED.lock().unwrap().remove(&block_hash); - - res + .map_err(IncomingBlockError::InvalidBlock) } /// Check if we have a block with the given hash. diff --git a/binaries/cuprated/src/blockchain/manager.rs b/binaries/cuprated/src/blockchain/manager.rs index 118c8dd6..2166795e 100644 --- a/binaries/cuprated/src/blockchain/manager.rs +++ b/binaries/cuprated/src/blockchain/manager.rs @@ -8,15 +8,17 @@ use tracing::error; use cuprate_blockchain::service::{BlockchainReadHandle, BlockchainWriteHandle}; use cuprate_consensus::{ - context::RawBlockChainContext, BlockChainContextRequest, BlockChainContextResponse, - BlockChainContextService, BlockVerifierService, ExtendedConsensusError, TxVerifierService, - VerifyBlockRequest, VerifyBlockResponse, VerifyTxRequest, VerifyTxResponse, + BlockChainContextRequest, BlockChainContextResponse, BlockChainContextService, + BlockVerifierService, ExtendedConsensusError, TxVerifierService, VerifyBlockRequest, + VerifyBlockResponse, VerifyTxRequest, VerifyTxResponse, }; +use cuprate_consensus_context::RawBlockChainContext; use cuprate_p2p::{ block_downloader::{BlockBatch, BlockDownloaderConfig}, BroadcastSvc, NetworkInterface, }; use cuprate_p2p_core::ClearNet; +use 
cuprate_txpool::service::TxpoolWriteHandle; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse}, Chain, TransactionVerificationData, @@ -45,6 +47,7 @@ pub async fn init_blockchain_manager( clearnet_interface: NetworkInterface, blockchain_write_handle: BlockchainWriteHandle, blockchain_read_handle: BlockchainReadHandle, + txpool_write_handle: TxpoolWriteHandle, mut blockchain_context_service: BlockChainContextService, block_verifier_service: ConcreteBlockVerifierService, block_downloader_config: BlockDownloaderConfig, @@ -79,6 +82,7 @@ pub async fn init_blockchain_manager( let manager = BlockchainManager { blockchain_write_handle, blockchain_read_handle, + txpool_write_handle, blockchain_context_service, cached_blockchain_context: blockchain_context.unchecked_blockchain_context().clone(), block_verifier_service, @@ -101,6 +105,8 @@ pub struct BlockchainManager { blockchain_write_handle: BlockchainWriteHandle, /// A [`BlockchainReadHandle`]. blockchain_read_handle: BlockchainReadHandle, + /// A [`TxpoolWriteHandle`]. + txpool_write_handle: TxpoolWriteHandle, // TODO: Improve the API of the cache service. // TODO: rename the cache service -> `BlockchainContextService`. /// The blockchain context cache, this caches the current state of the blockchain to quickly calculate/retrieve diff --git a/binaries/cuprated/src/blockchain/manager/handler.rs b/binaries/cuprated/src/blockchain/manager/handler.rs index 9603bad5..5d1cd2d6 100644 --- a/binaries/cuprated/src/blockchain/manager/handler.rs +++ b/binaries/cuprated/src/blockchain/manager/handler.rs @@ -1,7 +1,10 @@ //! The blockchain manager handler functions. 
use bytes::Bytes; use futures::{TryFutureExt, TryStreamExt}; -use monero_serai::{block::Block, transaction::Transaction}; +use monero_serai::{ + block::Block, + transaction::{Input, Transaction}, +}; use rayon::prelude::*; use std::ops::ControlFlow; use std::{collections::HashMap, sync::Arc}; @@ -10,23 +13,21 @@ use tracing::info; use cuprate_blockchain::service::{BlockchainReadHandle, BlockchainWriteHandle}; use cuprate_consensus::{ - block::PreparedBlock, context::NewBlockData, transactions::new_tx_verification_data, - BlockChainContextRequest, BlockChainContextResponse, BlockVerifierService, - ExtendedConsensusError, VerifyBlockRequest, VerifyBlockResponse, VerifyTxRequest, - VerifyTxResponse, + block::PreparedBlock, transactions::new_tx_verification_data, BlockChainContextRequest, + BlockChainContextResponse, BlockVerifierService, ExtendedConsensusError, VerifyBlockRequest, + VerifyBlockResponse, VerifyTxRequest, VerifyTxResponse, }; +use cuprate_consensus_context::NewBlockData; use cuprate_helper::cast::usize_to_u64; use cuprate_p2p::{block_downloader::BlockBatch, constants::LONG_BAN, BroadcastRequest}; +use cuprate_txpool::service::interface::TxpoolWriteRequest; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest}, AltBlockInformation, HardFork, TransactionVerificationData, VerifiedBlockInformation, }; -use crate::blockchain::manager::commands::IncomingBlockOk; use crate::{ - blockchain::{ - manager::commands::BlockchainManagerCommand, types::ConsensusBlockchainReadHandle, - }, + blockchain::manager::commands::{BlockchainManagerCommand, IncomingBlockOk}, constants::PANIC_CRITICAL_SERVICE_ERROR, signals::REORG_LOCK, }; @@ -434,6 +435,18 @@ impl super::BlockchainManager { &mut self, verified_block: VerifiedBlockInformation, ) { + // FIXME: this is pretty inefficient, we should probably return the KI map created in the consensus crate. 
+ let spent_key_images = verified_block + .txs + .iter() + .flat_map(|tx| { + tx.tx.prefix().inputs.iter().map(|input| match input { + Input::ToKey { key_image, .. } => key_image.compress().0, + Input::Gen(_) => unreachable!(), + }) + }) + .collect::>(); + self.blockchain_context_service .ready() .await @@ -472,6 +485,14 @@ impl super::BlockchainManager { }; self.cached_blockchain_context = blockchain_context.unchecked_blockchain_context().clone(); + + self.txpool_write_handle + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(TxpoolWriteRequest::NewBlock { spent_key_images }) + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR); } } diff --git a/binaries/cuprated/src/blockchain/syncer.rs b/binaries/cuprated/src/blockchain/syncer.rs index 7d6874e0..913c9834 100644 --- a/binaries/cuprated/src/blockchain/syncer.rs +++ b/binaries/cuprated/src/blockchain/syncer.rs @@ -1,11 +1,10 @@ // FIXME: This whole module is not great and should be rewritten when the PeerSet is made. -use std::{pin::pin, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use futures::StreamExt; -use tokio::time::interval; use tokio::{ sync::{mpsc, Notify}, - time::sleep, + time::interval, }; use tower::{Service, ServiceExt}; use tracing::instrument; diff --git a/binaries/cuprated/src/blockchain/types.rs b/binaries/cuprated/src/blockchain/types.rs index e3ee62b3..54e46621 100644 --- a/binaries/cuprated/src/blockchain/types.rs +++ b/binaries/cuprated/src/blockchain/types.rs @@ -1,13 +1,7 @@ -use std::task::{Context, Poll}; - -use futures::future::BoxFuture; -use futures::{FutureExt, TryFutureExt}; -use tower::{util::MapErr, Service}; +use tower::util::MapErr; use cuprate_blockchain::{cuprate_database::RuntimeError, service::BlockchainReadHandle}; use cuprate_consensus::{BlockChainContextService, BlockVerifierService, TxVerifierService}; -use cuprate_p2p::block_downloader::{ChainSvcRequest, ChainSvcResponse}; -use cuprate_types::blockchain::{BlockchainReadRequest, 
BlockchainResponse}; /// The [`BlockVerifierService`] with all generic types defined. pub type ConcreteBlockVerifierService = BlockVerifierService< diff --git a/binaries/cuprated/src/p2p.rs b/binaries/cuprated/src/p2p.rs index f55d41db..cdf1cef7 100644 --- a/binaries/cuprated/src/p2p.rs +++ b/binaries/cuprated/src/p2p.rs @@ -2,4 +2,7 @@ //! //! Will handle initiating the P2P and contains a protocol request handler. +mod network_address; pub mod request_handler; + +pub use network_address::CrossNetworkInternalPeerId; diff --git a/binaries/cuprated/src/p2p/network_address.rs b/binaries/cuprated/src/p2p/network_address.rs new file mode 100644 index 00000000..7fa8e86c --- /dev/null +++ b/binaries/cuprated/src/p2p/network_address.rs @@ -0,0 +1,16 @@ +use std::net::SocketAddr; + +use cuprate_p2p_core::{client::InternalPeerID, ClearNet, NetworkZone}; + +/// An identifier for a P2P peer on any network. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum CrossNetworkInternalPeerId { + /// A clear-net peer. + ClearNet(InternalPeerID<::Addr>), +} + +impl From::Addr>> for CrossNetworkInternalPeerId { + fn from(addr: InternalPeerID<::Addr>) -> Self { + Self::ClearNet(addr) + } +} diff --git a/binaries/cuprated/src/rpc.rs b/binaries/cuprated/src/rpc.rs index 9d12c1b5..1f06d0ac 100644 --- a/binaries/cuprated/src/rpc.rs +++ b/binaries/cuprated/src/rpc.rs @@ -3,6 +3,7 @@ //! Will contain the code to initiate the RPC and a request handler. mod bin; +mod constants; mod handler; mod helper; mod json; diff --git a/binaries/cuprated/src/rpc/constants.rs b/binaries/cuprated/src/rpc/constants.rs new file mode 100644 index 00000000..1236269d --- /dev/null +++ b/binaries/cuprated/src/rpc/constants.rs @@ -0,0 +1,5 @@ +//! Constants used within RPC. + +/// The string message used in RPC response fields for when +/// `cuprated` does not support a field that `monerod` has. 
+pub(super) const FIELD_NOT_SUPPORTED: &str = "`cuprated` does not support this field."; diff --git a/binaries/cuprated/src/rpc/handler.rs b/binaries/cuprated/src/rpc/handler.rs index 13e8924e..b0d54ab8 100644 --- a/binaries/cuprated/src/rpc/handler.rs +++ b/binaries/cuprated/src/rpc/handler.rs @@ -57,33 +57,6 @@ pub enum BlockchainManagerRequest { /// The height of the next block in the chain. TargetHeight, - /// Calculate proof-of-work for this block. - CalculatePow { - /// The hardfork of the protocol at this block height. - hardfork: HardFork, - /// The height of the block. - height: usize, - /// The block data. - block: Block, - /// The seed hash for the proof-of-work. - seed_hash: [u8; 32], - }, - - /// Add auxirilly proof-of-work to a block. - /// - /// From the RPC `add_aux_pow` usecase's documentation: - /// ```` - /// This enables merge mining with Monero without requiring - /// software that manually alters the extra field in the coinbase - /// tx to include the merkle root of the aux blocks. - /// ```` - AddAuxPow { - /// The block template to add to. - block_template: Block, - /// The auxirilly proof-of-work to add. - aux_pow: Vec, - }, - /// Generate new blocks. /// /// This request is only for regtest, see RPC's `generateblocks`. @@ -98,19 +71,17 @@ pub enum BlockchainManagerRequest { wallet_address: String, }, - /// Get a visual [`String`] overview of blockchain progress. - /// - /// This is a highly implementation specific format used by - /// `monerod` in the `sync_info` RPC call's `overview` field; - /// it is essentially an ASCII visual of blocks. - /// - /// See also: - /// - - /// - - Overview { - /// TODO: the current blockchain height? do we need to pass this? - height: usize, - }, + // // TODO: the below requests actually belong to the block downloader/syncer: + // // + // /// Get [`Span`] data. + // /// + // /// This is data that describes an active downloading process, + // /// if we are fully synced, this will return an empty [`Vec`]. 
+ // Spans, + + // + /// Get the next [`PruningSeed`] needed for a pruned sync. + NextNeededPruningSeed, } /// TODO: use real type when public. @@ -144,12 +115,6 @@ pub enum BlockchainManagerResponse { /// Response to [`BlockchainManagerRequest::TargetHeight`] TargetHeight { height: usize }, - /// Response to [`BlockchainManagerRequest::CalculatePow`] - CalculatePow([u8; 32]), - - /// Response to [`BlockchainManagerRequest::AddAuxPow`] - AddAuxPow(AddAuxPow), - /// Response to [`BlockchainManagerRequest::GenerateBlocks`] GenerateBlocks { /// Hashes of the blocks generated. @@ -158,8 +123,10 @@ pub enum BlockchainManagerResponse { height: usize, }, - /// Response to [`BlockchainManagerRequest::Overview`] - Overview(String), + // /// Response to [`BlockchainManagerRequest::Spans`]. + // Spans(Vec>), + /// Response to [`BlockchainManagerRequest::NextNeededPruningSeed`]. + NextNeededPruningSeed(PruningSeed), } /// TODO: use real type when public. diff --git a/binaries/cuprated/src/rpc/helper.rs b/binaries/cuprated/src/rpc/helper.rs index 5d6477c5..dec5d79e 100644 --- a/binaries/cuprated/src/rpc/helper.rs +++ b/binaries/cuprated/src/rpc/helper.rs @@ -8,7 +8,6 @@ use std::sync::Arc; use anyhow::{anyhow, Error}; -use cuprate_rpc_types::misc::{BlockHeader, KeyImageSpentStatus}; use futures::StreamExt; use monero_serai::block::Block; use tower::{Service, ServiceExt}; @@ -19,6 +18,7 @@ use cuprate_helper::{ cast::{u64_to_usize, usize_to_u64}, map::split_u128_into_low_high_bits, }; +use cuprate_rpc_types::misc::{BlockHeader, KeyImageSpentStatus}; use cuprate_types::{ blockchain::BlockchainReadRequest, Chain, ExtendedBlockHeader, VerifiedBlockInformation, }; diff --git a/binaries/cuprated/src/rpc/json.rs b/binaries/cuprated/src/rpc/json.rs index ed080296..c6602570 100644 --- a/binaries/cuprated/src/rpc/json.rs +++ b/binaries/cuprated/src/rpc/json.rs @@ -4,7 +4,6 @@ use std::{ }; use anyhow::{anyhow, Error}; -use cuprate_p2p_core::{client::handshaker::builder::DummyAddressBook, 
ClearNet}; use futures::TryFutureExt; use monero_serai::block::Block; use strum::{EnumCount, VariantArray}; @@ -19,6 +18,7 @@ use cuprate_helper::{ cast::{u64_to_usize, usize_to_u64}, map::split_u128_into_low_high_bits, }; +use cuprate_p2p_core::{client::handshaker::builder::DummyAddressBook, ClearNet}; use cuprate_rpc_interface::RpcHandler; use cuprate_rpc_types::{ base::{AccessResponseBase, ResponseBase}, @@ -784,12 +784,16 @@ async fn sync_info( .map(|info| SyncInfoPeer { info }) .collect(); - let next_needed_pruning_seed = - address_book::next_needed_pruning_seed::(&mut DummyAddressBook) - .await? - .compress(); - let overview = blockchain_manager::overview(&mut state.blockchain_manager, height).await?; - let spans = address_book::spans::(&mut DummyAddressBook).await?; + // TODO + // let next_needed_pruning_seed = + // address_book::next_needed_pruning_seed::(&mut DummyAddressBook) + // .await? + // .compress(); + // let overview = blockchain_manager::overview(&mut state.blockchain_manager, height).await?; + // let spans = address_book::spans::(&mut DummyAddressBook).await?; + let next_needed_pruning_seed = todo!(); + let overview = todo!(); + let spans = todo!(); Ok(SyncInfoResponse { base: AccessResponseBase::OK, @@ -837,15 +841,16 @@ async fn get_miner_data( let difficulty = format!("{:#x}", context.next_difficulty); let median_weight = usize_to_u64(context.median_weight_for_block_reward); let already_generated_coins = context.already_generated_coins; - let tx_backlog = txpool::block_template_backlog(&mut state.txpool_read) - .await? - .into_iter() - .map(|entry| GetMinerDataTxBacklogEntry { - id: hex::encode(entry.id), - weight: entry.weight, - fee: entry.fee, - }) - .collect(); + let tx_backlog = todo!(); + // let tx_backlog = txpool::block_template_backlog(&mut state.txpool_read) + // .await? 
+ // .into_iter() + // .map(|entry| GetMinerDataTxBacklogEntry { + // id: hex::encode(entry.id), + // weight: entry.weight, + // fee: entry.fee, + // }) + // .collect(); Ok(GetMinerDataResponse { base: ResponseBase::OK, @@ -887,16 +892,18 @@ async fn calc_pow( let block = Block::read(&mut block_blob.as_slice())?; let seed_hash = helper::hex_to_hash(request.seed_hash)?; - let pow_hash = blockchain_manager::calculate_pow( - &mut state.blockchain_manager, - hardfork, - request.height, - block, - seed_hash, - ) - .await?; + // let pow_hash = blockchain_manager::calculate_pow( + // &mut state.blockchain_manager, + // hardfork, + // request.height, + // block, + // seed_hash, + // ) + // .await?; - let hex = hex::encode(pow_hash); + // let hex = hex::encode(pow_hash); + + let hex = todo!(); Ok(CalcPowResponse { pow_hash: hex }) } @@ -931,9 +938,10 @@ async fn add_aux_pow( }) .collect::, Error>>()?; - let resp = - blockchain_manager::add_aux_pow(&mut state.blockchain_manager, block_template, aux_pow) - .await?; + let resp = todo!(); + // let resp = + // blockchain_manager::add_aux_pow(&mut state.blockchain_manager, block_template, aux_pow) + // .await?; let blocktemplate_blob = hex::encode(resp.blocktemplate_blob); let blockhashing_blob = hex::encode(resp.blockhashing_blob); diff --git a/binaries/cuprated/src/rpc/request/address_book.rs b/binaries/cuprated/src/rpc/request/address_book.rs index c3ffbd3c..9d71b51c 100644 --- a/binaries/cuprated/src/rpc/request/address_book.rs +++ b/binaries/cuprated/src/rpc/request/address_book.rs @@ -3,16 +3,20 @@ use std::convert::Infallible; use anyhow::{anyhow, Error}; -use cuprate_pruning::PruningSeed; -use cuprate_rpc_types::misc::{ConnectionInfo, Span}; use tower::ServiceExt; use cuprate_helper::cast::usize_to_u64; use cuprate_p2p_core::{ services::{AddressBookRequest, AddressBookResponse}, - types::BanState, + types::{BanState, ConnectionId}, AddressBook, NetworkZone, }; +use cuprate_pruning::PruningSeed; +use 
cuprate_rpc_types::misc::{ConnectionInfo, Span}; + +use crate::rpc::constants::FIELD_NOT_SUPPORTED; + +// FIXME: use `anyhow::Error` over `tower::BoxError` in address book. // FIXME: use `anyhow::Error` over `tower::BoxError` in address book. @@ -53,34 +57,28 @@ pub(crate) async fn connection_info( let vec = vec .into_iter() .map(|info| { - use cuprate_p2p_core::types::AddressType as A1; - use cuprate_rpc_types::misc::AddressType as A2; - - let address_type = match info.address_type { - A1::Invalid => A2::Invalid, - A1::Ipv4 => A2::Ipv4, - A1::Ipv6 => A2::Ipv6, - A1::I2p => A2::I2p, - A1::Tor => A2::Tor, + let (ip, port) = match info.socket_addr { + Some(socket) => (socket.ip().to_string(), socket.port().to_string()), + None => (String::new(), String::new()), }; ConnectionInfo { address: info.address.to_string(), - address_type, + address_type: info.address_type, avg_download: info.avg_download, avg_upload: info.avg_upload, - connection_id: hex::encode(info.connection_id.to_ne_bytes()), + connection_id: String::from(ConnectionId::DEFAULT_STR), current_download: info.current_download, current_upload: info.current_upload, height: info.height, host: info.host, incoming: info.incoming, - ip: info.ip, + ip, live_time: info.live_time, localhost: info.localhost, local_ip: info.local_ip, - peer_id: info.peer_id, - port: info.port, + peer_id: hex::encode(info.peer_id.to_ne_bytes()), + port, pruning_seed: info.pruning_seed.compress(), recv_count: info.recv_count, recv_idle_time: info.recv_idle_time, @@ -170,53 +168,3 @@ pub(crate) async fn get_bans( Ok(bans) } - -/// [`AddressBookRequest::Spans`] -pub(crate) async fn spans( - address_book: &mut impl AddressBook, -) -> Result, Error> { - let AddressBookResponse::Spans(vec) = address_book - .ready() - .await - .map_err(|e| anyhow!(e))? - .call(AddressBookRequest::Spans) - .await - .map_err(|e| anyhow!(e))? - else { - unreachable!(); - }; - - // FIXME: impl this map somewhere instead of inline. 
- let vec = vec - .into_iter() - .map(|span| Span { - connection_id: hex::encode(span.connection_id.to_ne_bytes()), - nblocks: span.nblocks, - rate: span.rate, - remote_address: span.remote_address.to_string(), - size: span.size, - speed: span.speed, - start_block_height: span.start_block_height, - }) - .collect(); - - Ok(vec) -} - -/// [`AddressBookRequest::NextNeededPruningSeed`] -pub(crate) async fn next_needed_pruning_seed( - address_book: &mut impl AddressBook, -) -> Result { - let AddressBookResponse::NextNeededPruningSeed(seed) = address_book - .ready() - .await - .map_err(|e| anyhow!(e))? - .call(AddressBookRequest::NextNeededPruningSeed) - .await - .map_err(|e| anyhow!(e))? - else { - unreachable!(); - }; - - Ok(seed) -} diff --git a/binaries/cuprated/src/rpc/request/blockchain_context.rs b/binaries/cuprated/src/rpc/request/blockchain_context.rs index 9001b32d..c6f0f225 100644 --- a/binaries/cuprated/src/rpc/request/blockchain_context.rs +++ b/binaries/cuprated/src/rpc/request/blockchain_context.rs @@ -3,12 +3,14 @@ use std::convert::Infallible; use anyhow::{anyhow, Error}; +use monero_serai::block::Block; use tower::{Service, ServiceExt}; -use cuprate_consensus::context::{ +use cuprate_consensus_context::{ BlockChainContext, BlockChainContextRequest, BlockChainContextResponse, BlockChainContextService, }; +use cuprate_helper::cast::u64_to_usize; use cuprate_types::{FeeEstimate, HardFork, HardForkInfo}; // FIXME: use `anyhow::Error` over `tower::BoxError` in blockchain context. @@ -68,3 +70,30 @@ pub(crate) async fn fee_estimate( Ok(fee) } + +/// [`BlockChainContextRequest::CalculatePow`] +pub(crate) async fn calculate_pow( + blockchain_context: &mut BlockChainContextService, + hardfork: HardFork, + height: u64, + block: Box, + seed_hash: [u8; 32], +) -> Result<[u8; 32], Error> { + let BlockChainContextResponse::CalculatePow(hash) = blockchain_context + .ready() + .await + .map_err(|e| anyhow!(e))? 
+ .call(BlockChainContextRequest::CalculatePow { + hardfork, + height: u64_to_usize(height), + block, + seed_hash, + }) + .await + .map_err(|e| anyhow!(e))? + else { + unreachable!(); + }; + + Ok(hash) +} diff --git a/binaries/cuprated/src/rpc/request/blockchain_manager.rs b/binaries/cuprated/src/rpc/request/blockchain_manager.rs index 62eecdae..18b75deb 100644 --- a/binaries/cuprated/src/rpc/request/blockchain_manager.rs +++ b/binaries/cuprated/src/rpc/request/blockchain_manager.rs @@ -1,15 +1,18 @@ //! Functions for [`BlockchainManagerRequest`] & [`BlockchainManagerResponse`]. use anyhow::Error; -use cuprate_types::{AddAuxPow, AuxPow, HardFork}; use monero_serai::block::Block; use tower::{Service, ServiceExt}; use cuprate_helper::cast::{u64_to_usize, usize_to_u64}; +use cuprate_p2p_core::{types::ConnectionId, NetworkZone}; use cuprate_pruning::PruningSeed; +use cuprate_rpc_types::misc::Span; +use cuprate_types::{AddAuxPow, AuxPow, HardFork}; -use crate::rpc::handler::{ - BlockchainManagerHandle, BlockchainManagerRequest, BlockchainManagerResponse, +use crate::rpc::{ + constants::FIELD_NOT_SUPPORTED, + handler::{BlockchainManagerHandle, BlockchainManagerRequest, BlockchainManagerResponse}, }; /// [`BlockchainManagerRequest::PopBlocks`] @@ -144,52 +147,6 @@ pub(crate) async fn target_height( Ok(usize_to_u64(height)) } -/// [`BlockchainManagerRequest::CalculatePow`] -pub(crate) async fn calculate_pow( - blockchain_manager: &mut BlockchainManagerHandle, - hardfork: HardFork, - height: u64, - block: Block, - seed_hash: [u8; 32], -) -> Result<[u8; 32], Error> { - let BlockchainManagerResponse::CalculatePow(hash) = blockchain_manager - .ready() - .await? - .call(BlockchainManagerRequest::CalculatePow { - hardfork, - height: u64_to_usize(height), - block, - seed_hash, - }) - .await? 
- else { - unreachable!(); - }; - - Ok(hash) -} - -/// [`BlockchainManagerRequest::AddAuxPow`] -pub(crate) async fn add_aux_pow( - blockchain_manager: &mut BlockchainManagerHandle, - block_template: Block, - aux_pow: Vec, -) -> Result { - let BlockchainManagerResponse::AddAuxPow(response) = blockchain_manager - .ready() - .await? - .call(BlockchainManagerRequest::AddAuxPow { - block_template, - aux_pow, - }) - .await? - else { - unreachable!(); - }; - - Ok(response) -} - /// [`BlockchainManagerRequest::GenerateBlocks`] pub(crate) async fn generate_blocks( blockchain_manager: &mut BlockchainManagerHandle, @@ -215,21 +172,50 @@ pub(crate) async fn generate_blocks( Ok((blocks, usize_to_u64(height))) } -/// [`BlockchainManagerRequest::Overview`] -pub(crate) async fn overview( +// [`BlockchainManagerRequest::Spans`] +pub(crate) async fn spans( blockchain_manager: &mut BlockchainManagerHandle, - height: u64, -) -> Result { - let BlockchainManagerResponse::Overview(overview) = blockchain_manager +) -> Result, Error> { + // let BlockchainManagerResponse::Spans(vec) = blockchain_manager + // .ready() + // .await? + // .call(BlockchainManagerRequest::Spans) + // .await? + // else { + // unreachable!(); + // }; + + let vec: Vec> = todo!(); + + // FIXME: impl this map somewhere instead of inline. + let vec = vec + .into_iter() + .map(|span| Span { + connection_id: String::from(ConnectionId::DEFAULT_STR), + nblocks: span.nblocks, + rate: span.rate, + remote_address: span.remote_address.to_string(), + size: span.size, + speed: span.speed, + start_block_height: span.start_block_height, + }) + .collect(); + + Ok(vec) +} + +/// [`BlockchainManagerRequest::NextNeededPruningSeed`] +pub(crate) async fn next_needed_pruning_seed( + blockchain_manager: &mut BlockchainManagerHandle, +) -> Result { + let BlockchainManagerResponse::NextNeededPruningSeed(seed) = blockchain_manager .ready() .await? 
- .call(BlockchainManagerRequest::Overview { - height: u64_to_usize(height), - }) + .call(BlockchainManagerRequest::NextNeededPruningSeed) .await? else { unreachable!(); }; - Ok(overview) + Ok(seed) } diff --git a/binaries/cuprated/src/rpc/request/txpool.rs b/binaries/cuprated/src/rpc/request/txpool.rs index 3ef456cd..eadbb23d 100644 --- a/binaries/cuprated/src/rpc/request/txpool.rs +++ b/binaries/cuprated/src/rpc/request/txpool.rs @@ -11,7 +11,7 @@ use cuprate_txpool::{ interface::{TxpoolReadRequest, TxpoolReadResponse}, TxpoolReadHandle, }, - BlockTemplateTxEntry, TxEntry, + TxEntry, }; // FIXME: use `anyhow::Error` over `tower::BoxError` in txpool. @@ -32,24 +32,6 @@ pub(crate) async fn backlog(txpool_read: &mut TxpoolReadHandle) -> Result Result, Error> { - let TxpoolReadResponse::BlockTemplateBacklog(tx_entries) = txpool_read - .ready() - .await - .map_err(|e| anyhow!(e))? - .call(TxpoolReadRequest::BlockTemplateBacklog) - .await - .map_err(|e| anyhow!(e))? - else { - unreachable!(); - }; - - Ok(tx_entries) -} - /// [`TxpoolReadRequest::Size`] pub(crate) async fn size( txpool_read: &mut TxpoolReadHandle, diff --git a/binaries/cuprated/src/statics.rs b/binaries/cuprated/src/statics.rs index 8aab1c9a..9839608f 100644 --- a/binaries/cuprated/src/statics.rs +++ b/binaries/cuprated/src/statics.rs @@ -1,7 +1,7 @@ //! Global `static`s used throughout `cuprated`. use std::{ - sync::{atomic::AtomicU64, LazyLock}, + sync::LazyLock, time::{SystemTime, UNIX_EPOCH}, }; diff --git a/binaries/cuprated/src/txpool.rs b/binaries/cuprated/src/txpool.rs index a6f05e75..9592c2bf 100644 --- a/binaries/cuprated/src/txpool.rs +++ b/binaries/cuprated/src/txpool.rs @@ -1,3 +1,15 @@ //! Transaction Pool //! -//! Will handle initiating the tx-pool, providing the preprocessor required for the dandelion pool. +//! Handles initiating the tx-pool, providing the preprocessor required for the dandelion pool. 
+use cuprate_consensus::BlockChainContextService; +use cuprate_p2p::NetworkInterface; +use cuprate_p2p_core::ClearNet; +use cuprate_txpool::service::{TxpoolReadHandle, TxpoolWriteHandle}; + +use crate::blockchain::ConcreteTxVerifierService; + +mod dandelion; +mod incoming_tx; +mod txs_being_handled; + +pub use incoming_tx::IncomingTxHandler; diff --git a/binaries/cuprated/src/txpool/dandelion.rs b/binaries/cuprated/src/txpool/dandelion.rs new file mode 100644 index 00000000..d791b62d --- /dev/null +++ b/binaries/cuprated/src/txpool/dandelion.rs @@ -0,0 +1,65 @@ +use std::time::Duration; + +use cuprate_dandelion_tower::{ + pool::DandelionPoolService, DandelionConfig, DandelionRouter, Graph, +}; +use cuprate_p2p::NetworkInterface; +use cuprate_p2p_core::ClearNet; +use cuprate_txpool::service::{TxpoolReadHandle, TxpoolWriteHandle}; + +use crate::{ + p2p::CrossNetworkInternalPeerId, + txpool::incoming_tx::{DandelionTx, TxId}, +}; + +mod diffuse_service; +mod stem_service; +mod tx_store; + +/// The configuration used for [`cuprate_dandelion_tower`]. +/// +/// TODO: should we expose this to users of cuprated? probably not. +const DANDELION_CONFIG: DandelionConfig = DandelionConfig { + time_between_hop: Duration::from_millis(175), + epoch_duration: Duration::from_secs(10 * 60), + fluff_probability: 0.12, + graph: Graph::FourRegular, +}; + +/// A [`DandelionRouter`] with all generic types defined. +type ConcreteDandelionRouter = DandelionRouter< + stem_service::OutboundPeerStream, + diffuse_service::DiffuseService, + CrossNetworkInternalPeerId, + stem_service::StemPeerService, + DandelionTx, +>; + +/// Starts the dandelion pool manager task and returns a handle to send txs to broadcast. +pub fn start_dandelion_pool_manager( + router: ConcreteDandelionRouter, + txpool_read_handle: TxpoolReadHandle, + txpool_write_handle: TxpoolWriteHandle, +) -> DandelionPoolService { + cuprate_dandelion_tower::pool::start_dandelion_pool_manager( + // TODO: make this constant configurable? 
+ 32, + router, + tx_store::TxStoreService { + txpool_read_handle, + txpool_write_handle, + }, + DANDELION_CONFIG, + ) +} + +/// Creates a [`DandelionRouter`] from a [`NetworkInterface`]. +pub fn dandelion_router(clear_net: NetworkInterface) -> ConcreteDandelionRouter { + DandelionRouter::new( + diffuse_service::DiffuseService { + clear_net_broadcast_service: clear_net.broadcast_svc(), + }, + stem_service::OutboundPeerStream { clear_net }, + DANDELION_CONFIG, + ) +} diff --git a/binaries/cuprated/src/txpool/dandelion/diffuse_service.rs b/binaries/cuprated/src/txpool/dandelion/diffuse_service.rs new file mode 100644 index 00000000..621503fe --- /dev/null +++ b/binaries/cuprated/src/txpool/dandelion/diffuse_service.rs @@ -0,0 +1,44 @@ +use std::{ + future::{ready, Ready}, + task::{Context, Poll}, +}; + +use futures::FutureExt; +use tower::Service; + +use cuprate_dandelion_tower::traits::DiffuseRequest; +use cuprate_p2p::{BroadcastRequest, BroadcastSvc}; +use cuprate_p2p_core::ClearNet; + +use crate::txpool::dandelion::DandelionTx; + +/// The dandelion diffusion service. +pub struct DiffuseService { + pub clear_net_broadcast_service: BroadcastSvc, +} + +impl Service> for DiffuseService { + type Response = (); + type Error = tower::BoxError; + type Future = Ready>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.clear_net_broadcast_service + .poll_ready(cx) + .map_err(Into::into) + } + + fn call(&mut self, req: DiffuseRequest) -> Self::Future { + // TODO: the dandelion crate should pass along where we got the tx from. 
+ let Ok(()) = self + .clear_net_broadcast_service + .call(BroadcastRequest::Transaction { + tx_bytes: req.0 .0, + direction: None, + received_from: None, + }) + .into_inner(); + + ready(Ok(())) + } +} diff --git a/binaries/cuprated/src/txpool/dandelion/stem_service.rs b/binaries/cuprated/src/txpool/dandelion/stem_service.rs new file mode 100644 index 00000000..5c0ba65e --- /dev/null +++ b/binaries/cuprated/src/txpool/dandelion/stem_service.rs @@ -0,0 +1,68 @@ +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + +use bytes::Bytes; +use futures::Stream; +use tower::Service; + +use cuprate_dandelion_tower::{traits::StemRequest, OutboundPeer}; +use cuprate_p2p::{ClientPoolDropGuard, NetworkInterface}; +use cuprate_p2p_core::{ + client::{Client, InternalPeerID}, + ClearNet, NetworkZone, PeerRequest, ProtocolRequest, +}; +use cuprate_wire::protocol::NewTransactions; + +use crate::{p2p::CrossNetworkInternalPeerId, txpool::dandelion::DandelionTx}; + +/// The dandelion outbound peer stream. +pub struct OutboundPeerStream { + pub clear_net: NetworkInterface, +} + +impl Stream for OutboundPeerStream { + type Item = Result< + OutboundPeer>, + tower::BoxError, + >; + + fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + // TODO: make the outbound peer choice random. + Poll::Ready(Some(Ok(self + .clear_net + .client_pool() + .outbound_client() + .map_or(OutboundPeer::Exhausted, |client| { + OutboundPeer::Peer( + CrossNetworkInternalPeerId::ClearNet(client.info.id), + StemPeerService(client), + ) + })))) + } +} + +/// The stem service, used to send stem txs. 
+pub struct StemPeerService(ClientPoolDropGuard); + +impl Service> for StemPeerService { + type Response = as Service>::Response; + type Error = tower::BoxError; + type Future = as Service>::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.0.poll_ready(cx) + } + + fn call(&mut self, req: StemRequest) -> Self::Future { + self.0 + .call(PeerRequest::Protocol(ProtocolRequest::NewTransactions( + NewTransactions { + txs: vec![req.0 .0], + dandelionpp_fluff: false, + padding: Bytes::new(), + }, + ))) + } +} diff --git a/binaries/cuprated/src/txpool/dandelion/tx_store.rs b/binaries/cuprated/src/txpool/dandelion/tx_store.rs new file mode 100644 index 00000000..b890ffdc --- /dev/null +++ b/binaries/cuprated/src/txpool/dandelion/tx_store.rs @@ -0,0 +1,74 @@ +use std::task::{Context, Poll}; + +use bytes::Bytes; +use futures::{future::BoxFuture, FutureExt}; +use tower::{Service, ServiceExt}; + +use cuprate_dandelion_tower::{ + traits::{TxStoreRequest, TxStoreResponse}, + State, +}; +use cuprate_database::RuntimeError; +use cuprate_txpool::service::{ + interface::{TxpoolReadRequest, TxpoolReadResponse, TxpoolWriteRequest}, + TxpoolReadHandle, TxpoolWriteHandle, +}; + +use super::{DandelionTx, TxId}; + +/// The dandelion tx-store service. +/// +/// This is just mapping the interface [`cuprate_dandelion_tower`] wants to what [`cuprate_txpool`] provides. 
+pub struct TxStoreService { + pub txpool_read_handle: TxpoolReadHandle, + pub txpool_write_handle: TxpoolWriteHandle, +} + +impl Service> for TxStoreService { + type Response = TxStoreResponse; + type Error = tower::BoxError; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: TxStoreRequest) -> Self::Future { + match req { + TxStoreRequest::Get(tx_id) => self + .txpool_read_handle + .clone() + .oneshot(TxpoolReadRequest::TxBlob(tx_id)) + .map(|res| match res { + Ok(TxpoolReadResponse::TxBlob { + tx_blob, + state_stem, + }) => { + let state = if state_stem { + State::Stem + } else { + State::Fluff + }; + + Ok(TxStoreResponse::Transaction(Some(( + DandelionTx(Bytes::from(tx_blob)), + state, + )))) + } + Err(RuntimeError::KeyNotFound) => Ok(TxStoreResponse::Transaction(None)), + Err(e) => Err(e.into()), + Ok(_) => unreachable!(), + }) + .boxed(), + TxStoreRequest::Promote(tx_id) => self + .txpool_write_handle + .clone() + .oneshot(TxpoolWriteRequest::Promote(tx_id)) + .map(|res| match res { + Ok(_) | Err(RuntimeError::KeyNotFound) => Ok(TxStoreResponse::Ok), + Err(e) => Err(e.into()), + }) + .boxed(), + } + } +} diff --git a/binaries/cuprated/src/txpool/incoming_tx.rs b/binaries/cuprated/src/txpool/incoming_tx.rs new file mode 100644 index 00000000..e2041598 --- /dev/null +++ b/binaries/cuprated/src/txpool/incoming_tx.rs @@ -0,0 +1,379 @@ +use std::{ + collections::HashSet, + sync::Arc, + task::{Context, Poll}, +}; + +use bytes::Bytes; +use futures::{future::BoxFuture, FutureExt}; +use monero_serai::transaction::Transaction; +use tower::{Service, ServiceExt}; + +use cuprate_consensus::{ + transactions::new_tx_verification_data, BlockChainContextRequest, BlockChainContextResponse, + BlockChainContextService, ExtendedConsensusError, VerifyTxRequest, +}; +use cuprate_dandelion_tower::{ + pool::{DandelionPoolService, IncomingTxBuilder}, + State, TxState, +}; +use 
cuprate_helper::asynch::rayon_spawn_async; +use cuprate_p2p::NetworkInterface; +use cuprate_p2p_core::ClearNet; +use cuprate_txpool::{ + service::{ + interface::{ + TxpoolReadRequest, TxpoolReadResponse, TxpoolWriteRequest, TxpoolWriteResponse, + }, + TxpoolReadHandle, TxpoolWriteHandle, + }, + transaction_blob_hash, +}; +use cuprate_types::TransactionVerificationData; + +use crate::{ + blockchain::ConcreteTxVerifierService, + constants::PANIC_CRITICAL_SERVICE_ERROR, + p2p::CrossNetworkInternalPeerId, + signals::REORG_LOCK, + txpool::{ + dandelion, + txs_being_handled::{TxsBeingHandled, TxsBeingHandledLocally}, + }, +}; + +/// An error that can happen handling an incoming tx. +pub enum IncomingTxError { + Parse(std::io::Error), + Consensus(ExtendedConsensusError), + DuplicateTransaction, +} + +/// Incoming transactions. +pub struct IncomingTxs { + /// The raw bytes of the transactions. + pub txs: Vec, + /// The routing state of the transactions. + pub state: TxState, +} + +/// The transaction type used for dandelion++. +#[derive(Clone)] +pub struct DandelionTx(pub Bytes); + +/// A transaction ID/hash. +pub(super) type TxId = [u8; 32]; + +/// The service than handles incoming transaction pool transactions. +/// +/// This service handles everything including verifying the tx, adding it to the pool and routing it to other nodes. +pub struct IncomingTxHandler { + /// A store of txs currently being handled in incoming tx requests. + pub(super) txs_being_handled: TxsBeingHandled, + /// The blockchain context cache. + pub(super) blockchain_context_cache: BlockChainContextService, + /// The dandelion txpool manager. + pub(super) dandelion_pool_manager: + DandelionPoolService, + /// The transaction verifier service. + pub(super) tx_verifier_service: ConcreteTxVerifierService, + /// The txpool write handle. + pub(super) txpool_write_handle: TxpoolWriteHandle, + /// The txpool read handle. 
+ pub(super) txpool_read_handle: TxpoolReadHandle, +} + +impl IncomingTxHandler { + /// Initialize the [`IncomingTxHandler`]. + #[expect(clippy::significant_drop_tightening)] + pub fn init( + clear_net: NetworkInterface, + txpool_write_handle: TxpoolWriteHandle, + txpool_read_handle: TxpoolReadHandle, + blockchain_context_cache: BlockChainContextService, + tx_verifier_service: ConcreteTxVerifierService, + ) -> Self { + let dandelion_router = dandelion::dandelion_router(clear_net); + + let dandelion_pool_manager = dandelion::start_dandelion_pool_manager( + dandelion_router, + txpool_read_handle.clone(), + txpool_write_handle.clone(), + ); + + Self { + txs_being_handled: TxsBeingHandled::new(), + blockchain_context_cache, + dandelion_pool_manager, + tx_verifier_service, + txpool_write_handle, + txpool_read_handle, + } + } +} + +impl Service for IncomingTxHandler { + type Response = (); + type Error = IncomingTxError; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: IncomingTxs) -> Self::Future { + handle_incoming_txs( + req, + self.txs_being_handled.clone(), + self.blockchain_context_cache.clone(), + self.tx_verifier_service.clone(), + self.txpool_write_handle.clone(), + self.txpool_read_handle.clone(), + self.dandelion_pool_manager.clone(), + ) + .boxed() + } +} + +/// Handles the incoming txs. 
+async fn handle_incoming_txs( + IncomingTxs { txs, state }: IncomingTxs, + txs_being_handled: TxsBeingHandled, + mut blockchain_context_cache: BlockChainContextService, + mut tx_verifier_service: ConcreteTxVerifierService, + mut txpool_write_handle: TxpoolWriteHandle, + mut txpool_read_handle: TxpoolReadHandle, + mut dandelion_pool_manager: DandelionPoolService, +) -> Result<(), IncomingTxError> { + let _reorg_guard = REORG_LOCK.read().await; + + let (txs, stem_pool_txs, txs_being_handled_guard) = + prepare_incoming_txs(txs, txs_being_handled, &mut txpool_read_handle).await?; + + let BlockChainContextResponse::Context(context) = blockchain_context_cache + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(BlockChainContextRequest::Context) + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + else { + unreachable!() + }; + + let context = context.unchecked_blockchain_context(); + + tx_verifier_service + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(VerifyTxRequest::Prepped { + txs: txs.clone(), + current_chain_height: context.chain_height, + top_hash: context.top_hash, + time_for_time_lock: context.current_adjusted_timestamp_for_time_lock(), + hf: context.current_hf, + }) + .await + .map_err(IncomingTxError::Consensus)?; + + for tx in txs { + handle_valid_tx( + tx, + state.clone(), + &mut txpool_write_handle, + &mut dandelion_pool_manager, + ) + .await; + } + + // Re-relay any txs we got in the block that were already in our stem pool. + for stem_tx in stem_pool_txs { + rerelay_stem_tx( + &stem_tx, + state.clone(), + &mut txpool_read_handle, + &mut dandelion_pool_manager, + ) + .await; + } + + Ok(()) +} + +/// Prepares the incoming transactions for verification. +/// +/// This will filter out all transactions already in the pool or txs already being handled in another request. 
+/// +/// Returns in order: +/// - The [`TransactionVerificationData`] for all the txs we did not already have +/// - The Ids of the transactions in the incoming message that are in our stem-pool +/// - A [`TxsBeingHandledLocally`] guard that prevents verifying the same tx at the same time across 2 tasks. +async fn prepare_incoming_txs( + tx_blobs: Vec, + txs_being_handled: TxsBeingHandled, + txpool_read_handle: &mut TxpoolReadHandle, +) -> Result< + ( + Vec>, + Vec, + TxsBeingHandledLocally, + ), + IncomingTxError, +> { + let mut tx_blob_hashes = HashSet::new(); + let mut txs_being_handled_locally = txs_being_handled.local_tracker(); + + // Compute the blob hash for each tx and filter out the txs currently being handled by another incoming tx batch. + let txs = tx_blobs + .into_iter() + .filter_map(|tx_blob| { + let tx_blob_hash = transaction_blob_hash(&tx_blob); + + // If a duplicate is in here the incoming tx batch contained the same tx twice. + if !tx_blob_hashes.insert(tx_blob_hash) { + return Some(Err(IncomingTxError::DuplicateTransaction)); + } + + // If a duplicate is here it is being handled in another batch. + if !txs_being_handled_locally.try_add_tx(tx_blob_hash) { + return None; + } + + Some(Ok((tx_blob_hash, tx_blob))) + }) + .collect::, _>>()?; + + // Filter the txs already in the txpool out. + // This will leave the txs already in the pool in [`TxBeingHandledLocally`] but that shouldn't be an issue. + let TxpoolReadResponse::FilterKnownTxBlobHashes { + unknown_blob_hashes, + stem_pool_hashes, + } = txpool_read_handle + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(TxpoolReadRequest::FilterKnownTxBlobHashes(tx_blob_hashes)) + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + else { + unreachable!() + }; + + // Now prepare the txs for verification. 
+ rayon_spawn_async(move || { + let txs = txs + .into_iter() + .filter_map(|(tx_blob_hash, tx_blob)| { + if unknown_blob_hashes.contains(&tx_blob_hash) { + Some(tx_blob) + } else { + None + } + }) + .map(|bytes| { + let tx = Transaction::read(&mut bytes.as_ref()).map_err(IncomingTxError::Parse)?; + + let tx = new_tx_verification_data(tx) + .map_err(|e| IncomingTxError::Consensus(e.into()))?; + + Ok(Arc::new(tx)) + }) + .collect::, IncomingTxError>>()?; + + Ok((txs, stem_pool_hashes, txs_being_handled_locally)) + }) + .await +} + +/// Handle a verified tx. +/// +/// This will add the tx to the txpool and route it to the network. +async fn handle_valid_tx( + tx: Arc, + state: TxState, + txpool_write_handle: &mut TxpoolWriteHandle, + dandelion_pool_manager: &mut DandelionPoolService< + DandelionTx, + TxId, + CrossNetworkInternalPeerId, + >, +) { + let incoming_tx = + IncomingTxBuilder::new(DandelionTx(Bytes::copy_from_slice(&tx.tx_blob)), tx.tx_hash); + + let TxpoolWriteResponse::AddTransaction(double_spend) = txpool_write_handle + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(TxpoolWriteRequest::AddTransaction { + tx, + state_stem: state.is_stem_stage(), + }) + .await + .expect("TODO") + else { + unreachable!() + }; + + // TODO: track double spends to quickly ignore them from their blob hash. + if let Some(tx_hash) = double_spend { + return; + }; + + // TODO: There is a race condition possible if a tx and block come in at the same time: . + + let incoming_tx = incoming_tx + .with_routing_state(state) + .with_state_in_db(None) + .build() + .unwrap(); + + dandelion_pool_manager + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(incoming_tx) + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR); +} + +/// Re-relay a tx that was already in our stem pool. 
+async fn rerelay_stem_tx( + tx_hash: &TxId, + state: TxState, + txpool_read_handle: &mut TxpoolReadHandle, + dandelion_pool_manager: &mut DandelionPoolService< + DandelionTx, + TxId, + CrossNetworkInternalPeerId, + >, +) { + let Ok(TxpoolReadResponse::TxBlob { tx_blob, .. }) = txpool_read_handle + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(TxpoolReadRequest::TxBlob(*tx_hash)) + .await + else { + // The tx could have been dropped from the pool. + return; + }; + + let incoming_tx = + IncomingTxBuilder::new(DandelionTx(Bytes::copy_from_slice(&tx_blob)), *tx_hash); + + let incoming_tx = incoming_tx + .with_routing_state(state) + .with_state_in_db(Some(State::Stem)) + .build() + .unwrap(); + + dandelion_pool_manager + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(incoming_tx) + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR); +} diff --git a/binaries/cuprated/src/txpool/txs_being_handled.rs b/binaries/cuprated/src/txpool/txs_being_handled.rs new file mode 100644 index 00000000..122b8ac7 --- /dev/null +++ b/binaries/cuprated/src/txpool/txs_being_handled.rs @@ -0,0 +1,53 @@ +use std::sync::Arc; + +use dashmap::DashSet; + +/// A set of txs currently being handled, shared between instances of the incoming tx handler. +#[derive(Clone)] +pub struct TxsBeingHandled(Arc>); + +impl TxsBeingHandled { + /// Create a new [`TxsBeingHandled`] + pub fn new() -> Self { + Self(Arc::new(DashSet::new())) + } + + /// Create a new [`TxsBeingHandledLocally`] that will keep track of txs being handled in a request. + pub fn local_tracker(&self) -> TxsBeingHandledLocally { + TxsBeingHandledLocally { + txs_being_handled: self.clone(), + txs: vec![], + } + } +} + +/// A tracker of txs being handled in a single request. This will add the txs to the global [`TxsBeingHandled`] +/// tracker as well. +/// +/// When this is dropped the txs will be removed from [`TxsBeingHandled`]. 
+pub struct TxsBeingHandledLocally { + txs_being_handled: TxsBeingHandled, + txs: Vec<[u8; 32]>, +} + +impl TxsBeingHandledLocally { + /// Try add a tx to the map from its [`transaction_blob_hash`](cuprate_txpool::transaction_blob_hash). + /// + /// Returns `true` if the tx was added and `false` if another task is already handling this tx. + pub fn try_add_tx(&mut self, tx_blob_hash: [u8; 32]) -> bool { + if !self.txs_being_handled.0.insert(tx_blob_hash) { + return false; + } + + self.txs.push(tx_blob_hash); + true + } +} + +impl Drop for TxsBeingHandledLocally { + fn drop(&mut self) { + for hash in &self.txs { + self.txs_being_handled.0.remove(hash); + } + } +} diff --git a/books/architecture/src/appendix/crates.md b/books/architecture/src/appendix/crates.md index 1993c47e..fe8f1f05 100644 --- a/books/architecture/src/appendix/crates.md +++ b/books/architecture/src/appendix/crates.md @@ -16,7 +16,8 @@ cargo doc --open --package cuprate-blockchain | Crate | In-tree path | Purpose | |-------|--------------|---------| | [`cuprate-consensus`](https://doc.cuprate.org/cuprate_consensus) | [`consensus/`](https://github.com/Cuprate/cuprate/tree/main/consensus) | TODO -| [`cuprate-consensus-rules`](https://doc.cuprate.org/cuprate_consensus_rules) | [`consensus/rules/`](https://github.com/Cuprate/cuprate/tree/main/consensus-rules) | TODO +| [`cuprate-consensus-context`](https://doc.cuprate.org/cuprate_consensus_context) | [`consensus/context/`](https://github.com/Cuprate/cuprate/tree/main/consensus/context) | TODO +| [`cuprate-consensus-rules`](https://doc.cuprate.org/cuprate_consensus_rules) | [`consensus/rules/`](https://github.com/Cuprate/cuprate/tree/main/consensus/rules) | TODO | [`cuprate-fast-sync`](https://doc.cuprate.org/cuprate_fast_sync) | [`consensus/fast-sync/`](https://github.com/Cuprate/cuprate/tree/main/consensus/fast-sync) | Fast block synchronization ## Networking diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index 12d97eed..8b732a07 100644 --- 
a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -8,9 +8,10 @@ authors = ["Boog900"] repository = "https://github.com/Cuprate/cuprate/tree/main/consensus" [dependencies] -cuprate-helper = { path = "../helper", default-features = false, features = ["std", "asynch", "num"] } -cuprate-consensus-rules = { path = "./rules", features = ["rayon"] } -cuprate-types = { path = "../types" } +cuprate-helper = { workspace = true, default-features = false, features = ["std", "asynch", "num"] } +cuprate-consensus-rules = { workspace = true, features = ["rayon"] } +cuprate-types = { workspace = true } +cuprate-consensus-context = { workspace = true } cfg-if = { workspace = true } thiserror = { workspace = true } @@ -18,20 +19,17 @@ tower = { workspace = true, features = ["util"] } tracing = { workspace = true, features = ["std", "attributes"] } futures = { workspace = true, features = ["std", "async-await"] } -randomx-rs = { workspace = true } monero-serai = { workspace = true, features = ["std"] } rayon = { workspace = true } thread_local = { workspace = true } -tokio = { workspace = true, features = ["rt"] } -tokio-util = { workspace = true } hex = { workspace = true } rand = { workspace = true } [dev-dependencies] -cuprate-test-utils = { path = "../test-utils" } -cuprate-consensus-rules = {path = "./rules", features = ["proptest"]} +cuprate-test-utils = { workspace = true } +cuprate-consensus-rules = { workspace = true, features = ["proptest"]} hex-literal = { workspace = true } curve25519-dalek = { workspace = true } @@ -42,4 +40,4 @@ proptest = { workspace = true } proptest-derive = { workspace = true } [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/consensus/context/Cargo.toml b/consensus/context/Cargo.toml new file mode 100644 index 00000000..76790464 --- /dev/null +++ b/consensus/context/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "cuprate-consensus-context" +version = "0.1.0" +edition = "2021" +license = "MIT" +authors = 
["SyntheticBird","Boog900"] + +[dependencies] +cuprate-consensus-rules = { workspace = true, features = ["proptest"]} +cuprate-helper = { workspace = true, default-features = false, features = ["std", "cast", "num", "asynch"] } +cuprate-types = { workspace = true, default-features = false, features = ["blockchain"] } + +futures = { workspace = true, features = ["std", "async-await"] } +tokio = { workspace = true, features = ["rt-multi-thread", "macros"]} +tokio-util = { workspace = true } +tower = { workspace = true, features = ["util"] } +tracing = { workspace = true, features = ["std", "attributes"] } +thiserror = { workspace = true } + +monero-serai = { workspace = true, features = ["std"] } +randomx-rs = { workspace = true } +rayon = { workspace = true } +thread_local = { workspace = true } +hex = { workspace = true } + +[lints] +workspace = true diff --git a/consensus/src/context/alt_chains.rs b/consensus/context/src/alt_chains.rs similarity index 94% rename from consensus/src/context/alt_chains.rs rename to consensus/context/src/alt_chains.rs index cd945c81..df82ef34 100644 --- a/consensus/src/context/alt_chains.rs +++ b/consensus/context/src/alt_chains.rs @@ -9,9 +9,8 @@ use cuprate_types::{ }; use crate::{ - ExtendedConsensusError, - __private::Database, - context::{difficulty::DifficultyCache, rx_vms::RandomXVm, weight::BlockWeightsCache}, + ContextCacheError, __private::Database, difficulty::DifficultyCache, rx_vms::RandomXVm, + weight::BlockWeightsCache, }; pub(crate) mod sealed { @@ -38,7 +37,7 @@ pub struct AltChainContextCache { pub chain_height: usize, /// The top hash of the alt chain. pub top_hash: [u8; 32], - /// The [`ChainID`] of the alt chain. + /// The [`ChainId`] of the alt chain. pub chain_id: Option, /// The parent [`Chain`] of this alt chain. 
pub parent_chain: Chain, @@ -98,7 +97,7 @@ impl AltChainMap { &mut self, prev_id: [u8; 32], database: D, - ) -> Result, ExtendedConsensusError> { + ) -> Result, ContextCacheError> { if let Some(cache) = self.alt_cache_map.remove(&prev_id) { return Ok(cache); } @@ -133,7 +132,7 @@ pub(crate) async fn get_alt_chain_difficulty_cache( prev_id: [u8; 32], main_chain_difficulty_cache: &DifficultyCache, mut database: D, -) -> Result { +) -> Result { // find the block with hash == prev_id. let BlockchainResponse::FindBlock(res) = database .ready() @@ -180,7 +179,7 @@ pub(crate) async fn get_alt_chain_weight_cache( prev_id: [u8; 32], main_chain_weight_cache: &BlockWeightsCache, mut database: D, -) -> Result { +) -> Result { // find the block with hash == prev_id. let BlockchainResponse::FindBlock(res) = database .ready() diff --git a/consensus/src/context/difficulty.rs b/consensus/context/src/difficulty.rs similarity index 95% rename from consensus/src/context/difficulty.rs rename to consensus/context/src/difficulty.rs index 9316dc5e..1b61eb9e 100644 --- a/consensus/src/context/difficulty.rs +++ b/consensus/context/src/difficulty.rs @@ -17,7 +17,7 @@ use cuprate_types::{ Chain, }; -use crate::{Database, ExtendedConsensusError, HardFork}; +use crate::{ContextCacheError, Database, HardFork}; /// The amount of blocks we account for to calculate difficulty const DIFFICULTY_WINDOW: usize = 720; @@ -33,9 +33,9 @@ const DIFFICULTY_LAG: usize = 15; /// #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct DifficultyCacheConfig { - pub(crate) window: usize, - pub(crate) cut: usize, - pub(crate) lag: usize, + pub window: usize, + pub cut: usize, + pub lag: usize, } impl DifficultyCacheConfig { @@ -73,14 +73,13 @@ impl DifficultyCacheConfig { #[derive(Debug, Clone, Eq, PartialEq)] pub struct DifficultyCache { /// The list of timestamps in the window. 
- /// len <= [`DIFFICULTY_BLOCKS_COUNT`] - pub(crate) timestamps: VecDeque, + pub timestamps: VecDeque, /// The current cumulative difficulty of the chain. - pub(crate) cumulative_difficulties: VecDeque, + pub cumulative_difficulties: VecDeque, /// The last height we accounted for. - pub(crate) last_accounted_height: usize, + pub last_accounted_height: usize, /// The config - pub(crate) config: DifficultyCacheConfig, + pub config: DifficultyCacheConfig, } impl DifficultyCache { @@ -91,7 +90,7 @@ impl DifficultyCache { config: DifficultyCacheConfig, database: D, chain: Chain, - ) -> Result { + ) -> Result { tracing::info!("Initializing difficulty cache this may take a while."); let mut block_start = chain_height.saturating_sub(config.total_block_count()); @@ -134,7 +133,7 @@ impl DifficultyCache { &mut self, numb_blocks: usize, database: D, - ) -> Result<(), ExtendedConsensusError> { + ) -> Result<(), ContextCacheError> { let Some(retained_blocks) = self.timestamps.len().checked_sub(numb_blocks) else { // More blocks to pop than we have in the cache, so just restart a new cache. *self = Self::init_from_chain_height( @@ -330,7 +329,7 @@ fn next_difficulty( } // TODO: do checked operations here and unwrap so we don't silently overflow? - (windowed_work * hf.block_time().as_secs() as u128 + time_span - 1) / time_span + (windowed_work * u128::from(hf.block_time().as_secs()) + time_span - 1) / time_span } /// Get the start and end of the window to calculate difficulty. 
@@ -361,7 +360,7 @@ async fn get_blocks_in_pow_info( database: D, block_heights: Range, chain: Chain, -) -> Result<(VecDeque, VecDeque), ExtendedConsensusError> { +) -> Result<(VecDeque, VecDeque), ContextCacheError> { tracing::info!("Getting blocks timestamps"); let BlockchainResponse::BlockExtendedHeaderInRange(ext_header) = database diff --git a/consensus/src/context/hardforks.rs b/consensus/context/src/hardforks.rs similarity index 90% rename from consensus/src/context/hardforks.rs rename to consensus/context/src/hardforks.rs index 16ae7638..e6af492b 100644 --- a/consensus/src/context/hardforks.rs +++ b/consensus/context/src/hardforks.rs @@ -9,7 +9,7 @@ use cuprate_types::{ Chain, }; -use crate::{Database, ExtendedConsensusError}; +use crate::{ContextCacheError, Database}; /// The default amount of hard-fork votes to track to decide on activation of a hard-fork. /// @@ -21,9 +21,9 @@ const DEFAULT_WINDOW_SIZE: usize = 10080; // supermajority window check length - #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct HardForkConfig { /// The network we are on. - pub(crate) info: HFsInfo, + pub info: HFsInfo, /// The amount of votes we are taking into account to decide on a fork activation. - pub(crate) window: usize, + pub window: usize, } impl HardForkConfig { @@ -54,17 +54,17 @@ impl HardForkConfig { /// A struct that keeps track of the current hard-fork and current votes. #[derive(Debug, Clone, Eq, PartialEq)] -pub(crate) struct HardForkState { +pub struct HardForkState { /// The current active hard-fork. - pub(crate) current_hardfork: HardFork, + pub current_hardfork: HardFork, /// The hard-fork config. - pub(crate) config: HardForkConfig, + pub config: HardForkConfig, /// The votes in the current window. - pub(crate) votes: HFVotes, + pub votes: HFVotes, /// The last block height accounted for. 
- pub(crate) last_height: usize, + pub last_height: usize, } impl HardForkState { @@ -74,7 +74,7 @@ impl HardForkState { chain_height: usize, config: HardForkConfig, mut database: D, - ) -> Result { + ) -> Result { tracing::info!("Initializing hard-fork state this may take a while."); let block_start = chain_height.saturating_sub(config.window); @@ -122,11 +122,11 @@ impl HardForkState { /// # Invariant /// /// This _must_ only be used on a main-chain cache. - pub(crate) async fn pop_blocks_main_chain( + pub async fn pop_blocks_main_chain( &mut self, numb_blocks: usize, database: D, - ) -> Result<(), ExtendedConsensusError> { + ) -> Result<(), ContextCacheError> { let Some(retained_blocks) = self.votes.total_votes().checked_sub(self.config.window) else { *self = Self::init_from_chain_height( self.last_height + 1 - numb_blocks, @@ -159,7 +159,7 @@ impl HardForkState { } /// Add a new block to the cache. - pub(crate) fn new_block(&mut self, vote: HardFork, height: usize) { + pub fn new_block(&mut self, vote: HardFork, height: usize) { // We don't _need_ to take in `height` but it's for safety, so we don't silently loose track // of blocks. assert_eq!(self.last_height + 1, height); @@ -194,7 +194,7 @@ impl HardForkState { } /// Returns the current hard-fork. - pub(crate) const fn current_hardfork(&self) -> HardFork { + pub const fn current_hardfork(&self) -> HardFork { self.current_hardfork } } @@ -205,7 +205,7 @@ async fn get_votes_in_range( database: D, block_heights: Range, window_size: usize, -) -> Result { +) -> Result { let mut votes = HFVotes::new(window_size); let BlockchainResponse::BlockExtendedHeaderInRange(vote_list) = database diff --git a/consensus/src/context.rs b/consensus/context/src/lib.rs similarity index 83% rename from consensus/src/context.rs rename to consensus/context/src/lib.rs index 3c944a92..acc4d23d 100644 --- a/consensus/src/context.rs +++ b/consensus/context/src/lib.rs @@ -1,9 +1,13 @@ //! # Blockchain Context //! -//! 
This module contains a service to get cached context from the blockchain: [`BlockChainContext`]. +//! This crate contains a service to get cached context from the blockchain: [`BlockChainContext`]. //! This is used during contextual validation, this does not have all the data for contextual validation //! (outputs) for that you will need a [`Database`]. -//! + +// Used in documentation references for [`BlockChainContextRequest`] +// FIXME: should we pull in a dependency just to link docs? +use monero_serai as _; + use std::{ cmp::min, collections::HashMap, @@ -14,18 +18,19 @@ use std::{ }; use futures::{channel::oneshot, FutureExt}; +use monero_serai::block::Block; use tokio::sync::mpsc; use tokio_util::sync::PollSender; use tower::Service; -use cuprate_consensus_rules::{blocks::ContextToVerifyBlock, current_unix_timestamp, HardFork}; +use cuprate_consensus_rules::{ + blocks::ContextToVerifyBlock, current_unix_timestamp, ConsensusError, HardFork, +}; -use crate::{Database, ExtendedConsensusError}; - -pub(crate) mod difficulty; -pub(crate) mod hardforks; -pub(crate) mod rx_vms; -pub(crate) mod weight; +pub mod difficulty; +pub mod hardforks; +pub mod rx_vms; +pub mod weight; mod alt_chains; mod task; @@ -36,13 +41,13 @@ use difficulty::DifficultyCache; use rx_vms::RandomXVm; use weight::BlockWeightsCache; -pub(crate) use alt_chains::{sealed::AltChainRequestToken, AltChainContextCache}; +pub use alt_chains::{sealed::AltChainRequestToken, AltChainContextCache}; pub use difficulty::DifficultyCacheConfig; pub use hardforks::HardForkConfig; pub use tokens::*; pub use weight::BlockWeightsCacheConfig; -pub(crate) const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: u64 = 60; +pub const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: u64 = 60; /// Config for the context service. 
pub struct ContextConfig { @@ -91,7 +96,7 @@ impl ContextConfig { pub async fn initialize_blockchain_context( cfg: ContextConfig, database: D, -) -> Result +) -> Result where D: Database + Clone + Send + Sync + 'static, D::Future: Send + 'static, @@ -263,6 +268,21 @@ pub enum BlockChainContextRequest { grace_blocks: u64, }, + /// Calculate proof-of-work for this block. + CalculatePow { + /// The hardfork of the protocol at this block height. + hardfork: HardFork, + /// The height of the block. + height: usize, + /// The block data. + /// + /// This is boxed because [`Block`] causes this enum to be 1200 bytes, + /// where the 2nd variant is only 96 bytes. + block: Box, + /// The seed hash for the proof-of-work. + seed_hash: [u8; 32], + }, + /// Clear the alt chain context caches. ClearAltCache, @@ -360,6 +380,9 @@ pub enum BlockChainContextResponse { /// Response to [`BlockChainContextRequest::FeeEstimate`] FeeEstimate(FeeEstimate), + /// Response to [`BlockChainContextRequest::CalculatePow`] + CalculatePow([u8; 32]), + /// Response to [`BlockChainContextRequest::AltChains`] /// /// If the inner [`Vec::is_empty`], there were no alternate chains. @@ -414,3 +437,52 @@ impl Service for BlockChainContextService { .boxed() } } + +#[derive(Debug, thiserror::Error)] +pub enum ContextCacheError { + /// A consensus error. + #[error("{0}")] + ConErr(#[from] ConsensusError), + /// A database error. + #[error("Database error: {0}")] + DBErr(#[from] tower::BoxError), +} + +use __private::Database; + +pub mod __private { + use std::future::Future; + + use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse}; + + /// A type alias trait used to represent a database, so we don't have to write [`tower::Service`] bounds + /// everywhere. 
+ /// + /// Automatically implemented for: + /// ```ignore + /// tower::Service + /// ``` + pub trait Database: + tower::Service< + BlockchainReadRequest, + Response = BlockchainResponse, + Error = tower::BoxError, + Future = Self::Future2, + > + { + type Future2: Future> + Send + 'static; + } + + impl< + T: tower::Service< + BlockchainReadRequest, + Response = BlockchainResponse, + Error = tower::BoxError, + >, + > Database for T + where + T::Future: Future> + Send + 'static, + { + type Future2 = T::Future; + } +} diff --git a/consensus/src/context/rx_vms.rs b/consensus/context/src/rx_vms.rs similarity index 90% rename from consensus/src/context/rx_vms.rs rename to consensus/context/src/rx_vms.rs index c6375fc1..803bb324 100644 --- a/consensus/src/context/rx_vms.rs +++ b/consensus/context/src/rx_vms.rs @@ -26,10 +26,10 @@ use cuprate_types::{ Chain, }; -use crate::{Database, ExtendedConsensusError}; +use crate::{ContextCacheError, Database}; /// The amount of randomX VMs to keep in the cache. -const RX_SEEDS_CACHED: usize = 2; +pub const RX_SEEDS_CACHED: usize = 2; /// A multithreaded randomX VM. #[derive(Debug)] @@ -72,14 +72,14 @@ impl RandomX for RandomXVm { /// The randomX VMs cache, keeps the VM needed to calculate the current block's proof-of-work hash (if a VM is needed) and a /// couple more around this VM. #[derive(Clone, Debug)] -pub(crate) struct RandomXVmCache { +pub struct RandomXVmCache { /// The top [`RX_SEEDS_CACHED`] RX seeds. - pub(crate) seeds: VecDeque<(usize, [u8; 32])>, + pub seeds: VecDeque<(usize, [u8; 32])>, /// The VMs for `seeds` (if after hf 12, otherwise this will be empty). - pub(crate) vms: HashMap>, + pub vms: HashMap>, /// A single cached VM that was given to us from a part of Cuprate. 
- pub(crate) cached_vm: Option<([u8; 32], Arc)>, + pub cached_vm: Option<([u8; 32], Arc)>, } impl RandomXVmCache { @@ -88,7 +88,7 @@ impl RandomXVmCache { chain_height: usize, hf: &HardFork, database: D, - ) -> Result { + ) -> Result { let seed_heights = get_last_rx_seed_heights(chain_height - 1, RX_SEEDS_CACHED); let seed_hashes = get_block_hashes(seed_heights.clone(), database).await?; @@ -125,18 +125,18 @@ impl RandomXVmCache { } /// Add a randomX VM to the cache, with the seed it was created with. - pub(crate) fn add_vm(&mut self, vm: ([u8; 32], Arc)) { + pub fn add_vm(&mut self, vm: ([u8; 32], Arc)) { self.cached_vm.replace(vm); } /// Creates a RX VM for an alt chain, looking at the main chain RX VMs to see if we can use one /// of them first. - pub(crate) async fn get_alt_vm( + pub async fn get_alt_vm( &self, height: usize, chain: Chain, database: D, - ) -> Result, ExtendedConsensusError> { + ) -> Result, ContextCacheError> { let seed_height = randomx_seed_height(height); let BlockchainResponse::BlockHash(seed_hash) = database @@ -162,7 +162,7 @@ impl RandomXVmCache { } /// Get the main-chain `RandomX` VMs. - pub(crate) async fn get_vms(&mut self) -> HashMap> { + pub async fn get_vms(&mut self) -> HashMap> { match self.seeds.len().checked_sub(self.vms.len()) { // No difference in the amount of seeds to VMs. Some(0) => (), @@ -214,7 +214,7 @@ impl RandomXVmCache { } /// Removes all the `RandomX` VMs above the `new_height`. - pub(crate) fn pop_blocks_main_chain(&mut self, new_height: usize) { + pub fn pop_blocks_main_chain(&mut self, new_height: usize) { self.seeds.retain(|(height, _)| *height < new_height); self.vms.retain(|height, _| *height < new_height); } @@ -222,7 +222,7 @@ impl RandomXVmCache { /// Add a new block to the VM cache. /// /// hash is the block hash not the blocks proof-of-work hash. 
- pub(crate) fn new_block(&mut self, height: usize, hash: &[u8; 32]) { + pub fn new_block(&mut self, height: usize, hash: &[u8; 32]) { if is_randomx_seed_height(height) { tracing::debug!("Block {height} is a randomX seed height, adding it to the cache.",); @@ -243,7 +243,7 @@ impl RandomXVmCache { /// Get the last `amount` of RX seeds, the top height returned here will not necessarily be the RX VM for the top block /// in the chain as VMs include some lag before a seed activates. -pub(crate) fn get_last_rx_seed_heights(mut last_height: usize, mut amount: usize) -> Vec { +pub fn get_last_rx_seed_heights(mut last_height: usize, mut amount: usize) -> Vec { let mut seeds = Vec::with_capacity(amount); if is_randomx_seed_height(last_height) { seeds.push(last_height); @@ -268,7 +268,7 @@ pub(crate) fn get_last_rx_seed_heights(mut last_height: usize, mut amount: usize async fn get_block_hashes( heights: Vec, database: D, -) -> Result, ExtendedConsensusError> { +) -> Result, ContextCacheError> { let mut fut = FuturesOrdered::new(); for height in heights { @@ -281,7 +281,7 @@ async fn get_block_hashes( else { panic!("Database sent incorrect response!"); }; - Result::<_, ExtendedConsensusError>::Ok(hash) + Result::<_, ContextCacheError>::Ok(hash) }); } diff --git a/consensus/src/context/task.rs b/consensus/context/src/task.rs similarity index 96% rename from consensus/src/context/task.rs rename to consensus/context/src/task.rs index c51c795e..b0759952 100644 --- a/consensus/src/context/task.rs +++ b/consensus/context/src/task.rs @@ -16,13 +16,10 @@ use cuprate_types::{ }; use crate::{ - context::{ - alt_chains::{get_alt_chain_difficulty_cache, get_alt_chain_weight_cache, AltChainMap}, - difficulty, hardforks, rx_vms, weight, BlockChainContext, BlockChainContextRequest, - BlockChainContextResponse, ContextConfig, RawBlockChainContext, ValidityToken, - BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW, - }, - Database, ExtendedConsensusError, + alt_chains::{get_alt_chain_difficulty_cache, 
get_alt_chain_weight_cache, AltChainMap}, + difficulty, hardforks, rx_vms, weight, BlockChainContext, BlockChainContextRequest, + BlockChainContextResponse, ContextCacheError, ContextConfig, Database, RawBlockChainContext, + ValidityToken, BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW, }; /// A request from the context service to the context task. @@ -68,7 +65,7 @@ impl ContextTask { pub(crate) async fn init_context( cfg: ContextConfig, mut database: D, - ) -> Result { + ) -> Result { let ContextConfig { difficulty_cfg, weights_config, @@ -327,7 +324,8 @@ impl ContextTask { } BlockChainContextRequest::HardForkInfo(_) | BlockChainContextRequest::FeeEstimate { .. } - | BlockChainContextRequest::AltChains => { + | BlockChainContextRequest::AltChains + | BlockChainContextRequest::CalculatePow { .. } => { todo!("finish https://github.com/Cuprate/cuprate/pull/297") } }) diff --git a/consensus/src/context/tokens.rs b/consensus/context/src/tokens.rs similarity index 100% rename from consensus/src/context/tokens.rs rename to consensus/context/src/tokens.rs diff --git a/consensus/src/context/weight.rs b/consensus/context/src/weight.rs similarity index 96% rename from consensus/src/context/weight.rs rename to consensus/context/src/weight.rs index e95ae605..7f725998 100644 --- a/consensus/src/context/weight.rs +++ b/consensus/context/src/weight.rs @@ -21,12 +21,12 @@ use cuprate_types::{ Chain, }; -use crate::{Database, ExtendedConsensusError, HardFork}; +use crate::{ContextCacheError, Database, HardFork}; /// The short term block weight window. -const SHORT_TERM_WINDOW: usize = 100; +pub const SHORT_TERM_WINDOW: usize = 100; /// The long term block weight window. -const LONG_TERM_WINDOW: usize = 100000; +pub const LONG_TERM_WINDOW: usize = 100000; /// Configuration for the block weight cache. 
/// @@ -80,7 +80,7 @@ impl BlockWeightsCache { config: BlockWeightsCacheConfig, database: D, chain: Chain, - ) -> Result { + ) -> Result { tracing::info!("Initializing weight cache this may take a while."); let long_term_weights = get_long_term_weight_in_range( @@ -121,7 +121,7 @@ impl BlockWeightsCache { &mut self, numb_blocks: usize, database: D, - ) -> Result<(), ExtendedConsensusError> { + ) -> Result<(), ContextCacheError> { if self.long_term_weights.window_len() <= numb_blocks { // More blocks to pop than we have in the cache, so just restart a new cache. *self = Self::init_from_chain_height( @@ -258,7 +258,7 @@ fn calculate_effective_median_block_weight( } /// Calculates a blocks long term weight. -pub(crate) fn calculate_block_long_term_weight( +pub fn calculate_block_long_term_weight( hf: HardFork, block_weight: usize, long_term_median: usize, @@ -287,7 +287,7 @@ async fn get_blocks_weight_in_range( range: Range, database: D, chain: Chain, -) -> Result, ExtendedConsensusError> { +) -> Result, ContextCacheError> { tracing::info!("getting block weights."); let BlockchainResponse::BlockExtendedHeaderInRange(ext_headers) = database @@ -311,7 +311,7 @@ async fn get_long_term_weight_in_range( range: Range, database: D, chain: Chain, -) -> Result, ExtendedConsensusError> { +) -> Result, ContextCacheError> { tracing::info!("getting block long term weights."); let BlockchainResponse::BlockExtendedHeaderInRange(ext_headers) = database diff --git a/consensus/fast-sync/Cargo.toml b/consensus/fast-sync/Cargo.toml index 1d7d97b4..8e732a6f 100644 --- a/consensus/fast-sync/Cargo.toml +++ b/consensus/fast-sync/Cargo.toml @@ -9,11 +9,12 @@ name = "cuprate-fast-sync-create-hashes" path = "src/create.rs" [dependencies] -cuprate-blockchain = { path = "../../storage/blockchain" } -cuprate-consensus = { path = ".." 
} -cuprate-consensus-rules = { path = "../rules" } -cuprate-types = { path = "../../types" } -cuprate-helper = { path = "../../helper", features = ["cast"] } +cuprate-blockchain = { workspace = true } +cuprate-consensus = { workspace = true } +cuprate-consensus-rules = { workspace = true } +cuprate-consensus-context = { workspace = true } +cuprate-types = { workspace = true } +cuprate-helper = { workspace = true, features = ["cast"] } clap = { workspace = true, features = ["derive", "std"] } hex = { workspace = true } @@ -27,4 +28,4 @@ tower = { workspace = true } [dev-dependencies] [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/consensus/fast-sync/src/fast_sync.rs b/consensus/fast-sync/src/fast_sync.rs index ec4ea297..3764e217 100644 --- a/consensus/fast-sync/src/fast_sync.rs +++ b/consensus/fast-sync/src/fast_sync.rs @@ -12,10 +12,8 @@ use monero_serai::{ }; use tower::{Service, ServiceExt}; -use cuprate_consensus::{ - context::{BlockChainContextRequest, BlockChainContextResponse}, - transactions::new_tx_verification_data, -}; +use cuprate_consensus::transactions::new_tx_verification_data; +use cuprate_consensus_context::{BlockChainContextRequest, BlockChainContextResponse}; use cuprate_consensus_rules::{miner_tx::MinerTxError, ConsensusError}; use cuprate_helper::cast::u64_to_usize; use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; diff --git a/consensus/rules/Cargo.toml b/consensus/rules/Cargo.toml index 50117acf..8999cbcf 100644 --- a/consensus/rules/Cargo.toml +++ b/consensus/rules/Cargo.toml @@ -11,10 +11,10 @@ proptest = ["cuprate-types/proptest"] rayon = ["dep:rayon"] [dependencies] -cuprate-constants = { path = "../../constants", default-features = false } -cuprate-helper = { path = "../../helper", default-features = false, features = ["std", "cast"] } -cuprate-types = { path = "../../types", default-features = false } -cuprate-cryptonight = {path = "../../cryptonight"} +cuprate-constants 
= { workspace = true, default-features = false, features = ["block"] } +cuprate-helper = { workspace = true, default-features = false, features = ["std", "cast"] } +cuprate-types = { workspace = true, default-features = false } +cuprate-cryptonight = { workspace = true } monero-serai = { workspace = true, features = ["std"] } curve25519-dalek = { workspace = true, features = ["alloc", "zeroize", "precomputed-tables"] } diff --git a/consensus/rules/src/lib.rs b/consensus/rules/src/lib.rs index 876e2f7f..eef20c1e 100644 --- a/consensus/rules/src/lib.rs +++ b/consensus/rules/src/lib.rs @@ -63,9 +63,9 @@ where /// An internal function that returns an iterator or a parallel iterator if the /// `rayon` feature is enabled. #[cfg(not(feature = "rayon"))] -fn try_par_iter(t: T) -> impl std::iter::Iterator +fn try_par_iter(t: T) -> impl Iterator where - T: std::iter::IntoIterator, + T: IntoIterator, { t.into_iter() } diff --git a/consensus/rules/src/miner_tx.rs b/consensus/rules/src/miner_tx.rs index 5221ee55..bb3b004a 100644 --- a/consensus/rules/src/miner_tx.rs +++ b/consensus/rules/src/miner_tx.rs @@ -68,7 +68,7 @@ pub fn calculate_block_reward( .unwrap(); let effective_median_bw: u128 = median_bw.try_into().unwrap(); - (((base_reward as u128 * multiplicand) / effective_median_bw) / effective_median_bw) + (((u128::from(base_reward) * multiplicand) / effective_median_bw) / effective_median_bw) .try_into() .unwrap() } diff --git a/consensus/src/block.rs b/consensus/src/block.rs index ceb2cbab..3f5d749e 100644 --- a/consensus/src/block.rs +++ b/consensus/src/block.rs @@ -14,6 +14,9 @@ use monero_serai::{ }; use tower::{Service, ServiceExt}; +use cuprate_consensus_context::{ + BlockChainContextRequest, BlockChainContextResponse, RawBlockChainContext, +}; use cuprate_helper::asynch::rayon_spawn_async; use cuprate_types::{ AltBlockInformation, TransactionVerificationData, VerifiedBlockInformation, @@ -30,7 +33,6 @@ use cuprate_consensus_rules::{ }; use crate::{ - 
context::{BlockChainContextRequest, BlockChainContextResponse, RawBlockChainContext}, transactions::{VerifyTxRequest, VerifyTxResponse}, Database, ExtendedConsensusError, }; diff --git a/consensus/src/block/alt_block.rs b/consensus/src/block/alt_block.rs index 3a5ea7cb..18c27345 100644 --- a/consensus/src/block/alt_block.rs +++ b/consensus/src/block/alt_block.rs @@ -7,6 +7,12 @@ use std::{collections::HashMap, sync::Arc}; use monero_serai::{block::Block, transaction::Input}; use tower::{Service, ServiceExt}; +use cuprate_consensus_context::{ + difficulty::DifficultyCache, + rx_vms::RandomXVm, + weight::{self, BlockWeightsCache}, + AltChainContextCache, AltChainRequestToken, BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW, +}; use cuprate_consensus_rules::{ blocks::{ check_block_pow, check_block_weight, check_timestamp, randomx_seed_height, BlockError, @@ -22,12 +28,6 @@ use cuprate_types::{ use crate::{ block::{free::pull_ordered_transactions, PreparedBlock}, - context::{ - difficulty::DifficultyCache, - rx_vms::RandomXVm, - weight::{self, BlockWeightsCache}, - AltChainContextCache, AltChainRequestToken, BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW, - }, BlockChainContextRequest, BlockChainContextResponse, ExtendedConsensusError, VerifyBlockResponse, }; diff --git a/consensus/src/block/batch_prepare.rs b/consensus/src/block/batch_prepare.rs index 029a5ae6..ef384f5d 100644 --- a/consensus/src/block/batch_prepare.rs +++ b/consensus/src/block/batch_prepare.rs @@ -5,6 +5,7 @@ use rayon::prelude::*; use tower::{Service, ServiceExt}; use tracing::instrument; +use cuprate_consensus_context::rx_vms::RandomXVm; use cuprate_consensus_rules::{ blocks::{check_block_pow, is_randomx_seed_height, randomx_seed_height, BlockError}, hard_forks::HardForkError, @@ -15,7 +16,6 @@ use cuprate_helper::asynch::rayon_spawn_async; use crate::{ block::{free::pull_ordered_transactions, PreparedBlock, PreparedBlockExPow}, - context::rx_vms::RandomXVm, transactions::new_tx_verification_data, BlockChainContextRequest, 
BlockChainContextResponse, ExtendedConsensusError, VerifyBlockResponse, diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index 7280f2ff..f21d00b2 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -24,13 +24,12 @@ use cuprate_consensus_rules::ConsensusError; mod batch_verifier; pub mod block; -pub mod context; #[cfg(test)] mod tests; pub mod transactions; pub use block::{BlockVerifierService, VerifyBlockRequest, VerifyBlockResponse}; -pub use context::{ +pub use cuprate_consensus_context::{ initialize_blockchain_context, BlockChainContext, BlockChainContextRequest, BlockChainContextResponse, BlockChainContextService, ContextConfig, }; diff --git a/consensus/src/tests/context.rs b/consensus/src/tests/context.rs index fdef0ac8..b9c52177 100644 --- a/consensus/src/tests/context.rs +++ b/consensus/src/tests/context.rs @@ -2,15 +2,13 @@ use proptest::strategy::ValueTree; use proptest::{strategy::Strategy, test_runner::TestRunner}; use tower::ServiceExt; -use crate::{ - context::{ - initialize_blockchain_context, BlockChainContextRequest, BlockChainContextResponse, - ContextConfig, NewBlockData, - }, - tests::mock_db::*, - HardFork, +use cuprate_consensus_context::{ + initialize_blockchain_context, BlockChainContextRequest, BlockChainContextResponse, + ContextConfig, NewBlockData, }; +use crate::{tests::mock_db::*, HardFork}; + pub(crate) mod data; mod difficulty; mod hardforks; diff --git a/consensus/src/tests/context/difficulty.rs b/consensus/src/tests/context/difficulty.rs index d5027f50..f1c0fd97 100644 --- a/consensus/src/tests/context/difficulty.rs +++ b/consensus/src/tests/context/difficulty.rs @@ -4,10 +4,10 @@ use proptest::collection::{size_range, vec}; use proptest::{prelude::*, prop_assert_eq, prop_compose, proptest}; use crate::{ - context::difficulty::*, tests::{context::data::DIF_3000000_3002000, mock_db::*}, HardFork, }; +use cuprate_consensus_context::difficulty::*; use cuprate_helper::num::median; use cuprate_types::Chain; diff 
--git a/consensus/src/tests/context/hardforks.rs b/consensus/src/tests/context/hardforks.rs index 17bd47f9..f0800232 100644 --- a/consensus/src/tests/context/hardforks.rs +++ b/consensus/src/tests/context/hardforks.rs @@ -1,13 +1,11 @@ use proptest::{collection::vec, prelude::*}; +use cuprate_consensus_context::{hardforks::HardForkState, HardForkConfig}; use cuprate_consensus_rules::hard_forks::{HFInfo, HFsInfo, HardFork, NUMB_OF_HARD_FORKS}; -use crate::{ - context::{hardforks::HardForkState, HardForkConfig}, - tests::{ - context::data::{HFS_2678808_2688888, HFS_2688888_2689608}, - mock_db::*, - }, +use crate::tests::{ + context::data::{HFS_2678808_2688888, HFS_2688888_2689608}, + mock_db::*, }; const TEST_WINDOW_SIZE: usize = 25; diff --git a/consensus/src/tests/context/rx_vms.rs b/consensus/src/tests/context/rx_vms.rs index b1eba8e2..41c62796 100644 --- a/consensus/src/tests/context/rx_vms.rs +++ b/consensus/src/tests/context/rx_vms.rs @@ -3,15 +3,13 @@ use std::collections::VecDeque; use proptest::prelude::*; use tokio::runtime::Builder; +use cuprate_consensus_context::rx_vms::{get_last_rx_seed_heights, RandomXVmCache}; use cuprate_consensus_rules::{ blocks::{is_randomx_seed_height, randomx_seed_height}, HardFork, }; -use crate::{ - context::rx_vms::{get_last_rx_seed_heights, RandomXVmCache}, - tests::mock_db::*, -}; +use crate::tests::mock_db::*; #[test] fn rx_heights_consistent() { diff --git a/consensus/src/tests/context/weight.rs b/consensus/src/tests/context/weight.rs index b23f8f80..dab3979e 100644 --- a/consensus/src/tests/context/weight.rs +++ b/consensus/src/tests/context/weight.rs @@ -1,11 +1,11 @@ use crate::{ - context::{ - weight::{calculate_block_long_term_weight, BlockWeightsCache}, - BlockWeightsCacheConfig, - }, tests::{context::data::BW_2850000_3050000, mock_db::*}, HardFork, }; +use cuprate_consensus_context::{ + weight::{calculate_block_long_term_weight, BlockWeightsCache}, + BlockWeightsCacheConfig, +}; use cuprate_types::Chain; pub(crate) 
const TEST_WEIGHT_CONFIG: BlockWeightsCacheConfig = diff --git a/constants/Cargo.toml b/constants/Cargo.toml index 6d3e031b..5ce37325 100644 --- a/constants/Cargo.toml +++ b/constants/Cargo.toml @@ -19,4 +19,4 @@ rpc = [] [dev-dependencies] [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/helper/Cargo.toml b/helper/Cargo.toml index 111c6f02..1b3158f1 100644 --- a/helper/Cargo.toml +++ b/helper/Cargo.toml @@ -16,7 +16,8 @@ atomic = ["dep:crossbeam"] asynch = ["dep:futures", "dep:rayon"] cast = [] constants = [] -fs = ["dep:dirs"] +crypto = ["dep:curve25519-dalek", "dep:monero-serai", "std"] +fs = ["dep:dirs", "std"] num = [] map = ["cast", "dep:monero-serai", "dep:cuprate-constants"] time = ["dep:chrono", "std"] @@ -24,14 +25,15 @@ thread = ["std", "dep:target_os_lib"] tx = ["dep:monero-serai"] [dependencies] -cuprate-constants = { path = "../constants", optional = true, features = ["block"] } +cuprate-constants = { workspace = true, optional = true, features = ["block"] } -crossbeam = { workspace = true, optional = true } -chrono = { workspace = true, optional = true, features = ["std", "clock"] } -dirs = { workspace = true, optional = true } -futures = { workspace = true, optional = true, features = ["std"] } -monero-serai = { workspace = true, optional = true } -rayon = { workspace = true, optional = true } +chrono = { workspace = true, optional = true, features = ["std", "clock"] } +crossbeam = { workspace = true, optional = true } +curve25519-dalek = { workspace = true, optional = true } +dirs = { workspace = true, optional = true } +futures = { workspace = true, optional = true, features = ["std"] } +monero-serai = { workspace = true, optional = true } +rayon = { workspace = true, optional = true } # This is kinda a stupid work around. 
# [thread] needs to activate one of these libs (windows|libc) diff --git a/helper/src/cast.rs b/helper/src/cast.rs index 99b7f53e..5628d7d6 100644 --- a/helper/src/cast.rs +++ b/helper/src/cast.rs @@ -18,7 +18,6 @@ // // //============================ SAFETY: DO NOT REMOVE ===========================// -//---------------------------------------------------------------------------------------------------- Free functions /// Cast [`u64`] to [`usize`]. #[inline(always)] pub const fn u64_to_usize(u: u64) -> usize { diff --git a/helper/src/crypto.rs b/helper/src/crypto.rs new file mode 100644 index 00000000..1a27cd30 --- /dev/null +++ b/helper/src/crypto.rs @@ -0,0 +1,122 @@ +//! Crypto related functions and runtime initialized constants + +//---------------------------------------------------------------------------------------------------- Use +use std::sync::LazyLock; + +use curve25519_dalek::{ + constants::ED25519_BASEPOINT_POINT, edwards::VartimeEdwardsPrecomputation, + traits::VartimePrecomputedMultiscalarMul, EdwardsPoint, Scalar, +}; +use monero_serai::generators::H; + +//---------------------------------------------------------------------------------------------------- Pre-computation + +/// This is the decomposed amount table containing the mandatory Pre-RCT amounts. It is used to pre-compute +/// zero commitments at runtime. 
+/// +/// Defined at: +/// - +#[rustfmt::skip] +pub const ZERO_COMMITMENT_DECOMPOSED_AMOUNT: [u64; 172] = [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 20, 30, 40, 50, 60, 70, 80, 90, + 100, 200, 300, 400, 500, 600, 700, 800, 900, + 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, + 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000, + 100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000, 900000, + 1000000, 2000000, 3000000, 4000000, 5000000, 6000000, 7000000, 8000000, 9000000, + 10000000, 20000000, 30000000, 40000000, 50000000, 60000000, 70000000, 80000000, 90000000, + 100000000, 200000000, 300000000, 400000000, 500000000, 600000000, 700000000, 800000000, 900000000, + 1000000000, 2000000000, 3000000000, 4000000000, 5000000000, 6000000000, 7000000000, 8000000000, 9000000000, + 10000000000, 20000000000, 30000000000, 40000000000, 50000000000, 60000000000, 70000000000, 80000000000, 90000000000, + 100000000000, 200000000000, 300000000000, 400000000000, 500000000000, 600000000000, 700000000000, 800000000000, 900000000000, + 1000000000000, 2000000000000, 3000000000000, 4000000000000, 5000000000000, 6000000000000, 7000000000000, 8000000000000, 9000000000000, + 10000000000000, 20000000000000, 30000000000000, 40000000000000, 50000000000000, 60000000000000, 70000000000000, 80000000000000, 90000000000000, + 100000000000000, 200000000000000, 300000000000000, 400000000000000, 500000000000000, 600000000000000, 700000000000000, 800000000000000, 900000000000000, + 1000000000000000, 2000000000000000, 3000000000000000, 4000000000000000, 5000000000000000, 6000000000000000, 7000000000000000, 8000000000000000, 9000000000000000, + 10000000000000000, 20000000000000000, 30000000000000000, 40000000000000000, 50000000000000000, 60000000000000000, 70000000000000000, 80000000000000000, 90000000000000000, + 100000000000000000, 200000000000000000, 300000000000000000, 400000000000000000, 500000000000000000, 600000000000000000, 700000000000000000, 800000000000000000, 
900000000000000000, + 1000000000000000000, 2000000000000000000, 3000000000000000000, 4000000000000000000, 5000000000000000000, 6000000000000000000, 7000000000000000000, 8000000000000000000, 9000000000000000000, + 10000000000000000000 +]; + +/// Runtime initialized [`H`] generator. +static H_PRECOMP: LazyLock = + LazyLock::new(|| VartimeEdwardsPrecomputation::new([*H, ED25519_BASEPOINT_POINT])); + +/// Runtime initialized zero commitment lookup table +/// +/// # Invariant +/// This function assumes that the [`ZERO_COMMITMENT_DECOMPOSED_AMOUNT`] +/// table is sorted. +pub static ZERO_COMMITMENT_LOOKUP_TABLE: LazyLock<[EdwardsPoint; 172]> = LazyLock::new(|| { + let mut lookup_table: [EdwardsPoint; 172] = [ED25519_BASEPOINT_POINT; 172]; + + for (i, amount) in ZERO_COMMITMENT_DECOMPOSED_AMOUNT.into_iter().enumerate() { + lookup_table[i] = ED25519_BASEPOINT_POINT + *H * Scalar::from(amount); + } + + lookup_table +}); + +//---------------------------------------------------------------------------------------------------- Free functions + +/// This function computes the zero commitment given a specific amount. +/// +/// It will first attempt to lookup into the table of known Pre-RCT value. +/// Compute it otherwise. +#[expect(clippy::cast_possible_truncation)] +pub fn compute_zero_commitment(amount: u64) -> EdwardsPoint { + // OPTIMIZATION: Unlike monerod which execute a linear search across its lookup + // table (O(n)). Cuprate is making use of an arithmetic based constant time + // version (O(1)). It has been benchmarked in both hit and miss scenarios against + // a binary search lookup (O(log2(n))). To understand the following algorithm it + // is important to observe the pattern that follows the values of + // [`ZERO_COMMITMENT_DECOMPOSED_AMOUNT`]. + + // First obtain the logarithm base 10 of the amount. and extend it back to obtain + // the amount without its most significant digit. + let Some(log) = amount.checked_ilog10() else { + // amount = 0 so H component is 0. 
+ return ED25519_BASEPOINT_POINT; + }; + let div = 10_u64.pow(log); + + // Extract the most significant digit. + let most_significant_digit = amount / div; + + // If the *rounded* version is different than the exact amount. Then + // there aren't only trailing zeroes behind the most significant digit. + // The amount is not part of the table and can calculated apart. + if most_significant_digit * div != amount { + return H_PRECOMP.vartime_multiscalar_mul([Scalar::from(amount), Scalar::ONE]); + } + + // Calculating the index back by progressing within the powers of 10. + // The index of the first value in the cached amount's row. + let row_start = u64::from(log) * 9; + // The index of the cached amount + let index = (most_significant_digit - 1 + row_start) as usize; + + ZERO_COMMITMENT_LOOKUP_TABLE[index] +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + use curve25519_dalek::{traits::VartimePrecomputedMultiscalarMul, Scalar}; + + use crate::crypto::{compute_zero_commitment, H_PRECOMP, ZERO_COMMITMENT_DECOMPOSED_AMOUNT}; + + #[test] + /// Compare the output of `compute_zero_commitment` for all + /// preRCT decomposed amounts against their actual computation. 
+ /// + /// Assert that the lookup table returns the correct commitments + fn compare_lookup_with_computation() { + for amount in ZERO_COMMITMENT_DECOMPOSED_AMOUNT { + let commitment = H_PRECOMP.vartime_multiscalar_mul([Scalar::from(amount), Scalar::ONE]); + assert!(commitment == compute_zero_commitment(amount)); + } + } +} diff --git a/helper/src/lib.rs b/helper/src/lib.rs index bfd2fd60..9bd64fa1 100644 --- a/helper/src/lib.rs +++ b/helper/src/lib.rs @@ -11,7 +11,7 @@ pub mod atomic; #[cfg(feature = "cast")] pub mod cast; -#[cfg(feature = "fs")] +#[cfg(all(feature = "fs", feature = "std"))] pub mod fs; pub mod network; @@ -30,6 +30,9 @@ pub mod time; #[cfg(feature = "tx")] pub mod tx; + +#[cfg(feature = "crypto")] +pub mod crypto; //---------------------------------------------------------------------------------------------------- Private Usage //---------------------------------------------------------------------------------------------------- diff --git a/net/epee-encoding/Cargo.toml b/net/epee-encoding/Cargo.toml index c021e429..4724e2d0 100644 --- a/net/epee-encoding/Cargo.toml +++ b/net/epee-encoding/Cargo.toml @@ -15,8 +15,8 @@ default = ["std"] std = ["dep:thiserror", "bytes/std", "cuprate-fixed-bytes/std"] [dependencies] -cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] } -cuprate-fixed-bytes = { path = "../fixed-bytes", default-features = false } +cuprate-helper = { workspace = true, default-features = false, features = ["cast"] } +cuprate-fixed-bytes = { workspace = true, default-features = false } paste = "1.0.15" ref-cast = "1.0.23" @@ -27,4 +27,4 @@ thiserror = { workspace = true, optional = true} hex = { workspace = true, features = ["default"] } [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/net/epee-encoding/src/container_as_blob.rs b/net/epee-encoding/src/container_as_blob.rs index 83078c2c..363e157b 100644 --- a/net/epee-encoding/src/container_as_blob.rs +++ 
b/net/epee-encoding/src/container_as_blob.rs @@ -1,3 +1,5 @@ +use alloc::{string::ToString, vec, vec::Vec}; + use bytes::{Buf, BufMut, Bytes, BytesMut}; use ref_cast::RefCast; diff --git a/net/epee-encoding/src/error.rs b/net/epee-encoding/src/error.rs index 756cd136..7206189a 100644 --- a/net/epee-encoding/src/error.rs +++ b/net/epee-encoding/src/error.rs @@ -1,3 +1,4 @@ +use alloc::string::{String, ToString}; use core::{ fmt::{Debug, Formatter}, num::TryFromIntError, diff --git a/net/epee-encoding/src/lib.rs b/net/epee-encoding/src/lib.rs index d55a5460..a6ff1b04 100644 --- a/net/epee-encoding/src/lib.rs +++ b/net/epee-encoding/src/lib.rs @@ -64,6 +64,7 @@ use hex as _; extern crate alloc; +use alloc::string::ToString; use core::str::from_utf8 as str_from_utf8; use bytes::{Buf, BufMut, Bytes, BytesMut}; diff --git a/net/epee-encoding/src/value.rs b/net/epee-encoding/src/value.rs index 816203e5..4762c96a 100644 --- a/net/epee-encoding/src/value.rs +++ b/net/epee-encoding/src/value.rs @@ -1,7 +1,7 @@ //! This module contains a [`EpeeValue`] trait and //! impls for some possible base epee values. 
-use alloc::{string::String, vec::Vec}; +use alloc::{string::String, vec, vec::Vec}; use core::fmt::Debug; use bytes::{Buf, BufMut, Bytes, BytesMut}; diff --git a/net/levin/Cargo.toml b/net/levin/Cargo.toml index 68c32e54..a9f3c1f2 100644 --- a/net/levin/Cargo.toml +++ b/net/levin/Cargo.toml @@ -12,7 +12,7 @@ default = [] tracing = ["dep:tracing", "tokio-util/tracing"] [dependencies] -cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] } +cuprate-helper = { workspace = true, default-features = false, features = ["cast"] } cfg-if = { workspace = true } thiserror = { workspace = true } @@ -30,4 +30,4 @@ tokio = { workspace = true, features = ["full"] } futures = { workspace = true, features = ["std"] } [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/net/wire/Cargo.toml b/net/wire/Cargo.toml index 0b77cf1b..b500a288 100644 --- a/net/wire/Cargo.toml +++ b/net/wire/Cargo.toml @@ -11,11 +11,11 @@ default = [] tracing = ["cuprate-levin/tracing"] [dependencies] -cuprate-levin = { path = "../levin" } -cuprate-epee-encoding = { path = "../epee-encoding" } -cuprate-fixed-bytes = { path = "../fixed-bytes" } -cuprate-types = { path = "../../types", default-features = false, features = ["epee"] } -cuprate-helper = { path = "../../helper", default-features = false, features = ["map"] } +cuprate-levin = { workspace = true } +cuprate-epee-encoding = { workspace = true } +cuprate-fixed-bytes = { workspace = true } +cuprate-types = { workspace = true, default-features = false, features = ["epee"] } +cuprate-helper = { workspace = true, default-features = false, features = ["map"] } bitflags = { workspace = true, features = ["std"] } bytes = { workspace = true, features = ["std"] } diff --git a/p2p/address-book/Cargo.toml b/p2p/address-book/Cargo.toml index 9afc2552..a88819f7 100644 --- a/p2p/address-book/Cargo.toml +++ b/p2p/address-book/Cargo.toml @@ -7,9 +7,9 @@ authors = ["Boog900"] [dependencies] 
-cuprate-constants = { path = "../../constants" } -cuprate-pruning = { path = "../../pruning" } -cuprate-p2p-core = { path = "../p2p-core" } +cuprate-constants = { workspace = true } +cuprate-pruning = { workspace = true } +cuprate-p2p-core = { workspace = true, features = ["borsh"] } tower = { workspace = true, features = ["util"] } tokio = { workspace = true, features = ["time", "fs", "rt"]} @@ -26,9 +26,9 @@ rand = { workspace = true, features = ["std", "std_rng"] } borsh = { workspace = true, features = ["derive", "std"]} [dev-dependencies] -cuprate-test-utils = {path = "../../test-utils"} +cuprate-test-utils = { workspace = true } tokio = { workspace = true, features = ["rt-multi-thread", "macros"]} [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/p2p/address-book/src/book.rs b/p2p/address-book/src/book.rs index bc7109bc..10be8e3d 100644 --- a/p2p/address-book/src/book.rs +++ b/p2p/address-book/src/book.rs @@ -425,8 +425,7 @@ impl Service> for AddressBook { | AddressBookRequest::SetBan(_) | AddressBookRequest::GetBans | AddressBookRequest::ConnectionInfo - | AddressBookRequest::NextNeededPruningSeed - | AddressBookRequest::Spans => { + | AddressBookRequest::NextNeededPruningSeed => { todo!("finish https://github.com/Cuprate/cuprate/pull/297") } }; diff --git a/p2p/dandelion-tower/src/router.rs b/p2p/dandelion-tower/src/router.rs index 88702be0..7ca05989 100644 --- a/p2p/dandelion-tower/src/router.rs +++ b/p2p/dandelion-tower/src/router.rs @@ -73,6 +73,15 @@ pub enum TxState { Local, } +impl TxState { + /// Returns `true` if the tx is in the stem stage. + /// + /// [`TxState::Local`] & [`TxState::Stem`] are the 2 stem stage states. + pub const fn is_stem_stage(&self) -> bool { + matches!(self, Self::Local | Self::Stem { .. }) + } +} + /// A request to route a transaction. pub struct DandelionRouteReq { /// The transaction. 
diff --git a/p2p/p2p-core/Cargo.toml b/p2p/p2p-core/Cargo.toml index a30590fa..bc6c8335 100644 --- a/p2p/p2p-core/Cargo.toml +++ b/p2p/p2p-core/Cargo.toml @@ -10,9 +10,10 @@ default = ["borsh"] borsh = ["dep:borsh", "cuprate-pruning/borsh"] [dependencies] -cuprate-helper = { path = "../../helper", features = ["asynch"], default-features = false } -cuprate-wire = { path = "../../net/wire", features = ["tracing"] } -cuprate-pruning = { path = "../../pruning" } +cuprate-helper = { workspace = true, features = ["asynch"], default-features = false } +cuprate-wire = { workspace = true, features = ["tracing"] } +cuprate-pruning = { workspace = true } +cuprate-types = { workspace = true } tokio = { workspace = true, features = ["net", "sync", "macros", "time", "rt", "rt-multi-thread"]} tokio-util = { workspace = true, features = ["codec"] } @@ -29,10 +30,10 @@ hex-literal = { workspace = true } borsh = { workspace = true, features = ["derive", "std"], optional = true } [dev-dependencies] -cuprate-test-utils = { path = "../../test-utils" } +cuprate-test-utils = { workspace = true } hex = { workspace = true, features = ["std"] } tokio-test = { workspace = true } [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/p2p/p2p-core/src/client/handshaker/builder/dummy.rs b/p2p/p2p-core/src/client/handshaker/builder/dummy.rs index 4c039bb6..e4e80477 100644 --- a/p2p/p2p-core/src/client/handshaker/builder/dummy.rs +++ b/p2p/p2p-core/src/client/handshaker/builder/dummy.rs @@ -113,8 +113,7 @@ impl Service> for DummyAddressBook { | AddressBookRequest::SetBan(_) | AddressBookRequest::GetBans | AddressBookRequest::ConnectionInfo - | AddressBookRequest::NextNeededPruningSeed - | AddressBookRequest::Spans => { + | AddressBookRequest::NextNeededPruningSeed => { todo!("finish https://github.com/Cuprate/cuprate/pull/297") } })) diff --git a/p2p/p2p-core/src/services.rs b/p2p/p2p-core/src/services.rs index 481f0243..63853e7c 100644 --- 
a/p2p/p2p-core/src/services.rs +++ b/p2p/p2p-core/src/services.rs @@ -6,7 +6,7 @@ use cuprate_wire::{CoreSyncData, PeerListEntryBase}; use crate::{ client::InternalPeerID, handles::ConnectionHandle, - types::{BanState, ConnectionInfo, SetBan, Span}, + types::{BanState, ConnectionInfo, SetBan}, NetZoneAddress, NetworkAddressIncorrectZone, NetworkZone, }; @@ -133,12 +133,6 @@ pub enum AddressBookRequest { /// Get the state of all bans. GetBans, - /// Get [`Span`] data. - /// - /// This is data that describes an active downloading process, - /// if we are fully synced, this will return an empty [`Vec`]. - Spans, - /// Get the next [`PruningSeed`] needed for a pruned sync. NextNeededPruningSeed, } @@ -179,9 +173,6 @@ pub enum AddressBookResponse { /// Response to [`AddressBookRequest::GetBans`]. GetBans(Vec>), - /// Response to [`AddressBookRequest::Spans`]. - Spans(Vec>), - /// Response to [`AddressBookRequest::NextNeededPruningSeed`]. NextNeededPruningSeed(PruningSeed), } diff --git a/p2p/p2p-core/src/types.rs b/p2p/p2p-core/src/types.rs index eef3cb85..ca560555 100644 --- a/p2p/p2p-core/src/types.rs +++ b/p2p/p2p-core/src/types.rs @@ -3,6 +3,7 @@ use std::time::{Duration, Instant}; use cuprate_pruning::PruningSeed; +use cuprate_types::{AddressType, ConnectionState}; use crate::NetZoneAddress; @@ -24,83 +25,23 @@ pub struct BanState { pub unban_instant: Option, } -/// An enumeration of address types. -/// -/// Used [`ConnectionInfo::address_type`]. -#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[repr(u8)] -pub enum AddressType { - #[default] - Invalid, - Ipv4, - Ipv6, - I2p, - Tor, -} - -impl AddressType { - /// Convert [`Self`] to a [`u8`]. 
- /// - /// ```rust - /// use cuprate_p2p_core::AddressType as A; - /// - /// assert_eq!(A::Invalid.to_u8(), 0); - /// assert_eq!(A::Ipv4.to_u8(), 1); - /// assert_eq!(A::Ipv6.to_u8(), 2); - /// assert_eq!(A::I2p.to_u8(), 3); - /// assert_eq!(A::Tor.to_u8(), 4); - /// ``` - pub const fn to_u8(self) -> u8 { - self as u8 - } - - /// Convert a [`u8`] to a [`Self`]. - /// - /// # Errors - /// This returns [`None`] if `u > 4`. - /// - /// ```rust - /// use cuprate_p2p_core::AddressType as A; - /// - /// assert_eq!(A::from_u8(0), Some(A::Invalid)); - /// assert_eq!(A::from_u8(1), Some(A::Ipv4)); - /// assert_eq!(A::from_u8(2), Some(A::Ipv6)); - /// assert_eq!(A::from_u8(3), Some(A::I2p)); - /// assert_eq!(A::from_u8(4), Some(A::Tor)); - /// assert_eq!(A::from_u8(5), None); - /// ``` - pub const fn from_u8(u: u8) -> Option { - Some(match u { - 0 => Self::Invalid, - 1 => Self::Ipv4, - 2 => Self::Ipv6, - 3 => Self::I2p, - 4 => Self::Tor, - _ => return None, - }) - } -} - -// TODO: reduce fields and map to RPC type. -// /// Data within [`crate::services::AddressBookResponse::ConnectionInfo`]. pub struct ConnectionInfo { + // The following fields are mostly the same as `monerod`. pub address: A, pub address_type: AddressType, pub avg_download: u64, pub avg_upload: u64, - pub connection_id: u64, // TODO: boost::uuids::uuid pub current_download: u64, pub current_upload: u64, pub height: u64, + /// Either a domain or an IP without the port. pub host: String, pub incoming: bool, - pub ip: String, pub live_time: u64, pub localhost: bool, pub local_ip: bool, - pub peer_id: String, - pub port: String, + pub peer_id: u64, pub pruning_seed: PruningSeed, pub recv_count: u64, pub recv_idle_time: u64, @@ -108,16 +49,44 @@ pub struct ConnectionInfo { pub rpc_port: u16, pub send_count: u64, pub send_idle_time: u64, - pub state: String, // TODO: what type is this? + pub state: ConnectionState, pub support_flags: u32, + + // The following fields are slightly different than `monerod`. 
+ + // + /// [`None`] if Tor/i2p or unknown. + pub socket_addr: Option, + + /// This field does not exist for `cuprated`'s RPC, this is just a marker type: + /// - + /// - + /// + /// [`ConnectionId::DEFAULT_STR`] is used when mapping to the RPC type. + pub connection_id: ConnectionId, +} + +/// Marker type for `monerod`'s connection ID. +/// +/// `connection_id` is a 128-bit `uuid` in `monerod`. +/// `cuprated` does not support this field so it returns +/// the default value in the RPC interface, an all 0-bit UUID. +/// +/// This default value in string form is [`ConnectionId::DEFAULT_STR`]. +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct ConnectionId; + +impl ConnectionId { + /// [`str`] representation of a default connection ID. + pub const DEFAULT_STR: &str = "00000000000000000000000000000000"; } /// Used in RPC's `sync_info`. /// -/// Data within [`crate::services::AddressBookResponse::Spans`]. +// TODO: fix docs after +// Data within [`crate::services::AddressBookResponse::Spans`]. 
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Span { - pub connection_id: u64, // TODO: boost::uuids::uuid pub nblocks: u64, pub rate: u32, pub remote_address: A, diff --git a/p2p/p2p/Cargo.toml b/p2p/p2p/Cargo.toml index 3444b5ef..866fb918 100644 --- a/p2p/p2p/Cargo.toml +++ b/p2p/p2p/Cargo.toml @@ -6,15 +6,15 @@ license = "MIT" authors = ["Boog900"] [dependencies] -cuprate-constants = { path = "../../constants" } -cuprate-fixed-bytes = { path = "../../net/fixed-bytes" } -cuprate-wire = { path = "../../net/wire" } -cuprate-p2p-core = { path = "../p2p-core", features = ["borsh"] } -cuprate-address-book = { path = "../address-book" } -cuprate-pruning = { path = "../../pruning" } -cuprate-helper = { path = "../../helper", features = ["asynch"], default-features = false } -cuprate-async-buffer = { path = "../async-buffer" } -cuprate-types = { path = "../../types", default-features = false } +cuprate-constants = { workspace = true } +cuprate-fixed-bytes = { workspace = true } +cuprate-wire = { workspace = true } +cuprate-p2p-core = { workspace = true, features = ["borsh"] } +cuprate-address-book = { workspace = true } +cuprate-pruning = { workspace = true } +cuprate-helper = { workspace = true, features = ["asynch"], default-features = false } +cuprate-async-buffer = { workspace = true } +cuprate-types = { workspace = true, default-features = false } monero-serai = { workspace = true, features = ["std"] } @@ -35,10 +35,10 @@ tracing = { workspace = true, features = ["std", "attributes"] } borsh = { workspace = true, features = ["derive", "std"] } [dev-dependencies] -cuprate-test-utils = { path = "../../test-utils" } +cuprate-test-utils = { workspace = true } indexmap = { workspace = true } proptest = { workspace = true } tokio-test = { workspace = true } [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/p2p/p2p/src/client_pool.rs b/p2p/p2p/src/client_pool.rs index fc97fc1b..67c8f112 100644 --- 
a/p2p/p2p/src/client_pool.rs +++ b/p2p/p2p/src/client_pool.rs @@ -18,13 +18,13 @@ use tracing::{Instrument, Span}; use cuprate_p2p_core::{ client::{Client, InternalPeerID}, handles::ConnectionHandle, - NetworkZone, + ConnectionDirection, NetworkZone, }; pub(crate) mod disconnect_monitor; mod drop_guard_client; -pub(crate) use drop_guard_client::ClientPoolDropGuard; +pub use drop_guard_client::ClientPoolDropGuard; /// The client pool, which holds currently connected free peers. /// @@ -165,6 +165,17 @@ impl ClientPool { sync_data.cumulative_difficulty() > cumulative_difficulty }) } + + /// Returns the first outbound peer when iterating over the peers. + pub fn outbound_client(self: &Arc) -> Option> { + let client = self + .clients + .iter() + .find(|element| element.value().info.direction == ConnectionDirection::Outbound)?; + let id = *client.key(); + + Some(self.borrow_client(&id).unwrap()) + } } mod sealed { diff --git a/p2p/p2p/src/lib.rs b/p2p/p2p/src/lib.rs index b3577a77..541784c9 100644 --- a/p2p/p2p/src/lib.rs +++ b/p2p/p2p/src/lib.rs @@ -18,7 +18,7 @@ use cuprate_p2p_core::{ pub mod block_downloader; mod broadcast; -mod client_pool; +pub mod client_pool; pub mod config; pub mod connection_maintainer; pub mod constants; @@ -26,6 +26,7 @@ mod inbound_server; use block_downloader::{BlockBatch, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse}; pub use broadcast::{BroadcastRequest, BroadcastSvc}; +pub use client_pool::{ClientPool, ClientPoolDropGuard}; pub use config::{AddressBookConfig, P2PConfig}; use connection_maintainer::MakeConnectionRequest; @@ -82,7 +83,7 @@ where let outbound_handshaker = outbound_handshaker_builder.build(); - let client_pool = client_pool::ClientPool::new(); + let client_pool = ClientPool::new(); let (make_connection_tx, make_connection_rx) = mpsc::channel(3); @@ -132,7 +133,7 @@ where #[derive(Clone)] pub struct NetworkInterface { /// A pool of free connected peers. 
- pool: Arc>, + pool: Arc>, /// A [`Service`] that allows broadcasting to all connected peers. broadcast_svc: BroadcastSvc, /// A channel to request extra connections. @@ -173,7 +174,7 @@ impl NetworkInterface { } /// Borrows the `ClientPool`, for access to connected peers. - pub const fn client_pool(&self) -> &Arc> { + pub const fn client_pool(&self) -> &Arc> { &self.pool } } diff --git a/pruning/Cargo.toml b/pruning/Cargo.toml index e898fd5e..4b03551d 100644 --- a/pruning/Cargo.toml +++ b/pruning/Cargo.toml @@ -10,11 +10,11 @@ default = [] borsh = ["dep:borsh"] [dependencies] -cuprate-constants = { path = "../constants" } +cuprate-constants = { workspace = true, features = ["block"] } thiserror = { workspace = true } borsh = { workspace = true, features = ["derive", "std"], optional = true } [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/rpc/interface/Cargo.toml b/rpc/interface/Cargo.toml index 00f7a228..c5d4db70 100644 --- a/rpc/interface/Cargo.toml +++ b/rpc/interface/Cargo.toml @@ -10,23 +10,23 @@ keywords = ["cuprate", "rpc", "interface"] [features] default = ["dummy", "serde"] -dummy = [] +dummy = ["dep:cuprate-helper", "dep:futures"] [dependencies] -cuprate-epee-encoding = { path = "../../net/epee-encoding", default-features = false } -cuprate-json-rpc = { path = "../json-rpc", default-features = false } -cuprate-rpc-types = { path = "../types", features = ["serde", "epee"], default-features = false } -cuprate-helper = { path = "../../helper", features = ["asynch"], default-features = false } +cuprate-epee-encoding = { workspace = true, default-features = false } +cuprate-json-rpc = { workspace = true, default-features = false } +cuprate-rpc-types = { workspace = true, features = ["serde", "epee"], default-features = false } +cuprate-helper = { workspace = true, features = ["asynch"], default-features = false, optional = true } anyhow = { workspace = true } axum = { version = "0.7.5", features = ["json"], 
default-features = false } serde = { workspace = true, optional = true } -tower = { workspace = true } +tower = { workspace = true, features = ["util"] } paste = { workspace = true } -futures = { workspace = true } +futures = { workspace = true, optional = true } [dev-dependencies] -cuprate-test-utils = { path = "../../test-utils" } +cuprate-test-utils = { workspace = true } axum = { version = "0.7.5", features = ["json", "tokio", "http2"] } serde_json = { workspace = true, features = ["std"] } diff --git a/rpc/types/Cargo.toml b/rpc/types/Cargo.toml index cfe7e47d..6d8797b2 100644 --- a/rpc/types/Cargo.toml +++ b/rpc/types/Cargo.toml @@ -10,22 +10,22 @@ keywords = ["cuprate", "rpc", "types", "monero"] [features] default = ["serde", "epee"] -serde = ["dep:serde", "cuprate-fixed-bytes/serde"] -epee = ["dep:cuprate-epee-encoding"] +serde = ["dep:serde", "cuprate-fixed-bytes/serde", "cuprate-types/serde"] +epee = ["dep:cuprate-epee-encoding", "cuprate-types/epee"] [dependencies] -cuprate-epee-encoding = { path = "../../net/epee-encoding", optional = true } -cuprate-fixed-bytes = { path = "../../net/fixed-bytes" } -cuprate-types = { path = "../../types", default-features = false, features = ["epee", "serde"] } +cuprate-epee-encoding = { workspace = true, optional = true } +cuprate-fixed-bytes = { workspace = true } +cuprate-types = { workspace = true, default-features = false } -paste = { workspace = true } -serde = { workspace = true, optional = true } +paste = { workspace = true } +serde = { workspace = true, optional = true } [dev-dependencies] -cuprate-test-utils = { path = "../../test-utils" } +cuprate-test-utils = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/rpc/types/src/base.rs b/rpc/types/src/base.rs index 407b8c11..89eafc5b 100644 --- a/rpc/types/src/base.rs +++ b/rpc/types/src/base.rs @@ -164,7 +164,7 @@ impl AccessResponseBase { /// 
use cuprate_rpc_types::{misc::*, base::*}; /// /// assert_eq!(AccessResponseBase::OK_UNTRUSTED, AccessResponseBase { - /// response_base: ResponseBase::ok_untrusted(), + /// response_base: ResponseBase::OK_UNTRUSTED, /// credits: 0, /// top_hash: "".into(), /// }); diff --git a/rpc/types/src/bin.rs b/rpc/types/src/bin.rs index a68d3e10..7b941918 100644 --- a/rpc/types/src/bin.rs +++ b/rpc/types/src/bin.rs @@ -20,12 +20,16 @@ use cuprate_types::BlockCompleteEntry; use crate::{ base::AccessResponseBase, - defaults::{default_false, default_zero}, macros::{define_request, define_request_and_response, define_request_and_response_doc}, - misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolInfoExtent, PoolTxInfo, Status}, + misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolTxInfo, Status}, rpc_call::RpcCallValue, }; +#[cfg(any(feature = "epee", feature = "serde"))] +use crate::defaults::{default_false, default_zero}; +#[cfg(feature = "epee")] +use crate::misc::PoolInfoExtent; + //---------------------------------------------------------------------------------------------------- Definitions define_request_and_response! 
{ get_blocks_by_heightbin, diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index 2b1e378c..cb55e64a 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -8,10 +8,6 @@ use serde::{Deserialize, Serialize}; use crate::{ base::{AccessResponseBase, ResponseBase}, - defaults::{ - default_false, default_height, default_one, default_string, default_true, default_vec, - default_zero, - }, macros::define_request_and_response, misc::{ AuxPow, BlockHeader, ChainInfo, ConnectionInfo, Distribution, GetBan, @@ -21,6 +17,12 @@ use crate::{ rpc_call::RpcCallValue, }; +#[cfg(any(feature = "epee", feature = "serde"))] +use crate::defaults::{ + default_false, default_height, default_one, default_string, default_true, default_vec, + default_zero, +}; + //---------------------------------------------------------------------------------------------------- Macro /// Adds a (de)serialization doc-test to a type in `json.rs`. /// @@ -658,7 +660,7 @@ define_request_and_response! { connections: vec![ ConnectionInfo { address: "3evk3kezfjg44ma6tvesy7rbxwwpgpympj45xar5fo4qajrsmkoaqdqd.onion:18083".into(), - address_type: 4, + address_type: cuprate_types::AddressType::Tor, avg_download: 0, avg_upload: 0, connection_id: "22ef856d0f1d44cc95e84fecfd065fe2".into(), @@ -680,12 +682,12 @@ define_request_and_response! { rpc_port: 0, send_count: 3406572, send_idle_time: 30, - state: "normal".into(), + state: cuprate_types::ConnectionState::Normal, support_flags: 0 }, ConnectionInfo { address: "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion:18083".into(), - address_type: 4, + address_type: cuprate_types::AddressType::Tor, avg_download: 0, avg_upload: 0, connection_id: "c7734e15936f485a86d2b0534f87e499".into(), @@ -707,7 +709,7 @@ define_request_and_response! { rpc_port: 0, send_count: 3370566, send_idle_time: 120, - state: "normal".into(), + state: cuprate_types::ConnectionState::Normal, support_flags: 0 } ], @@ -1251,7 +1253,7 @@ define_request_and_response! 
{ SyncInfoPeer { info: ConnectionInfo { address: "142.93.128.65:44986".into(), - address_type: 1, + address_type: cuprate_types::AddressType::Ipv4, avg_download: 1, avg_upload: 1, connection_id: "a5803c4c2dac49e7b201dccdef54c862".into(), @@ -1273,14 +1275,14 @@ define_request_and_response! { rpc_port: 18089, send_count: 32235, send_idle_time: 6, - state: "normal".into(), + state: cuprate_types::ConnectionState::Normal, support_flags: 1 } }, SyncInfoPeer { info: ConnectionInfo { address: "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion:18083".into(), - address_type: 4, + address_type: cuprate_types::AddressType::Tor, avg_download: 0, avg_upload: 0, connection_id: "277f7c821bc546878c8bd29977e780f5".into(), @@ -1302,7 +1304,7 @@ define_request_and_response! { rpc_port: 0, send_count: 99120, send_idle_time: 15, - state: "normal".into(), + state: cuprate_types::ConnectionState::Normal, support_flags: 0 } } diff --git a/rpc/types/src/lib.rs b/rpc/types/src/lib.rs index be1069ec..403a3ea3 100644 --- a/rpc/types/src/lib.rs +++ b/rpc/types/src/lib.rs @@ -6,6 +6,7 @@ )] mod constants; +#[cfg(any(feature = "serde", feature = "epee"))] mod defaults; mod free; mod macros; diff --git a/rpc/types/src/misc/distribution.rs b/rpc/types/src/misc/distribution.rs index faac7ad7..e920d129 100644 --- a/rpc/types/src/misc/distribution.rs +++ b/rpc/types/src/misc/distribution.rs @@ -20,8 +20,8 @@ use cuprate_epee_encoding::{ "rpc/core_rpc_server_commands_defs.h", 45..=55 )] -#[cfg(feature = "epee")] -fn compress_integer_array(_: &[u64]) -> error::Result> { +#[cfg(any(feature = "epee", feature = "serde"))] +fn compress_integer_array(_: &[u64]) -> Vec { todo!() } @@ -33,6 +33,7 @@ fn compress_integer_array(_: &[u64]) -> error::Result> { "rpc/core_rpc_server_commands_defs.h", 57..=72 )] +#[cfg(any(feature = "epee", feature = "serde"))] fn decompress_integer_array(_: &[u8]) -> Vec { todo!() } @@ -135,12 +136,7 @@ fn serialize_distribution_as_compressed_data(v: &Vec, s: S) -> 
Result compressed_data.serialize(s), - Err(_) => Err(serde::ser::Error::custom( - "error compressing distribution array", - )), - } + compress_integer_array(v).serialize(s) } /// Deserializer function for [`DistributionCompressedBinary::distribution`]. @@ -256,7 +252,7 @@ impl EpeeObject for Distribution { distribution, amount, }) => { - let compressed_data = compress_integer_array(&distribution)?; + let compressed_data = compress_integer_array(&distribution); start_height.write(w)?; base.write(w)?; diff --git a/rpc/types/src/misc/misc.rs b/rpc/types/src/misc/misc.rs index 49fed6f7..8f7467ba 100644 --- a/rpc/types/src/misc/misc.rs +++ b/rpc/types/src/misc/misc.rs @@ -11,10 +11,10 @@ use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] use cuprate_epee_encoding::epee_object; -use crate::{ - defaults::{default_string, default_zero}, - macros::monero_definition_link, -}; +use crate::macros::monero_definition_link; + +#[cfg(any(feature = "epee", feature = "serde"))] +use crate::defaults::default_zero; //---------------------------------------------------------------------------------------------------- Macros /// This macro (local to this file) defines all the misc types. @@ -110,7 +110,7 @@ define_struct_and_impl_epee! { /// Used in [`crate::json::GetConnectionsResponse`]. ConnectionInfo { address: String, - address_type: crate::misc::AddressType, + address_type: cuprate_types::AddressType, avg_download: u64, avg_upload: u64, connection_id: String, @@ -135,7 +135,7 @@ define_struct_and_impl_epee! { // Exists in the original definition, but isn't // used or (de)serialized for RPC purposes. // ssl: bool, - state: String, + state: cuprate_types::ConnectionState, support_flags: u32, } } @@ -148,7 +148,7 @@ define_struct_and_impl_epee! { )] /// Used in [`crate::json::SetBansRequest`]. 
SetBan { - #[cfg_attr(feature = "serde", serde(default = "default_string"))] + #[cfg_attr(feature = "serde", serde(default = "crate::defaults::default_string"))] host: String, #[cfg_attr(feature = "serde", serde(default = "default_zero"))] ip: u32, diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index 6c4c021c..3694041c 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -8,7 +8,6 @@ use serde::{Deserialize, Serialize}; use crate::{ base::{AccessResponseBase, ResponseBase}, - defaults::{default_false, default_string, default_true, default_vec, default_zero}, macros::define_request_and_response, misc::{ GetOutputsOut, OutKey, Peer, PublicNode, SpentKeyImageInfo, Status, TxEntry, TxInfo, @@ -17,6 +16,9 @@ use crate::{ RpcCallValue, }; +#[cfg(any(feature = "serde", feature = "epee"))] +use crate::defaults::{default_false, default_string, default_true, default_vec, default_zero}; + //---------------------------------------------------------------------------------------------------- Macro /// Adds a (de)serialization doc-test to a type in `other.rs`. 
/// diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index 00579110..6fd973cd 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -9,37 +9,36 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/storage/cuprate-bloc keywords = ["cuprate", "blockchain", "database"] [features] -default = ["heed", "service"] +default = ["heed"] # default = ["redb", "service"] # default = ["redb-memory", "service"] heed = ["cuprate-database/heed"] redb = ["cuprate-database/redb"] redb-memory = ["cuprate-database/redb-memory"] -service = ["dep:thread_local", "dep:rayon", "cuprate-helper/thread"] +serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"] [dependencies] -cuprate-database = { path = "../database" } -cuprate-database-service = { path = "../service" } -cuprate-helper = { path = "../../helper", features = ["fs", "map"] } -cuprate-types = { path = "../../types", features = ["blockchain"] } -cuprate-pruning = { path = "../../pruning" } +cuprate-database = { workspace = true } +cuprate-database-service = { workspace = true } +cuprate-helper = { workspace = true, features = ["fs", "map", "crypto", "tx", "thread"] } +cuprate-types = { workspace = true, features = ["blockchain"] } +cuprate-pruning = { workspace = true } bitflags = { workspace = true, features = ["std", "serde", "bytemuck"] } bytemuck = { workspace = true, features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } curve25519-dalek = { workspace = true } -rand = { workspace = true } +rand = { workspace = true, features = ["std", "std_rng"] } monero-serai = { workspace = true, features = ["std"] } serde = { workspace = true, optional = true } -# `service` feature. 
tower = { workspace = true } -thread_local = { workspace = true, optional = true } -rayon = { workspace = true, optional = true } +thread_local = { workspace = true } +rayon = { workspace = true } [dev-dependencies] -cuprate-constants = { path = "../../constants" } -cuprate-helper = { path = "../../helper", features = ["thread", "cast"] } -cuprate-test-utils = { path = "../../test-utils" } +cuprate-constants = { workspace = true } +cuprate-helper = { workspace = true, features = ["thread", "cast"] } +cuprate-test-utils = { workspace = true } tokio = { workspace = true, features = ["full"] } tempfile = { workspace = true } diff --git a/storage/blockchain/README.md b/storage/blockchain/README.md index 48005469..3f97a3d6 100644 --- a/storage/blockchain/README.md +++ b/storage/blockchain/README.md @@ -32,9 +32,6 @@ use cuprate_blockchain::{ This ensures the types/traits used from `cuprate_database` are the same ones used by `cuprate_blockchain` internally. # Feature flags -The `service` module requires the `service` feature to be enabled. -See the module for more documentation. - Different database backends are enabled by the feature flags: - `heed` (LMDB) - `redb` @@ -45,7 +42,7 @@ The default is `heed`. # Invariants when not using `service` -`cuprate_blockchain` can be used without the `service` feature enabled but +`cuprate_blockchain` can be used without the `service` module but there are some things that must be kept in mind when doing so. Failing to uphold these invariants may cause panics. 
diff --git a/storage/blockchain/src/lib.rs b/storage/blockchain/src/lib.rs index f66cd99b..7db8cc6e 100644 --- a/storage/blockchain/src/lib.rs +++ b/storage/blockchain/src/lib.rs @@ -29,16 +29,12 @@ pub use free::open; pub mod config; pub mod ops; +pub mod service; pub mod tables; pub mod types; -//---------------------------------------------------------------------------------------------------- Feature-gated -#[cfg(feature = "service")] -pub mod service; - //---------------------------------------------------------------------------------------------------- Private #[cfg(test)] pub(crate) mod tests; -#[cfg(feature = "service")] // only needed in `service` for now pub(crate) mod unsafe_sendable; diff --git a/storage/blockchain/src/ops/output.rs b/storage/blockchain/src/ops/output.rs index 1c7c1d78..14c209ab 100644 --- a/storage/blockchain/src/ops/output.rs +++ b/storage/blockchain/src/ops/output.rs @@ -1,12 +1,13 @@ //! Output functions. //---------------------------------------------------------------------------------------------------- Import -use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, Scalar}; -use monero_serai::{generators::H, transaction::Timelock}; +use curve25519_dalek::edwards::CompressedEdwardsY; +use monero_serai::transaction::Timelock; use cuprate_database::{ RuntimeError, {DatabaseRo, DatabaseRw}, }; +use cuprate_helper::crypto::compute_zero_commitment; use cuprate_helper::map::u64_to_timelock; use cuprate_types::OutputOnChain; @@ -155,9 +156,7 @@ pub fn output_to_output_on_chain( amount: Amount, table_tx_unlock_time: &impl DatabaseRo, ) -> Result { - // FIXME: implement lookup table for common values: - // - let commitment = ED25519_BASEPOINT_POINT + *H * Scalar::from(amount); + let commitment = compute_zero_commitment(amount); let time_lock = if output .output_flags diff --git a/storage/blockchain/src/ops/tx.rs b/storage/blockchain/src/ops/tx.rs index c9799a2c..5a60ad53 100644 --- 
a/storage/blockchain/src/ops/tx.rs +++ b/storage/blockchain/src/ops/tx.rs @@ -2,10 +2,10 @@ //---------------------------------------------------------------------------------------------------- Import use bytemuck::TransparentWrapper; -use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, Scalar}; use monero_serai::transaction::{Input, Timelock, Transaction}; use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec}; +use cuprate_helper::crypto::compute_zero_commitment; use crate::{ ops::{ @@ -136,12 +136,9 @@ pub fn add_tx( .enumerate() .map(|(i, output)| { // Create commitment. - // - // FIXME: implement lookup table for common values: - // + let commitment = if miner_tx { - ED25519_BASEPOINT_POINT - + *monero_serai::generators::H * Scalar::from(output.amount.unwrap_or(0)) + compute_zero_commitment(output.amount.unwrap_or(0)) } else { proofs .as_ref() diff --git a/storage/blockchain/src/service/mod.rs b/storage/blockchain/src/service/mod.rs index 53bf1dfa..c5eb80c2 100644 --- a/storage/blockchain/src/service/mod.rs +++ b/storage/blockchain/src/service/mod.rs @@ -10,8 +10,6 @@ //! //! The system is managed by this crate, and only requires [`init`] by the user. //! -//! This module must be enabled with the `service` feature. -//! //! ## Handles //! The 2 handles to the database are: //! - [`BlockchainReadHandle`] diff --git a/storage/database/Cargo.toml b/storage/database/Cargo.toml index 7a2f4ae1..feeaf876 100644 --- a/storage/database/Cargo.toml +++ b/storage/database/Cargo.toml @@ -9,10 +9,10 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/storage/database" keywords = ["cuprate", "database"] [features] -# default = ["heed"] +default = ["heed"] # default = ["redb"] # default = ["redb-memory"] -heed = ["dep:heed"] +heed = [] redb = ["dep:redb"] redb-memory = ["redb"] @@ -25,7 +25,7 @@ paste = { workspace = true } thiserror = { workspace = true } # Optional features. 
-heed = { version = "0.20.5", features = ["read-txn-no-tls"], optional = true } +heed = { version = "0.20.5", features = ["read-txn-no-tls"] } redb = { version = "2.1.3", optional = true } serde = { workspace = true, optional = true } diff --git a/storage/database/src/backend/mod.rs b/storage/database/src/backend/mod.rs index 11ae40b8..ebe12d86 100644 --- a/storage/database/src/backend/mod.rs +++ b/storage/database/src/backend/mod.rs @@ -4,6 +4,8 @@ cfg_if::cfg_if! { // If both backends are enabled, fallback to `heed`. // This is useful when using `--all-features`. if #[cfg(all(feature = "redb", not(feature = "heed")))] { + use heed as _; + mod redb; pub use redb::ConcreteEnv; } else { diff --git a/storage/service/Cargo.toml b/storage/service/Cargo.toml index ed46b355..ebdb13e8 100644 --- a/storage/service/Cargo.toml +++ b/storage/service/Cargo.toml @@ -8,14 +8,20 @@ authors = ["Boog900"] repository = "https://github.com/Cuprate/cuprate/tree/main/storage/service" keywords = ["cuprate", "service", "database"] +[features] +default = ["heed"] +heed = ["cuprate-database/heed"] +redb = ["cuprate-database/redb"] +redb-memory = ["cuprate-database/redb-memory"] + [dependencies] -cuprate-database = { path = "../database" } -cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] } +cuprate-database = { workspace = true } +cuprate-helper = { workspace = true, features = ["fs", "thread", "map", "asynch"] } serde = { workspace = true, optional = true } rayon = { workspace = true } tower = { workspace = true } -futures = { workspace = true } +futures = { workspace = true, features = ["std"] } crossbeam = { workspace = true, features = ["std"] } [lints] diff --git a/storage/service/src/service/write.rs b/storage/service/src/service/write.rs index f75d6151..607c4aa6 100644 --- a/storage/service/src/service/write.rs +++ b/storage/service/src/service/write.rs @@ -30,6 +30,14 @@ pub struct DatabaseWriteHandle { crossbeam::channel::Sender<(Req,
oneshot::Sender>)>, } +impl Clone for DatabaseWriteHandle { + fn clone(&self) -> Self { + Self { + sender: self.sender.clone(), + } + } +} + impl DatabaseWriteHandle where Req: Send + 'static, diff --git a/storage/txpool/Cargo.toml b/storage/txpool/Cargo.toml index 70211d9e..c9082655 100644 --- a/storage/txpool/Cargo.toml +++ b/storage/txpool/Cargo.toml @@ -9,38 +9,38 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/storage/txpool" keywords = ["cuprate", "txpool", "transaction", "pool", "database"] [features] -default = ["heed", "service"] +default = ["heed"] # default = ["redb", "service"] # default = ["redb-memory", "service"] heed = ["cuprate-database/heed"] redb = ["cuprate-database/redb"] redb-memory = ["cuprate-database/redb-memory"] -service = ["dep:tower", "dep:rayon", "dep:cuprate-database-service"] serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"] [dependencies] -cuprate-database = { path = "../database", features = ["heed"] } -cuprate-database-service = { path = "../service", optional = true } -cuprate-types = { path = "../../types" } -cuprate-helper = { path = "../../helper", default-features = false, features = ["constants"] } +cuprate-database = { workspace = true, features = ["heed"] } +cuprate-database-service = { workspace = true } +cuprate-types = { workspace = true } +cuprate-helper = { workspace = true, default-features = false, features = ["constants"] } monero-serai = { workspace = true, features = ["std"] } bytemuck = { workspace = true, features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } bitflags = { workspace = true, features = ["std", "serde", "bytemuck"] } thiserror = { workspace = true } -hex = { workspace = true } +hex = { workspace = true, features = ["std"] } +blake3 = { workspace = true, features = ["std"] } -tower = { workspace = true, optional = true } -rayon = { workspace = true, optional = true } +tower = { workspace = true } +rayon = { workspace = true } 
serde = { workspace = true, optional = true } [dev-dependencies] -cuprate-test-utils = { path = "../../test-utils" } +cuprate-test-utils = { workspace = true } tokio = { workspace = true } tempfile = { workspace = true } hex-literal = { workspace = true } [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/storage/txpool/README.md b/storage/txpool/README.md index 80d3b25b..d14f445b 100644 --- a/storage/txpool/README.md +++ b/storage/txpool/README.md @@ -37,10 +37,6 @@ use cuprate_txpool::{ This ensures the types/traits used from `cuprate_database` are the same ones used by `cuprate_txpool` internally. # Feature flags - -The `service` module requires the `service` feature to be enabled. -See the module for more documentation. - Different database backends are enabled by the feature flags: - `heed` (LMDB) diff --git a/storage/txpool/src/free.rs b/storage/txpool/src/free.rs index d394002b..d0f9a313 100644 --- a/storage/txpool/src/free.rs +++ b/storage/txpool/src/free.rs @@ -3,7 +3,7 @@ //---------------------------------------------------------------------------------------------------- Import use cuprate_database::{ConcreteEnv, Env, EnvInner, InitError, RuntimeError, TxRw}; -use crate::{config::Config, tables::OpenTables}; +use crate::{config::Config, tables::OpenTables, types::TransactionBlobHash}; //---------------------------------------------------------------------------------------------------- Free functions /// Open the txpool database using the passed [`Config`]. @@ -60,3 +60,13 @@ pub fn open(config: Config) -> Result { Ok(env) } + +/// Calculate the transaction blob hash. +/// +/// This value is supposed to be quick to compute just based on the tx-blob without needing to parse the tx. +/// +/// The exact way the hash is calculated is not stable and is subject to change, as such it should not be exposed
+pub fn transaction_blob_hash(tx_blob: &[u8]) -> TransactionBlobHash { + blake3::hash(tx_blob).into() +} diff --git a/storage/txpool/src/lib.rs b/storage/txpool/src/lib.rs index 20001360..53e53ecf 100644 --- a/storage/txpool/src/lib.rs +++ b/storage/txpool/src/lib.rs @@ -4,24 +4,24 @@ clippy::significant_drop_tightening )] +// Used in docs: . +use tower as _; + pub mod config; mod free; pub mod ops; -#[cfg(feature = "service")] pub mod service; pub mod tables; mod tx; pub mod types; pub use config::Config; -pub use free::open; -pub use tx::{BlockTemplateTxEntry, TxEntry}; +pub use free::{open, transaction_blob_hash}; +pub use tx::TxEntry; //re-exports pub use cuprate_database; -// TODO: remove when used. -use tower as _; #[cfg(test)] mod test { use cuprate_test_utils as _; diff --git a/storage/txpool/src/ops.rs b/storage/txpool/src/ops.rs index 50d9ea4a..289a8bbf 100644 --- a/storage/txpool/src/ops.rs +++ b/storage/txpool/src/ops.rs @@ -85,7 +85,7 @@ mod key_images; mod tx_read; mod tx_write; -pub use tx_read::get_transaction_verification_data; +pub use tx_read::{get_transaction_verification_data, in_stem_pool}; pub use tx_write::{add_transaction, remove_transaction}; /// An error that can occur on some tx-write ops. diff --git a/storage/txpool/src/ops/tx_read.rs b/storage/txpool/src/ops/tx_read.rs index db894151..55690750 100644 --- a/storage/txpool/src/ops/tx_read.rs +++ b/storage/txpool/src/ops/tx_read.rs @@ -8,7 +8,10 @@ use monero_serai::transaction::Transaction; use cuprate_database::{DatabaseRo, RuntimeError}; use cuprate_types::{TransactionVerificationData, TxVersion}; -use crate::{tables::Tables, types::TransactionHash}; +use crate::{ + tables::{Tables, TransactionInfos}, + types::{TransactionHash, TxStateFlags}, +}; /// Gets the [`TransactionVerificationData`] of a transaction in the tx-pool, leaving the tx in the pool. 
pub fn get_transaction_verification_data( @@ -34,3 +37,17 @@ pub fn get_transaction_verification_data( cached_verification_state: Mutex::new(cached_verification_state), }) } + +/// Returns `true` if the transaction with the given hash is in the stem pool. +/// +/// # Errors +/// This will return an [`Err`] if the transaction is not in the pool. +pub fn in_stem_pool( + tx_hash: &TransactionHash, + tx_infos: &impl DatabaseRo, +) -> Result { + Ok(tx_infos + .get(tx_hash)? + .flags + .contains(TxStateFlags::STATE_STEM)) +} diff --git a/storage/txpool/src/ops/tx_write.rs b/storage/txpool/src/ops/tx_write.rs index 9885b9c5..dc5ab463 100644 --- a/storage/txpool/src/ops/tx_write.rs +++ b/storage/txpool/src/ops/tx_write.rs @@ -8,6 +8,7 @@ use cuprate_database::{DatabaseRw, RuntimeError, StorableVec}; use cuprate_types::TransactionVerificationData; use crate::{ + free::transaction_blob_hash, ops::{ key_images::{add_tx_key_images, remove_tx_key_images}, TxPoolWriteError, @@ -56,6 +57,12 @@ pub fn add_transaction( let kis_table = tables.spent_key_images_mut(); add_tx_key_images(&tx.tx.prefix().inputs, &tx.tx_hash, kis_table)?; + // Add the blob hash to table 4. + let blob_hash = transaction_blob_hash(&tx.tx_blob); + tables + .known_blob_hashes_mut() + .put(&blob_hash, &tx.tx_hash)?; + Ok(()) } @@ -79,5 +86,9 @@ pub fn remove_transaction( let kis_table = tables.spent_key_images_mut(); remove_tx_key_images(&tx.prefix().inputs, kis_table)?; + // Remove the blob hash from table 4. + let blob_hash = transaction_blob_hash(&tx_blob); + tables.known_blob_hashes_mut().delete(&blob_hash)?; + Ok(()) } diff --git a/storage/txpool/src/service.rs b/storage/txpool/src/service.rs index 91a7060c..a82de5bf 100644 --- a/storage/txpool/src/service.rs +++ b/storage/txpool/src/service.rs @@ -10,8 +10,6 @@ //! //! The system is managed by this crate, and only requires [`init`] by the user. //! -//! This module must be enabled with the `service` feature. -//! //! ## Handles //! 
The 2 handles to the database are: //! - [`TxpoolReadHandle`] @@ -42,7 +40,7 @@ //! To interact with the database (whether reading or writing data), //! a `Request` can be sent using one of the above handles. //! -//! Both the handles implement `tower::Service`, so they can be [`tower::Service::call`]ed. +//! Both the handles implement [`tower::Service`], so they can be [`tower::Service::call`]ed. //! //! An `async`hronous channel will be returned from the call. //! This channel can be `.await`ed upon to (eventually) receive diff --git a/storage/txpool/src/service/interface.rs b/storage/txpool/src/service/interface.rs index 931154e1..a27c6309 100644 --- a/storage/txpool/src/service/interface.rs +++ b/storage/txpool/src/service/interface.rs @@ -1,33 +1,39 @@ //! Tx-pool [`service`](super) interface. //! //! This module contains `cuprate_txpool`'s [`tower::Service`] request and response enums. -use std::sync::Arc; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use cuprate_types::TransactionVerificationData; use crate::{ - tx::{BlockTemplateTxEntry, TxEntry}, - types::TransactionHash, + tx::TxEntry, + types::{KeyImage, TransactionBlobHash, TransactionHash}, }; //---------------------------------------------------------------------------------------------------- TxpoolReadRequest /// The transaction pool [`tower::Service`] read request type. +#[derive(Clone)] pub enum TxpoolReadRequest { - /// A request for the blob (raw bytes) of a transaction with the given hash. + /// Get the blob (raw bytes) of a transaction with the given hash. TxBlob(TransactionHash), - /// A request for the [`TransactionVerificationData`] of a transaction in the tx pool. + /// Get the [`TransactionVerificationData`] of a transaction in the tx pool. TxVerificationData(TransactionHash), + /// Filter (remove) all **known** transactions from the set. + /// + /// The hash is **not** the transaction hash, it is the hash of the serialized tx-blob. 
+ FilterKnownTxBlobHashes(HashSet), + + /// Get some transactions for an incoming block. + TxsForBlock(Vec), + /// Get information on all transactions in the pool. Backlog, - /// Get information on all transactions in - /// the pool for block template purposes. - /// - /// This is only slightly different to [`TxpoolReadRequest::Backlog`]. - BlockTemplateBacklog, - /// Get the number of transactions in the pool. Size { /// If this is [`true`], the size returned will @@ -40,26 +46,34 @@ pub enum TxpoolReadRequest { /// The transaction pool [`tower::Service`] read response type. #[expect(clippy::large_enum_variant)] pub enum TxpoolReadResponse { - /// Response to [`TxpoolReadRequest::TxBlob`]. - /// - /// The inner value is the raw bytes of a transaction. - // TODO: use bytes::Bytes. - TxBlob(Vec), + /// The response for [`TxpoolReadRequest::TxBlob`]. + TxBlob { tx_blob: Vec, state_stem: bool }, - /// Response to [`TxpoolReadRequest::TxVerificationData`]. + /// The response for [`TxpoolReadRequest::TxVerificationData`]. TxVerificationData(TransactionVerificationData), + /// The response for [`TxpoolReadRequest::FilterKnownTxBlobHashes`]. + FilterKnownTxBlobHashes { + /// The blob hashes that are unknown. + unknown_blob_hashes: HashSet, + /// The tx hashes of the blob hashes that were known but were in the stem pool. + stem_pool_hashes: Vec, + }, + + /// The response for [`TxpoolReadRequest::TxsForBlock`]. + TxsForBlock { + /// The txs we had in the txpool. + txs: HashMap<[u8; 32], TransactionVerificationData>, + /// The indexes of the missing txs. + missing: Vec, + }, + /// Response to [`TxpoolReadRequest::Backlog`]. /// /// The inner [`Vec`] contains information on all /// the transactions currently in the pool. Backlog(Vec), - /// Response to [`TxpoolReadRequest::BlockTemplateBacklog`]. - /// - /// The inner [`Vec`] contains information on transactions - BlockTemplateBacklog(Vec), - /// Response to [`TxpoolReadRequest::Size`]. 
/// /// The inner value is the amount of @@ -84,9 +98,17 @@ pub enum TxpoolWriteRequest { }, /// Remove a transaction with the given hash from the pool. - /// - /// Returns [`TxpoolWriteResponse::Ok`]. RemoveTransaction(TransactionHash), + + /// Promote a transaction from the stem pool to the fluff pool. + /// If the tx is already in the fluff pool this does nothing. + Promote(TransactionHash), + + /// Tell the tx-pool about a new block. + NewBlock { + /// The spent key images in the new block. + spent_key_images: Vec, + }, } //---------------------------------------------------------------------------------------------------- TxpoolWriteResponse @@ -95,6 +117,8 @@ pub enum TxpoolWriteRequest { pub enum TxpoolWriteResponse { /// Response to: /// - [`TxpoolWriteRequest::RemoveTransaction`] + /// - [`TxpoolWriteRequest::Promote`] + /// - [`TxpoolWriteRequest::NewBlock`] Ok, /// Response to [`TxpoolWriteRequest::AddTransaction`]. diff --git a/storage/txpool/src/service/read.rs b/storage/txpool/src/service/read.rs index 229bf2be..0de1e7d0 100644 --- a/storage/txpool/src/service/read.rs +++ b/storage/txpool/src/service/read.rs @@ -4,22 +4,24 @@ clippy::unnecessary_wraps, reason = "TODO: finish implementing the signatures from " )] - -use std::sync::Arc; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use rayon::ThreadPool; -use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner}; +use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError}; use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads}; use crate::{ - ops::get_transaction_verification_data, + ops::{get_transaction_verification_data, in_stem_pool}, service::{ interface::{TxpoolReadRequest, TxpoolReadResponse}, types::{ReadResponseResult, TxpoolReadHandle}, }, - tables::{OpenTables, TransactionBlobs}, - types::TransactionHash, + tables::{KnownBlobHashes, OpenTables, TransactionBlobs, TransactionInfos}, + types::{TransactionBlobHash, 
TransactionHash}, }; // TODO: update the docs here @@ -57,7 +59,6 @@ fn init_read_service_with_pool(env: Arc, pool: Arc) -> /// 1. `Request` is mapped to a handler function /// 2. Handler function is called /// 3. [`TxpoolReadResponse`] is returned -#[expect(clippy::needless_pass_by_value)] fn map_request( env: &ConcreteEnv, // Access to the database request: TxpoolReadRequest, // The request we must fulfill @@ -65,8 +66,11 @@ fn map_request( match request { TxpoolReadRequest::TxBlob(tx_hash) => tx_blob(env, &tx_hash), TxpoolReadRequest::TxVerificationData(tx_hash) => tx_verification_data(env, &tx_hash), + TxpoolReadRequest::FilterKnownTxBlobHashes(blob_hashes) => { + filter_known_tx_blob_hashes(env, blob_hashes) + } + TxpoolReadRequest::TxsForBlock(txs_needed) => txs_for_block(env, txs_needed), TxpoolReadRequest::Backlog => backlog(env), - TxpoolReadRequest::BlockTemplateBacklog => block_template_backlog(env), TxpoolReadRequest::Size { include_sensitive_txs, } => size(env, include_sensitive_txs), @@ -97,10 +101,14 @@ fn tx_blob(env: &ConcreteEnv, tx_hash: &TransactionHash) -> ReadResponseResult { let tx_ro = inner_env.tx_ro()?; let tx_blobs_table = inner_env.open_db_ro::(&tx_ro)?; + let tx_infos_table = inner_env.open_db_ro::(&tx_ro)?; - tx_blobs_table - .get(tx_hash) - .map(|blob| TxpoolReadResponse::TxBlob(blob.0)) + let tx_blob = tx_blobs_table.get(tx_hash)?.0; + + Ok(TxpoolReadResponse::TxBlob { + tx_blob, + state_stem: in_stem_pool(tx_hash, &tx_infos_table)?, + }) } /// [`TxpoolReadRequest::TxVerificationData`]. @@ -114,18 +122,85 @@ fn tx_verification_data(env: &ConcreteEnv, tx_hash: &TransactionHash) -> ReadRes get_transaction_verification_data(tx_hash, &tables).map(TxpoolReadResponse::TxVerificationData) } +/// [`TxpoolReadRequest::FilterKnownTxBlobHashes`]. 
+fn filter_known_tx_blob_hashes( + env: &ConcreteEnv, + mut blob_hashes: HashSet, +) -> ReadResponseResult { + let inner_env = env.env_inner(); + let tx_ro = inner_env.tx_ro()?; + + let tx_blob_hashes = inner_env.open_db_ro::(&tx_ro)?; + let tx_infos = inner_env.open_db_ro::(&tx_ro)?; + + let mut stem_pool_hashes = Vec::new(); + + // A closure that returns `true` if a tx with a certain blob hash is unknown. + // This also fills in `stem_pool_hashes`. + let mut tx_unknown = |blob_hash| -> Result { + match tx_blob_hashes.get(&blob_hash) { + Ok(tx_hash) => { + if in_stem_pool(&tx_hash, &tx_infos)? { + stem_pool_hashes.push(tx_hash); + } + Ok(false) + } + Err(RuntimeError::KeyNotFound) => Ok(true), + Err(e) => Err(e), + } + }; + + let mut err = None; + blob_hashes.retain(|blob_hash| match tx_unknown(*blob_hash) { + Ok(res) => res, + Err(e) => { + err = Some(e); + false + } + }); + + if let Some(e) = err { + return Err(e); + } + + Ok(TxpoolReadResponse::FilterKnownTxBlobHashes { + unknown_blob_hashes: blob_hashes, + stem_pool_hashes, + }) +} + +/// [`TxpoolReadRequest::TxsForBlock`]. +fn txs_for_block(env: &ConcreteEnv, txs: Vec) -> ReadResponseResult { + let inner_env = env.env_inner(); + let tx_ro = inner_env.tx_ro()?; + + let tables = inner_env.open_tables(&tx_ro)?; + + let mut missing_tx_indexes = Vec::with_capacity(txs.len()); + let mut txs_verification_data = HashMap::with_capacity(txs.len()); + + for (i, tx_hash) in txs.into_iter().enumerate() { + match get_transaction_verification_data(&tx_hash, &tables) { + Ok(tx) => { + txs_verification_data.insert(tx_hash, tx); + } + Err(RuntimeError::KeyNotFound) => missing_tx_indexes.push(i), + Err(e) => return Err(e), + } + } + + Ok(TxpoolReadResponse::TxsForBlock { + txs: txs_verification_data, + missing: missing_tx_indexes, + }) +} + +/// [`TxpoolReadRequest::Backlog`]. #[inline] fn backlog(env: &ConcreteEnv) -> ReadResponseResult { Ok(TxpoolReadResponse::Backlog(todo!())) } -/// [`TxpoolReadRequest::BlockTemplateBacklog`].
-#[inline] -fn block_template_backlog(env: &ConcreteEnv) -> ReadResponseResult { - Ok(TxpoolReadResponse::BlockTemplateBacklog(todo!())) -} - /// [`TxpoolReadRequest::Size`]. #[inline] fn size(env: &ConcreteEnv, include_sensitive_txs: bool) -> ReadResponseResult { diff --git a/storage/txpool/src/service/write.rs b/storage/txpool/src/service/write.rs index 8a3b1bf7..13ab81fa 100644 --- a/storage/txpool/src/service/write.rs +++ b/storage/txpool/src/service/write.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use cuprate_database::{ConcreteEnv, Env, EnvInner, RuntimeError, TxRw}; +use cuprate_database::{ConcreteEnv, DatabaseRo, DatabaseRw, Env, EnvInner, RuntimeError, TxRw}; use cuprate_database_service::DatabaseWriteHandle; use cuprate_types::TransactionVerificationData; @@ -10,8 +10,8 @@ use crate::{ interface::{TxpoolWriteRequest, TxpoolWriteResponse}, types::TxpoolWriteHandle, }, - tables::OpenTables, - types::TransactionHash, + tables::{OpenTables, Tables, TransactionInfos}, + types::{KeyImage, TransactionHash, TxStateFlags}, }; //---------------------------------------------------------------------------------------------------- init_write_service @@ -31,6 +31,8 @@ fn handle_txpool_request( add_transaction(env, tx, *state_stem) } TxpoolWriteRequest::RemoveTransaction(tx_hash) => remove_transaction(env, tx_hash), + TxpoolWriteRequest::Promote(tx_hash) => promote(env, tx_hash), + TxpoolWriteRequest::NewBlock { spent_key_images } => new_block(env, spent_key_images), } } @@ -101,3 +103,68 @@ fn remove_transaction( TxRw::commit(tx_rw)?; Ok(TxpoolWriteResponse::Ok) } + +/// [`TxpoolWriteRequest::Promote`] +fn promote( + env: &ConcreteEnv, + tx_hash: &TransactionHash, +) -> Result { + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw()?; + + let res = || { + let mut tx_infos = env_inner.open_db_rw::(&tx_rw)?; + + tx_infos.update(tx_hash, |mut info| { + info.flags.remove(TxStateFlags::STATE_STEM); + Some(info) + }) + }; + + if let Err(e) = res() { + // error 
promoting the tx, abort the DB transaction. + TxRw::abort(tx_rw) + .expect("could not maintain database atomicity by aborting write transaction"); + + return Err(e); + } + + TxRw::commit(tx_rw)?; + Ok(TxpoolWriteResponse::Ok) +} + +/// [`TxpoolWriteRequest::NewBlock`] +fn new_block( + env: &ConcreteEnv, + spent_key_images: &[KeyImage], +) -> Result { + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw()?; + + // FIXME: use try blocks once stable. + let result = || { + let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?; + + // Remove all txs which spend key images that were spent in the new block. + for key_image in spent_key_images { + match tables_mut + .spent_key_images() + .get(key_image) + .and_then(|tx_hash| ops::remove_transaction(&tx_hash, &mut tables_mut)) + { + Ok(()) | Err(RuntimeError::KeyNotFound) => (), + Err(e) => return Err(e), + } + } + + Ok(()) + }; + + if let Err(e) = result() { + TxRw::abort(tx_rw)?; + return Err(e); + } + + TxRw::commit(tx_rw)?; + Ok(TxpoolWriteResponse::Ok) +} diff --git a/storage/txpool/src/tables.rs b/storage/txpool/src/tables.rs index dbb686ae..1f2d4490 100644 --- a/storage/txpool/src/tables.rs +++ b/storage/txpool/src/tables.rs @@ -16,7 +16,9 @@ //! accessing _all_ tables defined here at once. use cuprate_database::{define_tables, StorableVec}; -use crate::types::{KeyImage, RawCachedVerificationState, TransactionHash, TransactionInfo}; +use crate::types::{ + KeyImage, RawCachedVerificationState, TransactionBlobHash, TransactionHash, TransactionInfo, +}; define_tables! { /// Serialized transaction blobs. @@ -41,5 +43,9 @@ define_tables! { /// /// This table contains the spent key images from all transactions in the pool. 3 => SpentKeyImages, - KeyImage => TransactionHash + KeyImage => TransactionHash, + + /// Transaction blob hashes that are in the pool. 
+ 4 => KnownBlobHashes, + TransactionBlobHash => TransactionHash, } diff --git a/storage/txpool/src/tx.rs b/storage/txpool/src/tx.rs index 03cc48c4..29afae8c 100644 --- a/storage/txpool/src/tx.rs +++ b/storage/txpool/src/tx.rs @@ -5,6 +5,8 @@ /// Used in [`TxpoolReadResponse::Backlog`](crate::service::interface::TxpoolReadResponse::Backlog). #[derive(Copy, Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Hash)] pub struct TxEntry { + /// The transaction's ID (hash). + pub id: [u8; 32], /// The transaction's weight. pub weight: u64, /// The transaction's fee. @@ -12,17 +14,3 @@ pub struct TxEntry { /// How long the transaction has been in the pool. pub time_in_pool: std::time::Duration, } - -/// Data about a transaction in the pool -/// for use in a block template. -/// -/// Used in [`TxpoolReadResponse::BlockTemplateBacklog`](crate::service::interface::TxpoolReadResponse::BlockTemplateBacklog). -#[derive(Copy, Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Hash)] -pub struct BlockTemplateTxEntry { - /// The transaction's ID (hash). - pub id: [u8; 32], - /// The transaction's weight. - pub weight: u64, - /// The transaction's fee. - pub fee: u64, -} diff --git a/storage/txpool/src/types.rs b/storage/txpool/src/types.rs index 4da2d0fe..2acb819e 100644 --- a/storage/txpool/src/types.rs +++ b/storage/txpool/src/types.rs @@ -6,7 +6,6 @@ //! //! use bytemuck::{Pod, Zeroable}; - use monero_serai::transaction::Timelock; use cuprate_types::{CachedVerificationState, HardFork}; @@ -17,6 +16,9 @@ pub type KeyImage = [u8; 32]; /// A transaction hash. pub type TransactionHash = [u8; 32]; +/// A transaction blob hash. +pub type TransactionBlobHash = [u8; 32]; + bitflags::bitflags! { /// Flags representing the state of the transaction in the pool. 
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index abf7ee44..4eb56844 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -6,10 +6,10 @@ license = "MIT" authors = ["Boog900", "hinto-janai"] [dependencies] -cuprate-types = { path = "../types" } -cuprate-helper = { path = "../helper", features = ["map", "tx"] } -cuprate-wire = { path = "../net/wire" } -cuprate-p2p-core = { path = "../p2p/p2p-core", features = ["borsh"] } +cuprate-types = { workspace = true } +cuprate-helper = { workspace = true, features = ["map", "tx"] } +cuprate-wire = { workspace = true } +cuprate-p2p-core = { workspace = true, features = ["borsh"] } hex = { workspace = true } hex-literal = { workspace = true } @@ -31,4 +31,4 @@ hex = { workspace = true } pretty_assertions = { workspace = true } [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/types/Cargo.toml b/types/Cargo.toml index 8ac6b25f..e1ffb196 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -12,21 +12,23 @@ keywords = ["cuprate", "types"] default = ["blockchain", "epee", "serde", "json", "hex"] blockchain = [] epee = ["dep:cuprate-epee-encoding"] -serde = ["dep:serde"] +serde = ["dep:serde", "hex"] proptest = ["dep:proptest", "dep:proptest-derive"] json = ["hex", "dep:cuprate-helper"] -hex = ["dep:hex"] +# We sadly have no choice but to enable serde here as otherwise we will get warnings from the `hex` dep being unused. +# This isn't too bad as `HexBytes` only makes sense with serde anyway. 
+hex = ["serde", "dep:hex"] [dependencies] -cuprate-epee-encoding = { path = "../net/epee-encoding", optional = true } -cuprate-helper = { path = "../helper", optional = true, features = ["cast"] } -cuprate-fixed-bytes = { path = "../net/fixed-bytes" } +cuprate-epee-encoding = { workspace = true, optional = true, features = ["std"] } +cuprate-helper = { workspace = true, optional = true, features = ["cast"] } +cuprate-fixed-bytes = { workspace = true, features = ["std", "serde"] } bytes = { workspace = true } curve25519-dalek = { workspace = true } monero-serai = { workspace = true } hex = { workspace = true, features = ["serde", "alloc"], optional = true } -serde = { workspace = true, features = ["derive"], optional = true } +serde = { workspace = true, features = ["std", "derive"], optional = true } strum = { workspace = true, features = ["derive"] } thiserror = { workspace = true } @@ -39,4 +41,4 @@ pretty_assertions = { workspace = true } serde_json = { workspace = true, features = ["std"] } [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/types/src/address_type.rs b/types/src/address_type.rs new file mode 100644 index 00000000..743902da --- /dev/null +++ b/types/src/address_type.rs @@ -0,0 +1,147 @@ +//! Types of network addresses; used in P2P. + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "epee")] +use cuprate_epee_encoding::{ + error, + macros::bytes::{Buf, BufMut}, + EpeeValue, Marker, +}; + +use strum::{ + AsRefStr, Display, EnumCount, EnumIs, EnumString, FromRepr, IntoStaticStr, VariantArray, +}; + +/// An enumeration of address types. +/// +/// Used in `cuprate_p2p` and `cuprate_types` +/// +/// Original definition: +/// - +/// +/// # Serde +/// This type's `serde` implementation (de)serializes from a [`u8`]. 
+/// +/// ```rust +/// use cuprate_types::AddressType as A; +/// use serde_json::{to_string, from_str}; +/// +/// assert_eq!(from_str::(&"0").unwrap(), A::Invalid); +/// assert_eq!(from_str::(&"1").unwrap(), A::Ipv4); +/// assert_eq!(from_str::(&"2").unwrap(), A::Ipv6); +/// assert_eq!(from_str::(&"3").unwrap(), A::I2p); +/// assert_eq!(from_str::(&"4").unwrap(), A::Tor); +/// +/// assert_eq!(to_string(&A::Invalid).unwrap(), "0"); +/// assert_eq!(to_string(&A::Ipv4).unwrap(), "1"); +/// assert_eq!(to_string(&A::Ipv6).unwrap(), "2"); +/// assert_eq!(to_string(&A::I2p).unwrap(), "3"); +/// assert_eq!(to_string(&A::Tor).unwrap(), "4"); +/// ``` +#[derive( + Copy, + Clone, + Default, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + AsRefStr, + Display, + EnumCount, + EnumIs, + EnumString, + FromRepr, + IntoStaticStr, + VariantArray, +)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", serde(untagged, try_from = "u8", into = "u8"))] +#[repr(u8)] +pub enum AddressType { + #[default] + Invalid, + Ipv4, + Ipv6, + I2p, + Tor, +} + +impl AddressType { + /// Convert [`Self`] to a [`u8`]. + /// + /// ```rust + /// use cuprate_types::AddressType as A; + /// + /// assert_eq!(A::Invalid.to_u8(), 0); + /// assert_eq!(A::Ipv4.to_u8(), 1); + /// assert_eq!(A::Ipv6.to_u8(), 2); + /// assert_eq!(A::I2p.to_u8(), 3); + /// assert_eq!(A::Tor.to_u8(), 4); + /// ``` + pub const fn to_u8(self) -> u8 { + self as u8 + } + + /// Convert a [`u8`] to a [`Self`]. + /// + /// # Errors + /// This returns [`None`] if `u > 4`. 
+ /// + /// ```rust + /// use cuprate_types::AddressType as A; + /// + /// assert_eq!(A::from_u8(0), Some(A::Invalid)); + /// assert_eq!(A::from_u8(1), Some(A::Ipv4)); + /// assert_eq!(A::from_u8(2), Some(A::Ipv6)); + /// assert_eq!(A::from_u8(3), Some(A::I2p)); + /// assert_eq!(A::from_u8(4), Some(A::Tor)); + /// assert_eq!(A::from_u8(5), None); + /// ``` + pub const fn from_u8(u: u8) -> Option { + Some(match u { + 0 => Self::Invalid, + 1 => Self::Ipv4, + 2 => Self::Ipv6, + 3 => Self::I2p, + 4 => Self::Tor, + _ => return None, + }) + } +} + +impl From for u8 { + fn from(value: AddressType) -> Self { + value.to_u8() + } +} + +impl TryFrom for AddressType { + type Error = u8; + fn try_from(value: u8) -> Result { + match Self::from_u8(value) { + Some(s) => Ok(s), + None => Err(value), + } + } +} + +#[cfg(feature = "epee")] +impl EpeeValue for AddressType { + const MARKER: Marker = u8::MARKER; + + fn read(r: &mut B, marker: &Marker) -> error::Result { + let u = u8::read(r, marker)?; + Self::from_u8(u).ok_or(error::Error::Format("u8 was greater than 4")) + } + + fn write(self, w: &mut B) -> error::Result<()> { + let u = self.to_u8(); + u8::write(u, w)?; + Ok(()) + } +} diff --git a/types/src/connection_state.rs b/types/src/connection_state.rs new file mode 100644 index 00000000..69b8ed64 --- /dev/null +++ b/types/src/connection_state.rs @@ -0,0 +1,148 @@ +//! [`ConnectionState`]. + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "epee")] +use cuprate_epee_encoding::{ + error, + macros::bytes::{Buf, BufMut}, + EpeeValue, Marker, +}; + +use strum::{ + AsRefStr, Display, EnumCount, EnumIs, EnumString, FromRepr, IntoStaticStr, VariantArray, +}; + +/// An enumeration of P2P connection states. +/// +/// Used in `cuprate_p2p` and `cuprate_rpc_types`. +/// +/// Original definition: +/// - +/// +/// # Serde +/// This type's `serde` implementation depends on `snake_case`. 
+/// +/// ```rust +/// use cuprate_types::ConnectionState as C; +/// use serde_json::to_string; +/// +/// assert_eq!(to_string(&C::BeforeHandshake).unwrap(), r#""before_handshake""#); +/// assert_eq!(to_string(&C::Synchronizing).unwrap(), r#""synchronizing""#); +/// assert_eq!(to_string(&C::Standby).unwrap(), r#""standby""#); +/// assert_eq!(to_string(&C::Idle).unwrap(), r#""idle""#); +/// assert_eq!(to_string(&C::Normal).unwrap(), r#""normal""#); +/// +/// assert_eq!(C::BeforeHandshake.to_string(), "before_handshake"); +/// assert_eq!(C::Synchronizing.to_string(), "synchronizing"); +/// assert_eq!(C::Standby.to_string(), "standby"); +/// assert_eq!(C::Idle.to_string(), "idle"); +/// assert_eq!(C::Normal.to_string(), "normal"); +/// ``` +#[derive( + Copy, + Clone, + Default, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + AsRefStr, + Display, + EnumCount, + EnumIs, + EnumString, + FromRepr, + IntoStaticStr, + VariantArray, +)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "snake_case"))] // cuprate-rpc-types depends on snake_case +#[strum(serialize_all = "snake_case")] +#[repr(u8)] +pub enum ConnectionState { + BeforeHandshake, + Synchronizing, + Standby, + Idle, + #[default] + Normal, +} + +impl ConnectionState { + /// Convert [`Self`] to a [`u8`]. + /// + /// ```rust + /// use cuprate_types::ConnectionState as C; + /// + /// assert_eq!(C::BeforeHandshake.to_u8(), 0); + /// assert_eq!(C::Synchronizing.to_u8(), 1); + /// assert_eq!(C::Standby.to_u8(), 2); + /// assert_eq!(C::Idle.to_u8(), 3); + /// assert_eq!(C::Normal.to_u8(), 4); + /// ``` + pub const fn to_u8(self) -> u8 { + self as u8 + } + + /// Convert a [`u8`] to a [`Self`]. + /// + /// # Errors + /// This returns [`None`] if `u > 4`. 
+ /// + /// ```rust + /// use cuprate_types::ConnectionState as C; + /// + /// assert_eq!(C::from_u8(0), Some(C::BeforeHandshake)); + /// assert_eq!(C::from_u8(1), Some(C::Synchronizing)); + /// assert_eq!(C::from_u8(2), Some(C::Standby)); + /// assert_eq!(C::from_u8(3), Some(C::Idle)); + /// assert_eq!(C::from_u8(4), Some(C::Normal)); + /// assert_eq!(C::from_u8(5), None); + /// ``` + pub const fn from_u8(u: u8) -> Option { + Some(match u { + 0 => Self::BeforeHandshake, + 1 => Self::Synchronizing, + 2 => Self::Standby, + 3 => Self::Idle, + 4 => Self::Normal, + _ => return None, + }) + } +} + +impl From for u8 { + fn from(value: ConnectionState) -> Self { + value.to_u8() + } +} + +impl TryFrom for ConnectionState { + type Error = u8; + fn try_from(value: u8) -> Result { + match Self::from_u8(value) { + Some(s) => Ok(s), + None => Err(value), + } + } +} + +#[cfg(feature = "epee")] +impl EpeeValue for ConnectionState { + const MARKER: Marker = u8::MARKER; + + fn read(r: &mut B, marker: &Marker) -> error::Result { + let u = u8::read(r, marker)?; + Self::from_u8(u).ok_or(error::Error::Format("u8 was greater than 4")) + } + + fn write(self, w: &mut B) -> error::Result<()> { + let u = self.to_u8(); + u8::write(u, w)?; + Ok(()) + } +} diff --git a/types/src/hex.rs b/types/src/hex.rs index 34da09d8..de4fc816 100644 --- a/types/src/hex.rs +++ b/types/src/hex.rs @@ -22,6 +22,7 @@ pub struct HexBytes( #[cfg_attr(feature = "serde", serde(with = "hex::serde"))] pub [u8; N], ); +#[cfg(feature = "serde")] impl<'de, const N: usize> Deserialize<'de> for HexBytes where [u8; N]: hex::FromHex, diff --git a/types/src/json/block.rs b/types/src/json/block.rs index 1397f6fd..88f134d5 100644 --- a/types/src/json/block.rs +++ b/types/src/json/block.rs @@ -51,17 +51,17 @@ impl From for Block { /// [`Block::miner_tx`]. 
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[serde(untagged)] +#[cfg_attr(feature = "serde", serde(untagged))] pub enum MinerTransaction { V1 { /// This field is [flattened](https://serde.rs/field-attrs.html#flatten). - #[serde(flatten)] + #[cfg_attr(feature = "serde", serde(flatten))] prefix: MinerTransactionPrefix, signatures: [(); 0], }, V2 { /// This field is [flattened](https://serde.rs/field-attrs.html#flatten). - #[serde(flatten)] + #[cfg_attr(feature = "serde", serde(flatten))] prefix: MinerTransactionPrefix, rct_signatures: MinerTransactionRctSignatures, }, diff --git a/types/src/json/output.rs b/types/src/json/output.rs index 050132ae..182618cd 100644 --- a/types/src/json/output.rs +++ b/types/src/json/output.rs @@ -20,7 +20,7 @@ pub struct Output { /// [`Output::target`]. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[serde(untagged)] +#[cfg_attr(feature = "serde", serde(untagged))] pub enum Target { Key { key: HexBytes<32> }, TaggedKey { tagged_key: TaggedKey }, diff --git a/types/src/json/tx.rs b/types/src/json/tx.rs index 46ec827b..a18dc89a 100644 --- a/types/src/json/tx.rs +++ b/types/src/json/tx.rs @@ -24,17 +24,17 @@ use crate::{ /// - [`/get_transaction_pool` -> `tx_json`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_transaction_pool) #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[serde(untagged)] +#[cfg_attr(feature = "serde", serde(untagged))] pub enum Transaction { V1 { /// This field is [flattened](https://serde.rs/field-attrs.html#flatten). - #[serde(flatten)] + #[cfg_attr(feature = "serde", serde(flatten))] prefix: TransactionPrefix, signatures: Vec>, }, V2 { /// This field is [flattened](https://serde.rs/field-attrs.html#flatten). 
- #[serde(flatten)] + #[cfg_attr(feature = "serde", serde(flatten))] prefix: TransactionPrefix, rct_signatures: RctSignatures, /// This field is [`Some`] if [`Self::V2::rct_signatures`] diff --git a/types/src/lib.rs b/types/src/lib.rs index fa35153b..a5a04f9d 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -9,12 +9,16 @@ // // Documentation for each module is located in the respective file. +mod address_type; mod block_complete_entry; +mod connection_state; mod hard_fork; mod transaction_verification_data; mod types; +pub use address_type::AddressType; pub use block_complete_entry::{BlockCompleteEntry, PrunedTxBlobEntry, TransactionBlobs}; +pub use connection_state::ConnectionState; pub use hard_fork::{HardFork, HardForkError}; pub use transaction_verification_data::{ CachedVerificationState, TransactionVerificationData, TxVersion,