diff --git a/Cargo.lock b/Cargo.lock index 543eea96..eaf5f994 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -527,11 +527,13 @@ dependencies = [ "multiexp", "proptest", "proptest-derive", + "rand", "randomx-rs", "rayon", "thiserror", "thread_local", "tokio", + "tokio-test", "tokio-util", "tower", "tracing", @@ -769,6 +771,8 @@ version = "0.0.0" dependencies = [ "cuprate-epee-encoding", "cuprate-fixed-bytes", + "cuprate-json-rpc", + "cuprate-test-utils", "cuprate-types", "monero-serai", "paste", diff --git a/books/architecture/src/SUMMARY.md b/books/architecture/src/SUMMARY.md index 74ecda68..3a8b3519 100644 --- a/books/architecture/src/SUMMARY.md +++ b/books/architecture/src/SUMMARY.md @@ -117,8 +117,8 @@ --- - [⚪️ Appendix](appendix/intro.md) + - [🟢 Crates](appendix/crates.md) - [🔴 Contributing](appendix/contributing.md) - - [🔴 Crate documentation](appendix/crate-documentation.md) - [🔴 Build targets](appendix/build-targets.md) - [🔴 Protocol book](appendix/protocol-book.md) - [⚪️ User book](appendix/user-book.md) \ No newline at end of file diff --git a/books/architecture/src/appendix/crate-documentation.md b/books/architecture/src/appendix/crate-documentation.md deleted file mode 100644 index 0f4d96d8..00000000 --- a/books/architecture/src/appendix/crate-documentation.md +++ /dev/null @@ -1,4 +0,0 @@ -# Crate documentation -```bash -cargo doc --package $CUPRATE_CRATE -``` \ No newline at end of file diff --git a/books/architecture/src/appendix/crates.md b/books/architecture/src/appendix/crates.md new file mode 100644 index 00000000..224e678b --- /dev/null +++ b/books/architecture/src/appendix/crates.md @@ -0,0 +1,61 @@ +# Crates +This is an index of all of Cuprate's in-house crates it uses and maintains. + +They are categorized into groups. + +Crate documentation for each crate can be found by clicking the crate name or by visiting . 
Documentation can also be built manually by running this at the root of the `cuprate` repository: +```bash +cargo doc --package $CRATE +``` +For example, this will generate and open `cuprate-blockchain` documentation: +```bash +cargo doc --open --package cuprate-blockchain +``` + +## Consensus +| Crate | In-tree path | Purpose | +|-------|--------------|---------| +| [`cuprate-consensus`](https://doc.cuprate.org/cuprate_consensus) | [`consensus/`](https://github.com/Cuprate/cuprate/tree/main/consensus) | TODO +| [`cuprate-consensus-rules`](https://doc.cuprate.org/cuprate_consensus_rules) | [`consensus/rules/`](https://github.com/Cuprate/cuprate/tree/main/consensus-rules) | TODO +| [`cuprate-fast-sync`](https://doc.cuprate.org/cuprate_fast_sync) | [`consensus/fast-sync/`](https://github.com/Cuprate/cuprate/tree/main/consensus/fast-sync) | Fast block synchronization + +## Networking +| Crate | In-tree path | Purpose | +|-------|--------------|---------| +| [`cuprate-epee-encoding`](https://doc.cuprate.org/cuprate_epee_encoding) | [`net/epee-encoding/`](https://github.com/Cuprate/cuprate/tree/main/net/epee-encoding) | Epee (de)serialization +| [`cuprate-fixed-bytes`](https://doc.cuprate.org/cuprate_fixed_bytes) | [`net/fixed-bytes/`](https://github.com/Cuprate/cuprate/tree/main/net/fixed-bytes) | Fixed byte containers backed by `byte::Byte` +| [`cuprate-levin`](https://doc.cuprate.org/cuprate_levin) | [`net/levin/`](https://github.com/Cuprate/cuprate/tree/main/net/levin) | Levin bucket protocol implementation +| [`cuprate-wire`](https://doc.cuprate.org/cuprate_wire) | [`net/wire/`](https://github.com/Cuprate/cuprate/tree/main/net/wire) | TODO + +## P2P +| Crate | In-tree path | Purpose | +|-------|--------------|---------| +| [`cuprate-address-book`](https://doc.cuprate.org/cuprate_address_book) | [`p2p/address-book/`](https://github.com/Cuprate/cuprate/tree/main/p2p/address-book) | TODO +| [`cuprate-async-buffer`](https://doc.cuprate.org/cuprate_async_buffer) | [`p2p/async-buffer/`](https://github.com/Cuprate/cuprate/tree/main/p2p/async-buffer) | A bounded SPSC, FIFO, asynchronous buffer that supports arbitrary weights for values +| [`cuprate-dandelion-tower`](https://doc.cuprate.org/cuprate_dandelion_tower) | [`p2p/dandelion-tower/`](https://github.com/Cuprate/cuprate/tree/main/p2p/dandelion-tower) | TODO +| [`cuprate-p2p`](https://doc.cuprate.org/cuprate_p2p) | [`p2p/p2p/`](https://github.com/Cuprate/cuprate/tree/main/p2p/p2p) | TODO +| [`cuprate-p2p-core`](https://doc.cuprate.org/cuprate_p2p_core) | [`p2p/p2p-core/`](https://github.com/Cuprate/cuprate/tree/main/p2p/p2p-core) | TODO + +## Storage +| Crate | In-tree path | Purpose | +|-------|--------------|---------| +| [`cuprate-blockchain`](https://doc.cuprate.org/cuprate_blockchain) | [`storage/blockchain/`](https://github.com/Cuprate/cuprate/tree/main/storage/blockchain) | Blockchain database built on-top of `cuprate-database` & `cuprate-database-service` +| [`cuprate-database`](https://doc.cuprate.org/cuprate_database) | [`storage/database/`](https://github.com/Cuprate/cuprate/tree/main/storage/database) | Pure database abstraction +| [`cuprate-database-service`](https://doc.cuprate.org/cuprate_database_service) | [`storage/database-service/`](https://github.com/Cuprate/cuprate/tree/main/storage/database-service) | `tower::Service` + thread-pool abstraction built on-top of `cuprate-database` +| [`cuprate-txpool`](https://doc.cuprate.org/cuprate_txpool) | [`storage/txpool/`](https://github.com/Cuprate/cuprate/tree/main/storage/txpool) 
| Transaction pool database built on-top of `cuprate-database` & `cuprate-database-service` + +## RPC +| Crate | In-tree path | Purpose | +|-------|--------------|---------| +| [`cuprate-json-rpc`](https://doc.cuprate.org/cuprate_json_rpc) | [`rpc/json-rpc/`](https://github.com/Cuprate/cuprate/tree/main/rpc/json-rpc) | JSON-RPC 2.0 implementation +| [`cuprate-rpc-types`](https://doc.cuprate.org/cuprate_rpc_types) | [`rpc/types/`](https://github.com/Cuprate/cuprate/tree/main/rpc/types) | Monero RPC types and traits +| [`cuprate-rpc-interface`](https://doc.cuprate.org/cuprate_rpc_interface) | [`rpc/interface/`](https://github.com/Cuprate/cuprate/tree/main/rpc/interface) | RPC interface & routing + +## 1-off crates +| Crate | In-tree path | Purpose | +|-------|--------------|---------| +| [`cuprate-cryptonight`](https://doc.cuprate.org/cuprate_cryptonight) | [`cryptonight/`](https://github.com/Cuprate/cuprate/tree/main/cryptonight) | CryptoNight hash functions +| [`cuprate-pruning`](https://doc.cuprate.org/cuprate_pruning) | [`pruning/`](https://github.com/Cuprate/cuprate/tree/main/pruning) | Monero pruning logic/types +| [`cuprate-helper`](https://doc.cuprate.org/cuprate_helper) | [`helper/`](https://github.com/Cuprate/cuprate/tree/main/helper) | Kitchen-sink helper crate for Cuprate +| [`cuprate-test-utils`](https://doc.cuprate.org/cuprate_test_utils) | [`test-utils/`](https://github.com/Cuprate/cuprate/tree/main/test-utils) | Testing utilities for Cuprate +| [`cuprate-types`](https://doc.cuprate.org/cuprate_types) | [`types/`](https://github.com/Cuprate/cuprate/tree/main/types) | Shared types across Cuprate diff --git a/books/architecture/src/intro.md b/books/architecture/src/intro.md deleted file mode 100644 index c708d613..00000000 --- a/books/architecture/src/intro.md +++ /dev/null @@ -1,4 +0,0 @@ -# Systems -Cuprate is made up of multiple distinct internal systems that work together. - -This section provides informal specifications and implementation details about each. \ No newline at end of file diff --git a/books/protocol/src/p2p_network/levin/admin.md b/books/protocol/src/p2p_network/levin/admin.md index 6f2b7160..a7186468 100644 --- a/books/protocol/src/p2p_network/levin/admin.md +++ b/books/protocol/src/p2p_network/levin/admin.md @@ -67,7 +67,7 @@ ID: `1007`[^support-flags] #### Request [^sf-req] { #support-flags-request } -No data is serialized for a ping request. +No data is serialized for a support flags request. 
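To make the empty body concrete, here is a rough sketch of the only data that actually crosses the wire for this message: a bare levin bucket header with a zero-length payload. The struct below is illustrative only (it is not `cuprate-levin`'s real type); the field layout and constants assume the standard levin header, and the command ID `1007` comes from this section.

```rust
/// Illustrative levin bucket header, not `cuprate-levin`'s actual type.
struct BucketHeader {
    signature: u64,            // constant levin signature
    body_size: u64,            // length of the payload following the header
    have_to_return_data: bool, // true: a response is expected
    command: u32,              // which admin/protocol message this is
    return_code: i32,          // unused (0) in requests
    flags: u32,                // request (Q) / response (S) bits
    protocol_version: u32,
}

/// A support flags request is a header and nothing else: `body_size` is 0.
fn support_flags_request() -> BucketHeader {
    BucketHeader {
        signature: 0x0101_0101_0101_2101,
        body_size: 0, // no data is serialized for the request
        have_to_return_data: true,
        command: 1007,
        return_code: 0,
        flags: 0b0000_0001, // Q bit: this bucket is a request
        protocol_version: 1,
    }
}
```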
#### Response [^sf-res] { #support-flags-response } diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index 624eb637..521b98ca 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -29,6 +29,7 @@ tokio = { workspace = true, features = ["rt"] } tokio-util = { workspace = true } hex = { workspace = true } +rand = { workspace = true } [dev-dependencies] cuprate-test-utils = { path = "../test-utils" } @@ -37,5 +38,6 @@ cuprate-consensus-rules = {path = "./rules", features = ["proptest"]} hex-literal = { workspace = true } tokio = { workspace = true, features = ["rt-multi-thread", "macros"]} +tokio-test = { workspace = true } proptest = { workspace = true } proptest-derive = { workspace = true } \ No newline at end of file diff --git a/consensus/fast-sync/src/create.rs b/consensus/fast-sync/src/create.rs index 61138bcb..97b761ab 100644 --- a/consensus/fast-sync/src/create.rs +++ b/consensus/fast-sync/src/create.rs @@ -6,7 +6,10 @@ use tower::{Service, ServiceExt}; use cuprate_blockchain::{ config::ConfigBuilder, cuprate_database::RuntimeError, service::BCReadHandle, }; -use cuprate_types::blockchain::{BCReadRequest, BCResponse}; +use cuprate_types::{ + blockchain::{BCReadRequest, BCResponse}, + Chain, +}; use cuprate_fast_sync::{hash_of_hashes, BlockId, HashOfHashes}; @@ -19,7 +22,7 @@ async fn read_batch( let mut block_ids = Vec::::with_capacity(BATCH_SIZE as usize); for height in height_from..(height_from + BATCH_SIZE) { - let request = BCReadRequest::BlockHash(height); + let request = BCReadRequest::BlockHash(height, Chain::Main); let response_channel = handle.ready().await?.call(request); let response = response_channel.await?; diff --git a/consensus/rules/src/blocks.rs b/consensus/rules/src/blocks.rs index cb0e3e45..ecd6a113 100644 --- a/consensus/rules/src/blocks.rs +++ b/consensus/rules/src/blocks.rs @@ -148,7 +148,7 @@ fn block_size_sanity_check( /// Sanity check on the block weight. /// /// ref: -fn check_block_weight( +pub fn check_block_weight( block_weight: usize, median_for_block_reward: usize, ) -> Result<(), BlockError> { @@ -184,7 +184,7 @@ fn check_prev_id(block: &Block, top_hash: &[u8; 32]) -> Result<(), BlockError> { /// Checks the blocks timestamp is in the valid range. /// /// ref: -fn check_timestamp(block: &Block, median_timestamp: u64) -> Result<(), BlockError> { +pub fn check_timestamp(block: &Block, median_timestamp: u64) -> Result<(), BlockError> { if block.header.timestamp < median_timestamp || block.header.timestamp > current_unix_timestamp() + BLOCK_FUTURE_TIME_LIMIT { diff --git a/consensus/rules/src/hard_forks.rs b/consensus/rules/src/hard_forks.rs index b34b93d7..016a51fd 100644 --- a/consensus/rules/src/hard_forks.rs +++ b/consensus/rules/src/hard_forks.rs @@ -38,7 +38,7 @@ pub enum HardForkError { } /// Information about a given hard-fork. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct HFInfo { height: u64, threshold: u64, @@ -50,7 +50,7 @@ impl HFInfo { } /// Information about every hard-fork Monero has had. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct HFsInfo([HFInfo; NUMB_OF_HARD_FORKS]); impl HFsInfo { @@ -243,7 +243,7 @@ impl HardFork { } /// A struct holding the current voting state of the blockchain. 
-#[derive(Debug, Clone)] +#[derive(Debug, Clone, Eq, PartialEq)] pub struct HFVotes { votes: [u64; NUMB_OF_HARD_FORKS], vote_list: VecDeque, @@ -293,6 +293,28 @@ impl HFVotes { } } + /// Pop a number of blocks from the top of the cache and push some values into the front of the cache, + /// i.e. the oldest blocks. + /// + /// `old_block_votes` should contain the HFs below the window that now will be in the window after popping + /// blocks from the top. + /// + /// # Panics + /// + /// This will panic if `old_block_votes` contains more HFs than `numb_blocks`. + pub fn reverse_blocks(&mut self, numb_blocks: usize, old_block_votes: Self) { + assert!(old_block_votes.vote_list.len() <= numb_blocks); + + for hf in self.vote_list.drain(self.vote_list.len() - numb_blocks..) { + self.votes[hf as usize - 1] -= 1; + } + + for old_vote in old_block_votes.vote_list.into_iter().rev() { + self.vote_list.push_front(old_vote); + self.votes[old_vote as usize - 1] += 1; + } + } + /// Returns the total votes for a hard-fork. /// /// ref: diff --git a/consensus/src/block.rs b/consensus/src/block.rs index d3d06722..1b36eb92 100644 --- a/consensus/src/block.rs +++ b/consensus/src/block.rs @@ -12,31 +12,35 @@ use monero_serai::{ block::Block, transaction::{Input, Transaction}, }; -use rayon::prelude::*; use tower::{Service, ServiceExt}; -use tracing::instrument; + +use cuprate_helper::asynch::rayon_spawn_async; +use cuprate_types::{ + AltBlockInformation, VerifiedBlockInformation, VerifiedTransactionInformation, +}; use cuprate_consensus_rules::{ blocks::{ - calculate_pow_hash, check_block, check_block_pow, is_randomx_seed_height, - randomx_seed_height, BlockError, RandomX, + calculate_pow_hash, check_block, check_block_pow, randomx_seed_height, BlockError, RandomX, }, - hard_forks::HardForkError, miner_tx::MinerTxError, ConsensusError, HardFork, }; -use cuprate_helper::asynch::rayon_spawn_async; -use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; use crate::{ - context::{ - rx_vms::RandomXVM, BlockChainContextRequest, BlockChainContextResponse, - RawBlockChainContext, - }, + context::{BlockChainContextRequest, BlockChainContextResponse, RawBlockChainContext}, transactions::{TransactionVerificationData, VerifyTxRequest, VerifyTxResponse}, Database, ExtendedConsensusError, }; +mod alt_block; +mod batch_prepare; +mod free; + +use alt_block::sanity_check_alt_block; +use batch_prepare::batch_prepare_main_chain_block; +use free::pull_ordered_transactions; + /// A pre-prepared block with all data needed to verify it, except the block's proof of work. #[derive(Debug)] pub struct PreparedBlockExPow { @@ -124,7 +128,7 @@ impl PreparedBlock { let (hf_version, hf_vote) = HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?; - let Some(Input::Gen(height)) = block.miner_tx.prefix.inputs.first() else { + let [Input::Gen(height)] = &block.miner_tx.prefix.inputs[..] else { Err(ConsensusError::Block(BlockError::MinerTxError( MinerTxError::InputNotOfTypeGen, )))? @@ -191,6 +195,7 @@ pub enum VerifyBlockRequest { /// The already prepared block. block: PreparedBlock, /// The full list of transactions for this block, in the order given in `block`. + // TODO: Remove the Arc here txs: Vec>, }, /// Batch prepares a list of blocks and transactions for verification. @@ -198,6 +203,16 @@ pub enum VerifyBlockRequest { /// The list of blocks and their transactions (not necessarily in the order given in the block). 
blocks: Vec<(Block, Vec)>, }, + /// A request to sanity check an alt block, also returning the cumulative difficulty of the alt chain. + /// + /// Unlike requests to verify main chain blocks, you do not need to add the returned block to the context + /// service, you will still have to add it to the database though. + AltChain { + /// The alt block to sanity check. + block: Block, + /// The alt transactions. + prepared_txs: HashMap<[u8; 32], TransactionVerificationData>, + }, } /// A response from a verify block request. @@ -205,6 +220,8 @@ pub enum VerifyBlockRequest { pub enum VerifyBlockResponse { /// This block is valid. MainChain(VerifiedBlockInformation), + /// The sanity checked alt block. + AltChain(AltBlockInformation), /// A list of prepared blocks for verification, you should call [`VerifyBlockRequest::MainChainPrepped`] on each of the returned /// blocks to fully verify them. MainChainBatchPrepped(Vec<(PreparedBlock, Vec>)>), @@ -296,206 +313,20 @@ where verify_prepped_main_chain_block(block, txs, context_svc, tx_verifier_svc, None) .await } + VerifyBlockRequest::AltChain { + block, + prepared_txs, + } => sanity_check_alt_block(block, prepared_txs, context_svc).await, } } .boxed() } } -/// Batch prepares a list of blocks for verification. -#[instrument(level = "debug", name = "batch_prep_blocks", skip_all, fields(amt = blocks.len()))] -async fn batch_prepare_main_chain_block( - blocks: Vec<(Block, Vec)>, - mut context_svc: C, -) -> Result -where - C: Service< - BlockChainContextRequest, - Response = BlockChainContextResponse, - Error = tower::BoxError, - > + Send - + 'static, - C::Future: Send + 'static, -{ - let (blocks, txs): (Vec<_>, Vec<_>) = blocks.into_iter().unzip(); - - tracing::debug!("Calculating block hashes."); - let blocks: Vec = rayon_spawn_async(|| { - blocks - .into_iter() - .map(PreparedBlockExPow::new) - .collect::, _>>() - }) - .await?; - - let Some(last_block) = blocks.last() else { - return Err(ExtendedConsensusError::NoBlocksToVerify); - }; - - // hard-forks cannot be reversed, so the last block will contain the highest hard fork (provided the - // batch is valid). - let top_hf_in_batch = last_block.hf_version; - - // A Vec of (timestamp, HF) for each block to calculate the expected difficulty for each block. - let mut timestamps_hfs = Vec::with_capacity(blocks.len()); - let mut new_rx_vm = None; - - tracing::debug!("Checking blocks follow each other."); - - // For every block make sure they have the correct height and previous ID - for window in blocks.windows(2) { - let block_0 = &window[0]; - let block_1 = &window[1]; - - // Make sure no blocks in the batch have a higher hard fork than the last block. - if block_0.hf_version > top_hf_in_batch { - Err(ConsensusError::Block(BlockError::HardForkError( - HardForkError::VersionIncorrect, - )))?; - } - - if block_0.block_hash != block_1.block.header.previous - || block_0.height != block_1.height - 1 - { - tracing::debug!("Blocks do not follow each other, verification failed."); - Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?; - } - - // Cache any potential RX VM seeds as we may need them for future blocks in the batch. - if is_randomx_seed_height(block_0.height) && top_hf_in_batch >= HardFork::V12 { - new_rx_vm = Some((block_0.height, block_0.block_hash)); - } - - timestamps_hfs.push((block_0.block.header.timestamp, block_0.hf_version)) - } - - // Get the current blockchain context. - let BlockChainContextResponse::Context(checked_context) = context_svc - .ready() - .await? 
- .call(BlockChainContextRequest::GetContext) - .await - .map_err(Into::::into)? - else { - panic!("Context service returned wrong response!"); - }; - - // Calculate the expected difficulties for each block in the batch. - let BlockChainContextResponse::BatchDifficulties(difficulties) = context_svc - .ready() - .await? - .call(BlockChainContextRequest::BatchGetDifficulties( - timestamps_hfs, - )) - .await - .map_err(Into::::into)? - else { - panic!("Context service returned wrong response!"); - }; - - let context = checked_context.unchecked_blockchain_context().clone(); - - // Make sure the blocks follow the main chain. - - if context.chain_height != blocks[0].height { - tracing::debug!("Blocks do not follow main chain, verification failed."); - - Err(ConsensusError::Block(BlockError::MinerTxError( - MinerTxError::InputsHeightIncorrect, - )))?; - } - - if context.top_hash != blocks[0].block.header.previous { - tracing::debug!("Blocks do not follow main chain, verification failed."); - - Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?; - } - - let mut rx_vms = if top_hf_in_batch < HardFork::V12 { - HashMap::new() - } else { - let BlockChainContextResponse::RxVms(rx_vms) = context_svc - .ready() - .await? - .call(BlockChainContextRequest::GetCurrentRxVm) - .await? - else { - panic!("Blockchain context service returned wrong response!"); - }; - - rx_vms - }; - - // If we have a RX seed in the batch calculate it. - if let Some((new_vm_height, new_vm_seed)) = new_rx_vm { - tracing::debug!("New randomX seed in batch, initialising VM"); - - let new_vm = rayon_spawn_async(move || { - Arc::new(RandomXVM::new(&new_vm_seed).expect("RandomX VM gave an error on set up!")) - }) - .await; - - context_svc - .oneshot(BlockChainContextRequest::NewRXVM(( - new_vm_seed, - new_vm.clone(), - ))) - .await - .map_err(Into::::into)?; - - rx_vms.insert(new_vm_height, new_vm); - } - - tracing::debug!("Calculating PoW and prepping transaction"); - - let blocks = rayon_spawn_async(move || { - blocks - .into_par_iter() - .zip(difficulties) - .zip(txs) - .map(|((block, difficultly), txs)| { - // Calculate the PoW for the block. - let height = block.height; - let block = PreparedBlock::new_prepped( - block, - rx_vms.get(&randomx_seed_height(height)).map(AsRef::as_ref), - )?; - - // Check the PoW - check_block_pow(&block.pow_hash, difficultly).map_err(ConsensusError::Block)?; - - // Now setup the txs. - let mut txs = txs - .into_par_iter() - .map(|tx| { - let tx = TransactionVerificationData::new(tx)?; - Ok::<_, ConsensusError>((tx.tx_hash, tx)) - }) - .collect::, _>>()?; - - // Order the txs correctly. - let mut ordered_txs = Vec::with_capacity(txs.len()); - - for tx_hash in &block.block.txs { - let tx = txs - .remove(tx_hash) - .ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?; - ordered_txs.push(Arc::new(tx)); - } - - Ok((block, ordered_txs)) - }) - .collect::, ExtendedConsensusError>>() - }) - .await?; - - Ok(VerifyBlockResponse::MainChainBatchPrepped(blocks)) -} - /// Verifies a prepared block. async fn verify_main_chain_block( block: Block, - mut txs: HashMap<[u8; 32], TransactionVerificationData>, + txs: HashMap<[u8; 32], TransactionVerificationData>, mut context_svc: C, tx_verifier_svc: TxV, ) -> Result @@ -557,20 +388,11 @@ where .map_err(ConsensusError::Block)?; // Check that the txs included are what we need and that there are not any extra. 
- - let mut ordered_txs = Vec::with_capacity(txs.len()); - - tracing::debug!("Ordering transactions for block."); - - if !prepped_block.block.txs.is_empty() { - for tx_hash in &prepped_block.block.txs { - let tx = txs - .remove(tx_hash) - .ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?; - ordered_txs.push(Arc::new(tx)); - } - drop(txs); - } + // TODO: Remove the Arc here + let ordered_txs = pull_ordered_transactions(&prepped_block.block, txs)? + .into_iter() + .map(Arc::new) + .collect(); verify_prepped_main_chain_block( prepped_block, @@ -604,8 +426,7 @@ where } else { let BlockChainContextResponse::Context(checked_context) = context_svc .oneshot(BlockChainContextRequest::GetContext) - .await - .map_err(Into::::into)? + .await? else { panic!("Context service returned wrong response!"); }; diff --git a/consensus/src/block/alt_block.rs b/consensus/src/block/alt_block.rs new file mode 100644 index 00000000..cf6f2132 --- /dev/null +++ b/consensus/src/block/alt_block.rs @@ -0,0 +1,304 @@ +//! Alt Blocks +//! +//! Alt blocks are sanity checked by [`sanity_check_alt_block`], that function will also compute the cumulative +//! difficulty of the alt chain so callers will know if they should re-org to the alt chain. +use std::{collections::HashMap, sync::Arc}; + +use monero_serai::{block::Block, transaction::Input}; +use tower::{Service, ServiceExt}; + +use cuprate_consensus_rules::{ + blocks::{ + check_block_pow, check_block_weight, check_timestamp, randomx_seed_height, BlockError, + }, + miner_tx::MinerTxError, + ConsensusError, +}; +use cuprate_helper::asynch::rayon_spawn_async; +use cuprate_types::{AltBlockInformation, Chain, ChainId, VerifiedTransactionInformation}; + +use crate::{ + block::{free::pull_ordered_transactions, PreparedBlock}, + context::{ + difficulty::DifficultyCache, + rx_vms::RandomXVM, + weight::{self, BlockWeightsCache}, + AltChainContextCache, AltChainRequestToken, BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW, + }, + transactions::TransactionVerificationData, + BlockChainContextRequest, BlockChainContextResponse, ExtendedConsensusError, + VerifyBlockResponse, +}; + +/// This function sanity checks an alt-block. +/// +/// Returns [`AltBlockInformation`], which contains the cumulative difficulty of the alt chain. +/// +/// This function only checks the block's PoW and its weight. +pub async fn sanity_check_alt_block( + block: Block, + txs: HashMap<[u8; 32], TransactionVerificationData>, + mut context_svc: C, +) -> Result +where + C: Service< + BlockChainContextRequest, + Response = BlockChainContextResponse, + Error = tower::BoxError, + > + Send + + 'static, + C::Future: Send + 'static, +{ + // Fetch the alt-chains context cache. + let BlockChainContextResponse::AltChainContextCache(mut alt_context_cache) = context_svc + .ready() + .await? + .call(BlockChainContextRequest::AltChainContextCache { + prev_id: block.header.previous, + _token: AltChainRequestToken, + }) + .await? + else { + panic!("Context service returned wrong response!"); + }; + + // Check if the block's miner input is formed correctly. + let [Input::Gen(height)] = &block.miner_tx.prefix.inputs[..] else { + Err(ConsensusError::Block(BlockError::MinerTxError( + MinerTxError::InputNotOfTypeGen, + )))? + }; + + if *height != alt_context_cache.chain_height { + Err(ConsensusError::Block(BlockError::MinerTxError( + MinerTxError::InputsHeightIncorrect, + )))? + } + + // prep the alt block. 
+ let prepped_block = { + let rx_vm = alt_rx_vm( + alt_context_cache.chain_height, + block.header.major_version, + alt_context_cache.parent_chain, + &mut alt_context_cache, + &mut context_svc, + ) + .await?; + + rayon_spawn_async(move || PreparedBlock::new(block, rx_vm.as_deref())).await? + }; + + // get the difficulty cache for this alt chain. + let difficulty_cache = alt_difficulty_cache( + prepped_block.block.header.previous, + &mut alt_context_cache, + &mut context_svc, + ) + .await?; + + // Check the alt block timestamp is in the correct range. + if let Some(median_timestamp) = + difficulty_cache.median_timestamp(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW.try_into().unwrap()) + { + check_timestamp(&prepped_block.block, median_timestamp).map_err(ConsensusError::Block)? + }; + + let next_difficulty = difficulty_cache.next_difficulty(&prepped_block.hf_version); + // make sure the block's PoW is valid for this difficulty. + check_block_pow(&prepped_block.pow_hash, next_difficulty).map_err(ConsensusError::Block)?; + + let cumulative_difficulty = difficulty_cache.cumulative_difficulty() + next_difficulty; + + let ordered_txs = pull_ordered_transactions(&prepped_block.block, txs)?; + + let block_weight = + prepped_block.miner_tx_weight + ordered_txs.iter().map(|tx| tx.tx_weight).sum::(); + + let alt_weight_cache = alt_weight_cache( + prepped_block.block.header.previous, + &mut alt_context_cache, + &mut context_svc, + ) + .await?; + + // Check the block weight is below the limit. + check_block_weight( + block_weight, + alt_weight_cache.median_for_block_reward(&prepped_block.hf_version), + ) + .map_err(ConsensusError::Block)?; + + let long_term_weight = weight::calculate_block_long_term_weight( + &prepped_block.hf_version, + block_weight, + alt_weight_cache.median_long_term_weight(), + ); + + // Get the chainID or generate a new one if this is the first alt block in this alt chain. + let chain_id = *alt_context_cache + .chain_id + .get_or_insert_with(|| ChainId(rand::random())); + + // Create the alt block info. + let block_info = AltBlockInformation { + block_hash: prepped_block.block_hash, + block: prepped_block.block, + block_blob: prepped_block.block_blob, + txs: ordered_txs + .into_iter() + .map(|tx| VerifiedTransactionInformation { + tx_blob: tx.tx_blob, + tx_weight: tx.tx_weight, + fee: tx.fee, + tx_hash: tx.tx_hash, + tx: tx.tx, + }) + .collect(), + pow_hash: prepped_block.pow_hash, + weight: block_weight, + height: alt_context_cache.chain_height, + long_term_weight, + cumulative_difficulty, + chain_id, + }; + + // Add this block to the cache. + alt_context_cache.add_new_block( + block_info.height, + block_info.block_hash, + block_info.weight, + block_info.long_term_weight, + block_info.block.header.timestamp, + ); + + // Add this alt cache back to the context service. + context_svc + .oneshot(BlockChainContextRequest::AddAltChainContextCache { + prev_id: block_info.block.header.previous, + cache: alt_context_cache, + _token: AltChainRequestToken, + }) + .await?; + + Ok(VerifyBlockResponse::AltChain(block_info)) +} + +/// Retrieves the alt RX VM for the chosen block height. +/// +/// If the `hf` is less than 12 (the height RX activates), then [`None`] is returned. 
+async fn alt_rx_vm( + block_height: u64, + hf: u8, + parent_chain: Chain, + alt_chain_context: &mut AltChainContextCache, + context_svc: C, +) -> Result>, ExtendedConsensusError> +where + C: Service< + BlockChainContextRequest, + Response = BlockChainContextResponse, + Error = tower::BoxError, + > + Send, + C::Future: Send + 'static, +{ + if hf < 12 { + return Ok(None); + } + + let seed_height = randomx_seed_height(block_height); + + let cached_vm = match alt_chain_context.cached_rx_vm.take() { + // If the VM is cached and the height is the height we need, we can use this VM. + Some((cached_seed_height, vm)) if seed_height == cached_seed_height => { + (cached_seed_height, vm) + } + // Otherwise we need to make a new VM. + _ => { + let BlockChainContextResponse::AltChainRxVM(vm) = context_svc + .oneshot(BlockChainContextRequest::AltChainRxVM { + height: block_height, + chain: parent_chain, + _token: AltChainRequestToken, + }) + .await? + else { + panic!("Context service returned wrong response!"); + }; + + (seed_height, vm) + } + }; + + Ok(Some( + alt_chain_context.cached_rx_vm.insert(cached_vm).1.clone(), + )) +} + +/// Returns the [`DifficultyCache`] for the alt chain. +async fn alt_difficulty_cache( + prev_id: [u8; 32], + alt_chain_context: &mut AltChainContextCache, + context_svc: C, +) -> Result<&mut DifficultyCache, ExtendedConsensusError> +where + C: Service< + BlockChainContextRequest, + Response = BlockChainContextResponse, + Error = tower::BoxError, + > + Send, + C::Future: Send + 'static, +{ + // First look to see if the difficulty cache for this alt chain is already cached. + match &mut alt_chain_context.difficulty_cache { + Some(cache) => Ok(cache), + // Otherwise make a new one. + difficulty_cache => { + let BlockChainContextResponse::AltChainDifficultyCache(cache) = context_svc + .oneshot(BlockChainContextRequest::AltChainDifficultyCache { + prev_id, + _token: AltChainRequestToken, + }) + .await? + else { + panic!("Context service returned wrong response!"); + }; + + Ok(difficulty_cache.insert(cache)) + } + } +} + +/// Returns the [`BlockWeightsCache`] for the alt chain. +async fn alt_weight_cache( + prev_id: [u8; 32], + alt_chain_context: &mut AltChainContextCache, + context_svc: C, +) -> Result<&mut BlockWeightsCache, ExtendedConsensusError> +where + C: Service< + BlockChainContextRequest, + Response = BlockChainContextResponse, + Error = tower::BoxError, + > + Send, + C::Future: Send + 'static, +{ + // First look to see if the weight cache for this alt chain is already cached. + match &mut alt_chain_context.weight_cache { + Some(cache) => Ok(cache), + // Otherwise make a new one. + weight_cache => { + let BlockChainContextResponse::AltChainWeightCache(cache) = context_svc + .oneshot(BlockChainContextRequest::AltChainWeightCache { + prev_id, + _token: AltChainRequestToken, + }) + .await? 
+ else { + panic!("Context service returned wrong response!"); + }; + + Ok(weight_cache.insert(cache)) + } + } +} diff --git a/consensus/src/block/batch_prepare.rs b/consensus/src/block/batch_prepare.rs new file mode 100644 index 00000000..64d1ccb5 --- /dev/null +++ b/consensus/src/block/batch_prepare.rs @@ -0,0 +1,207 @@ +use std::{collections::HashMap, sync::Arc}; + +use monero_serai::{block::Block, transaction::Transaction}; +use rayon::prelude::*; +use tower::{Service, ServiceExt}; +use tracing::instrument; + +use cuprate_consensus_rules::{ + blocks::{check_block_pow, is_randomx_seed_height, randomx_seed_height, BlockError}, + hard_forks::HardForkError, + miner_tx::MinerTxError, + ConsensusError, HardFork, +}; +use cuprate_helper::asynch::rayon_spawn_async; + +use crate::{ + block::{free::pull_ordered_transactions, PreparedBlock, PreparedBlockExPow}, + context::rx_vms::RandomXVM, + transactions::TransactionVerificationData, + BlockChainContextRequest, BlockChainContextResponse, ExtendedConsensusError, + VerifyBlockResponse, +}; + +/// Batch prepares a list of blocks for verification. +#[instrument(level = "debug", name = "batch_prep_blocks", skip_all, fields(amt = blocks.len()))] +pub(crate) async fn batch_prepare_main_chain_block( + blocks: Vec<(Block, Vec)>, + mut context_svc: C, +) -> Result +where + C: Service< + BlockChainContextRequest, + Response = BlockChainContextResponse, + Error = tower::BoxError, + > + Send + + 'static, + C::Future: Send + 'static, +{ + let (blocks, txs): (Vec<_>, Vec<_>) = blocks.into_iter().unzip(); + + tracing::debug!("Calculating block hashes."); + let blocks: Vec = rayon_spawn_async(|| { + blocks + .into_iter() + .map(PreparedBlockExPow::new) + .collect::, _>>() + }) + .await?; + + let Some(last_block) = blocks.last() else { + return Err(ExtendedConsensusError::NoBlocksToVerify); + }; + + // hard-forks cannot be reversed, so the last block will contain the highest hard fork (provided the + // batch is valid). + let top_hf_in_batch = last_block.hf_version; + + // A Vec of (timestamp, HF) for each block to calculate the expected difficulty for each block. + let mut timestamps_hfs = Vec::with_capacity(blocks.len()); + let mut new_rx_vm = None; + + tracing::debug!("Checking blocks follow each other."); + + // For every block make sure they have the correct height and previous ID + for window in blocks.windows(2) { + let block_0 = &window[0]; + let block_1 = &window[1]; + + // Make sure no blocks in the batch have a higher hard fork than the last block. + if block_0.hf_version > top_hf_in_batch { + Err(ConsensusError::Block(BlockError::HardForkError( + HardForkError::VersionIncorrect, + )))?; + } + + if block_0.block_hash != block_1.block.header.previous + || block_0.height != block_1.height - 1 + { + tracing::debug!("Blocks do not follow each other, verification failed."); + Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?; + } + + // Cache any potential RX VM seeds as we may need them for future blocks in the batch. + if is_randomx_seed_height(block_0.height) && top_hf_in_batch >= HardFork::V12 { + new_rx_vm = Some((block_0.height, block_0.block_hash)); + } + + timestamps_hfs.push((block_0.block.header.timestamp, block_0.hf_version)) + } + + // Get the current blockchain context. + let BlockChainContextResponse::Context(checked_context) = context_svc + .ready() + .await? + .call(BlockChainContextRequest::GetContext) + .await? 
+ else { + panic!("Context service returned wrong response!"); + }; + + // Calculate the expected difficulties for each block in the batch. + let BlockChainContextResponse::BatchDifficulties(difficulties) = context_svc + .ready() + .await? + .call(BlockChainContextRequest::BatchGetDifficulties( + timestamps_hfs, + )) + .await? + else { + panic!("Context service returned wrong response!"); + }; + + let context = checked_context.unchecked_blockchain_context().clone(); + + // Make sure the blocks follow the main chain. + + if context.chain_height != blocks[0].height { + tracing::debug!("Blocks do not follow main chain, verification failed."); + + Err(ConsensusError::Block(BlockError::MinerTxError( + MinerTxError::InputsHeightIncorrect, + )))?; + } + + if context.top_hash != blocks[0].block.header.previous { + tracing::debug!("Blocks do not follow main chain, verification failed."); + + Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?; + } + + let mut rx_vms = if top_hf_in_batch < HardFork::V12 { + HashMap::new() + } else { + let BlockChainContextResponse::RxVms(rx_vms) = context_svc + .ready() + .await? + .call(BlockChainContextRequest::GetCurrentRxVm) + .await? + else { + panic!("Blockchain context service returned wrong response!"); + }; + + rx_vms + }; + + // If we have a RX seed in the batch calculate it. + if let Some((new_vm_height, new_vm_seed)) = new_rx_vm { + tracing::debug!("New randomX seed in batch, initialising VM"); + + let new_vm = rayon_spawn_async(move || { + Arc::new(RandomXVM::new(&new_vm_seed).expect("RandomX VM gave an error on set up!")) + }) + .await; + + // Give the new VM to the context service, so it can cache it. + context_svc + .oneshot(BlockChainContextRequest::NewRXVM(( + new_vm_seed, + new_vm.clone(), + ))) + .await?; + + rx_vms.insert(new_vm_height, new_vm); + } + + tracing::debug!("Calculating PoW and prepping transaction"); + + let blocks = rayon_spawn_async(move || { + blocks + .into_par_iter() + .zip(difficulties) + .zip(txs) + .map(|((block, difficultly), txs)| { + // Calculate the PoW for the block. + let height = block.height; + let block = PreparedBlock::new_prepped( + block, + rx_vms.get(&randomx_seed_height(height)).map(AsRef::as_ref), + )?; + + // Check the PoW + check_block_pow(&block.pow_hash, difficultly).map_err(ConsensusError::Block)?; + + // Now setup the txs. + let txs = txs + .into_par_iter() + .map(|tx| { + let tx = TransactionVerificationData::new(tx)?; + Ok::<_, ConsensusError>((tx.tx_hash, tx)) + }) + .collect::, _>>()?; + + // Order the txs correctly. + // TODO: Remove the Arc here + let ordered_txs = pull_ordered_transactions(&block.block, txs)? + .into_iter() + .map(Arc::new) + .collect(); + + Ok((block, ordered_txs)) + }) + .collect::, ExtendedConsensusError>>() + }) + .await?; + + Ok(VerifyBlockResponse::MainChainBatchPrepped(blocks)) +} diff --git a/consensus/src/block/free.rs b/consensus/src/block/free.rs new file mode 100644 index 00000000..8a61e801 --- /dev/null +++ b/consensus/src/block/free.rs @@ -0,0 +1,32 @@ +//! Free functions for block verification +use std::collections::HashMap; + +use monero_serai::block::Block; + +use crate::{transactions::TransactionVerificationData, ExtendedConsensusError}; + +/// Returns a list of transactions, pulled from `txs` in the order they are in the [`Block`]. +/// +/// Will error if a tx need is not in `txs` or if `txs` contain more txs than needed. 
+pub(crate) fn pull_ordered_transactions( + block: &Block, + mut txs: HashMap<[u8; 32], TransactionVerificationData>, +) -> Result, ExtendedConsensusError> { + if block.txs.len() != txs.len() { + return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect); + } + + let mut ordered_txs = Vec::with_capacity(txs.len()); + + if !block.txs.is_empty() { + for tx_hash in &block.txs { + let tx = txs + .remove(tx_hash) + .ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?; + ordered_txs.push(tx); + } + drop(txs); + } + + Ok(ordered_txs) +} diff --git a/consensus/src/context.rs b/consensus/src/context.rs index 0752b8bf..fffbe90b 100644 --- a/consensus/src/context.rs +++ b/consensus/src/context.rs @@ -27,16 +27,22 @@ pub(crate) mod hardforks; pub(crate) mod rx_vms; pub(crate) mod weight; +mod alt_chains; mod task; mod tokens; +use cuprate_types::Chain; +use difficulty::DifficultyCache; +use rx_vms::RandomXVM; +use weight::BlockWeightsCache; + +pub(crate) use alt_chains::{sealed::AltChainRequestToken, AltChainContextCache}; pub use difficulty::DifficultyCacheConfig; pub use hardforks::HardForkConfig; -use rx_vms::RandomXVM; pub use tokens::*; pub use weight::BlockWeightsCacheConfig; -const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: u64 = 60; +pub(crate) const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: u64 = 60; /// Config for the context service. pub struct ContextConfig { @@ -233,6 +239,74 @@ pub enum BlockChainContextRequest { NewRXVM(([u8; 32], Arc)), /// A request to add a new block to the cache. Update(NewBlockData), + /// Pop blocks from the cache to the specified height. + PopBlocks { + /// The number of blocks to pop from the top of the chain. + /// + /// # Panics + /// + /// This will panic if the number of blocks will pop the genesis block. + numb_blocks: u64, + }, + /// Clear the alt chain context caches. + ClearAltCache, + //----------------------------------------------------------------------------------------------------------- AltChainRequests + /// A request for an alt chain context cache. + /// + /// This variant is private and is not callable from outside this crate, the block verifier service will + /// handle getting the alt cache. + AltChainContextCache { + /// The previous block field in a [`BlockHeader`](monero_serai::block::BlockHeader). + prev_id: [u8; 32], + /// An internal token to prevent external crates calling this request. + _token: AltChainRequestToken, + }, + /// A request for a difficulty cache of an alternative chin. + /// + /// This variant is private and is not callable from outside this crate, the block verifier service will + /// handle getting the difficulty cache of an alt chain. + AltChainDifficultyCache { + /// The previous block field in a [`BlockHeader`](monero_serai::block::BlockHeader). + prev_id: [u8; 32], + /// An internal token to prevent external crates calling this request. + _token: AltChainRequestToken, + }, + /// A request for a block weight cache of an alternative chin. + /// + /// This variant is private and is not callable from outside this crate, the block verifier service will + /// handle getting the weight cache of an alt chain. + AltChainWeightCache { + /// The previous block field in a [`BlockHeader`](monero_serai::block::BlockHeader). + prev_id: [u8; 32], + /// An internal token to prevent external crates calling this request. + _token: AltChainRequestToken, + }, + /// A request for a RX VM for an alternative chin. + /// + /// Response variant: [`BlockChainContextResponse::AltChainRxVM`]. 
+ /// + /// This variant is private and is not callable from outside this crate, the block verifier service will + /// handle getting the randomX VM of an alt chain. + AltChainRxVM { + /// The height the RandomX VM is needed for. + height: u64, + /// The chain to look in for the seed. + chain: Chain, + /// An internal token to prevent external crates calling this request. + _token: AltChainRequestToken, + }, + /// A request to add an alt chain context cache to the context cache. + /// + /// This variant is private and is not callable from outside this crate, the block verifier service will + /// handle returning the alt cache to the context service. + AddAltChainContextCache { + /// The previous block field in a [`BlockHeader`](monero_serai::block::BlockHeader). + prev_id: [u8; 32], + /// The cache. + cache: Box, + /// An internal token to prevent external crates calling this request. + _token: AltChainRequestToken, + }, } pub enum BlockChainContextResponse { @@ -242,7 +316,15 @@ pub enum BlockChainContextResponse { RxVms(HashMap>), /// A list of difficulties. BatchDifficulties(Vec), - /// Ok response. + /// An alt chain context cache. + AltChainContextCache(Box), + /// A difficulty cache for an alt chain. + AltChainDifficultyCache(DifficultyCache), + /// A randomX VM for an alt chain. + AltChainRxVM(Arc), + /// A weight cache for an alt chain + AltChainWeightCache(BlockWeightsCache), + /// A generic Ok response. Ok, } diff --git a/consensus/src/context/alt_chains.rs b/consensus/src/context/alt_chains.rs new file mode 100644 index 00000000..71af8a1e --- /dev/null +++ b/consensus/src/context/alt_chains.rs @@ -0,0 +1,215 @@ +use std::{collections::HashMap, sync::Arc}; + +use tower::ServiceExt; + +use cuprate_consensus_rules::{blocks::BlockError, ConsensusError}; +use cuprate_types::{ + blockchain::{BCReadRequest, BCResponse}, + Chain, ChainId, +}; + +use crate::{ + ExtendedConsensusError, + __private::Database, + context::{difficulty::DifficultyCache, rx_vms::RandomXVM, weight::BlockWeightsCache}, +}; + +pub(crate) mod sealed { + /// A token that should be hard to create from outside this crate. + /// + /// It is currently possible to safely create this from outside this crate, **DO NOT** rely on this + /// as it will be broken once we find a way to completely seal this. + #[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq)] + pub struct AltChainRequestToken; +} + +/// The context cache of an alternative chain. +#[derive(Debug, Clone)] +pub struct AltChainContextCache { + /// The alt chain weight cache, [`None`] if it has not been built yet. + pub weight_cache: Option, + /// The alt chain difficulty cache, [`None`] if it has not been built yet. + pub difficulty_cache: Option, + + /// A cached RX VM. + pub cached_rx_vm: Option<(u64, Arc)>, + + /// The chain height of the alt chain. + pub chain_height: u64, + /// The top hash of the alt chain. + pub top_hash: [u8; 32], + /// The [`ChainID`] of the alt chain. + pub chain_id: Option, + /// The parent [`Chain`] of this alt chain. + pub parent_chain: Chain, +} + +impl AltChainContextCache { + /// Add a new block to the cache. 
+ pub fn add_new_block( + &mut self, + height: u64, + block_hash: [u8; 32], + block_weight: usize, + long_term_block_weight: usize, + timestamp: u64, + ) { + if let Some(difficulty_cache) = &mut self.difficulty_cache { + difficulty_cache.new_block(height, timestamp, difficulty_cache.cumulative_difficulty()); + } + + if let Some(weight_cache) = &mut self.weight_cache { + weight_cache.new_block(height, block_weight, long_term_block_weight); + } + + self.chain_height += 1; + self.top_hash = block_hash; + } +} + +/// A map of top IDs to alt chains. +pub struct AltChainMap { + alt_cache_map: HashMap<[u8; 32], Box>, +} + +impl AltChainMap { + pub fn new() -> Self { + Self { + alt_cache_map: HashMap::new(), + } + } + + pub fn clear(&mut self) { + self.alt_cache_map.clear(); + } + + /// Add an alt chain cache to the map. + pub fn add_alt_cache(&mut self, prev_id: [u8; 32], alt_cache: Box) { + self.alt_cache_map.insert(prev_id, alt_cache); + } + + /// Attempts to take an [`AltChainContextCache`] from the map, returning [`None`] if no cache is + /// present. + pub async fn get_alt_chain_context( + &mut self, + prev_id: [u8; 32], + database: D, + ) -> Result, ExtendedConsensusError> { + if let Some(cache) = self.alt_cache_map.remove(&prev_id) { + return Ok(cache); + } + + // find the block with hash == prev_id. + let BCResponse::FindBlock(res) = + database.oneshot(BCReadRequest::FindBlock(prev_id)).await? + else { + panic!("Database returned wrong response"); + }; + + let Some((parent_chain, top_height)) = res else { + // Couldn't find prev_id + Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))? + }; + + Ok(Box::new(AltChainContextCache { + weight_cache: None, + difficulty_cache: None, + cached_rx_vm: None, + chain_height: top_height, + top_hash: prev_id, + chain_id: None, + parent_chain, + })) + } +} + +/// Builds a [`DifficultyCache`] for an alt chain. +pub async fn get_alt_chain_difficulty_cache( + prev_id: [u8; 32], + main_chain_difficulty_cache: &DifficultyCache, + mut database: D, +) -> Result { + // find the block with hash == prev_id. + let BCResponse::FindBlock(res) = database + .ready() + .await? + .call(BCReadRequest::FindBlock(prev_id)) + .await? + else { + panic!("Database returned wrong response"); + }; + + let Some((chain, top_height)) = res else { + // Can't find prev_id + Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))? + }; + + Ok(match chain { + Chain::Main => { + // prev_id is in main chain, we can use the fast path and clone the main chain cache. + let mut difficulty_cache = main_chain_difficulty_cache.clone(); + difficulty_cache + .pop_blocks_main_chain( + difficulty_cache.last_accounted_height - top_height, + database, + ) + .await?; + + difficulty_cache + } + Chain::Alt(_) => { + // prev_id is in an alt chain, completely rebuild the cache. + DifficultyCache::init_from_chain_height( + top_height + 1, + main_chain_difficulty_cache.config, + database, + chain, + ) + .await? + } + }) +} + +/// Builds a [`BlockWeightsCache`] for an alt chain. +pub async fn get_alt_chain_weight_cache( + prev_id: [u8; 32], + main_chain_weight_cache: &BlockWeightsCache, + mut database: D, +) -> Result { + // find the block with hash == prev_id. + let BCResponse::FindBlock(res) = database + .ready() + .await? + .call(BCReadRequest::FindBlock(prev_id)) + .await? + else { + panic!("Database returned wrong response"); + }; + + let Some((chain, top_height)) = res else { + // Can't find prev_id + Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))? 
+ }; + + Ok(match chain { + Chain::Main => { + // prev_id is in main chain, we can use the fast path and clone the main chain cache. + let mut weight_cache = main_chain_weight_cache.clone(); + weight_cache + .pop_blocks_main_chain(weight_cache.tip_height - top_height, database) + .await?; + + weight_cache + } + Chain::Alt(_) => { + // prev_id is in an alt chain, completely rebuild the cache. + BlockWeightsCache::init_from_chain_height( + top_height + 1, + main_chain_weight_cache.config, + database, + chain, + ) + .await? + } + }) +} diff --git a/consensus/src/context/difficulty.rs b/consensus/src/context/difficulty.rs index 9c8321f0..b025dfcd 100644 --- a/consensus/src/context/difficulty.rs +++ b/consensus/src/context/difficulty.rs @@ -12,7 +12,10 @@ use tower::ServiceExt; use tracing::instrument; use cuprate_helper::num::median; -use cuprate_types::blockchain::{BCReadRequest, BCResponse}; +use cuprate_types::{ + blockchain::{BCReadRequest, BCResponse}, + Chain, +}; use crate::{Database, ExtendedConsensusError, HardFork}; @@ -28,7 +31,7 @@ const DIFFICULTY_LAG: usize = 15; /// Configuration for the difficulty cache. /// -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct DifficultyCacheConfig { pub(crate) window: usize, pub(crate) cut: usize, @@ -68,7 +71,7 @@ impl DifficultyCacheConfig { /// This struct is able to calculate difficulties from blockchain information. /// #[derive(Debug, Clone, Eq, PartialEq)] -pub(crate) struct DifficultyCache { +pub struct DifficultyCache { /// The list of timestamps in the window. /// len <= [`DIFFICULTY_BLOCKS_COUNT`] pub(crate) timestamps: VecDeque, @@ -87,6 +90,7 @@ impl DifficultyCache { chain_height: u64, config: DifficultyCacheConfig, database: D, + chain: Chain, ) -> Result { tracing::info!("Initializing difficulty cache this may take a while."); @@ -98,7 +102,9 @@ impl DifficultyCache { } let (timestamps, cumulative_difficulties) = - get_blocks_in_pow_info(database.clone(), block_start..chain_height).await?; + get_blocks_in_pow_info(database.clone(), block_start..chain_height, chain).await?; + + debug_assert_eq!(timestamps.len() as u64, chain_height - block_start); tracing::info!( "Current chain height: {}, accounting for {} blocks timestamps", @@ -116,6 +122,70 @@ impl DifficultyCache { Ok(diff) } + /// Pop some blocks from the top of the cache. + /// + /// The cache will be returned to the state it would have been in `numb_blocks` ago. + /// + /// # Invariant + /// + /// This _must_ only be used on a main-chain cache. + #[instrument(name = "pop_blocks_diff_cache", skip_all, fields(numb_blocks = numb_blocks))] + pub async fn pop_blocks_main_chain( + &mut self, + numb_blocks: u64, + database: D, + ) -> Result<(), ExtendedConsensusError> { + let Some(retained_blocks) = self + .timestamps + .len() + .checked_sub(usize::try_from(numb_blocks).unwrap()) + else { + // More blocks to pop than we have in the cache, so just restart a new cache. + *self = Self::init_from_chain_height( + self.last_accounted_height - numb_blocks + 1, + self.config, + database, + Chain::Main, + ) + .await?; + + return Ok(()); + }; + + let current_chain_height = self.last_accounted_height + 1; + + let mut new_start_height = current_chain_height + .saturating_sub(self.config.total_block_count()) + .saturating_sub(numb_blocks); + + // skip the genesis block. 
+ if new_start_height == 0 { + new_start_height = 1; + } + + let (mut timestamps, mut cumulative_difficulties) = get_blocks_in_pow_info( + database, + new_start_height + // current_chain_height - self.timestamps.len() blocks are already in the cache. + ..(current_chain_height - u64::try_from(self.timestamps.len()).unwrap()), + Chain::Main, + ) + .await?; + + self.timestamps.drain(retained_blocks..); + self.cumulative_difficulties.drain(retained_blocks..); + timestamps.append(&mut self.timestamps); + cumulative_difficulties.append(&mut self.cumulative_difficulties); + + self.timestamps = timestamps; + self.cumulative_difficulties = cumulative_difficulties; + self.last_accounted_height -= numb_blocks; + + assert_eq!(self.timestamps.len(), self.cumulative_difficulties.len()); + + Ok(()) + } + /// Add a new block to the difficulty cache. pub fn new_block(&mut self, height: u64, timestamp: u64, cumulative_difficulty: u128) { assert_eq!(self.last_accounted_height + 1, height); @@ -200,7 +270,7 @@ impl DifficultyCache { if self.last_accounted_height + 1 == u64::try_from(numb_blocks).unwrap() { // if the chain height is equal to `numb_blocks` add the genesis block. // otherwise if the chain height is less than `numb_blocks` None is returned - // and if its more than it would be excluded from calculations. + // and if it's more it would be excluded from calculations. let mut timestamps = self.timestamps.clone(); // all genesis blocks have a timestamp of 0. // https://cuprate.github.io/monero-book/consensus_rules/genesis_block.html @@ -299,11 +369,15 @@ fn get_window_start_and_end( async fn get_blocks_in_pow_info( database: D, block_heights: Range, + chain: Chain, ) -> Result<(VecDeque, VecDeque), ExtendedConsensusError> { tracing::info!("Getting blocks timestamps"); let BCResponse::BlockExtendedHeaderInRange(ext_header) = database - .oneshot(BCReadRequest::BlockExtendedHeaderInRange(block_heights)) + .oneshot(BCReadRequest::BlockExtendedHeaderInRange( + block_heights, + chain, + )) .await? else { panic!("Database sent incorrect response"); diff --git a/consensus/src/context/hardforks.rs b/consensus/src/context/hardforks.rs index 92182c7b..22433500 100644 --- a/consensus/src/context/hardforks.rs +++ b/consensus/src/context/hardforks.rs @@ -4,7 +4,10 @@ use tower::ServiceExt; use tracing::instrument; use cuprate_consensus_rules::{HFVotes, HFsInfo, HardFork}; -use cuprate_types::blockchain::{BCReadRequest, BCResponse}; +use cuprate_types::{ + blockchain::{BCReadRequest, BCResponse}, + Chain, +}; use crate::{Database, ExtendedConsensusError}; @@ -15,7 +18,7 @@ const DEFAULT_WINDOW_SIZE: u64 = 10080; // supermajority window check length - a /// Configuration for hard-forks. /// -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct HardForkConfig { /// The network we are on. pub(crate) info: HFsInfo, @@ -50,7 +53,7 @@ impl HardForkConfig { } /// A struct that keeps track of the current hard-fork and current votes. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Eq, PartialEq)] pub struct HardForkState { /// The current active hard-fork. pub(crate) current_hardfork: HardFork, @@ -117,6 +120,50 @@ impl HardForkState { Ok(hfs) } + /// Pop some blocks from the top of the cache. + /// + /// The cache will be returned to the state it would have been in `numb_blocks` ago. + /// + /// # Invariant + /// + /// This _must_ only be used on a main-chain cache. 
+ pub async fn pop_blocks_main_chain( + &mut self, + numb_blocks: u64, + database: D, + ) -> Result<(), ExtendedConsensusError> { + let Some(retained_blocks) = self.votes.total_votes().checked_sub(self.config.window) else { + *self = Self::init_from_chain_height( + self.last_height + 1 - numb_blocks, + self.config, + database, + ) + .await?; + + return Ok(()); + }; + + let current_chain_height = self.last_height + 1; + + let oldest_votes = get_votes_in_range( + database, + current_chain_height + .saturating_sub(self.config.window) + .saturating_sub(numb_blocks) + ..current_chain_height + .saturating_sub(numb_blocks) + .saturating_sub(retained_blocks), + usize::try_from(numb_blocks).unwrap(), + ) + .await?; + + self.votes + .reverse_blocks(usize::try_from(numb_blocks).unwrap(), oldest_votes); + self.last_height -= numb_blocks; + + Ok(()) + } + /// Add a new block to the cache. pub fn new_block(&mut self, vote: HardFork, height: u64) { // We don't _need_ to take in `height` but it's for safety, so we don't silently loose track @@ -168,7 +215,10 @@ async fn get_votes_in_range( let mut votes = HFVotes::new(window_size); let BCResponse::BlockExtendedHeaderInRange(vote_list) = database - .oneshot(BCReadRequest::BlockExtendedHeaderInRange(block_heights)) + .oneshot(BCReadRequest::BlockExtendedHeaderInRange( + block_heights, + Chain::Main, + )) .await? else { panic!("Database sent incorrect response!"); diff --git a/consensus/src/context/rx_vms.rs b/consensus/src/context/rx_vms.rs index 08ecb957..31546486 100644 --- a/consensus/src/context/rx_vms.rs +++ b/consensus/src/context/rx_vms.rs @@ -15,12 +15,16 @@ use thread_local::ThreadLocal; use tower::ServiceExt; use tracing::instrument; +use cuprate_consensus_rules::blocks::randomx_seed_height; use cuprate_consensus_rules::{ blocks::{is_randomx_seed_height, RandomX, RX_SEEDHASH_EPOCH_BLOCKS}, HardFork, }; use cuprate_helper::asynch::rayon_spawn_async; -use cuprate_types::blockchain::{BCReadRequest, BCResponse}; +use cuprate_types::{ + blockchain::{BCReadRequest, BCResponse}, + Chain, +}; use crate::{Database, ExtendedConsensusError}; @@ -124,7 +128,39 @@ impl RandomXVMCache { self.cached_vm.replace(vm); } - /// Get the RandomX VMs. + /// Creates a RX VM for an alt chain, looking at the main chain RX VMs to see if we can use one + /// of them first. + pub async fn get_alt_vm( + &mut self, + height: u64, + chain: Chain, + database: D, + ) -> Result, ExtendedConsensusError> { + let seed_height = randomx_seed_height(height); + + let BCResponse::BlockHash(seed_hash) = database + .oneshot(BCReadRequest::BlockHash(seed_height, chain)) + .await? + else { + panic!("Database returned wrong response!"); + }; + + for (vm_main_chain_height, vm_seed_hash) in &self.seeds { + if vm_seed_hash == &seed_hash { + let Some(vm) = self.vms.get(vm_main_chain_height) else { + break; + }; + + return Ok(vm.clone()); + } + } + + let alt_vm = rayon_spawn_async(move || Arc::new(RandomXVM::new(&seed_hash).unwrap())).await; + + Ok(alt_vm) + } + + /// Get the main-chain RandomX VMs. pub async fn get_vms(&mut self) -> HashMap> { match self.seeds.len().checked_sub(self.vms.len()) { // No difference in the amount of seeds to VMs. @@ -176,6 +212,12 @@ impl RandomXVMCache { self.vms.clone() } + /// Removes all the RandomX VMs above the `new_height`. + pub fn pop_blocks_main_chain(&mut self, new_height: u64) { + self.seeds.retain(|(height, _)| *height < new_height); + self.vms.retain(|height, _| *height < new_height); + } + /// Add a new block to the VM cache. 
/// /// hash is the block hash not the blocks PoW hash. @@ -231,8 +273,10 @@ async fn get_block_hashes( for height in heights { let db = database.clone(); fut.push_back(async move { - let BCResponse::BlockHash(hash) = - db.clone().oneshot(BCReadRequest::BlockHash(height)).await? + let BCResponse::BlockHash(hash) = db + .clone() + .oneshot(BCReadRequest::BlockHash(height, Chain::Main)) + .await? else { panic!("Database sent incorrect response!"); }; diff --git a/consensus/src/context/task.rs b/consensus/src/context/task.rs index 108922d7..1fa68a2f 100644 --- a/consensus/src/context/task.rs +++ b/consensus/src/context/task.rs @@ -9,14 +9,20 @@ use tower::ServiceExt; use tracing::Instrument; use cuprate_consensus_rules::blocks::ContextToVerifyBlock; -use cuprate_types::blockchain::{BCReadRequest, BCResponse}; - -use super::{ - difficulty, hardforks, rx_vms, weight, BlockChainContext, BlockChainContextRequest, - BlockChainContextResponse, ContextConfig, RawBlockChainContext, ValidityToken, - BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW, +use cuprate_types::{ + blockchain::{BCReadRequest, BCResponse}, + Chain, +}; + +use crate::{ + context::{ + alt_chains::{get_alt_chain_difficulty_cache, get_alt_chain_weight_cache, AltChainMap}, + difficulty, hardforks, rx_vms, weight, BlockChainContext, BlockChainContextRequest, + BlockChainContextResponse, ContextConfig, RawBlockChainContext, ValidityToken, + BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW, + }, + Database, ExtendedConsensusError, }; -use crate::{Database, ExtendedConsensusError}; /// A request from the context service to the context task. pub(super) struct ContextTaskRequest { @@ -29,7 +35,7 @@ pub(super) struct ContextTaskRequest { } /// The Context task that keeps the blockchain context and handles requests. -pub struct ContextTask { +pub struct ContextTask { /// A token used to invalidate previous contexts when a new /// block is added to the chain. current_validity_token: ValidityToken, @@ -43,25 +49,25 @@ pub struct ContextTask { /// The hard-fork state cache. hardfork_state: hardforks::HardForkState, + alt_chain_cache_map: AltChainMap, + /// The current chain height. chain_height: u64, /// The top block hash. top_block_hash: [u8; 32], /// The total amount of coins generated. already_generated_coins: u64, + + database: D, } -impl ContextTask { +impl ContextTask { /// Initialize the [`ContextTask`], this will need to pull a lot of data from the database so may take a /// while to complete. - pub async fn init_context( + pub async fn init_context( cfg: ContextConfig, mut database: D, - ) -> Result - where - D: Database + Clone + Send + Sync + 'static, - D::Future: Send + 'static, - { + ) -> Result { let ContextConfig { difficulty_cfg, weights_config, @@ -82,7 +88,7 @@ impl ContextTask { let BCResponse::GeneratedCoins(already_generated_coins) = database .ready() .await? - .call(BCReadRequest::GeneratedCoins) + .call(BCReadRequest::GeneratedCoins(chain_height - 1)) .await? 
else { panic!("Database sent incorrect response!"); @@ -95,14 +101,24 @@ impl ContextTask { let db = database.clone(); let difficulty_cache_handle = tokio::spawn(async move { - difficulty::DifficultyCache::init_from_chain_height(chain_height, difficulty_cfg, db) - .await + difficulty::DifficultyCache::init_from_chain_height( + chain_height, + difficulty_cfg, + db, + Chain::Main, + ) + .await }); let db = database.clone(); let weight_cache_handle = tokio::spawn(async move { - weight::BlockWeightsCache::init_from_chain_height(chain_height, weights_config, db) - .await + weight::BlockWeightsCache::init_from_chain_height( + chain_height, + weights_config, + db, + Chain::Main, + ) + .await }); // Wait for the hardfork state to finish first as we need it to start the randomX VM cache. @@ -120,9 +136,11 @@ impl ContextTask { weight_cache: weight_cache_handle.await.unwrap()?, rx_vm_cache: rx_seed_handle.await.unwrap()?, hardfork_state, + alt_chain_cache_map: AltChainMap::new(), chain_height, already_generated_coins, top_block_hash, + database, }; Ok(context_svc) @@ -211,6 +229,98 @@ impl ContextTask { BlockChainContextResponse::Ok } + BlockChainContextRequest::PopBlocks { numb_blocks } => { + assert!(numb_blocks < self.chain_height); + + self.difficulty_cache + .pop_blocks_main_chain(numb_blocks, self.database.clone()) + .await?; + self.weight_cache + .pop_blocks_main_chain(numb_blocks, self.database.clone()) + .await?; + self.rx_vm_cache + .pop_blocks_main_chain(self.chain_height - numb_blocks - 1); + self.hardfork_state + .pop_blocks_main_chain(numb_blocks, self.database.clone()) + .await?; + + self.alt_chain_cache_map.clear(); + + self.chain_height -= numb_blocks; + + let BCResponse::GeneratedCoins(already_generated_coins) = self + .database + .ready() + .await? + .call(BCReadRequest::GeneratedCoins(self.chain_height - 1)) + .await? + else { + panic!("Database sent incorrect response!"); + }; + + let BCResponse::BlockHash(top_block_hash) = self + .database + .ready() + .await? + .call(BCReadRequest::BlockHash(self.chain_height - 1, Chain::Main)) + .await? 
+ else { + panic!("Database returned incorrect response!"); + }; + + self.already_generated_coins = already_generated_coins; + self.top_block_hash = top_block_hash; + + std::mem::replace(&mut self.current_validity_token, ValidityToken::new()) + .set_data_invalid(); + + BlockChainContextResponse::Ok + } + BlockChainContextRequest::ClearAltCache => { + self.alt_chain_cache_map.clear(); + + BlockChainContextResponse::Ok + } + BlockChainContextRequest::AltChainContextCache { prev_id, _token } => { + BlockChainContextResponse::AltChainContextCache( + self.alt_chain_cache_map + .get_alt_chain_context(prev_id, &mut self.database) + .await?, + ) + } + BlockChainContextRequest::AltChainDifficultyCache { prev_id, _token } => { + BlockChainContextResponse::AltChainDifficultyCache( + get_alt_chain_difficulty_cache( + prev_id, + &self.difficulty_cache, + self.database.clone(), + ) + .await?, + ) + } + BlockChainContextRequest::AltChainWeightCache { prev_id, _token } => { + BlockChainContextResponse::AltChainWeightCache( + get_alt_chain_weight_cache(prev_id, &self.weight_cache, self.database.clone()) + .await?, + ) + } + BlockChainContextRequest::AltChainRxVM { + height, + chain, + _token, + } => BlockChainContextResponse::AltChainRxVM( + self.rx_vm_cache + .get_alt_vm(height, chain, &mut self.database) + .await?, + ), + BlockChainContextRequest::AddAltChainContextCache { + prev_id, + cache, + _token, + } => { + self.alt_chain_cache_map.add_alt_cache(prev_id, cache); + BlockChainContextResponse::Ok + } }) } diff --git a/consensus/src/context/weight.rs b/consensus/src/context/weight.rs index 2511c59d..10840863 100644 --- a/consensus/src/context/weight.rs +++ b/consensus/src/context/weight.rs @@ -8,17 +8,18 @@ //! use std::{ cmp::{max, min}, - collections::VecDeque, ops::Range, }; -use rayon::prelude::*; use tower::ServiceExt; use tracing::instrument; use cuprate_consensus_rules::blocks::{penalty_free_zone, PENALTY_FREE_ZONE_5}; -use cuprate_helper::{asynch::rayon_spawn_async, num::median}; -use cuprate_types::blockchain::{BCReadRequest, BCResponse}; +use cuprate_helper::{asynch::rayon_spawn_async, num::RollingMedian}; +use cuprate_types::{ + blockchain::{BCReadRequest, BCResponse}, + Chain, +}; use crate::{Database, ExtendedConsensusError, HardFork}; @@ -29,7 +30,7 @@ const LONG_TERM_WINDOW: u64 = 100000; /// Configuration for the block weight cache. /// -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct BlockWeightsCacheConfig { short_term_window: u64, long_term_window: u64, @@ -58,25 +59,17 @@ impl BlockWeightsCacheConfig { /// /// These calculations require a lot of data from the database so by caching /// this data it reduces the load on the database. -#[derive(Clone)] +#[derive(Debug, Clone, Eq, PartialEq)] pub struct BlockWeightsCache { /// The short term block weights. - short_term_block_weights: VecDeque, + short_term_block_weights: RollingMedian, /// The long term block weights. - long_term_weights: VecDeque, - - /// The short term block weights sorted so we don't have to sort them every time we need - /// the median. - cached_sorted_long_term_weights: Vec, - /// The long term block weights sorted so we don't have to sort them every time we need - /// the median. - cached_sorted_short_term_weights: Vec, + long_term_weights: RollingMedian, /// The height of the top block. - tip_height: u64, + pub(crate) tip_height: u64, - /// The block weight config. 
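
One idiom in the `PopBlocks` handler earlier in this diff is worth spelling out: `std::mem::replace(&mut self.current_validity_token, ValidityToken::new()).set_data_invalid()` swaps a fresh token into the task and invalidates the one that was handed out with previously built contexts. A toy sketch of the same pattern, where `Token` is a stand-in for Cuprate's `ValidityToken`:

```rust
use std::mem;
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};

/// Toy stand-in for `ValidityToken`: handed-out clones observe invalidation.
#[derive(Clone)]
struct Token(Arc<AtomicBool>);

impl Token {
    fn new() -> Self {
        Token(Arc::new(AtomicBool::new(true)))
    }
    fn set_data_invalid(self) {
        self.0.store(false, Ordering::SeqCst);
    }
    fn is_valid(&self) -> bool {
        self.0.load(Ordering::SeqCst)
    }
}

fn main() {
    let mut current = Token::new();
    // A context handed out earlier holds a clone of the current token.
    let old_context_token = current.clone();

    // Swap in a fresh token, then invalidate the one that was just replaced.
    mem::replace(&mut current, Token::new()).set_data_invalid();

    assert!(!old_context_token.is_valid()); // old contexts are now invalid
    assert!(current.is_valid());            // new contexts start out valid
}
```
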
- config: BlockWeightsCacheConfig, + pub(crate) config: BlockWeightsCacheConfig, } impl BlockWeightsCache { @@ -86,45 +79,109 @@ impl BlockWeightsCache { chain_height: u64, config: BlockWeightsCacheConfig, database: D, + chain: Chain, ) -> Result { tracing::info!("Initializing weight cache this may take a while."); let long_term_weights = get_long_term_weight_in_range( chain_height.saturating_sub(config.long_term_window)..chain_height, database.clone(), + chain, ) .await?; let short_term_block_weights = get_blocks_weight_in_range( chain_height.saturating_sub(config.short_term_window)..chain_height, database, + chain, ) .await?; tracing::info!("Initialized block weight cache, chain-height: {:?}, long term weights length: {:?}, short term weights length: {:?}", chain_height, long_term_weights.len(), short_term_block_weights.len()); - let mut cloned_short_term_weights = short_term_block_weights.clone(); - let mut cloned_long_term_weights = long_term_weights.clone(); Ok(BlockWeightsCache { - short_term_block_weights: short_term_block_weights.into(), - long_term_weights: long_term_weights.into(), - - cached_sorted_long_term_weights: rayon_spawn_async(|| { - cloned_long_term_weights.par_sort_unstable(); - cloned_long_term_weights + short_term_block_weights: rayon_spawn_async(move || { + RollingMedian::from_vec( + short_term_block_weights, + usize::try_from(config.short_term_window).unwrap(), + ) }) .await, - cached_sorted_short_term_weights: rayon_spawn_async(|| { - cloned_short_term_weights.par_sort_unstable(); - cloned_short_term_weights + long_term_weights: rayon_spawn_async(move || { + RollingMedian::from_vec( + long_term_weights, + usize::try_from(config.long_term_window).unwrap(), + ) }) .await, - tip_height: chain_height - 1, config, }) } + /// Pop some blocks from the top of the cache. + /// + /// The cache will be returned to the state it would have been in `numb_blocks` ago. + #[instrument(name = "pop_blocks_weight_cache", skip_all, fields(numb_blocks = numb_blocks))] + pub async fn pop_blocks_main_chain( + &mut self, + numb_blocks: u64, + database: D, + ) -> Result<(), ExtendedConsensusError> { + if self.long_term_weights.window_len() <= usize::try_from(numb_blocks).unwrap() { + // More blocks to pop than we have in the cache, so just restart a new cache. + *self = Self::init_from_chain_height( + self.tip_height - numb_blocks + 1, + self.config, + database, + Chain::Main, + ) + .await?; + + return Ok(()); + } + + let chain_height = self.tip_height + 1; + + let new_long_term_start_height = chain_height + .saturating_sub(self.config.long_term_window) + .saturating_sub(numb_blocks); + + let old_long_term_weights = get_long_term_weight_in_range( + new_long_term_start_height + // current_chain_height - self.long_term_weights.len() blocks are already in the cache. + ..(chain_height - u64::try_from(self.long_term_weights.window_len()).unwrap()), + database.clone(), + Chain::Main, + ) + .await?; + + let new_short_term_start_height = chain_height + .saturating_sub(self.config.short_term_window) + .saturating_sub(numb_blocks); + + let old_short_term_weights = get_blocks_weight_in_range( + new_short_term_start_height + // current_chain_height - self.long_term_weights.len() blocks are already in the cache. 
+ ..(chain_height - u64::try_from(self.short_term_block_weights.window_len()).unwrap()), + database, + Chain::Main + ) + .await?; + + for _ in 0..numb_blocks { + self.short_term_block_weights.pop_back(); + self.long_term_weights.pop_back(); + } + + self.long_term_weights.append_front(old_long_term_weights); + self.short_term_block_weights + .append_front(old_short_term_weights); + self.tip_height -= numb_blocks; + + Ok(()) + } + /// Add a new block to the cache. /// /// The block_height **MUST** be one more than the last height the cache has @@ -139,74 +196,19 @@ impl BlockWeightsCache { long_term_weight ); - // add the new block to the `long_term_weights` list and the sorted `cached_sorted_long_term_weights` list. - self.long_term_weights.push_back(long_term_weight); - match self - .cached_sorted_long_term_weights - .binary_search(&long_term_weight) - { - Ok(idx) | Err(idx) => self - .cached_sorted_long_term_weights - .insert(idx, long_term_weight), - } + self.long_term_weights.push(long_term_weight); - // If the list now has too many entries remove the oldest. - if u64::try_from(self.long_term_weights.len()).unwrap() > self.config.long_term_window { - let val = self - .long_term_weights - .pop_front() - .expect("long term window can't be negative"); - - match self.cached_sorted_long_term_weights.binary_search(&val) { - Ok(idx) => self.cached_sorted_long_term_weights.remove(idx), - Err(_) => panic!("Long term cache has incorrect values!"), - }; - } - - // add the block to the short_term_block_weights and the sorted cached_sorted_short_term_weights list. - self.short_term_block_weights.push_back(block_weight); - match self - .cached_sorted_short_term_weights - .binary_search(&block_weight) - { - Ok(idx) | Err(idx) => self - .cached_sorted_short_term_weights - .insert(idx, block_weight), - } - - // If there are now too many entries remove the oldest. - if u64::try_from(self.short_term_block_weights.len()).unwrap() - > self.config.short_term_window - { - let val = self - .short_term_block_weights - .pop_front() - .expect("short term window can't be negative"); - - match self.cached_sorted_short_term_weights.binary_search(&val) { - Ok(idx) => self.cached_sorted_short_term_weights.remove(idx), - Err(_) => panic!("Short term cache has incorrect values"), - }; - } - - debug_assert_eq!( - self.cached_sorted_long_term_weights.len(), - self.long_term_weights.len() - ); - debug_assert_eq!( - self.cached_sorted_short_term_weights.len(), - self.short_term_block_weights.len() - ); + self.short_term_block_weights.push(block_weight); } /// Returns the median long term weight over the last [`LONG_TERM_WINDOW`] blocks, or custom amount of blocks in the config. pub fn median_long_term_weight(&self) -> usize { - median(&self.cached_sorted_long_term_weights) + self.long_term_weights.median() } /// Returns the median weight over the last [`SHORT_TERM_WINDOW`] blocks, or custom amount of blocks in the config. 
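
The rewritten cache above delegates all of the sorted-vector bookkeeping to `RollingMedian`, added later in this diff in `helper/src/num/rolling_median.rs`. A short usage sketch, assuming the `cuprate-helper` crate with its `std` feature enabled: weights are pushed as blocks arrive and values older than the window fall out automatically.

```rust
use cuprate_helper::num::RollingMedian;

fn main() {
    // A window of 100 mirrors the short-term window in `TEST_WEIGHT_CONFIG`;
    // the real consensus windows are much larger.
    let mut short_term = RollingMedian::new(100);

    for weight in 1..=300_usize {
        short_term.push(weight);
    }

    // Only the newest 100 weights (201..=300) remain in the window.
    assert_eq!(short_term.window_len(), 100);
    assert_eq!(short_term.median(), 250);
}
```
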
pub fn median_short_term_weight(&self) -> usize { - median(&self.cached_sorted_short_term_weights) + self.short_term_block_weights.median() } /// Returns the effective median weight, used for block reward calculations and to calculate @@ -290,11 +292,12 @@ pub fn calculate_block_long_term_weight( async fn get_blocks_weight_in_range( range: Range, database: D, + chain: Chain, ) -> Result, ExtendedConsensusError> { tracing::info!("getting block weights."); let BCResponse::BlockExtendedHeaderInRange(ext_headers) = database - .oneshot(BCReadRequest::BlockExtendedHeaderInRange(range)) + .oneshot(BCReadRequest::BlockExtendedHeaderInRange(range, chain)) .await? else { panic!("Database sent incorrect response!") @@ -311,11 +314,12 @@ async fn get_blocks_weight_in_range( async fn get_long_term_weight_in_range( range: Range, database: D, + chain: Chain, ) -> Result, ExtendedConsensusError> { tracing::info!("getting block long term weights."); let BCResponse::BlockExtendedHeaderInRange(ext_headers) = database - .oneshot(BCReadRequest::BlockExtendedHeaderInRange(range)) + .oneshot(BCReadRequest::BlockExtendedHeaderInRange(range, chain)) .await? else { panic!("Database sent incorrect response!") diff --git a/consensus/src/tests/context/difficulty.rs b/consensus/src/tests/context/difficulty.rs index c9886f3c..b59f62ef 100644 --- a/consensus/src/tests/context/difficulty.rs +++ b/consensus/src/tests/context/difficulty.rs @@ -1,15 +1,15 @@ use std::collections::VecDeque; -use proptest::collection::size_range; +use proptest::collection::{size_range, vec}; use proptest::{prelude::*, prop_assert_eq, prop_compose, proptest}; -use cuprate_helper::num::median; - use crate::{ context::difficulty::*, tests::{context::data::DIF_3000000_3002000, mock_db::*}, HardFork, }; +use cuprate_helper::num::median; +use cuprate_types::Chain; const TEST_WINDOW: usize = 72; const TEST_CUT: usize = 6; @@ -26,9 +26,13 @@ async fn first_3_blocks_fixed_difficulty() -> Result<(), tower::BoxError> { let genesis = DummyBlockExtendedHeader::default().with_difficulty_info(0, 1); db_builder.add_block(genesis); - let mut difficulty_cache = - DifficultyCache::init_from_chain_height(1, TEST_DIFFICULTY_CONFIG, db_builder.finish(None)) - .await?; + let mut difficulty_cache = DifficultyCache::init_from_chain_height( + 1, + TEST_DIFFICULTY_CONFIG, + db_builder.finish(None), + Chain::Main, + ) + .await?; for height in 1..3 { assert_eq!(difficulty_cache.next_difficulty(&HardFork::V1), 1); @@ -42,9 +46,13 @@ async fn genesis_block_skipped() -> Result<(), tower::BoxError> { let mut db_builder = DummyDatabaseBuilder::default(); let genesis = DummyBlockExtendedHeader::default().with_difficulty_info(0, 1); db_builder.add_block(genesis); - let diff_cache = - DifficultyCache::init_from_chain_height(1, TEST_DIFFICULTY_CONFIG, db_builder.finish(None)) - .await?; + let diff_cache = DifficultyCache::init_from_chain_height( + 1, + TEST_DIFFICULTY_CONFIG, + db_builder.finish(None), + Chain::Main, + ) + .await?; assert!(diff_cache.cumulative_difficulties.is_empty()); assert!(diff_cache.timestamps.is_empty()); Ok(()) @@ -66,8 +74,9 @@ async fn calculate_diff_3000000_3002000() -> Result<(), tower::BoxError> { let mut diff_cache = DifficultyCache::init_from_chain_height( 3_000_720, - cfg.clone(), + cfg, db_builder.finish(Some(3_000_720)), + Chain::Main, ) .await?; @@ -208,4 +217,52 @@ proptest! 
{ } } + + #[test] + fn pop_blocks_below_total_blocks( + mut database in arb_dummy_database(20), + new_blocks in vec(any::<(u64, u128)>(), 0..500) + ) { + tokio_test::block_on(async move { + let old_cache = DifficultyCache::init_from_chain_height(19, TEST_DIFFICULTY_CONFIG, database.clone(), Chain::Main).await.unwrap(); + + let blocks_to_pop = new_blocks.len(); + + let mut new_cache = old_cache.clone(); + for (timestamp, cumulative_difficulty) in new_blocks.into_iter() { + database.add_block(DummyBlockExtendedHeader::default().with_difficulty_info(timestamp, cumulative_difficulty)); + new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty); + } + + new_cache.pop_blocks_main_chain(blocks_to_pop as u64, database).await?; + + prop_assert_eq!(new_cache, old_cache); + + Ok::<_, TestCaseError>(()) + })?; + } + + #[test] + fn pop_blocks_above_total_blocks( + mut database in arb_dummy_database(2000), + new_blocks in vec(any::<(u64, u128)>(), 0..5_000) + ) { + tokio_test::block_on(async move { + let old_cache = DifficultyCache::init_from_chain_height(1999, TEST_DIFFICULTY_CONFIG, database.clone(), Chain::Main).await.unwrap(); + + let blocks_to_pop = new_blocks.len(); + + let mut new_cache = old_cache.clone(); + for (timestamp, cumulative_difficulty) in new_blocks.into_iter() { + database.add_block(DummyBlockExtendedHeader::default().with_difficulty_info(timestamp, cumulative_difficulty)); + new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty); + } + + new_cache.pop_blocks_main_chain(blocks_to_pop as u64, database).await?; + + prop_assert_eq!(new_cache, old_cache); + + Ok::<_, TestCaseError>(()) + })?; + } } diff --git a/consensus/src/tests/context/hardforks.rs b/consensus/src/tests/context/hardforks.rs index f6f0f234..d003b3cc 100644 --- a/consensus/src/tests/context/hardforks.rs +++ b/consensus/src/tests/context/hardforks.rs @@ -1,3 +1,5 @@ +use proptest::{collection::vec, prelude::*}; + use cuprate_consensus_rules::hard_forks::{HFInfo, HFsInfo, HardFork, NUMB_OF_HARD_FORKS}; use crate::{ @@ -82,3 +84,44 @@ async fn hf_v15_v16_correct() { assert_eq!(state.current_hardfork, HardFork::V16); } + +proptest! 
{ + fn pop_blocks( + hfs in vec(any::(), 0..100), + extra_hfs in vec(any::(), 0..100) + ) { + tokio_test::block_on(async move { + let numb_hfs = hfs.len() as u64; + let numb_pop_blocks = extra_hfs.len() as u64; + + let mut db_builder = DummyDatabaseBuilder::default(); + + for hf in hfs { + db_builder.add_block( + DummyBlockExtendedHeader::default().with_hard_fork_info(hf, hf), + ); + } + + let db = db_builder.finish(Some(numb_hfs as usize)); + + let mut state = HardForkState::init_from_chain_height( + numb_hfs, + TEST_HARD_FORK_CONFIG, + db.clone(), + ) + .await?; + + let state_clone = state.clone(); + + for (i, hf) in extra_hfs.into_iter().enumerate() { + state.new_block(hf, state.last_height + u64::try_from(i).unwrap() + 1); + } + + state.pop_blocks_main_chain(numb_pop_blocks, db).await?; + + prop_assert_eq!(state_clone, state); + + Ok::<(), TestCaseError>(()) + })?; + } +} diff --git a/consensus/src/tests/context/weight.rs b/consensus/src/tests/context/weight.rs index 902d446a..83c8bb95 100644 --- a/consensus/src/tests/context/weight.rs +++ b/consensus/src/tests/context/weight.rs @@ -6,6 +6,7 @@ use crate::{ tests::{context::data::BW_2850000_3050000, mock_db::*}, HardFork, }; +use cuprate_types::Chain; pub const TEST_WEIGHT_CONFIG: BlockWeightsCacheConfig = BlockWeightsCacheConfig::new(100, 5000); @@ -21,6 +22,7 @@ async fn blocks_out_of_window_not_counted() -> Result<(), tower::BoxError> { 5000, TEST_WEIGHT_CONFIG, db_builder.finish(None), + Chain::Main, ) .await?; assert_eq!(weight_cache.median_long_term_weight(), 2500); @@ -37,6 +39,74 @@ async fn blocks_out_of_window_not_counted() -> Result<(), tower::BoxError> { Ok(()) } +#[tokio::test] +async fn pop_blocks_greater_than_window() -> Result<(), tower::BoxError> { + let mut db_builder = DummyDatabaseBuilder::default(); + for weight in 1..=5000 { + let block = DummyBlockExtendedHeader::default().with_weight_into(weight, weight); + db_builder.add_block(block); + } + + let database = db_builder.finish(None); + + let mut weight_cache = BlockWeightsCache::init_from_chain_height( + 5000, + TEST_WEIGHT_CONFIG, + database.clone(), + Chain::Main, + ) + .await?; + + let old_cache = weight_cache.clone(); + + weight_cache.new_block(5000, 0, 0); + weight_cache.new_block(5001, 0, 0); + weight_cache.new_block(5002, 0, 0); + + weight_cache + .pop_blocks_main_chain(3, database) + .await + .unwrap(); + + assert_eq!(weight_cache, old_cache); + + Ok(()) +} + +#[tokio::test] +async fn pop_blocks_less_than_window() -> Result<(), tower::BoxError> { + let mut db_builder = DummyDatabaseBuilder::default(); + for weight in 1..=500 { + let block = DummyBlockExtendedHeader::default().with_weight_into(weight, weight); + db_builder.add_block(block); + } + + let database = db_builder.finish(None); + + let mut weight_cache = BlockWeightsCache::init_from_chain_height( + 500, + TEST_WEIGHT_CONFIG, + database.clone(), + Chain::Main, + ) + .await?; + + let old_cache = weight_cache.clone(); + + weight_cache.new_block(500, 0, 0); + weight_cache.new_block(501, 0, 0); + weight_cache.new_block(502, 0, 0); + + weight_cache + .pop_blocks_main_chain(3, database) + .await + .unwrap(); + + assert_eq!(weight_cache, old_cache); + + Ok(()) +} + #[tokio::test] async fn weight_cache_calculates_correct_median() -> Result<(), tower::BoxError> { let mut db_builder = DummyDatabaseBuilder::default(); @@ -44,9 +114,13 @@ async fn weight_cache_calculates_correct_median() -> Result<(), tower::BoxError> let block = DummyBlockExtendedHeader::default().with_weight_into(0, 0); 
db_builder.add_block(block); - let mut weight_cache = - BlockWeightsCache::init_from_chain_height(1, TEST_WEIGHT_CONFIG, db_builder.finish(None)) - .await?; + let mut weight_cache = BlockWeightsCache::init_from_chain_height( + 1, + TEST_WEIGHT_CONFIG, + db_builder.finish(None), + Chain::Main, + ) + .await?; for height in 1..=100 { weight_cache.new_block(height as u64, height, height); @@ -76,6 +150,7 @@ async fn calc_bw_ltw_2850000_3050000() { 2950000, TEST_WEIGHT_CONFIG, db_builder.finish(Some(2950000)), + Chain::Main, ) .await .unwrap(); diff --git a/consensus/src/tests/mock_db.rs b/consensus/src/tests/mock_db.rs index d1c62550..c4fd75d1 100644 --- a/consensus/src/tests/mock_db.rs +++ b/consensus/src/tests/mock_db.rs @@ -127,6 +127,12 @@ pub struct DummyDatabase { dummy_height: Option, } +impl DummyDatabase { + pub fn add_block(&mut self, block: DummyBlockExtendedHeader) { + self.blocks.write().unwrap().push(block) + } +} + impl Service for DummyDatabase { type Response = BCResponse; type Error = BoxError; @@ -161,12 +167,12 @@ impl Service for DummyDatabase { .ok_or("block not in database!")?, ) } - BCReadRequest::BlockHash(id) => { + BCReadRequest::BlockHash(id, _) => { let mut hash = [0; 32]; hash[0..8].copy_from_slice(&id.to_le_bytes()); BCResponse::BlockHash(hash) } - BCReadRequest::BlockExtendedHeaderInRange(range) => { + BCReadRequest::BlockExtendedHeaderInRange(range, _) => { let mut end = usize::try_from(range.end).unwrap(); let mut start = usize::try_from(range.start).unwrap(); @@ -200,7 +206,7 @@ impl Service for DummyDatabase { BCResponse::ChainHeight(height, top_hash) } - BCReadRequest::GeneratedCoins => BCResponse::GeneratedCoins(0), + BCReadRequest::GeneratedCoins(_) => BCResponse::GeneratedCoins(0), _ => unimplemented!("the context svc should not need these requests!"), }) } diff --git a/helper/src/num.rs b/helper/src/num.rs index cc1feb1b..f90357e9 100644 --- a/helper/src/num.rs +++ b/helper/src/num.rs @@ -8,6 +8,9 @@ use core::{ ops::{Add, Div, Mul, Sub}, }; +#[cfg(feature = "std")] +mod rolling_median; + //---------------------------------------------------------------------------------------------------- Types // INVARIANT: must be private. // Protects against outside-crate implementations. @@ -15,6 +18,9 @@ mod private { pub trait Sealed: Copy + PartialOrd + core::fmt::Display {} } +#[cfg(feature = "std")] +pub use rolling_median::RollingMedian; + /// Non-floating point numbers /// /// This trait is sealed and is only implemented on: diff --git a/helper/src/num/rolling_median.rs b/helper/src/num/rolling_median.rs new file mode 100644 index 00000000..2babda2c --- /dev/null +++ b/helper/src/num/rolling_median.rs @@ -0,0 +1,150 @@ +use std::{ + collections::VecDeque, + ops::{Add, Div, Mul, Sub}, +}; + +use crate::num::median; + +/// A rolling median type. +/// +/// This keeps track of a window of items and allows calculating the [`RollingMedian::median`] of them. +/// +/// Example: +/// ```rust +/// # use cuprate_helper::num::RollingMedian; +/// let mut rolling_median = RollingMedian::new(2); +/// +/// rolling_median.push(1); +/// assert_eq!(rolling_median.median(), 1); +/// assert_eq!(rolling_median.window_len(), 1); +/// +/// rolling_median.push(3); +/// assert_eq!(rolling_median.median(), 2); +/// assert_eq!(rolling_median.window_len(), 2); +/// +/// rolling_median.push(5); +/// assert_eq!(rolling_median.median(), 4); +/// assert_eq!(rolling_median.window_len(), 2); +/// ``` +/// +// TODO: a more efficient structure is probably possible. 
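
The struct below keeps two parallel collections: an insertion-ordered window and a sorted copy maintained with `binary_search` on every push and pop. A stripped-down, `u64`-only sketch of that invariant (just the mechanism, not the generic Cuprate type):

```rust
use std::collections::VecDeque;

struct SortedWindow {
    window: VecDeque<u64>, // insertion order
    sorted: Vec<u64>,      // same values, kept sorted
}

impl SortedWindow {
    fn new() -> Self {
        Self { window: VecDeque::new(), sorted: Vec::new() }
    }

    fn push_back(&mut self, item: u64) {
        self.window.push_back(item);
        // `binary_search` returns the insertion point whether or not an equal
        // value is already present, keeping `sorted` sorted.
        match self.sorted.binary_search(&item) {
            Ok(idx) | Err(idx) => self.sorted.insert(idx, item),
        }
    }

    fn pop_front(&mut self) {
        if let Some(item) = self.window.pop_front() {
            // The popped value must exist in the sorted copy; remove one instance.
            let idx = self.sorted.binary_search(&item).expect("value must be in sorted copy");
            self.sorted.remove(idx);
        }
    }
}

fn main() {
    let mut w = SortedWindow::new();
    for x in [5, 1, 9, 1] {
        w.push_back(x);
    }
    assert_eq!(w.sorted, [1, 1, 5, 9]);

    w.pop_front(); // removes the oldest value (5) from both collections
    assert_eq!(w.window, VecDeque::from([1, 9, 1]));
    assert_eq!(w.sorted, [1, 1, 9]);
}
```
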
+#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone)] +pub struct RollingMedian { + /// The window of items, in order of insertion. + window: VecDeque, + /// The window of items, sorted. + sorted_window: Vec, + + /// The target window length. + target_window: usize, +} + +impl RollingMedian +where + T: Ord + + PartialOrd + + Add + + Sub + + Div + + Mul + + Copy + + From, +{ + /// Creates a new [`RollingMedian`] with a certain target window length. + /// + /// `target_window` is the maximum amount of items to keep in the rolling window. + pub fn new(target_window: usize) -> Self { + Self { + window: VecDeque::with_capacity(target_window), + sorted_window: Vec::with_capacity(target_window), + target_window, + } + } + + /// Creates a new [`RollingMedian`] from a [`Vec`] with a certain target window length. + /// + /// `target_window` is the maximum amount of items to keep in the rolling window. + /// + /// # Panics + /// This function panics if `vec.len() > target_window`. + pub fn from_vec(vec: Vec, target_window: usize) -> Self { + assert!(vec.len() <= target_window); + + let mut sorted_window = vec.clone(); + sorted_window.sort_unstable(); + + Self { + window: vec.into(), + sorted_window, + target_window, + } + } + + /// Pops the front of the window, i.e. the oldest item. + /// + /// This is often not needed as [`RollingMedian::push`] will handle popping old values when they fall + /// out of the window. + pub fn pop_front(&mut self) { + if let Some(item) = self.window.pop_front() { + match self.sorted_window.binary_search(&item) { + Ok(idx) => { + self.sorted_window.remove(idx); + } + Err(_) => panic!("Value expected to be in sorted_window was not there"), + } + } + } + + /// Pops the back of the window, i.e. the youngest item. + pub fn pop_back(&mut self) { + if let Some(item) = self.window.pop_back() { + match self.sorted_window.binary_search(&item) { + Ok(idx) => { + self.sorted_window.remove(idx); + } + Err(_) => panic!("Value expected to be in sorted_window was not there"), + } + } + } + + /// Push an item to the _back_ of the window. + /// + /// This will pop the oldest item in the window if the target length has been exceeded. + pub fn push(&mut self, item: T) { + if self.window.len() >= self.target_window { + self.pop_front(); + } + + self.window.push_back(item); + match self.sorted_window.binary_search(&item) { + Ok(idx) | Err(idx) => self.sorted_window.insert(idx, item), + } + } + + /// Append some values to the _front_ of the window. + /// + /// These new values will be the oldest items in the window. The order of the inputted items will be + /// kept, i.e. the first item in the [`Vec`] will be the oldest item in the queue. + pub fn append_front(&mut self, items: Vec) { + for item in items.into_iter().rev() { + self.window.push_front(item); + match self.sorted_window.binary_search(&item) { + Ok(idx) | Err(idx) => self.sorted_window.insert(idx, item), + } + + if self.window.len() > self.target_window { + self.pop_back(); + } + } + } + + /// Returns the number of items currently in the [`RollingMedian`]. + pub fn window_len(&self) -> usize { + self.window.len() + } + + /// Calculates the median of the values currently in the [`RollingMedian`]. 
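
Consistent with the doc-test above, `median` returns the middle element when the window length is odd and the (integer) average of the two middle values when it is even. A plain-`u64` sketch of that rule; the real helper, `cuprate_helper::num::median`, is generic over the numeric type:

```rust
// Assumes `sorted` is non-empty and already sorted.
fn median_of_sorted(sorted: &[u64]) -> u64 {
    let mid = sorted.len() / 2;
    if sorted.len() % 2 == 0 {
        // Even length: integer average of the two middle values.
        (sorted[mid - 1] + sorted[mid]) / 2
    } else {
        // Odd length: the middle value.
        sorted[mid]
    }
}

fn main() {
    assert_eq!(median_of_sorted(&[1, 3]), 2); // matches the doc-test: {1, 3} -> 2
    assert_eq!(median_of_sorted(&[3, 5]), 4); // and {3, 5} -> 4
    assert_eq!(median_of_sorted(&[1, 2, 100]), 2);
}
```
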
+ pub fn median(&self) -> T { + median(&self.sorted_window) + } +} diff --git a/rpc/types/Cargo.toml b/rpc/types/Cargo.toml index fcec4536..9c996818 100644 --- a/rpc/types/Cargo.toml +++ b/rpc/types/Cargo.toml @@ -23,5 +23,8 @@ paste = { workspace = true } serde = { workspace = true, optional = true } [dev-dependencies] +cuprate-test-utils = { path = "../../test-utils" } +cuprate-json-rpc = { path = "../json-rpc" } + serde_json = { workspace = true } -pretty_assertions = { workspace = true } +pretty_assertions = { workspace = true } \ No newline at end of file diff --git a/rpc/types/src/base.rs b/rpc/types/src/base.rs index 4990cdd6..c131e41e 100644 --- a/rpc/types/src/base.rs +++ b/rpc/types/src/base.rs @@ -57,6 +57,64 @@ pub struct ResponseBase { pub untrusted: bool, } +impl ResponseBase { + /// `const` version of [`Default::default`]. + /// + /// ```rust + /// use cuprate_rpc_types::{misc::*, base::*}; + /// + /// let new = ResponseBase::new(); + /// assert_eq!(new, ResponseBase { + /// status: Status::Ok, + /// untrusted: false, + /// }); + /// ``` + pub const fn new() -> Self { + Self { + status: Status::Ok, + untrusted: false, + } + } + + /// Returns OK and trusted [`Self`]. + /// + /// This is the most common version of [`Self`]. + /// + /// ```rust + /// use cuprate_rpc_types::{misc::*, base::*}; + /// + /// let ok = ResponseBase::ok(); + /// assert_eq!(ok, ResponseBase { + /// status: Status::Ok, + /// untrusted: false, + /// }); + /// ``` + pub const fn ok() -> Self { + Self { + status: Status::Ok, + untrusted: false, + } + } + + /// Same as [`Self::ok`] but with [`Self::untrusted`] set to `true`. + /// + /// ```rust + /// use cuprate_rpc_types::{misc::*, base::*}; + /// + /// let ok_untrusted = ResponseBase::ok_untrusted(); + /// assert_eq!(ok_untrusted, ResponseBase { + /// status: Status::Ok, + /// untrusted: true, + /// }); + /// ``` + pub const fn ok_untrusted() -> Self { + Self { + status: Status::Ok, + untrusted: true, + } + } +} + #[cfg(feature = "epee")] epee_object! { ResponseBase, @@ -80,6 +138,74 @@ pub struct AccessResponseBase { pub top_hash: String, } +impl AccessResponseBase { + /// Creates a new [`Self`] with default values. + /// + /// Since RPC payment is semi-deprecated, [`Self::credits`] + /// and [`Self::top_hash`] will always be set to the default + /// values. + /// + /// ```rust + /// use cuprate_rpc_types::{misc::*, base::*}; + /// + /// let new = AccessResponseBase::new(ResponseBase::ok()); + /// assert_eq!(new, AccessResponseBase { + /// response_base: ResponseBase::ok(), + /// credits: 0, + /// top_hash: "".into(), + /// }); + /// ``` + pub const fn new(response_base: ResponseBase) -> Self { + Self { + response_base, + credits: 0, + top_hash: String::new(), + } + } + + /// Returns OK and trusted [`Self`]. + /// + /// This is the most common version of [`Self`]. + /// + /// ```rust + /// use cuprate_rpc_types::{misc::*, base::*}; + /// + /// let ok = AccessResponseBase::ok(); + /// assert_eq!(ok, AccessResponseBase { + /// response_base: ResponseBase::ok(), + /// credits: 0, + /// top_hash: "".into(), + /// }); + /// ``` + pub const fn ok() -> Self { + Self { + response_base: ResponseBase::ok(), + credits: 0, + top_hash: String::new(), + } + } + + /// Same as [`Self::ok`] but with `untrusted` set to `true`. 
+ /// + /// ```rust + /// use cuprate_rpc_types::{misc::*, base::*}; + /// + /// let ok_untrusted = AccessResponseBase::ok_untrusted(); + /// assert_eq!(ok_untrusted, AccessResponseBase { + /// response_base: ResponseBase::ok_untrusted(), + /// credits: 0, + /// top_hash: "".into(), + /// }); + /// ``` + pub const fn ok_untrusted() -> Self { + Self { + response_base: ResponseBase::ok_untrusted(), + credits: 0, + top_hash: String::new(), + } + } +} + #[cfg(feature = "epee")] epee_object! { AccessResponseBase, diff --git a/rpc/types/src/defaults.rs b/rpc/types/src/defaults.rs index 9366a266..6addd0ab 100644 --- a/rpc/types/src/defaults.rs +++ b/rpc/types/src/defaults.rs @@ -53,6 +53,12 @@ pub(crate) fn default_zero>() -> T { T::from(0) } +/// Default `1` value used in request/response types. +#[inline] +pub(crate) fn default_one>() -> T { + T::from(1) +} + //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test { diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index f4bca993..dd2e6483 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -5,15 +5,78 @@ //---------------------------------------------------------------------------------------------------- Import use crate::{ base::{AccessResponseBase, ResponseBase}, - defaults::{default_false, default_height, default_string, default_vec, default_zero}, + defaults::{ + default_false, default_height, default_one, default_string, default_true, default_vec, + default_zero, + }, free::{is_one, is_zero}, macros::define_request_and_response, misc::{ - AuxPow, BlockHeader, ChainInfo, ConnectionInfo, Distribution, GetBan, HardforkEntry, - HistogramEntry, OutputDistributionData, SetBan, Span, Status, SyncInfoPeer, TxBacklogEntry, + AuxPow, BlockHeader, ChainInfo, ConnectionInfo, Distribution, GetBan, + GetMinerDataTxBacklogEntry, HardforkEntry, HistogramEntry, OutputDistributionData, SetBan, + Span, Status, SyncInfoPeer, TxBacklogEntry, }, }; +//---------------------------------------------------------------------------------------------------- Macro +/// Adds a (de)serialization doc-test to a type in `json.rs`. +/// +/// It expects a const string from `cuprate_test_utils::rpc::data` +/// and the expected value it should (de)serialize into/from. +/// +/// It tests that the provided const JSON string can properly +/// (de)serialize into the expected value. +/// +/// See below for example usage. This macro is only used in this file. +macro_rules! serde_doc_test { + ( + // `const` string from `cuprate_test_utils::rpc::data` + // v + $cuprate_test_utils_rpc_const:ident => $expected:expr + // ^ + // Expected value as an expression + ) => { + paste::paste! 
{ + concat!( + "```rust\n", + "use cuprate_test_utils::rpc::data::json::*;\n", + "use cuprate_rpc_types::{misc::*, base::*, json::*};\n", + "use serde_json::{Value, from_str, from_value};\n", + "\n", + "// The expected data.\n", + "let expected = ", + stringify!($expected), + ";\n", + "\n", + "// Assert it can be turned into a JSON value.\n", + "let value = from_str::(", + stringify!($cuprate_test_utils_rpc_const), + ").unwrap();\n", + "let Value::Object(map) = value else {\n", + " panic!();\n", + "};\n", + "\n", + "// If a request...\n", + "if let Some(params) = map.get(\"params\") {\n", + " let response = from_value::<", + stringify!([<$cuprate_test_utils_rpc_const:camel>]), + ">(params.clone()).unwrap();\n", + " assert_eq!(response, expected);\n", + " return;\n", + "}\n", + "\n", + "// Else, if a response...\n", + "let result = map.get(\"result\").unwrap().clone();\n", + "let response = from_value::<", + stringify!([<$cuprate_test_utils_rpc_const:camel>]), + ">(result.clone()).unwrap();\n", + "assert_eq!(response, expected);\n", + "```\n", + ) + } + }; +} + //---------------------------------------------------------------------------------------------------- Definitions // This generates 2 structs: // @@ -41,7 +104,22 @@ define_request_and_response! { // // If there are any additional attributes (`/// docs` or `#[derive]`s) // for the struct, they go here, e.g.: - // #[derive(Copy)] + // + #[doc = serde_doc_test!( + // ^ This is a macro that adds a doc-test to this type. + // It is optional but it is added to nearly all types. + // The syntax is: + // `$const` => `$expected` + // where `$const` is a `const` string from + // `cuprate_test_utils::rpc::data` and `$expected` is an + // actual expression that the string _should_ (de)serialize into/from. + GET_BLOCK_TEMPLATE_REQUEST => GetBlockTemplateRequest { + extra_nonce: String::default(), + prev_block: String::default(), + reserve_size: 60, + wallet_address: "44GBHzv6ZyQdJkjqZje6KLZ3xSyN1hBSFAnLP6EAqJtCRVzMzZmeXTC2AHKDS9aEDTRKmo6a6o9r9j86pYfhCWDkKjbtcns".into(), + } + )] Request { // Within the `{}` is an infinite matching pattern of: // ``` @@ -66,17 +144,16 @@ define_request_and_response! { // // This is a HACK since `serde`'s default attribute only takes in // string literals and macros (stringify) within attributes do not work. - extra_nonce: String /* = default_expression, "default_literal" */, + extra_nonce: String = default_string(), "default_string", + prev_block: String = default_string(), "default_string", // Another optional expression: // This indicates to the macro to (de)serialize // this field as another type in epee. // // See `cuprate_epee_encoding::epee_object` for info. - prev_block: String /* as Type */, + reserve_size: u64 /* as Type */, - // Regular fields. - reserve_size: u64, wallet_address: String, }, @@ -92,6 +169,23 @@ define_request_and_response! { // "Flatten" means the field(s) of a struct gets inlined // directly into the struct during (de)serialization, see: // . 
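
Concretely, for an entry like `GET_BLOCK_COUNT_RESPONSE => GetBlockCountResponse { base: ResponseBase::ok(), count: 3195019 }` (added below), the `serde_doc_test!` macro above pastes roughly the following doc-test onto the type. The turbofish generics (`from_str::<Value>`, `from_value::<GetBlockCountResponse>`) are written out explicitly here since they appear to have been swallowed by the diff rendering above; treat this as a sketch of the expansion, not its literal output.

```rust
use cuprate_test_utils::rpc::data::json::*;
use cuprate_rpc_types::{misc::*, base::*, json::*};
use serde_json::{from_str, from_value, Value};

fn main() {
    // The expected data.
    let expected = GetBlockCountResponse {
        base: ResponseBase::ok(),
        count: 3195019,
    };

    // Assert the const string parses as a JSON object.
    let value = from_str::<Value>(GET_BLOCK_COUNT_RESPONSE).unwrap();
    let Value::Object(map) = value else {
        panic!();
    };

    // If a request, the type lives under "params"...
    if let Some(params) = map.get("params") {
        let response = from_value::<GetBlockCountResponse>(params.clone()).unwrap();
        assert_eq!(response, expected);
        return;
    }

    // ...else it is a response and lives under "result".
    let result = map.get("result").unwrap().clone();
    let response = from_value::<GetBlockCountResponse>(result.clone()).unwrap();
    assert_eq!(response, expected);
}
```
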
+ #[doc = serde_doc_test!( + GET_BLOCK_TEMPLATE_RESPONSE => GetBlockTemplateResponse { + base: ResponseBase::ok(), + blockhashing_blob: "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a00000000e0c20372be23d356347091025c5b5e8f2abf83ab618378565cce2b703491523401".into(), + blocktemplate_blob: "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a0000000002c681c30101ff8a81c3010180e0a596bb11033b7eedf47baf878f3490cb20b696079c34bd017fe59b0d070e74d73ffabc4bb0e05f011decb630f3148d0163b3bd39690dde4078e4cfb69fecf020d6278a27bad10c58023c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(), + difficulty_top64: 0, + difficulty: 283305047039, + expected_reward: 600000000000, + height: 3195018, + next_seed_hash: "".into(), + prev_hash: "9d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a".into(), + reserved_offset: 131, + seed_hash: "e2aa0b7b55042cd48b02e395d78fa66a29815ccc1584e38db2d1f0e8485cd44f".into(), + seed_height: 3194880, + wide_difficulty: "0x41f64bf3ff".into(), + } + )] ResponseBase { // This is using [`crate::base::ResponseBase`], // so the type we generate will contain this field: @@ -131,6 +225,12 @@ define_request_and_response! { // type alias to `()` instead of a `struct`. Request {}, + #[doc = serde_doc_test!( + GET_BLOCK_COUNT_RESPONSE => GetBlockCountResponse { + base: ResponseBase::ok(), + count: 3195019, + } + )] ResponseBase { count: u64, } @@ -140,15 +240,14 @@ define_request_and_response! { on_get_block_hash, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 935..=939, + OnGetBlockHash, - /// ```rust - /// use serde_json::*; - /// use cuprate_rpc_types::json::*; - /// - /// let x = OnGetBlockHashRequest { block_height: [3] }; - /// let x = to_string(&x).unwrap(); - /// assert_eq!(x, "[3]"); - /// ``` + + #[doc = serde_doc_test!( + ON_GET_BLOCK_HASH_REQUEST => OnGetBlockHashRequest { + block_height: [912345], + } + )] #[cfg_attr(feature = "serde", serde(transparent))] #[repr(transparent)] #[derive(Copy)] @@ -157,14 +256,12 @@ define_request_and_response! { // it must be a 1 length array or else it will error. block_height: [u64; 1], }, - /// ```rust - /// use serde_json::*; - /// use cuprate_rpc_types::json::*; - /// - /// let x = OnGetBlockHashResponse { block_hash: String::from("asdf") }; - /// let x = to_string(&x).unwrap(); - /// assert_eq!(x, "\"asdf\""); - /// ``` + + #[doc = serde_doc_test!( + ON_GET_BLOCK_HASH_RESPONSE => OnGetBlockHashResponse { + block_hash: "e22cf75f39ae720e8b71b3d120a5ac03f0db50bba6379e2850975b4859190bc6".into(), + } + )] #[cfg_attr(feature = "serde", serde(transparent))] #[repr(transparent)] Response { @@ -176,15 +273,14 @@ define_request_and_response! 
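
The `field: Type = default_fn(), "default_fn"` syntax used in the requests above is, per the comments, a workaround for serde's string-literal-only `default` attribute. In plain serde terms it behaves roughly like the sketch below, assuming `serde` (with `derive`) and `serde_json` as dependencies; `default_string` and the struct name here are local stand-ins, not the macro's literal expansion.

```rust
use serde::Deserialize;

// Local stand-in for the crate's `default_string` helper.
fn default_string() -> String {
    String::new()
}

// Roughly how `extra_nonce: String = default_string(), "default_string"` behaves.
#[derive(Debug, Deserialize, PartialEq)]
struct GetBlockTemplateRequestSketch {
    #[serde(default = "default_string")]
    extra_nonce: String,
    #[serde(default = "default_string")]
    prev_block: String,
    reserve_size: u64,
    wallet_address: String,
}

fn main() {
    // `extra_nonce` and `prev_block` are missing, so they fall back to their defaults.
    let json = r#"{ "reserve_size": 60, "wallet_address": "44..." }"#;
    let request: GetBlockTemplateRequestSketch = serde_json::from_str(json).unwrap();

    assert_eq!(request.extra_nonce, "");
    assert_eq!(request.prev_block, "");
    assert_eq!(request.reserve_size, 60);
}
```
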
{ submit_block, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1114..=1128, + SubmitBlock, - /// ```rust - /// use serde_json::*; - /// use cuprate_rpc_types::json::*; - /// - /// let x = SubmitBlockRequest { block_blob: ["a".into()] }; - /// let x = to_string(&x).unwrap(); - /// assert_eq!(x, r#"["a"]"#); - /// ``` + + #[doc = serde_doc_test!( + SUBMIT_BLOCK_REQUEST => SubmitBlockRequest { + block_blob: ["0707e6bdfedc053771512f1bc27c62731ae9e8f2443db64ce742f4e57f5cf8d393de28551e441a0000000002fb830a01ffbf830a018cfe88bee283060274c0aae2ef5730e680308d9c00b6da59187ad0352efe3c71d36eeeb28782f29f2501bd56b952c3ddc3e350c2631d3a5086cac172c56893831228b17de296ff4669de020200000000".into()], + } + )] #[cfg_attr(feature = "serde", serde(transparent))] #[repr(transparent)] Request { @@ -192,6 +288,8 @@ define_request_and_response! { // it must be a 1 length array or else it will error. block_blob: [String; 1], }, + + // FIXME: `cuprate_test_utils` only has an `error` response for this. ResponseBase { block_id: String, } @@ -201,13 +299,31 @@ define_request_and_response! { generateblocks, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1130..=1161, + GenerateBlocks, + + #[doc = serde_doc_test!( + GENERATE_BLOCKS_REQUEST => GenerateBlocksRequest { + amount_of_blocks: 1, + prev_block: String::default(), + wallet_address: "44AFFq5kSiGBoZ4NMDwYtN18obc8AemS33DBLWs3H7otXft3XjrpDtQGv7SqSsaBYBb98uNbr2VBBEt7f2wfn3RVGQBEP3A".into(), + starting_nonce: 0 + } + )] Request { amount_of_blocks: u64, - prev_block: String, + prev_block: String = default_string(), "default_string", starting_nonce: u32, wallet_address: String, }, + + #[doc = serde_doc_test!( + GENERATE_BLOCKS_RESPONSE => GenerateBlocksResponse { + base: ResponseBase::ok(), + blocks: vec!["49b712db7760e3728586f8434ee8bc8d7b3d410dac6bb6e98bf5845c83b917e4".into()], + height: 9783, + } + )] ResponseBase { blocks: Vec, height: u64, @@ -218,11 +334,43 @@ define_request_and_response! { get_last_block_header, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1214..=1238, + GetLastBlockHeader, + #[derive(Copy)] Request { fill_pow_hash: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + GET_LAST_BLOCK_HEADER_RESPONSE => GetLastBlockHeaderResponse { + base: AccessResponseBase::ok(), + block_header: BlockHeader { + block_size: 200419, + block_weight: 200419, + cumulative_difficulty: 366125734645190820, + cumulative_difficulty_top64: 0, + depth: 0, + difficulty: 282052561854, + difficulty_top64: 0, + hash: "57238217820195ac4c08637a144a885491da167899cf1d20e8e7ce0ae0a3434e".into(), + height: 3195020, + long_term_weight: 200419, + major_version: 16, + miner_tx_hash: "7a42667237d4f79891bb407c49c712a9299fb87fce799833a7b633a3a9377dbd".into(), + minor_version: 16, + nonce: 1885649739, + num_txes: 37, + orphan_status: false, + pow_hash: "".into(), + prev_hash: "22c72248ae9c5a2863c94735d710a3525c499f70707d1c2f395169bc5c8a0da3".into(), + reward: 615702960000, + timestamp: 1721245548, + wide_cumulative_difficulty: "0x514bd6a74a7d0a4".into(), + wide_difficulty: "0x41aba48bbe".into() + } + } + )] AccessResponseBase { block_header: BlockHeader, } @@ -233,14 +381,52 @@ define_request_and_response! 
{ cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1240..=1269, GetBlockHeaderByHash, + #[doc = serde_doc_test!( + GET_BLOCK_HEADER_BY_HASH_REQUEST => GetBlockHeaderByHashRequest { + hash: "e22cf75f39ae720e8b71b3d120a5ac03f0db50bba6379e2850975b4859190bc6".into(), + hashes: vec![], + fill_pow_hash: false, + } + )] Request { hash: String, - hashes: Vec, + hashes: Vec = default_vec::(), "default_vec", fill_pow_hash: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + GET_BLOCK_HEADER_BY_HASH_RESPONSE => GetBlockHeaderByHashResponse { + base: AccessResponseBase::ok(), + block_headers: vec![], + block_header: BlockHeader { + block_size: 210, + block_weight: 210, + cumulative_difficulty: 754734824984346, + cumulative_difficulty_top64: 0, + depth: 2282676, + difficulty: 815625611, + difficulty_top64: 0, + hash: "e22cf75f39ae720e8b71b3d120a5ac03f0db50bba6379e2850975b4859190bc6".into(), + height: 912345, + long_term_weight: 210, + major_version: 1, + miner_tx_hash: "c7da3965f25c19b8eb7dd8db48dcd4e7c885e2491db77e289f0609bf8e08ec30".into(), + minor_version: 2, + nonce: 1646, + num_txes: 0, + orphan_status: false, + pow_hash: "".into(), + prev_hash: "b61c58b2e0be53fad5ef9d9731a55e8a81d972b8d90ed07c04fd37ca6403ff78".into(), + reward: 7388968946286, + timestamp: 1452793716, + wide_cumulative_difficulty: "0x2ae6d65248f1a".into(), + wide_difficulty: "0x309d758b".into() + }, + } + )] AccessResponseBase { block_header: BlockHeader, - block_headers: Vec, + block_headers: Vec = default_vec::(), "default_vec", } } @@ -248,12 +434,50 @@ define_request_and_response! { get_block_header_by_height, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1271..=1296, + GetBlockHeaderByHeight, + #[derive(Copy)] + #[doc = serde_doc_test!( + GET_BLOCK_HEADER_BY_HEIGHT_REQUEST => GetBlockHeaderByHeightRequest { + height: 912345, + fill_pow_hash: false, + } + )] Request { height: u64, fill_pow_hash: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + GET_BLOCK_HEADER_BY_HEIGHT_RESPONSE => GetBlockHeaderByHeightResponse { + base: AccessResponseBase::ok(), + block_header: BlockHeader { + block_size: 210, + block_weight: 210, + cumulative_difficulty: 754734824984346, + cumulative_difficulty_top64: 0, + depth: 2282677, + difficulty: 815625611, + difficulty_top64: 0, + hash: "e22cf75f39ae720e8b71b3d120a5ac03f0db50bba6379e2850975b4859190bc6".into(), + height: 912345, + long_term_weight: 210, + major_version: 1, + miner_tx_hash: "c7da3965f25c19b8eb7dd8db48dcd4e7c885e2491db77e289f0609bf8e08ec30".into(), + minor_version: 2, + nonce: 1646, + num_txes: 0, + orphan_status: false, + pow_hash: "".into(), + prev_hash: "b61c58b2e0be53fad5ef9d9731a55e8a81d972b8d90ed07c04fd37ca6403ff78".into(), + reward: 7388968946286, + timestamp: 1452793716, + wide_cumulative_difficulty: "0x2ae6d65248f1a".into(), + wide_difficulty: "0x309d758b".into() + }, + } + )] AccessResponseBase { block_header: BlockHeader, } @@ -263,13 +487,78 @@ define_request_and_response! 
{ get_block_headers_range, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1756..=1783, + GetBlockHeadersRange, + #[derive(Copy)] + #[doc = serde_doc_test!( + GET_BLOCK_HEADERS_RANGE_REQUEST => GetBlockHeadersRangeRequest { + start_height: 1545999, + end_height: 1546000, + fill_pow_hash: false, + } + )] Request { start_height: u64, end_height: u64, fill_pow_hash: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + GET_BLOCK_HEADERS_RANGE_RESPONSE => GetBlockHeadersRangeResponse { + base: AccessResponseBase::ok(), + headers: vec![ + BlockHeader { + block_size: 301413, + block_weight: 301413, + cumulative_difficulty: 13185267971483472, + cumulative_difficulty_top64: 0, + depth: 1649024, + difficulty: 134636057921, + difficulty_top64: 0, + hash: "86d1d20a40cefcf3dd410ff6967e0491613b77bf73ea8f1bf2e335cf9cf7d57a".into(), + height: 1545999, + long_term_weight: 301413, + major_version: 6, + miner_tx_hash: "9909c6f8a5267f043c3b2b079fb4eacc49ef9c1dee1c028eeb1a259b95e6e1d9".into(), + minor_version: 6, + nonce: 3246403956, + num_txes: 20, + orphan_status: false, + pow_hash: "".into(), + prev_hash: "0ef6e948f77b8f8806621003f5de24b1bcbea150bc0e376835aea099674a5db5".into(), + reward: 5025593029981, + timestamp: 1523002893, + wide_cumulative_difficulty: "0x2ed7ee6db56750".into(), + wide_difficulty: "0x1f58ef3541".into() + }, + BlockHeader { + block_size: 13322, + block_weight: 13322, + cumulative_difficulty: 13185402687569710, + cumulative_difficulty_top64: 0, + depth: 1649023, + difficulty: 134716086238, + difficulty_top64: 0, + hash: "b408bf4cfcd7de13e7e370c84b8314c85b24f0ba4093ca1d6eeb30b35e34e91a".into(), + height: 1546000, + long_term_weight: 13322, + major_version: 7, + miner_tx_hash: "7f749c7c64acb35ef427c7454c45e6688781fbead9bbf222cb12ad1a96a4e8f6".into(), + minor_version: 7, + nonce: 3737164176, + num_txes: 1, + orphan_status: false, + pow_hash: "".into(), + prev_hash: "86d1d20a40cefcf3dd410ff6967e0491613b77bf73ea8f1bf2e335cf9cf7d57a".into(), + reward: 4851952181070, + timestamp: 1523002931, + wide_cumulative_difficulty: "0x2ed80dcb69bf2e".into(), + wide_difficulty: "0x1f5db457de".into() + } + ], + } + )] AccessResponseBase { headers: Vec, } @@ -280,6 +569,14 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1298..=1313, GetBlock, + + #[doc = serde_doc_test!( + GET_BLOCK_REQUEST => GetBlockRequest { + height: 2751506, + hash: String::default(), + fill_pow_hash: false, + } + )] Request { // `monerod` has both `hash` and `height` fields. // In the RPC handler, if `hash.is_empty()`, it will use it, else, it uses `height`. @@ -288,12 +585,46 @@ define_request_and_response! 
{ height: u64 = default_height(), "default_height", fill_pow_hash: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + GET_BLOCK_RESPONSE => GetBlockResponse { + base: AccessResponseBase::ok(), + blob: "1010c58bab9b06b27bdecfc6cd0a46172d136c08831cf67660377ba992332363228b1b722781e7807e07f502cef8a70101ff92f8a7010180e0a596bb1103d7cbf826b665d7a532c316982dc8dbc24f285cbc18bbcc27c7164cd9b3277a85d034019f629d8b36bd16a2bfce3ea80c31dc4d8762c67165aec21845494e32b7582fe00211000000297a787a000000000000000000000000".into(), + block_header: BlockHeader { + block_size: 106, + block_weight: 106, + cumulative_difficulty: 236046001376524168, + cumulative_difficulty_top64: 0, + depth: 443517, + difficulty: 313732272488, + difficulty_top64: 0, + hash: "43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428".into(), + height: 2751506, + long_term_weight: 176470, + major_version: 16, + miner_tx_hash: "e49b854c5f339d7410a77f2a137281d8042a0ffc7ef9ab24cd670b67139b24cd".into(), + minor_version: 16, + nonce: 4110909056, + num_txes: 0, + orphan_status: false, + pow_hash: "".into(), + prev_hash: "b27bdecfc6cd0a46172d136c08831cf67660377ba992332363228b1b722781e7".into(), + reward: 600000000000, + timestamp: 1667941829, + wide_cumulative_difficulty: "0x3469a966eb2f788".into(), + wide_difficulty: "0x490be69168".into() + }, + json: "{\n \"major_version\": 16, \n \"minor_version\": 16, \n \"timestamp\": 1667941829, \n \"prev_id\": \"b27bdecfc6cd0a46172d136c08831cf67660377ba992332363228b1b722781e7\", \n \"nonce\": 4110909056, \n \"miner_tx\": {\n \"version\": 2, \n \"unlock_time\": 2751566, \n \"vin\": [ {\n \"gen\": {\n \"height\": 2751506\n }\n }\n ], \n \"vout\": [ {\n \"amount\": 600000000000, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"d7cbf826b665d7a532c316982dc8dbc24f285cbc18bbcc27c7164cd9b3277a85\", \n \"view_tag\": \"d0\"\n }\n }\n }\n ], \n \"extra\": [ 1, 159, 98, 157, 139, 54, 189, 22, 162, 191, 206, 62, 168, 12, 49, 220, 77, 135, 98, 198, 113, 101, 174, 194, 24, 69, 73, 78, 50, 183, 88, 47, 224, 2, 17, 0, 0, 0, 41, 122, 120, 122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n ], \n \"rct_signatures\": {\n \"type\": 0\n }\n }, \n \"tx_hashes\": [ ]\n}".into(), + miner_tx_hash: "e49b854c5f339d7410a77f2a137281d8042a0ffc7ef9ab24cd670b67139b24cd".into(), + tx_hashes: vec![], + } + )] AccessResponseBase { blob: String, block_header: BlockHeader, json: String, // FIXME: this should be defined in a struct, it has many fields. miner_tx_hash: String, - tx_hashes: Vec, + tx_hashes: Vec = default_vec::(), "default_vec", } } @@ -301,8 +632,72 @@ define_request_and_response! 
{ get_connections, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1734..=1754, + GetConnections, + Request {}, + + #[doc = serde_doc_test!( + GET_CONNECTIONS_RESPONSE => GetConnectionsResponse { + base: ResponseBase::ok(), + connections: vec![ + ConnectionInfo { + address: "3evk3kezfjg44ma6tvesy7rbxwwpgpympj45xar5fo4qajrsmkoaqdqd.onion:18083".into(), + address_type: 4, + avg_download: 0, + avg_upload: 0, + connection_id: "22ef856d0f1d44cc95e84fecfd065fe2".into(), + current_download: 0, + current_upload: 0, + height: 3195026, + host: "3evk3kezfjg44ma6tvesy7rbxwwpgpympj45xar5fo4qajrsmkoaqdqd.onion".into(), + incoming: false, + ip: "".into(), + live_time: 76651, + local_ip: false, + localhost: false, + peer_id: "0000000000000001".into(), + port: "".into(), + pruning_seed: 0, + recv_count: 240328, + recv_idle_time: 34, + rpc_credits_per_hash: 0, + rpc_port: 0, + send_count: 3406572, + send_idle_time: 30, + state: "normal".into(), + support_flags: 0 + }, + ConnectionInfo { + address: "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion:18083".into(), + address_type: 4, + avg_download: 0, + avg_upload: 0, + connection_id: "c7734e15936f485a86d2b0534f87e499".into(), + current_download: 0, + current_upload: 0, + height: 3195024, + host: "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion".into(), + incoming: false, + ip: "".into(), + live_time: 76755, + local_ip: false, + localhost: false, + peer_id: "0000000000000001".into(), + port: "".into(), + pruning_seed: 389, + recv_count: 237657, + recv_idle_time: 120, + rpc_credits_per_hash: 0, + rpc_port: 0, + send_count: 3370566, + send_idle_time: 120, + state: "normal".into(), + support_flags: 0 + } + ], + } + )] ResponseBase { // FIXME: This is a `std::list` in `monerod` because...? connections: Vec, @@ -315,6 +710,51 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 693..=789, GetInfo, Request {}, + + #[doc = serde_doc_test!( + GET_INFO_RESPONSE => GetInfoResponse { + base: AccessResponseBase::ok(), + adjusted_time: 1721245289, + alt_blocks_count: 16, + block_size_limit: 600000, + block_size_median: 300000, + block_weight_limit: 600000, + block_weight_median: 300000, + bootstrap_daemon_address: "".into(), + busy_syncing: false, + cumulative_difficulty: 366127702242611947, + cumulative_difficulty_top64: 0, + database_size: 235169075200, + difficulty: 280716748706, + difficulty_top64: 0, + free_space: 30521749504, + grey_peerlist_size: 4996, + height: 3195028, + height_without_bootstrap: 3195028, + incoming_connections_count: 62, + mainnet: true, + nettype: "mainnet".into(), + offline: false, + outgoing_connections_count: 1143, + restricted: false, + rpc_connections_count: 1, + stagenet: false, + start_time: 1720462427, + synchronized: true, + target: 120, + target_height: 0, + testnet: false, + top_block_hash: "bdf06d18ed1931a8ee62654e9b6478cc459bc7072628b8e36f4524d339552946".into(), + tx_count: 43205750, + tx_pool_size: 12, + update_available: false, + version: "0.18.3.3-release".into(), + was_bootstrap_ever_used: false, + white_peerlist_size: 1000, + wide_cumulative_difficulty: "0x514bf349299d2eb".into(), + wide_difficulty: "0x415c05a7a2".into() + } + )] AccessResponseBase { adjusted_time: u64, alt_blocks_count: u64, @@ -364,6 +804,20 @@ define_request_and_response! 
{ core_rpc_server_commands_defs.h => 1958..=1995, HardForkInfo, Request {}, + + #[doc = serde_doc_test!( + HARD_FORK_INFO_RESPONSE => HardForkInfoResponse { + base: AccessResponseBase::ok(), + earliest_height: 2689608, + enabled: true, + state: 0, + threshold: 0, + version: 16, + votes: 10080, + voting: 16, + window: 10080 + } + )] AccessResponseBase { earliest_height: u64, enabled: bool, @@ -381,9 +835,26 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2032..=2067, SetBans, + + #[doc = serde_doc_test!( + SET_BANS_REQUEST => SetBansRequest { + bans: vec![ SetBan { + host: "192.168.1.51".into(), + ip: 0, + ban: true, + seconds: 30 + }] + } + )] Request { bans: Vec, }, + + #[doc = serde_doc_test!( + SET_BANS_RESPONSE => SetBansResponse { + base: ResponseBase::ok(), + } + )] ResponseBase {} } @@ -393,6 +864,24 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 1997..=2030, GetBans, Request {}, + + #[doc = serde_doc_test!( + GET_BANS_RESPONSE => GetBansResponse { + base: ResponseBase::ok(), + bans: vec![ + GetBan { + host: "104.248.206.131".into(), + ip: 2211379304, + seconds: 689754 + }, + GetBan { + host: "209.222.252.0/24".into(), + ip: 0, + seconds: 689754 + } + ] + } + )] ResponseBase { bans: Vec, } @@ -403,11 +892,23 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2069..=2094, Banned, - #[cfg_attr(feature = "serde", serde(transparent))] - #[repr(transparent)] + + #[doc = serde_doc_test!( + BANNED_REQUEST => BannedRequest { + address: "95.216.203.255".into(), + } + )] Request { address: String, }, + + #[doc = serde_doc_test!( + BANNED_RESPONSE => BannedResponse { + banned: true, + seconds: 689655, + status: Status::Ok, + } + )] Response { banned: bool, seconds: u32, @@ -420,10 +921,21 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2096..=2116, FlushTransactionPool, + + #[doc = serde_doc_test!( + FLUSH_TRANSACTION_POOL_REQUEST => FlushTransactionPoolRequest { + txids: vec!["dc16fa8eaffe1484ca9014ea050e13131d3acf23b419f33bb4cc0b32b6c49308".into()], + } + )] Request { txids: Vec = default_vec::(), "default_vec", }, - #[cfg_attr(feature = "serde", serde(transparent))] + + #[doc = serde_doc_test!( + FLUSH_TRANSACTION_POOL_RESPONSE => FlushTransactionPoolResponse { + status: Status::Ok, + } + )] #[repr(transparent)] Response { status: Status, @@ -435,13 +947,35 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2118..=2168, GetOutputHistogram, + + #[doc = serde_doc_test!( + GET_OUTPUT_HISTOGRAM_REQUEST => GetOutputHistogramRequest { + amounts: vec![20000000000], + min_count: 0, + max_count: 0, + unlocked: false, + recent_cutoff: 0, + } + )] Request { amounts: Vec, - min_count: u64, - max_count: u64, - unlocked: bool, - recent_cutoff: u64, + min_count: u64 = default_zero::(), "default_zero", + max_count: u64 = default_zero::(), "default_zero", + unlocked: bool = default_false(), "default_false", + recent_cutoff: u64 = default_zero::(), "default_zero", }, + + #[doc = serde_doc_test!( + GET_OUTPUT_HISTOGRAM_RESPONSE => GetOutputHistogramResponse { + base: AccessResponseBase::ok(), + histogram: vec![HistogramEntry { + amount: 20000000000, + recent_instances: 0, + total_instances: 381490, + unlocked_instances: 0 + }] + } + )] AccessResponseBase { histogram: Vec, } @@ -451,11 +985,31 @@ define_request_and_response! 
{ get_coinbase_tx_sum, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2213..=2248, + GetCoinbaseTxSum, + + #[doc = serde_doc_test!( + GET_COINBASE_TX_SUM_REQUEST => GetCoinbaseTxSumRequest { + height: 1563078, + count: 2 + } + )] Request { height: u64, count: u64, }, + + #[doc = serde_doc_test!( + GET_COINBASE_TX_SUM_RESPONSE => GetCoinbaseTxSumResponse { + base: AccessResponseBase::ok(), + emission_amount: 9387854817320, + emission_amount_top64: 0, + fee_amount: 83981380000, + fee_amount_top64: 0, + wide_emission_amount: "0x889c7c06828".into(), + wide_fee_amount: "0x138dae29a0".into() + } + )] AccessResponseBase { emission_amount: u64, emission_amount_top64: u64, @@ -470,16 +1024,90 @@ define_request_and_response! { get_version, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2170..=2211, + GetVersion, Request {}, + + #[doc = serde_doc_test!( + GET_VERSION_RESPONSE => GetVersionResponse { + base: ResponseBase::ok(), + current_height: 3195051, + hard_forks: vec![ + HardforkEntry { + height: 1, + hf_version: 1 + }, + HardforkEntry { + height: 1009827, + hf_version: 2 + }, + HardforkEntry { + height: 1141317, + hf_version: 3 + }, + HardforkEntry { + height: 1220516, + hf_version: 4 + }, + HardforkEntry { + height: 1288616, + hf_version: 5 + }, + HardforkEntry { + height: 1400000, + hf_version: 6 + }, + HardforkEntry { + height: 1546000, + hf_version: 7 + }, + HardforkEntry { + height: 1685555, + hf_version: 8 + }, + HardforkEntry { + height: 1686275, + hf_version: 9 + }, + HardforkEntry { + height: 1788000, + hf_version: 10 + }, + HardforkEntry { + height: 1788720, + hf_version: 11 + }, + HardforkEntry { + height: 1978433, + hf_version: 12 + }, + HardforkEntry { + height: 2210000, + hf_version: 13 + }, + HardforkEntry { + height: 2210720, + hf_version: 14 + }, + HardforkEntry { + height: 2688888, + hf_version: 15 + }, + HardforkEntry { + height: 2689608, + hf_version: 16 + } + ], + release: true, + version: 196621, + target_height: 0, + } + )] ResponseBase { version: u32, release: bool, - #[serde(skip_serializing_if = "is_zero")] current_height: u64 = default_zero::(), "default_zero", - #[serde(skip_serializing_if = "is_zero")] target_height: u64 = default_zero::(), "default_zero", - #[serde(skip_serializing_if = "Vec::is_empty")] hard_forks: Vec = default_vec(), "default_vec", } } @@ -490,11 +1118,19 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 2250..=2277, GetFeeEstimate, Request {}, + + #[doc = serde_doc_test!( + GET_FEE_ESTIMATE_RESPONSE => GetFeeEstimateResponse { + base: AccessResponseBase::ok(), + fee: 20000, + fees: vec![20000,80000,320000,4000000], + quantization_mask: 10000, + } + )] AccessResponseBase { fee: u64, fees: Vec, - #[serde(skip_serializing_if = "is_one")] - quantization_mask: u64, + quantization_mask: u64 = default_one::(), "default_one", } } @@ -504,6 +1140,34 @@ define_request_and_response! 
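Several of these responses carry 128-bit quantities twice: as a low/`*_top64` pair of `u64`s and as a `wide_*` hex string. The two forms line up as shown below; the helper here is local to the example (Cuprate's own version lives in `cuprate_helper::map`), and the values come from the `GET_COINBASE_TX_SUM_RESPONSE` doc-test above:

```rust
// Illustration only: combine the low/high u64 halves into a u128 and
// compare against the `wide_*` hex-string form.
fn combine_low_high_bits_to_u128(low: u64, high: u64) -> u128 {
    ((high as u128) << 64) | (low as u128)
}

fn main() {
    // emission_amount / emission_amount_top64 from GET_COINBASE_TX_SUM_RESPONSE.
    let wide = combine_low_high_bits_to_u128(9_387_854_817_320, 0);
    assert_eq!(format!("{wide:#x}"), "0x889c7c06828"); // == wide_emission_amount
}
```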
{ core_rpc_server_commands_defs.h => 2279..=2310, GetAlternateChains, Request {}, + + #[doc = serde_doc_test!( + GET_ALTERNATE_CHAINS_RESPONSE => GetAlternateChainsResponse { + base: ResponseBase::ok(), + chains: vec![ + ChainInfo { + block_hash: "4826c7d45d7cf4f02985b5c405b0e5d7f92c8d25e015492ce19aa3b209295dce".into(), + block_hashes: vec!["4826c7d45d7cf4f02985b5c405b0e5d7f92c8d25e015492ce19aa3b209295dce".into()], + difficulty: 357404825113208373, + difficulty_top64: 0, + height: 3167471, + length: 1, + main_chain_parent_block: "69b5075ea627d6ba06b1c30b7e023884eeaef5282cf58ec847dab838ddbcdd86".into(), + wide_difficulty: "0x4f5c1cb79e22635".into(), + }, + ChainInfo { + block_hash: "33ee476f5a1c5b9d889274cbbe171f5e0112df7ed69021918042525485deb401".into(), + block_hashes: vec!["33ee476f5a1c5b9d889274cbbe171f5e0112df7ed69021918042525485deb401".into()], + difficulty: 354736121711617293, + difficulty_top64: 0, + height: 3157465, + length: 1, + main_chain_parent_block: "fd522fcc4cefe5c8c0e5c5600981b3151772c285df3a4e38e5c4011cf466d2cb".into(), + wide_difficulty: "0x4ec469f8b9ee50d".into(), + } + ], + } + )] ResponseBase { chains: Vec, } @@ -513,11 +1177,23 @@ define_request_and_response! { relay_tx, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2361..=2381, + RelayTx, + + #[doc = serde_doc_test!( + RELAY_TX_REQUEST => RelayTxRequest { + txids: vec!["9fd75c429cbe52da9a52f2ffc5fbd107fe7fd2099c0d8de274dc8a67e0c98613".into()] + } + )] Request { txids: Vec, }, - #[cfg_attr(feature = "serde", serde(transparent))] + + #[doc = serde_doc_test!( + RELAY_TX_RESPONSE => RelayTxResponse { + status: Status::Ok, + } + )] #[repr(transparent)] Response { status: Status, @@ -528,16 +1204,88 @@ define_request_and_response! { sync_info, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2383..=2443, + SyncInfo, Request {}, + + #[doc = serde_doc_test!( + SYNC_INFO_RESPONSE => SyncInfoResponse { + base: AccessResponseBase::ok(), + height: 3195157, + next_needed_pruning_seed: 0, + overview: "[]".into(), + spans: vec![], + peers: vec![ + SyncInfoPeer { + info: ConnectionInfo { + address: "142.93.128.65:44986".into(), + address_type: 1, + avg_download: 1, + avg_upload: 1, + connection_id: "a5803c4c2dac49e7b201dccdef54c862".into(), + current_download: 2, + current_upload: 1, + height: 3195157, + host: "142.93.128.65".into(), + incoming: true, + ip: "142.93.128.65".into(), + live_time: 18, + local_ip: false, + localhost: false, + peer_id: "6830e9764d3e5687".into(), + port: "44986".into(), + pruning_seed: 0, + recv_count: 20340, + recv_idle_time: 0, + rpc_credits_per_hash: 0, + rpc_port: 18089, + send_count: 32235, + send_idle_time: 6, + state: "normal".into(), + support_flags: 1 + } + }, + SyncInfoPeer { + info: ConnectionInfo { + address: "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion:18083".into(), + address_type: 4, + avg_download: 0, + avg_upload: 0, + connection_id: "277f7c821bc546878c8bd29977e780f5".into(), + current_download: 0, + current_upload: 0, + height: 3195157, + host: "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion".into(), + incoming: false, + ip: "".into(), + live_time: 2246, + local_ip: false, + localhost: false, + peer_id: "0000000000000001".into(), + port: "".into(), + pruning_seed: 389, + recv_count: 65164, + recv_idle_time: 15, + rpc_credits_per_hash: 0, + rpc_port: 0, + send_count: 99120, + send_idle_time: 15, + state: "normal".into(), + support_flags: 0 + } + } + ], + target_height: 0, + } + )] 
AccessResponseBase { height: u64, next_needed_pruning_seed: u32, overview: String, // FIXME: This is a `std::list` in `monerod` because...? - peers: Vec, + peers: Vec = default_vec::(), "default_vec", // FIXME: This is a `std::list` in `monerod` because...? - spans: Vec, + spans: Vec = default_vec::(), "default_vec", target_height: u64, } } @@ -548,6 +1296,14 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 1637..=1664, GetTransactionPoolBacklog, Request {}, + + // TODO: enable test after binary string impl. + // #[doc = serde_doc_test!( + // GET_TRANSACTION_POOL_BACKLOG_RESPONSE => GetTransactionPoolBacklogResponse { + // base: ResponseBase::ok(), + // backlog: "...Binary...".into(), + // } + // )] ResponseBase { // TODO: this is a [`BinaryString`]. backlog: Vec, @@ -558,18 +1314,44 @@ define_request_and_response! { get_output_distribution, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2445..=2520, + /// This type is also used in the (undocumented) /// [`/get_output_distribution.bin`](https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server.h#L138) /// binary endpoint. GetOutputDistribution, + + #[doc = serde_doc_test!( + GET_OUTPUT_DISTRIBUTION_REQUEST => GetOutputDistributionRequest { + amounts: vec![628780000], + from_height: 1462078, + binary: true, + compress: false, + cumulative: false, + to_height: 0, + } + )] Request { amounts: Vec, - binary: bool, - compress: bool, - cumulative: bool, - from_height: u64, - to_height: u64, + binary: bool = default_true(), "default_true", + compress: bool = default_false(), "default_false", + cumulative: bool = default_false(), "default_false", + from_height: u64 = default_zero::(), "default_zero", + to_height: u64 = default_zero::(), "default_zero", }, + + // TODO: enable test after binary string impl. + // #[doc = serde_doc_test!( + // GET_OUTPUT_DISTRIBUTION_RESPONSE => GetOutputDistributionResponse { + // base: AccessResponseBase::ok(), + // distributions: vec![Distribution::Uncompressed(DistributionUncompressed { + // start_height: 1462078, + // base: 0, + // distribution: vec![], + // amount: 2628780000, + // binary: true, + // })], + // } + // )] AccessResponseBase { distributions: Vec, } @@ -581,6 +1363,31 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 996..=1044, GetMinerData, Request {}, + + #[doc = serde_doc_test!( + GET_MINER_DATA_RESPONSE => GetMinerDataResponse { + base: ResponseBase::ok(), + already_generated_coins: 18186022843595960691, + difficulty: "0x48afae42de".into(), + height: 2731375, + major_version: 16, + median_weight: 300000, + prev_id: "78d50c5894d187c4946d54410990ca59a75017628174a9e8c7055fa4ca5c7c6d".into(), + seed_hash: "a6b869d50eca3a43ec26fe4c369859cf36ae37ce6ecb76457d31ffeb8a6ca8a6".into(), + tx_backlog: vec![ + GetMinerDataTxBacklogEntry { + fee: 30700000, + id: "9868490d6bb9207fdd9cf17ca1f6c791b92ca97de0365855ea5c089f67c22208".into(), + weight: 1535 + }, + GetMinerDataTxBacklogEntry { + fee: 44280000, + id: "b6000b02bbec71e18ad704bcae09fb6e5ae86d897ced14a718753e76e86c0a0a".into(), + weight: 2214 + }, + ], + } + )] ResponseBase { major_version: u8, height: u64, @@ -589,6 +1396,7 @@ define_request_and_response! { difficulty: String, median_weight: u64, already_generated_coins: u64, + tx_backlog: Vec, } } @@ -596,11 +1404,26 @@ define_request_and_response! 
{ prune_blockchain, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2747..=2772, + PruneBlockchain, + #[derive(Copy)] + #[doc = serde_doc_test!( + PRUNE_BLOCKCHAIN_REQUEST => PruneBlockchainRequest { + check: true + } + )] Request { check: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + PRUNE_BLOCKCHAIN_RESPONSE => PruneBlockchainResponse { + base: ResponseBase::ok(), + pruned: true, + pruning_seed: 387, + } + )] ResponseBase { pruned: bool, pruning_seed: u32, @@ -611,13 +1434,29 @@ define_request_and_response! { calc_pow, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1046..=1066, + CalcPow, + + #[doc = serde_doc_test!( + CALC_POW_REQUEST => CalcPowRequest { + major_version: 14, + height: 2286447, + block_blob: "0e0ed286da8006ecdc1aab3033cf1716c52f13f9d8ae0051615a2453643de94643b550d543becd0000000002abc78b0101ffefc68b0101fcfcf0d4b422025014bb4a1eade6622fd781cb1063381cad396efa69719b41aa28b4fce8c7ad4b5f019ce1dc670456b24a5e03c2d9058a2df10fec779e2579753b1847b74ee644f16b023c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000051399a1bc46a846474f5b33db24eae173a26393b976054ee14f9feefe99925233802867097564c9db7a36af5bb5ed33ab46e63092bd8d32cef121608c3258edd55562812e21cc7e3ac73045745a72f7d74581d9a0849d6f30e8b2923171253e864f4e9ddea3acb5bc755f1c4a878130a70c26297540bc0b7a57affb6b35c1f03d8dbd54ece8457531f8cba15bb74516779c01193e212050423020e45aa2c15dcb".into(), + seed_hash: "d432f499205150873b2572b5f033c9c6e4b7c6f3394bd2dd93822cd7085e7307".into(), + } + )] Request { major_version: u8, height: u64, block_blob: String, seed_hash: String, }, + + #[doc = serde_doc_test!( + CALC_POW_RESPONSE => CalcPowResponse { + pow_hash: "d0402d6834e26fb94a9ce38c6424d27d2069896a9b8b1ce685d79936bca6e0a8".into(), + } + )] #[cfg_attr(feature = "serde", serde(transparent))] #[repr(transparent)] Response { @@ -629,12 +1468,26 @@ define_request_and_response! { flush_cache, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2774..=2796, + FlushCache, + #[derive(Copy)] + #[doc = serde_doc_test!( + FLUSH_CACHE_REQUEST => FlushCacheRequest { + bad_txs: true, + bad_blocks: true + } + )] Request { bad_txs: bool = default_false(), "default_false", bad_blocks: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + FLUSH_CACHE_RESPONSE => FlushCacheResponse { + base: ResponseBase::ok(), + } + )] ResponseBase {} } @@ -642,11 +1495,36 @@ define_request_and_response! 
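Single-field wrappers such as `CalcPowResponse` keep `#[repr(transparent)]` and, behind the `serde` feature, `serde(transparent)`. As a generic illustration of that attribute (plain serde, not Cuprate code), a transparent one-field struct (de)serializes as its inner value rather than as a nested object:

```rust
use serde::Deserialize;

// Illustration only: `serde(transparent)` forwards (de)serialization to the
// single inner field, so a bare JSON string deserializes directly into it.
#[derive(Debug, Deserialize, PartialEq)]
#[serde(transparent)]
struct PowHash {
    pow_hash: String,
}

fn main() {
    let hash: PowHash = serde_json::from_str(r#""abc123""#).unwrap();
    assert_eq!(hash.pow_hash, "abc123");
}
```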
{ add_aux_pow, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1068..=1112, + AddAuxPow, + + #[doc = serde_doc_test!( + ADD_AUX_POW_REQUEST => AddAuxPowRequest { + blocktemplate_blob: "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a0000000002c681c30101ff8a81c3010180e0a596bb11033b7eedf47baf878f3490cb20b696079c34bd017fe59b0d070e74d73ffabc4bb0e05f011decb630f3148d0163b3bd39690dde4078e4cfb69fecf020d6278a27bad10c58023c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(), + aux_pow: vec![AuxPow { + id: "3200b4ea97c3b2081cd4190b58e49572b2319fed00d030ad51809dff06b5d8c8".into(), + hash: "7b35762de164b20885e15dbe656b1138db06bb402fa1796f5765a23933d8859a".into() + }] + } + )] Request { blocktemplate_blob: String, aux_pow: Vec, }, + + #[doc = serde_doc_test!( + ADD_AUX_POW_RESPONSE => AddAuxPowResponse { + base: ResponseBase::ok(), + aux_pow: vec![AuxPow { + hash: "7b35762de164b20885e15dbe656b1138db06bb402fa1796f5765a23933d8859a".into(), + id: "3200b4ea97c3b2081cd4190b58e49572b2319fed00d030ad51809dff06b5d8c8".into(), + }], + blockhashing_blob: "1010ee97e2a106e9f8ebe8887e5b609949ac8ea6143e560ed13552b110cb009b21f0cfca1eaccf00000000b2685c1283a646bc9020c758daa443be145b7370ce5a6efacb3e614117032e2c22".into(), + blocktemplate_blob: "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a0000000002c681c30101ff8a81c3010180e0a596bb11033b7eedf47baf878f3490cb20b696079c34bd017fe59b0d070e74d73ffabc4bb0e05f011decb630f3148d0163b3bd39690dde4078e4cfb69fecf020d6278a27bad10c58023c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(), + merkle_root: "7b35762de164b20885e15dbe656b1138db06bb402fa1796f5765a23933d8859a".into(), + merkle_tree_depth: 0, + } + )] ResponseBase { blocktemplate_blob: String, blockhashing_blob: String, diff --git a/rpc/types/src/misc/misc.rs b/rpc/types/src/misc/misc.rs index 4643ecc5..2b31cabf 100644 --- a/rpc/types/src/misc/misc.rs +++ b/rpc/types/src/misc/misc.rs @@ -22,7 +22,7 @@ use crate::{ CORE_RPC_STATUS_BUSY, CORE_RPC_STATUS_NOT_MINING, CORE_RPC_STATUS_OK, CORE_RPC_STATUS_PAYMENT_REQUIRED, }, - defaults::default_zero, + defaults::{default_string, default_zero}, macros::monero_definition_link, }; @@ -51,9 +51,9 @@ macro_rules! define_struct_and_impl_epee { )* } ) => { - $( #[$struct_attr] )* #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + $( #[$struct_attr] )* pub struct $struct_name { $( $( #[$field_attr] )* @@ -142,7 +142,9 @@ define_struct_and_impl_epee! { rpc_port: u16, send_count: u64, send_idle_time: u64, - ssl: bool, + // Exists in the original definition, but isn't + // used or (de)serialized for RPC purposes. + // ssl: bool, state: String, support_flags: u32, } @@ -156,7 +158,9 @@ define_struct_and_impl_epee! { )] /// Used in [`crate::json::SetBansRequest`]. 
SetBan { + #[cfg_attr(feature = "serde", serde(default = "default_string"))] host: String, + #[cfg_attr(feature = "serde", serde(default = "default_zero"))] ip: u32, ban: bool, seconds: u32, diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index 41530cba..c1407778 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -5,7 +5,7 @@ //---------------------------------------------------------------------------------------------------- Import use crate::{ base::{AccessResponseBase, ResponseBase}, - defaults::{default_false, default_string, default_true}, + defaults::{default_false, default_string, default_true, default_vec, default_zero}, macros::define_request_and_response, misc::{ GetOutputsOut, KeyImageSpentStatus, OutKey, Peer, PublicNode, SpentKeyImageInfo, Status, @@ -13,6 +13,81 @@ use crate::{ }, }; +//---------------------------------------------------------------------------------------------------- Macro +/// Adds a (de)serialization doc-test to a type in `other.rs`. +/// +/// It expects a const string from `cuprate_test_utils::rpc::data` +/// and the expected value it should (de)serialize into/from. +/// +/// It tests that the provided const JSON string can properly +/// (de)serialize into the expected value. +/// +/// See below for example usage. This macro is only used in this file. +macro_rules! serde_doc_test { + // This branch _only_ tests that the type can be deserialize + // from the string, not that any value is correct. + // + // Practically, this is used for structs that have + // many values that are complicated to test, e.g. `GET_TRANSACTIONS_RESPONSE`. + // + // HACK: + // The type itself doesn't need to be specified because it happens + // to just be the `CamelCase` version of the provided const. + ( + // `const` string from `cuprate_test_utils::rpc::data`. + $cuprate_test_utils_rpc_const:ident + ) => { + paste::paste! { + concat!( + "```rust\n", + "use cuprate_test_utils::rpc::data::other::*;\n", + "use cuprate_rpc_types::{misc::*, base::*, other::*};\n", + "use serde_json::{Value, from_str, from_value};\n", + "\n", + "let string = from_str::<", + stringify!([<$cuprate_test_utils_rpc_const:camel>]), + ">(", + stringify!($cuprate_test_utils_rpc_const), + ").unwrap();\n", + "```\n", + ) + } + }; + + // This branch tests that the type can be deserialize + // from the string AND that values are correct. + ( + // `const` string from `cuprate_test_utils::rpc::data` + // v + $cuprate_test_utils_rpc_const:ident => $expected:expr + // ^ + // Expected value as an expression + ) => { + paste::paste! { + concat!( + "```rust\n", + "use cuprate_test_utils::rpc::data::other::*;\n", + "use cuprate_rpc_types::{misc::*, base::*, other::*};\n", + "use serde_json::{Value, from_str, from_value};\n", + "\n", + "// The expected data.\n", + "let expected = ", + stringify!($expected), + ";\n", + "\n", + "let string = from_str::<", + stringify!([<$cuprate_test_utils_rpc_const:camel>]), + ">(", + stringify!($cuprate_test_utils_rpc_const), + ").unwrap();\n", + "\n", + "assert_eq!(string, expected);\n", + "```\n", + ) + } + }; +} + //---------------------------------------------------------------------------------------------------- Definitions define_request_and_response! { get_height, @@ -20,6 +95,14 @@ define_request_and_response! 
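For the second branch of `serde_doc_test!` defined above, the generated doc-test reads roughly as follows (a paraphrased expansion for the `GET_HEIGHT_RESPONSE` case in the `get_height` definition below; `paste!`'s `:camel` filter is what turns the constant's name into the type name):

```rust
use cuprate_test_utils::rpc::data::other::*;
use cuprate_rpc_types::{misc::*, base::*, other::*};
use serde_json::{Value, from_str, from_value};

// The expected data.
let expected = GetHeightResponse {
    base: ResponseBase::ok(),
    hash: "68bb1a1cff8e2a44c3221e8e1aff80bc6ca45d06fa8eff4d2a3a7ac31d4efe3f".into(),
    height: 3195160,
};

// Deserialize the raw JSON constant and check it matches the expected value.
let string = from_str::<GetHeightResponse>(GET_HEIGHT_RESPONSE).unwrap();

assert_eq!(string, expected);
```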
{ core_rpc_server_commands_defs.h => 138..=160, GetHeight, Request {}, + + #[doc = serde_doc_test!( + GET_HEIGHT_RESPONSE => GetHeightResponse { + base: ResponseBase::ok(), + hash: "68bb1a1cff8e2a44c3221e8e1aff80bc6ca45d06fa8eff4d2a3a7ac31d4efe3f".into(), + height: 3195160, + } + )] ResponseBase { hash: String, height: u64, @@ -31,6 +114,15 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 370..=451, GetTransactions, + + #[doc = serde_doc_test!( + GET_TRANSACTIONS_REQUEST => GetTransactionsRequest { + txs_hashes: vec!["d6e48158472848e6687173a91ae6eebfa3e1d778e65252ee99d7515d63090408".into()], + decode_as_json: false, + prune: false, + split: false, + } + )] Request { txs_hashes: Vec, // FIXME: this is documented as optional but it isn't serialized as an optional @@ -40,11 +132,13 @@ define_request_and_response! { prune: bool = default_false(), "default_false", split: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!(GET_TRANSACTIONS_RESPONSE)] AccessResponseBase { - txs_as_hex: Vec, - txs_as_json: Vec, - missed_tx: Vec, - txs: Vec, + txs_as_hex: Vec = default_vec::(), "default_vec", + txs_as_json: Vec = default_vec::(), "default_vec", + missed_tx: Vec = default_vec::(), "default_vec", + txs: Vec = default_vec::(), "default_vec", } } @@ -54,6 +148,13 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 288..=308, GetAltBlocksHashes, Request {}, + + #[doc = serde_doc_test!( + GET_ALT_BLOCKS_HASHES_RESPONSE => GetAltBlocksHashesResponse { + base: AccessResponseBase::ok(), + blks_hashes: vec!["8ee10db35b1baf943f201b303890a29e7d45437bd76c2bd4df0d2f2ee34be109".into()], + } + )] AccessResponseBase { blks_hashes: Vec, } @@ -63,10 +164,27 @@ define_request_and_response! { is_key_image_spent, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 454..=484, + IsKeyImageSpent, + + #[doc = serde_doc_test!( + IS_KEY_IMAGE_SPENT_REQUEST => IsKeyImageSpentRequest { + key_images: vec![ + "8d1bd8181bf7d857bdb281e0153d84cd55a3fcaa57c3e570f4a49f935850b5e3".into(), + "7319134bfc50668251f5b899c66b005805ee255c136f0e1cecbb0f3a912e09d4".into() + ] + } + )] Request { key_images: Vec, }, + + #[doc = serde_doc_test!( + IS_KEY_IMAGE_SPENT_RESPONSE => IsKeyImageSpentResponse { + base: AccessResponseBase::ok(), + spent_status: vec![1, 1], + } + )] AccessResponseBase { /// FIXME: These are [`KeyImageSpentStatus`] in [`u8`] form. spent_status: Vec, @@ -77,19 +195,54 @@ define_request_and_response! 
{ send_raw_transaction, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 370..=451, + SendRawTransaction, + + #[doc = serde_doc_test!( + SEND_RAW_TRANSACTION_REQUEST => SendRawTransactionRequest { + tx_as_hex: "dc16fa8eaffe1484ca9014ea050e13131d3acf23b419f33bb4cc0b32b6c49308".into(), + do_not_relay: false, + do_sanity_checks: true, + } + )] Request { tx_as_hex: String, do_not_relay: bool = default_false(), "default_false", do_sanity_checks: bool = default_true(), "default_true", }, + + #[doc = serde_doc_test!( + SEND_RAW_TRANSACTION_RESPONSE => SendRawTransactionResponse { + base: AccessResponseBase { + response_base: ResponseBase { + status: Status::Other("Failed".into()), + untrusted: false, + }, + credits: 0, + top_hash: "".into(), + }, + double_spend: false, + fee_too_low: false, + invalid_input: false, + invalid_output: false, + low_mixin: false, + not_relayed: false, + overspend: false, + reason: "".into(), + sanity_check_failed: false, + too_big: false, + too_few_outputs: false, + tx_extra_too_big: false, + nonzero_unlock_time: false, + } + )] AccessResponseBase { double_spend: bool, fee_too_low: bool, invalid_input: bool, invalid_output: bool, low_mixin: bool, - nonzero_unlock_time: bool, + nonzero_unlock_time: bool = default_false(), "default_false", not_relayed: bool, overspend: bool, reason: String, @@ -104,13 +257,29 @@ define_request_and_response! { start_mining, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 665..=691, + StartMining, + + #[doc = serde_doc_test!( + START_MINING_REQUEST => StartMiningRequest { + do_background_mining: false, + ignore_battery: true, + miner_address: "47xu3gQpF569au9C2ajo5SSMrWji6xnoE5vhr94EzFRaKAGw6hEGFXYAwVADKuRpzsjiU1PtmaVgcjUJF89ghGPhUXkndHc".into(), + threads_count: 1 + } + )] Request { miner_address: String, threads_count: u64, do_background_mining: bool, ignore_battery: bool, }, + + #[doc = serde_doc_test!( + START_MINING_RESPONSE => StartMiningResponse { + base: ResponseBase::ok(), + } + )] ResponseBase {} } @@ -120,6 +289,12 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 825..=843, StopMining, Request {}, + + #[doc = serde_doc_test!( + STOP_MINING_RESPONSE => StopMiningResponse { + base: ResponseBase::ok(), + } + )] ResponseBase {} } @@ -129,6 +304,27 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 846..=895, MiningStatus, Request {}, + + #[doc = serde_doc_test!( + MINING_STATUS_RESPONSE => MiningStatusResponse { + base: ResponseBase::ok(), + active: false, + address: "".into(), + bg_idle_threshold: 0, + bg_ignore_battery: false, + bg_min_idle_seconds: 0, + bg_target: 0, + block_reward: 0, + block_target: 120, + difficulty: 292022797663, + difficulty_top64: 0, + is_background_mining_enabled: false, + pow_algorithm: "RandomX".into(), + speed: 0, + threads_count: 0, + wide_difficulty: "0x43fdea455f".into(), + } + )] ResponseBase { active: bool, address: String, @@ -154,6 +350,12 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 898..=916, SaveBc, Request {}, + + #[doc = serde_doc_test!( + SAVE_BC_RESPONSE => SaveBcResponse { + base: ResponseBase::ok(), + } + )] ResponseBase {} } @@ -161,11 +363,79 @@ define_request_and_response! 
{ get_peer_list, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1369..=1417, + GetPeerList, + + #[doc = serde_doc_test!( + GET_PEER_LIST_REQUEST => GetPeerListRequest { + public_only: true, + include_blocked: false, + } + )] Request { public_only: bool = default_true(), "default_true", include_blocked: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + GET_PEER_LIST_RESPONSE => GetPeerListResponse { + base: ResponseBase::ok(), + gray_list: vec![ + Peer { + host: "161.97.193.0".into(), + id: 18269586253849566614, + ip: 12673441, + last_seen: 0, + port: 18080, + rpc_port: 0, + rpc_credits_per_hash: 0, + pruning_seed: 0, + }, + Peer { + host: "193.142.4.2".into(), + id: 10865563782170056467, + ip: 33853121, + last_seen: 0, + port: 18085, + pruning_seed: 387, + rpc_port: 19085, + rpc_credits_per_hash: 0, + } + ], + white_list: vec![ + Peer { + host: "78.27.98.0".into(), + id: 11368279936682035606, + ip: 6429518, + last_seen: 1721246387, + port: 18080, + pruning_seed: 384, + rpc_port: 0, + rpc_credits_per_hash: 0, + }, + Peer { + host: "67.4.163.2".into(), + id: 16545113262826842499, + ip: 44237891, + last_seen: 1721246387, + port: 18080, + rpc_port: 0, + rpc_credits_per_hash: 0, + pruning_seed: 0, + }, + Peer { + host: "70.52.75.3".into(), + id: 3863337548778177169, + ip: 55260230, + last_seen: 1721246387, + port: 18080, + rpc_port: 18081, + rpc_credits_per_hash: 0, + pruning_seed: 0, + } + ] + } + )] ResponseBase { white_list: Vec, gray_list: Vec, @@ -177,10 +447,22 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1450..=1470, SetLogHashRate, + #[derive(Copy)] + #[doc = serde_doc_test!( + SET_LOG_HASH_RATE_REQUEST => SetLogHashRateRequest { + visible: true, + } + )] Request { - visible: bool, + visible: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + SET_LOG_HASH_RATE_RESPONSE => SetLogHashRateResponse { + base: ResponseBase::ok(), + } + )] ResponseBase {} } @@ -188,11 +470,24 @@ define_request_and_response! { set_log_level, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1450..=1470, + SetLogLevel, + #[derive(Copy)] + #[doc = serde_doc_test!( + SET_LOG_LEVEL_REQUEST => SetLogLevelRequest { + level: 1 + } + )] Request { level: u8, }, + + #[doc = serde_doc_test!( + SET_LOG_LEVEL_RESPONSE => SetLogLevelResponse { + base: ResponseBase::ok(), + } + )] ResponseBase {} } @@ -200,10 +495,24 @@ define_request_and_response! { set_log_categories, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1494..=1517, + SetLogCategories, + + #[doc = serde_doc_test!( + SET_LOG_CATEGORIES_REQUEST => SetLogCategoriesRequest { + categories: "*:INFO".into(), + } + )] Request { categories: String = default_string(), "default_string", }, + + #[doc = serde_doc_test!( + SET_LOG_CATEGORIES_RESPONSE => SetLogCategoriesResponse { + base: ResponseBase::ok(), + categories: "*:INFO".into(), + } + )] ResponseBase { categories: String, } @@ -213,13 +522,29 @@ define_request_and_response! 
{ set_bootstrap_daemon, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1785..=1812, + SetBootstrapDaemon, + + #[doc = serde_doc_test!( + SET_BOOTSTRAP_DAEMON_REQUEST => SetBootstrapDaemonRequest { + address: "http://getmonero.org:18081".into(), + username: String::new(), + password: String::new(), + proxy: String::new(), + } + )] Request { address: String, - username: String, - password: String, - proxy: String, + username: String = default_string(), "default_string", + password: String = default_string(), "default_string", + proxy: String = default_string(), "default_string", }, + + #[doc = serde_doc_test!( + SET_BOOTSTRAP_DAEMON_RESPONSE => SetBootstrapDaemonResponse { + status: Status::Ok, + } + )] Response { status: Status, } @@ -229,8 +554,11 @@ define_request_and_response! { get_transaction_pool, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1569..=1591, + GetTransactionPool, Request {}, + + #[doc = serde_doc_test!(GET_TRANSACTION_POOL_RESPONSE)] AccessResponseBase { transactions: Vec, spent_key_images: Vec, @@ -241,8 +569,41 @@ define_request_and_response! { get_transaction_pool_stats, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1712..=1732, + GetTransactionPoolStats, Request {}, + + #[doc = serde_doc_test!( + GET_TRANSACTION_POOL_STATS_RESPONSE => GetTransactionPoolStatsResponse { + base: AccessResponseBase::ok(), + pool_stats: TxpoolStats { + bytes_max: 11843, + bytes_med: 2219, + bytes_min: 1528, + bytes_total: 144192, + fee_total: 7018100000, + histo: vec![ + TxpoolHisto { bytes: 11219, txs: 4 }, + TxpoolHisto { bytes: 9737, txs: 5 }, + TxpoolHisto { bytes: 8757, txs: 4 }, + TxpoolHisto { bytes: 14763, txs: 4 }, + TxpoolHisto { bytes: 15007, txs: 6 }, + TxpoolHisto { bytes: 15924, txs: 6 }, + TxpoolHisto { bytes: 17869, txs: 8 }, + TxpoolHisto { bytes: 10894, txs: 5 }, + TxpoolHisto { bytes: 38485, txs: 10 }, + TxpoolHisto { bytes: 1537, txs: 1 }, + ], + histo_98pc: 186, + num_10m: 0, + num_double_spends: 0, + num_failing: 0, + num_not_relayed: 0, + oldest: 1721261651, + txs_total: 53 + } + } + )] AccessResponseBase { pool_stats: TxpoolStats, } @@ -252,9 +613,16 @@ define_request_and_response! { stop_daemon, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1814..=1831, + StopDaemon, Request {}, - ResponseBase { + + #[doc = serde_doc_test!( + STOP_DAEMON_RESPONSE => StopDaemonResponse { + status: Status::Ok, + } + )] + Response { status: Status, } } @@ -263,8 +631,17 @@ define_request_and_response! { get_limit, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1852..=1874, + GetLimit, Request {}, + + #[doc = serde_doc_test!( + GET_LIMIT_RESPONSE => GetLimitResponse { + base: ResponseBase::ok(), + limit_down: 1280000, + limit_up: 1280000, + } + )] ResponseBase { limit_down: u64, limit_up: u64, @@ -275,11 +652,27 @@ define_request_and_response! { set_limit, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1876..=1903, + SetLimit, + #[doc = serde_doc_test!( + SET_LIMIT_REQUEST => SetLimitRequest { + limit_down: 1024, + limit_up: 0, + } + )] Request { - limit_down: i64, - limit_up: i64, + // FIXME: These may need to be `Option`. 
+ limit_down: i64 = default_zero::(), "default_zero", + limit_up: i64 = default_zero::(), "default_zero", }, + + #[doc = serde_doc_test!( + SET_LIMIT_RESPONSE => SetLimitResponse { + base: ResponseBase::ok(), + limit_down: 1024, + limit_up: 128, + } + )] ResponseBase { limit_down: i64, limit_up: i64, @@ -290,11 +683,26 @@ define_request_and_response! { out_peers, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1876..=1903, + OutPeers, + + #[doc = serde_doc_test!( + OUT_PEERS_REQUEST => OutPeersRequest { + out_peers: 3232235535, + set: true, + } + )] Request { set: bool = default_true(), "default_true", out_peers: u32, }, + + #[doc = serde_doc_test!( + OUT_PEERS_RESPONSE => OutPeersResponse { + base: ResponseBase::ok(), + out_peers: 3232235535, + } + )] ResponseBase { out_peers: u32, } @@ -304,8 +712,20 @@ define_request_and_response! { get_net_stats, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 793..=822, + GetNetStats, Request {}, + + #[doc = serde_doc_test!( + GET_NET_STATS_RESPONSE => GetNetStatsResponse { + base: ResponseBase::ok(), + start_time: 1721251858, + total_bytes_in: 16283817214, + total_bytes_out: 34225244079, + total_packets_in: 5981922, + total_packets_out: 3627107, + } + )] ResponseBase { start_time: u64, total_packets_in: u64, @@ -319,11 +739,43 @@ define_request_and_response! { get_outs, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 567..=609, + GetOuts, + #[doc = serde_doc_test!( + GET_OUTS_REQUEST => GetOutsRequest { + outputs: vec![ + GetOutputsOut { amount: 1, index: 0 }, + GetOutputsOut { amount: 1, index: 1 }, + ], + get_txid: true + } + )] Request { outputs: Vec, get_txid: bool, }, + + #[doc = serde_doc_test!( + GET_OUTS_RESPONSE => GetOutsResponse { + base: ResponseBase::ok(), + outs: vec![ + OutKey { + height: 51941, + key: "08980d939ec297dd597119f498ad69fed9ca55e3a68f29f2782aae887ef0cf8e".into(), + mask: "1738eb7a677c6149228a2beaa21bea9e3370802d72a3eec790119580e02bd522".into(), + txid: "9d651903b80fb70b9935b72081cd967f543662149aed3839222511acd9100601".into(), + unlocked: true + }, + OutKey { + height: 51945, + key: "454fe46c405be77625fa7e3389a04d3be392346983f27603561ac3a3a74f4a75".into(), + mask: "1738eb7a677c6149228a2beaa21bea9e3370802d72a3eec790119580e02bd522".into(), + txid: "230bff732dc5f225df14fff82aadd1bf11b3fb7ad3a03413c396a617e843f7d0".into(), + unlocked: true + }, + ] + } + )] ResponseBase { outs: Vec, } @@ -333,11 +785,31 @@ define_request_and_response! { update, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2324..=2359, + Update, + + #[doc = serde_doc_test!( + UPDATE_REQUEST => UpdateRequest { + command: "check".into(), + path: "".into(), + } + )] Request { command: String, path: String = default_string(), "default_string", }, + + #[doc = serde_doc_test!( + UPDATE_RESPONSE => UpdateResponse { + base: ResponseBase::ok(), + auto_uri: "".into(), + hash: "".into(), + path: "".into(), + update: false, + user_uri: "".into(), + version: "".into(), + } + )] ResponseBase { auto_uri: String, hash: String, @@ -352,35 +824,62 @@ define_request_and_response! 
{ pop_blocks, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2722..=2745, + PopBlocks, + + #[doc = serde_doc_test!( + POP_BLOCKS_REQUEST => PopBlocksRequest { + nblocks: 6 + } + )] Request { nblocks: u64, }, + + #[doc = serde_doc_test!( + POP_BLOCKS_RESPONSE => PopBlocksResponse { + base: ResponseBase::ok(), + height: 76482, + } + )] ResponseBase { height: u64, } } -define_request_and_response! { - UNDOCUMENTED_ENDPOINT, - cc73fe71162d564ffda8e549b79a350bca53c454 => - core_rpc_server_commands_defs.h => 2798..=2823, - GetTxIdsLoose, - Request { - txid_template: String, - num_matching_bits: u32, - }, - ResponseBase { - txids: Vec, - } -} - define_request_and_response! { UNDOCUMENTED_ENDPOINT, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1615..=1635, + GetTransactionPoolHashes, Request {}, + + #[doc = serde_doc_test!( + GET_TRANSACTION_POOL_HASHES_RESPONSE => GetTransactionPoolHashesResponse { + base: ResponseBase::ok(), + tx_hashes: vec![ + "aa928aed888acd6152c60194d50a4df29b0b851be6169acf11b6a8e304dd6c03".into(), + "794345f321a98f3135151f3056c0fdf8188646a8dab27de971428acf3551dd11".into(), + "1e9d2ae11f2168a228942077483e70940d34e8658c972bbc3e7f7693b90edf17".into(), + "7375c928f261d00f07197775eb0bfa756e5f23319819152faa0b3c670fe54c1b".into(), + "2e4d5f8c5a45498f37fb8b6ca4ebc1efa0c371c38c901c77e66b08c072287329".into(), + "eee6d596cf855adfb10e1597d2018e3a61897ac467ef1d4a5406b8d20bfbd52f".into(), + "59c574d7ba9bb4558470f74503c7518946a85ea22c60fccfbdec108ce7d8f236".into(), + "0d57bec1e1075a9e1ac45cf3b3ced1ad95ccdf2a50ce360190111282a0178655".into(), + "60d627b2369714a40009c07d6185ebe7fa4af324fdfa8d95a37a936eb878d062".into(), + "661d7e728a901a8cb4cf851447d9cd5752462687ed0b776b605ba706f06bdc7d".into(), + "b80e1f09442b00b3fffe6db5d263be6267c7586620afff8112d5a8775a6fc58e".into(), + "974063906d1ddfa914baf85176b0f689d616d23f3d71ed4798458c8b4f9b9d8f".into(), + "d2575ae152a180be4981a9d2fc009afcd073adaa5c6d8b022c540a62d6c905bb".into(), + "3d78aa80ee50f506683bab9f02855eb10257a08adceda7cbfbdfc26b10f6b1bb".into(), + "8b5bc125bdb73b708500f734501d55088c5ac381a0879e1141634eaa72b6a4da".into(), + "11c06f4d2f00c912ca07313ed2ea5366f3cae914a762bed258731d3d9e3706df".into(), + "b3644dc7c9a3a53465fe80ad3769e516edaaeb7835e16fdd493aac110d472ae1".into(), + "ed2478ad793b923dbf652c8612c40799d764e5468897021234a14a37346bc6ee".into() + ], + } + )] ResponseBase { tx_hashes: Vec, } @@ -391,14 +890,43 @@ define_request_and_response! 
{ cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1419..=1448, GetPublicNodes, + + #[doc = serde_doc_test!( + GET_PUBLIC_NODES_REQUEST => GetPublicNodesRequest { + gray: false, + white: true, + include_blocked: false, + } + )] Request { gray: bool = default_false(), "default_false", white: bool = default_true(), "default_true", include_blocked: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + GET_PUBLIC_NODES_RESPONSE => GetPublicNodesResponse { + base: ResponseBase::ok(), + gray: vec![], + white: vec![ + PublicNode { + host: "70.52.75.3".into(), + last_seen: 1721246387, + rpc_credits_per_hash: 0, + rpc_port: 18081, + }, + PublicNode { + host: "zbjkbsxc5munw3qusl7j2hpcmikhqocdf4pqhnhtpzw5nt5jrmofptid.onion:18083".into(), + last_seen: 1720186288, + rpc_credits_per_hash: 0, + rpc_port: 18089, + } + ] + } + )] ResponseBase { - gray: Vec, - white: Vec, + gray: Vec = default_vec::(), "default_vec", + white: Vec = default_vec::(), "default_vec", } } diff --git a/storage/blockchain/src/service/mod.rs b/storage/blockchain/src/service/mod.rs index 38a7374b..5938e412 100644 --- a/storage/blockchain/src/service/mod.rs +++ b/storage/blockchain/src/service/mod.rs @@ -64,7 +64,7 @@ //! use hex_literal::hex; //! use tower::{Service, ServiceExt}; //! -//! use cuprate_types::blockchain::{BCReadRequest, BCWriteRequest, BCResponse}; +//! use cuprate_types::{blockchain::{BCReadRequest, BCWriteRequest, BCResponse}, Chain}; //! use cuprate_test_utils::data::block_v16_tx0; //! //! use cuprate_blockchain::{ @@ -101,7 +101,7 @@ //! //! // Now, let's try getting the block hash //! // of the block we just wrote. -//! let request = BCReadRequest::BlockHash(0); +//! let request = BCReadRequest::BlockHash(0, Chain::Main); //! let response_channel = read_handle.ready().await?.call(request); //! let response = response_channel.await?; //! assert_eq!( diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index 22c79e49..f943c8d5 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -17,7 +17,7 @@ use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThre use cuprate_helper::map::combine_low_high_bits_to_u128; use cuprate_types::{ blockchain::{BCReadRequest, BCResponse}, - ExtendedBlockHeader, OutputOnChain, + Chain, ExtendedBlockHeader, OutputOnChain, }; use crate::{ @@ -83,11 +83,14 @@ fn map_request( match request { R::BlockExtendedHeader(block) => block_extended_header(env, block), - R::BlockHash(block) => block_hash(env, block), + R::BlockHash(block, chain) => block_hash(env, block, chain), + R::FindBlock(_) => todo!("Add alt blocks to DB"), R::FilterUnknownHashes(hashes) => filter_unknown_hashes(env, hashes), - R::BlockExtendedHeaderInRange(range) => block_extended_header_in_range(env, range), + R::BlockExtendedHeaderInRange(range, chain) => { + block_extended_header_in_range(env, range, chain) + } R::ChainHeight => chain_height(env), - R::GeneratedCoins => generated_coins(env), + R::GeneratedCoins(height) => generated_coins(env, height), R::Outputs(map) => outputs(env, map), R::NumberOutputsWithAmount(vec) => number_outputs_with_amount(env, vec), R::KeyImagesSpent(set) => key_images_spent(env, set), @@ -184,15 +187,18 @@ fn block_extended_header(env: &ConcreteEnv, block_height: BlockHeight) -> Respon /// [`BCReadRequest::BlockHash`]. 
#[inline] -fn block_hash(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult { +fn block_hash(env: &ConcreteEnv, block_height: BlockHeight, chain: Chain) -> ResponseResult { // Single-threaded, no `ThreadLocal` required. let env_inner = env.env_inner(); let tx_ro = env_inner.tx_ro()?; let table_block_infos = env_inner.open_db_ro::(&tx_ro)?; - Ok(BCResponse::BlockHash( - get_block_info(&block_height, &table_block_infos)?.block_hash, - )) + let block_hash = match chain { + Chain::Main => get_block_info(&block_height, &table_block_infos)?.block_hash, + Chain::Alt(_) => todo!("Add alt blocks to DB"), + }; + + Ok(BCResponse::BlockHash(block_hash)) } /// [`BCReadRequest::FilterUnknownHashes`]. @@ -228,6 +234,7 @@ fn filter_unknown_hashes(env: &ConcreteEnv, mut hashes: HashSet) -> R fn block_extended_header_in_range( env: &ConcreteEnv, range: std::ops::Range, + chain: Chain, ) -> ResponseResult { // Prepare tx/tables in `ThreadLocal`. let env_inner = env.env_inner(); @@ -235,14 +242,17 @@ fn block_extended_header_in_range( let tables = thread_local(env); // Collect results using `rayon`. - let vec = range - .into_par_iter() - .map(|block_height| { - let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; - let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); - get_block_extended_header_from_height(&block_height, tables) - }) - .collect::, RuntimeError>>()?; + let vec = match chain { + Chain::Main => range + .into_par_iter() + .map(|block_height| { + let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; + let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); + get_block_extended_header_from_height(&block_height, tables) + }) + .collect::, RuntimeError>>()?, + Chain::Alt(_) => todo!("Add alt blocks to DB"), + }; Ok(BCResponse::BlockExtendedHeaderInRange(vec)) } @@ -265,17 +275,14 @@ fn chain_height(env: &ConcreteEnv) -> ResponseResult { /// [`BCReadRequest::GeneratedCoins`]. #[inline] -fn generated_coins(env: &ConcreteEnv) -> ResponseResult { +fn generated_coins(env: &ConcreteEnv, height: u64) -> ResponseResult { // Single-threaded, no `ThreadLocal` required. 
let env_inner = env.env_inner(); let tx_ro = env_inner.tx_ro()?; - let table_block_heights = env_inner.open_db_ro::(&tx_ro)?; let table_block_infos = env_inner.open_db_ro::(&tx_ro)?; - let top_height = top_block_height(&table_block_heights)?; - Ok(BCResponse::GeneratedCoins(cumulative_generated_coins( - &top_height, + &height, &table_block_infos, )?)) } diff --git a/storage/blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs index 4bff452a..8158b8ea 100644 --- a/storage/blockchain/src/service/tests.rs +++ b/storage/blockchain/src/service/tests.rs @@ -19,7 +19,7 @@ use cuprate_database::{ConcreteEnv, DatabaseIter, DatabaseRo, Env, EnvInner, Run use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3}; use cuprate_types::{ blockchain::{BCReadRequest, BCResponse, BCWriteRequest}, - OutputOnChain, VerifiedBlockInformation, + Chain, OutputOnChain, VerifiedBlockInformation, }; use crate::{ @@ -137,10 +137,15 @@ async fn test_template( Err(RuntimeError::KeyNotFound) }; + let test_chain_height = chain_height(tables.block_heights()).unwrap(); + let chain_height = { - let height = chain_height(tables.block_heights()).unwrap(); - let block_info = get_block_info(&height.saturating_sub(1), tables.block_infos()).unwrap(); - Ok(BCResponse::ChainHeight(height, block_info.block_hash)) + let block_info = + get_block_info(&test_chain_height.saturating_sub(1), tables.block_infos()).unwrap(); + Ok(BCResponse::ChainHeight( + test_chain_height, + block_info.block_hash, + )) }; let cumulative_generated_coins = Ok(BCResponse::GeneratedCoins(cumulative_generated_coins)); @@ -181,12 +186,21 @@ async fn test_template( BCReadRequest::BlockExtendedHeader(1), extended_block_header_1, ), - (BCReadRequest::BlockHash(0), block_hash_0), - (BCReadRequest::BlockHash(1), block_hash_1), - (BCReadRequest::BlockExtendedHeaderInRange(0..1), range_0_1), - (BCReadRequest::BlockExtendedHeaderInRange(0..2), range_0_2), + (BCReadRequest::BlockHash(0, Chain::Main), block_hash_0), + (BCReadRequest::BlockHash(1, Chain::Main), block_hash_1), + ( + BCReadRequest::BlockExtendedHeaderInRange(0..1, Chain::Main), + range_0_1, + ), + ( + BCReadRequest::BlockExtendedHeaderInRange(0..2, Chain::Main), + range_0_2, + ), (BCReadRequest::ChainHeight, chain_height), - (BCReadRequest::GeneratedCoins, cumulative_generated_coins), + ( + BCReadRequest::GeneratedCoins(test_chain_height), + cumulative_generated_coins, + ), (BCReadRequest::NumberOutputsWithAmount(num_req), num_resp), (BCReadRequest::KeyImagesSpent(ki_req), ki_resp), ] { diff --git a/test-utils/src/rpc/data/json.rs b/test-utils/src/rpc/data/json.rs index 2463e456..a05af670 100644 --- a/test-utils/src/rpc/data/json.rs +++ b/test-utils/src/rpc/data/json.rs @@ -771,7 +771,7 @@ r#"{ "id": "0", "method": "get_output_histogram", "params": { - "amounts": ["20000000000"] + "amounts": [20000000000] } }"#; Response = @@ -1106,13 +1106,17 @@ r#"{ "id": "0", "jsonrpc": "2.0", "result": { + "credits": 0, "distributions": [{ "amount": 2628780000, "base": 0, "distribution": "", - "start_height": 1462078 + "start_height": 1462078, + "binary": false }], - "status": "OK" + "status": "OK", + "top_hash": "", + "untrusted": false } }"#; } diff --git a/test-utils/src/rpc/data/other.rs b/test-utils/src/rpc/data/other.rs index 2559bbe3..80a48ab1 100644 --- a/test-utils/src/rpc/data/other.rs +++ b/test-utils/src/rpc/data/other.rs @@ -234,11 +234,13 @@ define_request_and_response! 
{ set_log_hash_rate (other), SET_LOG_HASH_RATE: &str, Request = -r#"{}"#; +r#"{ + "visible": true +}"#; Response = r#" { - "status": "OK" + "status": "OK", "untrusted": false }"#; } @@ -252,7 +254,7 @@ r#"{ }"#; Response = r#"{ - "status": "OK" + "status": "OK", "untrusted": false }"#; } @@ -673,7 +675,7 @@ r#"{ "limit_down": 1280000, "limit_up": 1280000, "status": "OK", - "untrusted": true + "untrusted": false }"#; } @@ -688,7 +690,7 @@ r#"{ r#"{ "limit_down": 1024, "limit_up": 128, - "status": "OK" + "status": "OK", "untrusted": false }"#; } @@ -712,13 +714,15 @@ define_request_and_response! { get_net_stats (other), GET_NET_STATS: &str, Request = -r#"{ - "in_peers": 3232235535 -}"#; +r#"{}"#; Response = r#"{ - "in_peers": 3232235535, + "start_time": 1721251858, "status": "OK", + "total_bytes_in": 16283817214, + "total_bytes_out": 34225244079, + "total_packets_in": 5981922, + "total_packets_out": 3627107, "untrusted": false }"#; } @@ -804,7 +808,26 @@ r#"{ "credits": 0, "status": "OK", "top_hash": "", - "tx_hashes": ["aa928aed888acd6152c60194d50a4df29b0b851be6169acf11b6a8e304dd6c03","794345f321a98f3135151f3056c0fdf8188646a8dab27de971428acf3551dd11","1e9d2ae11f2168a228942077483e70940d34e8658c972bbc3e7f7693b90edf17","7375c928f261d00f07197775eb0bfa756e5f23319819152faa0b3c670fe54c1b","2e4d5f8c5a45498f37fb8b6ca4ebc1efa0c371c38c901c77e66b08c072287329","eee6d596cf855adfb10e1597d2018e3a61897ac467ef1d4a5406b8d20bfbd52f","59c574d7ba9bb4558470f74503c7518946a85ea22c60fccfbdec108ce7d8f236","0d57bec1e1075a9e1ac45cf3b3ced1ad95ccdf2a50ce360190111282a0178655","60d627b2369714a40009c07d6185ebe7fa4af324fdfa8d95a37a936eb878d062","661d7e728a901a8cb4cf851447d9cd5752462687ed0b776b605ba706f06bdc7d","b80e1f09442b00b3fffe6db5d263be6267c7586620afff8112d5a8775a6fc58e","974063906d1ddfa914baf85176b0f689d616d23f3d71ed4798458c8b4f9b9d8f","d2575ae152a180be4981a9d2fc009afcd073adaa5c6d8b022c540a62d6c905bb","3d78aa80ee50f506683bab9f02855eb10257a08adceda7cbfbdfc26b10f6b1bb","8b5bc125bdb73b708500f734501d55088c5ac381a0879e1141634eaa72b6a4da","11c06f4d2f00c912ca07313ed2ea5366f3cae914a762bed258731d3d9e3706df","b3644dc7c9a3a53465fe80ad3769e516edaaeb7835e16fdd493aac110d472ae1","ed2478ad793b923dbf652c8612c40799d764e5468897021234a14a37346bc6ee"], + "tx_hashes": [ + "aa928aed888acd6152c60194d50a4df29b0b851be6169acf11b6a8e304dd6c03", + "794345f321a98f3135151f3056c0fdf8188646a8dab27de971428acf3551dd11", + "1e9d2ae11f2168a228942077483e70940d34e8658c972bbc3e7f7693b90edf17", + "7375c928f261d00f07197775eb0bfa756e5f23319819152faa0b3c670fe54c1b", + "2e4d5f8c5a45498f37fb8b6ca4ebc1efa0c371c38c901c77e66b08c072287329", + "eee6d596cf855adfb10e1597d2018e3a61897ac467ef1d4a5406b8d20bfbd52f", + "59c574d7ba9bb4558470f74503c7518946a85ea22c60fccfbdec108ce7d8f236", + "0d57bec1e1075a9e1ac45cf3b3ced1ad95ccdf2a50ce360190111282a0178655", + "60d627b2369714a40009c07d6185ebe7fa4af324fdfa8d95a37a936eb878d062", + "661d7e728a901a8cb4cf851447d9cd5752462687ed0b776b605ba706f06bdc7d", + "b80e1f09442b00b3fffe6db5d263be6267c7586620afff8112d5a8775a6fc58e", + "974063906d1ddfa914baf85176b0f689d616d23f3d71ed4798458c8b4f9b9d8f", + "d2575ae152a180be4981a9d2fc009afcd073adaa5c6d8b022c540a62d6c905bb", + "3d78aa80ee50f506683bab9f02855eb10257a08adceda7cbfbdfc26b10f6b1bb", + "8b5bc125bdb73b708500f734501d55088c5ac381a0879e1141634eaa72b6a4da", + "11c06f4d2f00c912ca07313ed2ea5366f3cae914a762bed258731d3d9e3706df", + "b3644dc7c9a3a53465fe80ad3769e516edaaeb7835e16fdd493aac110d472ae1", + "ed2478ad793b923dbf652c8612c40799d764e5468897021234a14a37346bc6ee" + ], "untrusted": false }"#; } diff 
--git a/types/src/blockchain.rs b/types/src/blockchain.rs index 4a280bec..1ff06c29 100644 --- a/types/src/blockchain.rs +++ b/types/src/blockchain.rs @@ -9,7 +9,7 @@ use std::{ ops::Range, }; -use crate::types::{ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation}; +use crate::types::{Chain, ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation}; //---------------------------------------------------------------------------------------------------- ReadRequest /// A read request to the blockchain database. @@ -29,8 +29,13 @@ pub enum BCReadRequest { /// Request a block's hash. /// - /// The input is the block's height. - BlockHash(u64), + /// The input is the block's height and the chain it is on. + BlockHash(u64, Chain), + + /// Request to check if we have a block and which [`Chain`] it is on. + /// + /// The input is the block's hash. + FindBlock([u8; 32]), /// Removes the block hashes that are not in the _main_ chain. /// @@ -40,15 +45,15 @@ pub enum BCReadRequest { /// Request a range of block extended headers. /// /// The input is a range of block heights. - BlockExtendedHeaderInRange(Range), + BlockExtendedHeaderInRange(Range, Chain), /// Request the current chain height. /// /// Note that this is not the top-block height. ChainHeight, - /// Request the total amount of generated coins (atomic units) so far. - GeneratedCoins, + /// Request the total amount of generated coins (atomic units) at this height. + GeneratedCoins(u64), /// Request data for multiple outputs. /// @@ -129,6 +134,11 @@ pub enum BCResponse { /// Inner value is the hash of the requested block. BlockHash([u8; 32]), + /// Response to [`BCReadRequest::FindBlock`]. + /// + /// Inner value is the chain and height of the block if found. + FindBlock(Option<(Chain, u64)>), + /// Response to [`BCReadRequest::FilterUnknownHashes`]. /// /// Inner value is the list of hashes that were in the main chain. @@ -146,7 +156,7 @@ pub enum BCResponse { /// Response to [`BCReadRequest::GeneratedCoins`]. /// - /// Inner value is the total amount of generated coins so far, in atomic units. + /// Inner value is the total amount of generated coins up to and including the chosen height, in atomic units. GeneratedCoins(u64), /// Response to [`BCReadRequest::Outputs`]. diff --git a/types/src/lib.rs b/types/src/lib.rs index 1cdb9d57..bcf6a45d 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -84,7 +84,8 @@ mod types; pub use block_complete_entry::{BlockCompleteEntry, PrunedTxBlobEntry, TransactionBlobs}; pub use types::{ - ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation, VerifiedTransactionInformation, + AltBlockInformation, Chain, ChainId, ExtendedBlockHeader, OutputOnChain, + VerifiedBlockInformation, VerifiedTransactionInformation, }; //---------------------------------------------------------------------------------------------------- Feature-gated diff --git a/types/src/types.rs b/types/src/types.rs index 76ffd57a..db315075 100644 --- a/types/src/types.rs +++ b/types/src/types.rs @@ -38,7 +38,8 @@ pub struct ExtendedBlockHeader { //---------------------------------------------------------------------------------------------------- VerifiedTransactionInformation /// Verified information of a transaction. /// -/// This represents a transaction in a valid block. 
+/// - If this is in a [`VerifiedBlockInformation`] this represents a valid transaction +/// - If this is in an [`AltBlockInformation`] this represents a potentially valid transaction #[derive(Clone, Debug, PartialEq, Eq)] pub struct VerifiedTransactionInformation { /// The transaction itself. @@ -91,6 +92,53 @@ pub struct VerifiedBlockInformation { pub cumulative_difficulty: u128, } +//---------------------------------------------------------------------------------------------------- ChainID +/// A unique ID for an alt chain. +/// +/// The inner value is meaningless. +#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)] +pub struct ChainId(pub u64); + +//---------------------------------------------------------------------------------------------------- Chain +/// An identifier for a chain. +#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)] +pub enum Chain { + /// The main chain. + Main, + /// An alt chain. + Alt(ChainId), +} + +//---------------------------------------------------------------------------------------------------- AltBlockInformation +/// A block on an alternative chain. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct AltBlockInformation { + /// The block itself. + pub block: Block, + /// The serialized byte form of [`Self::block`]. + /// + /// [`Block::serialize`]. + pub block_blob: Vec, + /// All the transactions in the block, excluding the [`Block::miner_tx`]. + pub txs: Vec, + /// The block's hash. + /// + /// [`Block::hash`]. + pub block_hash: [u8; 32], + /// The block's proof-of-work hash. + pub pow_hash: [u8; 32], + /// The block's height. + pub height: u64, + /// The adjusted block size, in bytes. + pub weight: usize, + /// The long term block weight, which is the weight factored in with previous block weights. + pub long_term_weight: usize, + /// The cumulative difficulty of all blocks up until and including this block. + pub cumulative_difficulty: u128, + /// The [`ChainId`] of the chain this alt block is on. + pub chain_id: ChainId, +} + //---------------------------------------------------------------------------------------------------- OutputOnChain /// An already existing transaction output. #[derive(Clone, Copy, Debug, PartialEq, Eq)] diff --git a/typos.toml b/typos.toml index 0317c404..fbd66d09 100644 --- a/typos.toml +++ b/typos.toml @@ -18,4 +18,5 @@ extend-exclude = [ "/misc/gpg_keys/", "cryptonight/", "/test-utils/src/rpc/data/json.rs", + "rpc/types/src/json.rs", ]
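With the `cuprate-types` additions above, blockchain read requests are now explicit about the chain and height they target. A short usage sketch in the style of the `service` module docs (inside an async context; `read_handle` is assumed to be the read handle returned by `cuprate_blockchain::service::init`, as shown in `service/mod.rs`):

```rust
use tower::{Service, ServiceExt};

use cuprate_types::{
    blockchain::{BCReadRequest, BCResponse},
    Chain,
};

// Hash of main-chain block 0; alt chains would pass `Chain::Alt(ChainId(..))`.
let request = BCReadRequest::BlockHash(0, Chain::Main);
let response = read_handle.ready().await?.call(request).await?;
assert!(matches!(response, BCResponse::BlockHash(_)));

// `GeneratedCoins` now takes the height of interest instead of implicitly
// using the top block.
let request = BCReadRequest::GeneratedCoins(0);
let response = read_handle.ready().await?.call(request).await?;
assert!(matches!(response, BCResponse::GeneratedCoins(_)));
```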