Consensus: use cuprate-types types (#145)

* move consensus database to /types

* fix `storage` builds

* unify `VerifiedBlockInformation`

* fix docs

* change `Database` trait wording

* order imports

* service -> blockchain

* Apply suggestions from code review

Co-authored-by: hinto-janai <hinto.janai@protonmail.com>

* fix typo

* fix key_images_spent

* add back todo

* fix tests

* service -> blockchain 2

* update docs

* update docs 2

---------

Co-authored-by: hinto-janai <hinto.janai@protonmail.com>
Authored by Boog900 on 2024-06-04 17:19:35 +00:00; committed by GitHub.
Commit 0622237d19 (parent b510739701).
28 changed files with 470 additions and 554 deletions

Cargo.lock (generated, 24 changed lines)
View file

@ -217,18 +217,18 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
[[package]]
name = "bytemuck"
version = "1.15.0"
version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15"
checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5"
dependencies = [
"bytemuck_derive",
]
[[package]]
name = "bytemuck_derive"
version = "1.6.0"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60"
checksum = "1ee891b04274a59bd38b412188e24b849617b2e45a0fd8d057deb63e7403761b"
dependencies = [
"proc-macro2",
"quote",
@ -326,9 +326,9 @@ dependencies = [
[[package]]
name = "crossbeam-channel"
version = "0.5.12"
version = "0.5.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95"
checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2"
dependencies = [
"crossbeam-utils",
]
@ -441,6 +441,7 @@ dependencies = [
"cuprate-consensus-rules",
"cuprate-helper",
"cuprate-test-utils",
"cuprate-types",
"curve25519-dalek",
"dalek-ff-group",
"futures",
@ -557,11 +558,8 @@ version = "0.0.0"
name = "cuprate-types"
version = "0.0.0"
dependencies = [
"borsh",
"cfg-if",
"curve25519-dalek",
"monero-serai",
"serde",
]
[[package]]
@ -972,9 +970,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
[[package]]
name = "heed"
version = "0.20.0"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7a300b0deeb2957162d7752b0f063b3be1c88333af5bb4e7a57d8fb3716f50b"
checksum = "f60d7cff16094be9627830b399c087a25017e93fb3768b87cd656a68ccb1ebe8"
dependencies = [
"bitflags 2.5.0",
"byteorder",
@ -1260,9 +1258,9 @@ checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c"
[[package]]
name = "lmdb-master-sys"
version = "0.2.0"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc9048db3a58c0732d7236abc4909058f9d2708cfb6d7d047eb895fddec6419a"
checksum = "a5142795c220effa4c8f4813537bd4c88113a07e45e93100ccb2adc5cec6c7f3"
dependencies = [
"cc",
"doxygen-rs",

View file

@ -10,6 +10,7 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/consensus"
[dependencies]
cuprate-helper = { path = "../helper", default-features = false, features = ["std", "asynch", "num"] }
cuprate-consensus-rules = { path = "./rules", features = ["rayon"] }
cuprate-types = { path = "../types" }
thiserror = { workspace = true }
tower = { workspace = true, features = ["util"] }

View file

@ -163,6 +163,7 @@ impl HardFork {
/// Returns the hard-fork for a blocks `major_version` field.
///
/// <https://monero-book.cuprate.org/consensus_rules/hardforks.html#blocks-version-and-vote>
#[inline]
pub fn from_version(version: u8) -> Result<HardFork, HardForkError> {
Ok(match version {
1 => HardFork::V1,
@ -188,6 +189,7 @@ impl HardFork {
/// Returns the hard-fork for a blocks `minor_version` (vote) field.
///
/// <https://monero-book.cuprate.org/consensus_rules/hardforks.html#blocks-version-and-vote>
#[inline]
pub fn from_vote(vote: u8) -> HardFork {
if vote == 0 {
// A vote of 0 is interpreted as 1 as that's what Monero used to default to.
@ -197,6 +199,7 @@ impl HardFork {
Self::from_version(vote).unwrap_or(HardFork::V16)
}
#[inline]
pub fn from_block_header(header: &BlockHeader) -> Result<(HardFork, HardFork), HardForkError> {
Ok((
HardFork::from_version(header.major_version)?,
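
For orientation, the (unchanged) semantics of the two constructors marked `#[inline]` above: `from_version` fails on versions with no defined hard-fork, while `from_vote` never fails. A small sketch of the expected behaviour; the assertions are illustrative and not part of this diff:

```rust
use cuprate_consensus_rules::HardFork;

fn main() {
    // `from_version` only accepts versions that map to a defined hard-fork.
    assert!(matches!(HardFork::from_version(1), Ok(HardFork::V1)));
    assert!(HardFork::from_version(200).is_err());

    // A vote of 0 is read as 1 (the old Monero default), and any
    // unknown vote saturates to the newest hard-fork (V16).
    assert!(matches!(HardFork::from_vote(0), HardFork::V1));
    assert!(matches!(HardFork::from_vote(200), HardFork::V16));
}
```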

View file

@ -6,22 +6,13 @@ use std::{
use curve25519_dalek::EdwardsPoint;
use monero_serai::transaction::{Input, Timelock};
use crate::{transactions::TransactionError, HardFork, TxVersion};
/// An already approved previous transaction output.
#[derive(Debug, Copy, Clone)]
pub struct OutputOnChain {
pub height: u64,
pub time_lock: Timelock,
pub key: Option<EdwardsPoint>,
pub commitment: EdwardsPoint,
}
use crate::{transactions::TransactionError, HardFork};
/// Gets the absolute offsets from the relative offsets.
///
/// This function will return an error if the relative offsets are empty.
/// <https://cuprate.github.io/monero-book/consensus_rules/transactions.html#inputs-must-have-decoys>
fn get_absolute_offsets(relative_offsets: &[u64]) -> Result<Vec<u64>, TransactionError> {
pub fn get_absolute_offsets(relative_offsets: &[u64]) -> Result<Vec<u64>, TransactionError> {
if relative_offsets.is_empty() {
return Err(TransactionError::InputDoesNotHaveExpectedNumbDecoys);
}
@ -64,35 +55,6 @@ pub fn insert_ring_member_ids(
Ok(())
}
/// Get the ring members for the inputs from the outputs on the chain.
///
/// Will error if `outputs` does not contain the outputs needed.
pub fn get_ring_members_for_inputs(
get_outputs: impl Fn(u64, u64) -> Option<OutputOnChain>,
inputs: &[Input],
) -> Result<Vec<Vec<OutputOnChain>>, TransactionError> {
inputs
.iter()
.map(|inp| match inp {
Input::ToKey {
amount,
key_offsets,
..
} => {
let offsets = get_absolute_offsets(key_offsets)?;
Ok(offsets
.iter()
.map(|offset| {
get_outputs(amount.unwrap_or(0), *offset)
.ok_or(TransactionError::RingMemberNotFoundOrInvalid)
})
.collect::<Result<_, TransactionError>>()?)
}
_ => Err(TransactionError::IncorrectInputType),
})
.collect::<Result<_, TransactionError>>()
}
/// Represents the ring members of all the inputs.
#[derive(Debug)]
pub enum Rings {
@ -102,45 +64,6 @@ pub enum Rings {
RingCT(Vec<Vec<[EdwardsPoint; 2]>>),
}
impl Rings {
/// Builds the rings for the transaction inputs, from the given outputs.
fn new(
outputs: Vec<Vec<OutputOnChain>>,
tx_version: TxVersion,
) -> Result<Rings, TransactionError> {
Ok(match tx_version {
TxVersion::RingSignatures => Rings::Legacy(
outputs
.into_iter()
.map(|inp_outs| {
inp_outs
.into_iter()
.map(|out| out.key.ok_or(TransactionError::RingMemberNotFoundOrInvalid))
.collect::<Result<Vec<_>, TransactionError>>()
})
.collect::<Result<Vec<_>, TransactionError>>()?,
),
TxVersion::RingCT => Rings::RingCT(
outputs
.into_iter()
.map(|inp_outs| {
inp_outs
.into_iter()
.map(|out| {
Ok([
out.key
.ok_or(TransactionError::RingMemberNotFoundOrInvalid)?,
out.commitment,
])
})
.collect::<Result<_, TransactionError>>()
})
.collect::<Result<_, _>>()?,
),
})
}
}
/// Information on the outputs the transaction is referencing for inputs (ring members).
#[derive(Debug)]
pub struct TxRingMembersInfo {
@ -151,46 +74,6 @@ pub struct TxRingMembersInfo {
pub time_locked_outs: Vec<Timelock>,
}
impl TxRingMembersInfo {
/// Construct a [`TxRingMembersInfo`] struct.
///
/// The used outs must be all the ring members used in the transactions inputs.
pub fn new(
used_outs: Vec<Vec<OutputOnChain>>,
decoy_info: Option<DecoyInfo>,
tx_version: TxVersion,
) -> Result<TxRingMembersInfo, TransactionError> {
Ok(TxRingMembersInfo {
youngest_used_out_height: used_outs
.iter()
.map(|inp_outs| {
inp_outs
.iter()
// the output with the highest height is the youngest
.map(|out| out.height)
.max()
.expect("Input must have ring members")
})
.max()
.expect("Tx must have inputs"),
time_locked_outs: used_outs
.iter()
.flat_map(|inp_outs| {
inp_outs
.iter()
.filter_map(|out| match out.time_lock {
Timelock::None => None,
lock => Some(lock),
})
.collect::<Vec<_>>()
})
.collect(),
rings: Rings::new(used_outs, tx_version)?,
decoy_info,
})
}
}
/// A struct holding information about the inputs and their decoys. This data can vary by block so
/// this data needs to be retrieved after every change in the blockchain.
///
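
`get_absolute_offsets` is made `pub` here because the ring-building code that consumed it moved into the consensus crate (see `transactions/contextual_data.rs` below). Assuming the usual Monero encoding, where each key offset is stored relative to the previous one, the conversion is a running sum; a standalone illustration, not the crate's exact implementation:

```rust
/// Relative key offsets -> absolute output indexes, as a running sum.
/// (The real helper additionally rejects an empty offset list, as shown above.)
fn absolute_offsets(relative: &[u64]) -> Vec<u64> {
    let mut sum = 0;
    relative
        .iter()
        .map(|r| {
            sum += r;
            sum
        })
        .collect()
}

fn main() {
    // Relative offsets [3, 5, 2] reference absolute outputs [3, 8, 10].
    assert_eq!(absolute_offsets(&[3, 5, 2]), vec![3, 8, 10]);
}
```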

View file

@ -1,6 +1,6 @@
//! Block Verifier Service.
use std::{
collections::HashSet,
collections::HashMap,
future::Future,
pin::Pin,
sync::Arc,
@ -17,6 +17,7 @@ use cuprate_consensus_rules::{
miner_tx::MinerTxError,
ConsensusError, HardFork,
};
use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
use crate::{
context::{BlockChainContextRequest, BlockChainContextResponse},
@ -83,37 +84,12 @@ impl PrePreparedBlock {
}
}
/// Information about a verified block.
#[derive(Debug)]
pub struct VerifiedBlockInformation {
/// The block that has been verified.
pub block: Block,
/// The block's hard-fork vote.
pub hf_vote: HardFork,
/// The txs in this block.
pub txs: Arc<[Arc<TransactionVerificationData>]>,
/// The blocks hash.
pub block_hash: [u8; 32],
/// the blocks POW hash.
pub pow_hash: [u8; 32],
/// The blocks height.
pub height: u64,
/// The amount of coins generated by this block.
pub generated_coins: u64,
/// This blocks wight.
pub weight: usize,
/// This blocks long term weight.
pub long_term_weight: usize,
/// The cumulative difficulty of the chain including this block.
pub cumulative_difficulty: u128,
}
/// A request to verify a block.
pub enum VerifyBlockRequest {
/// A request to verify a block.
MainChain {
block: Block,
prepared_txs: Arc<[Arc<TransactionVerificationData>]>,
prepared_txs: HashMap<[u8; 32], TransactionVerificationData>,
},
}
@ -211,7 +187,7 @@ where
/// Verifies a prepared block.
async fn verify_main_chain_block<C, TxV>(
block: Block,
txs: Arc<[Arc<TransactionVerificationData>]>,
mut txs: HashMap<[u8; 32], TransactionVerificationData>,
context_svc: C,
tx_verifier_svc: TxV,
) -> Result<VerifyBlockResponse, ExtendedConsensusError>
@ -255,28 +231,23 @@ where
// Check that the txs included are what we need and that there are not any extra.
let mut tx_hashes = txs.iter().map(|tx| &tx.tx_hash).collect::<HashSet<_>>();
let mut ordered_txs = Vec::with_capacity(txs.len());
tracing::debug!("Checking we have correct transactions for block.");
if tx_hashes.len() != txs.len() {
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
}
for tx_hash in &prepped_block.block.txs {
if !tx_hashes.remove(tx_hash) {
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
}
}
if !tx_hashes.is_empty() {
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
let tx = txs
.remove(tx_hash)
.ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;
ordered_txs.push(Arc::new(tx));
}
drop(txs);
tracing::debug!("Verifying transactions for block.");
tx_verifier_svc
.oneshot(VerifyTxRequest::Prepped {
txs: txs.clone(),
txs: ordered_txs.clone(),
current_chain_height: context.chain_height,
top_hash: context.top_hash,
time_for_time_lock: context.current_adjusted_timestamp_for_time_lock(),
@ -285,11 +256,11 @@ where
.await?;
let block_weight =
prepped_block.miner_tx_weight + txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
let total_fees = txs.iter().map(|tx| tx.fee).sum::<u64>();
prepped_block.miner_tx_weight + ordered_txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
let total_fees = ordered_txs.iter().map(|tx| tx.fee).sum::<u64>();
tracing::debug!("Verifying block header.");
let (hf_vote, generated_coins) = check_block(
let (_, generated_coins) = check_block(
&prepped_block.block,
total_fees,
block_weight,
@ -301,13 +272,30 @@ where
Ok(VerifyBlockResponse::MainChain(VerifiedBlockInformation {
block_hash: prepped_block.block_hash,
block: prepped_block.block,
txs,
block_blob: prepped_block.block_blob,
txs: ordered_txs
.into_iter()
.map(|tx| {
// Note: it would be possible for the transaction verification service to hold onto the tx after the call
// if one of txs was invalid and the rest are still in rayon threads.
let tx = Arc::into_inner(tx).expect(
"Transaction verification service should not hold onto valid transactions.",
);
VerifiedTransactionInformation {
tx_blob: tx.tx_blob,
tx_weight: tx.tx_weight,
fee: tx.fee,
tx_hash: tx.tx_hash,
tx: tx.tx,
}
})
.collect(),
pow_hash: prepped_block.pow_hash,
generated_coins,
weight: block_weight,
height: context.chain_height,
long_term_weight: context.next_block_long_term_weight(block_weight),
hf_vote,
cumulative_difficulty: context.cumulative_difficulty + context.next_difficulty,
}))
}
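
With this change the caller hands the block verifier a map keyed by transaction hash rather than an ordered `Arc` slice; `verify_main_chain_block` drains that map in the order the block lists its transactions. A rough sketch of building such a request (module paths and the `unwrap` are for illustration only):

```rust
use std::collections::HashMap;

use monero_serai::{block::Block, transaction::Transaction};

use cuprate_consensus::{transactions::TransactionVerificationData, VerifyBlockRequest};

/// Hypothetical helper: pair each tx included with a block to its hash.
fn make_request(block: Block, txs: Vec<Transaction>) -> VerifyBlockRequest {
    let prepared_txs: HashMap<[u8; 32], TransactionVerificationData> = txs
        .into_iter()
        .map(|tx| {
            // `TransactionVerificationData::new` hashes and weighs the tx;
            // unwrapping here only to keep the sketch short.
            let tx = TransactionVerificationData::new(tx).unwrap();
            (tx.tx_hash, tx)
        })
        .collect();

    VerifyBlockRequest::MainChain {
        block,
        prepared_txs,
    }
}
```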

View file

@ -12,8 +12,9 @@ use tower::ServiceExt;
use tracing::instrument;
use cuprate_helper::num::median;
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError, HardFork};
use crate::{Database, ExtendedConsensusError, HardFork};
/// The amount of blocks we account for to calculate difficulty
const DIFFICULTY_WINDOW: usize = 720;
@ -301,8 +302,8 @@ async fn get_blocks_in_pow_info<D: Database + Clone>(
) -> Result<(VecDeque<u64>, VecDeque<u128>), ExtendedConsensusError> {
tracing::info!("Getting blocks timestamps");
let DatabaseResponse::BlockExtendedHeaderInRange(ext_header) = database
.oneshot(DatabaseRequest::BlockExtendedHeaderInRange(block_heights))
let BCResponse::BlockExtendedHeaderInRange(ext_header) = database
.oneshot(BCReadRequest::BlockExtendedHeaderInRange(block_heights))
.await?
else {
panic!("Database sent incorrect response");

View file

@ -4,8 +4,9 @@ use tower::ServiceExt;
use tracing::instrument;
use cuprate_consensus_rules::{HFVotes, HFsInfo, HardFork};
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError};
use crate::{Database, ExtendedConsensusError};
/// The default amount of hard-fork votes to track to decide on activation of a hard-fork.
///
@ -86,16 +87,17 @@ impl HardForkState {
debug_assert_eq!(votes.total_votes(), config.window)
}
let DatabaseResponse::BlockExtendedHeader(ext_header) = database
let BCResponse::BlockExtendedHeader(ext_header) = database
.ready()
.await?
.call(DatabaseRequest::BlockExtendedHeader(chain_height - 1))
.call(BCReadRequest::BlockExtendedHeader(chain_height - 1))
.await?
else {
panic!("Database sent incorrect response!");
};
let current_hardfork = ext_header.version;
let current_hardfork =
HardFork::from_version(ext_header.version).expect("Stored block has invalid hardfork");
let mut hfs = HardForkState {
config,
@ -165,15 +167,15 @@ async fn get_votes_in_range<D: Database>(
) -> Result<HFVotes, ExtendedConsensusError> {
let mut votes = HFVotes::new(window_size);
let DatabaseResponse::BlockExtendedHeaderInRange(vote_list) = database
.oneshot(DatabaseRequest::BlockExtendedHeaderInRange(block_heights))
let BCResponse::BlockExtendedHeaderInRange(vote_list) = database
.oneshot(BCReadRequest::BlockExtendedHeaderInRange(block_heights))
.await?
else {
panic!("Database sent incorrect response!");
};
for hf_info in vote_list.into_iter() {
votes.add_vote_for_hf(&hf_info.vote);
votes.add_vote_for_hf(&HardFork::from_vote(hf_info.vote));
}
Ok(votes)

View file

@ -20,8 +20,9 @@ use cuprate_consensus_rules::{
HardFork,
};
use cuprate_helper::asynch::rayon_spawn_async;
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError};
use crate::{Database, ExtendedConsensusError};
/// The amount of randomX VMs to keep in the cache.
const RX_SEEDS_CACHED: usize = 2;
@ -225,10 +226,8 @@ async fn get_block_hashes<D: Database + Clone>(
for height in heights {
let db = database.clone();
fut.push_back(async move {
let DatabaseResponse::BlockHash(hash) = db
.clone()
.oneshot(DatabaseRequest::BlockHash(height))
.await?
let BCResponse::BlockHash(hash) =
db.clone().oneshot(BCReadRequest::BlockHash(height)).await?
else {
panic!("Database sent incorrect response!");
};

View file

@ -9,13 +9,14 @@ use tower::ServiceExt;
use tracing::Instrument;
use cuprate_consensus_rules::blocks::ContextToVerifyBlock;
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
use super::{
difficulty, hardforks, rx_vms, weight, BlockChainContext, BlockChainContextRequest,
BlockChainContextResponse, ContextConfig, RawBlockChainContext, ValidityToken,
BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW,
};
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError};
use crate::{Database, ExtendedConsensusError};
/// A request from the context service to the context task.
pub(super) struct ContextTaskRequest {
@ -69,19 +70,19 @@ impl ContextTask {
tracing::debug!("Initialising blockchain context");
let DatabaseResponse::ChainHeight(chain_height, top_block_hash) = database
let BCResponse::ChainHeight(chain_height, top_block_hash) = database
.ready()
.await?
.call(DatabaseRequest::ChainHeight)
.call(BCReadRequest::ChainHeight)
.await?
else {
panic!("Database sent incorrect response!");
};
let DatabaseResponse::GeneratedCoins(already_generated_coins) = database
let BCResponse::GeneratedCoins(already_generated_coins) = database
.ready()
.await?
.call(DatabaseRequest::GeneratedCoins)
.call(BCReadRequest::GeneratedCoins)
.await?
else {
panic!("Database sent incorrect response!");

View file

@ -18,8 +18,9 @@ use tracing::instrument;
use cuprate_consensus_rules::blocks::{penalty_free_zone, PENALTY_FREE_ZONE_5};
use cuprate_helper::{asynch::rayon_spawn_async, num::median};
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError, HardFork};
use crate::{Database, ExtendedConsensusError, HardFork};
/// The short term block weight window.
const SHORT_TERM_WINDOW: u64 = 100;
@ -292,8 +293,8 @@ async fn get_blocks_weight_in_range<D: Database + Clone>(
) -> Result<Vec<usize>, ExtendedConsensusError> {
tracing::info!("getting block weights.");
let DatabaseResponse::BlockExtendedHeaderInRange(ext_headers) = database
.oneshot(DatabaseRequest::BlockExtendedHeaderInRange(range))
let BCResponse::BlockExtendedHeaderInRange(ext_headers) = database
.oneshot(BCReadRequest::BlockExtendedHeaderInRange(range))
.await?
else {
panic!("Database sent incorrect response!")
@ -313,8 +314,8 @@ async fn get_long_term_weight_in_range<D: Database + Clone>(
) -> Result<Vec<usize>, ExtendedConsensusError> {
tracing::info!("getting block long term weights.");
let DatabaseResponse::BlockExtendedHeaderInRange(ext_headers) = database
.oneshot(DatabaseRequest::BlockExtendedHeaderInRange(range))
let BCResponse::BlockExtendedHeaderInRange(ext_headers) = database
.oneshot(BCReadRequest::BlockExtendedHeaderInRange(range))
.await?
else {
panic!("Database sent incorrect response!")

View file

@ -7,15 +7,10 @@
//! - [`TxVerifierService`] Which handles transaction verification.
//!
//! This crate is generic over the database which is implemented as a [`tower::Service`]. To
//! implement a database you need to have a service which accepts [`DatabaseRequest`] and responds
//! with [`DatabaseResponse`].
//! implement a database you need to have a service which accepts [`BCReadRequest`] and responds
//! with [`BCResponse`].
//!
use std::{
collections::{HashMap, HashSet},
future::Future,
};
use cuprate_consensus_rules::{transactions::OutputOnChain, ConsensusError, HardFork};
use cuprate_consensus_rules::{ConsensusError, HardFork};
mod batch_verifier;
pub mod block;
@ -24,16 +19,16 @@ pub mod context;
mod tests;
pub mod transactions;
pub use block::{
BlockVerifierService, PrePreparedBlock, VerifiedBlockInformation, VerifyBlockRequest,
VerifyBlockResponse,
};
pub use block::{BlockVerifierService, VerifyBlockRequest, VerifyBlockResponse};
pub use context::{
initialize_blockchain_context, BlockChainContext, BlockChainContextRequest,
BlockChainContextResponse, BlockChainContextService, ContextConfig,
};
pub use transactions::{TxVerifierService, VerifyTxRequest, VerifyTxResponse};
// re-export.
pub use cuprate_types::blockchain::{BCReadRequest, BCResponse};
/// An Error returned from one of the consensus services.
#[derive(Debug, thiserror::Error)]
pub enum ExtendedConsensusError {
@ -80,12 +75,24 @@ where
Ok((block_svc, tx_svc))
}
/// An internal trait used to represent a database so we don't have to write [`tower::Service`] bounds
use __private::Database;
pub mod __private {
use std::future::Future;
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
/// A type alias trait used to represent a database, so we don't have to write [`tower::Service`] bounds
/// everywhere.
///
/// Automatically implemented for:
/// ```ignore
/// tower::Service<BCReadRequest, Response = BCResponse, Error = tower::BoxError>
/// ```
pub trait Database:
tower::Service<
DatabaseRequest,
Response = DatabaseResponse,
BCReadRequest,
Response = BCResponse,
Error = tower::BoxError,
Future = Self::Future2,
>
@ -93,102 +100,11 @@ pub trait Database:
type Future2: Future<Output = Result<Self::Response, Self::Error>> + Send + 'static;
}
impl<T: tower::Service<DatabaseRequest, Response = DatabaseResponse, Error = tower::BoxError>>
Database for T
impl<T: tower::Service<BCReadRequest, Response = BCResponse, Error = tower::BoxError>>
crate::Database for T
where
T::Future: Future<Output = Result<Self::Response, Self::Error>> + Send + 'static,
{
type Future2 = T::Future;
}
/// An extended block header.
#[derive(Debug, Copy, Clone)]
pub struct ExtendedBlockHeader {
/// The blocks major version.
pub version: HardFork,
/// The blocks vote.
pub vote: HardFork,
/// The blocks timestamp.
pub timestamp: u64,
/// The blocks cumulative difficulty.
pub cumulative_difficulty: u128,
/// The blocks weight.
pub block_weight: usize,
/// The blocks long term weight.
pub long_term_weight: usize,
}
/// A database request to the database [`tower::Service`]
#[derive(Debug, Clone)]
pub enum DatabaseRequest {
/// A block extended header request.
/// Must return: [`DatabaseResponse::BlockExtendedHeader`]
BlockExtendedHeader(u64),
/// A block hash request.
/// Must return: [`DatabaseResponse::BlockHash`]
BlockHash(u64),
/// Removes the block hashes that are not in the _main_ chain.
///
/// This should filter (remove) hashes in alt-blocks as well.
FilterUnknownHashes(HashSet<[u8; 32]>),
/// A request for multiple block extended headers.
/// Must return: [`DatabaseResponse::BlockExtendedHeaderInRange`]
BlockExtendedHeaderInRange(std::ops::Range<u64>),
/// A request for the chains height.
/// Must return: [`DatabaseResponse::ChainHeight`]
ChainHeight,
/// A request for the total amount of generated coins.
/// Must return: [`DatabaseResponse::GeneratedCoins`]
GeneratedCoins,
/// A request for transaction outputs, this contains a map of amounts to amount indexes.
/// Must return: [`DatabaseResponse::Outputs`]
Outputs(HashMap<u64, HashSet<u64>>),
/// A request for the number of outputs with these amounts.
/// Must return: [`DatabaseResponse::NumberOutputsWithAmount`]
NumberOutputsWithAmount(Vec<u64>),
/// A request to check if these key images are in the database.
/// Must return: [`DatabaseResponse::KeyImagesSpent`]
KeyImagesSpent(HashSet<[u8; 32]>),
}
#[derive(Debug)]
pub enum DatabaseResponse {
/// A block extended header response.
BlockExtendedHeader(ExtendedBlockHeader),
/// A block hash response.
BlockHash([u8; 32]),
FilteredHashes(HashSet<[u8; 32]>),
/// A batch block extended header response.
BlockExtendedHeaderInRange(Vec<ExtendedBlockHeader>),
/// A chain height response.
/// Should contains the chains height and top block hash.
ChainHeight(u64, [u8; 32]),
/// Generated coins response.
/// Should contain the total amount of coins emitted in all block rewards.
GeneratedCoins(u64),
/// Outputs response.
/// Should contain a map of (amounts, amount_idx) -> Output.
/// If an outputs requested does not exist this should *not* be an error, the output
/// should just be omitted from the map.
Outputs(HashMap<u64, HashMap<u64, OutputOnChain>>),
/// Number of outputs response.
/// Should contain a map of amounts -> numb outs.
/// If there are no outputs with that amount then the numb outs should be zero, *no* amounts
/// requested should be omitted.
NumberOutputsWithAmount(HashMap<u64, usize>),
/// Key images spent response.
/// returns true if key images are spent
KeyImagesSpent(bool),
}
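
The old `DatabaseRequest`/`DatabaseResponse` enums are gone; the `Database` alias-trait is now blanket-implemented for any `tower::Service<BCReadRequest, Response = BCResponse, Error = tower::BoxError>`, so in tests a `tower::service_fn` closure is enough. This mirrors the dummy database in the consensus tests further down; the match arms here are a trimmed sketch:

```rust
use std::future::ready;

use tower::service_fn;

use cuprate_consensus::__private::Database;
use cuprate_types::blockchain::{BCReadRequest, BCResponse};

/// A stub read-service; anything meeting the bounds above satisfies `Database`.
fn stub_database() -> impl Database + Clone {
    service_fn(|req: BCReadRequest| {
        ready(Ok(match req {
            BCReadRequest::ChainHeight => BCResponse::ChainHeight(0, [0; 32]),
            BCReadRequest::GeneratedCoins => BCResponse::GeneratedCoins(0),
            _ => unimplemented!("extend with whichever requests the test needs"),
        }))
    })
}
```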

View file

@ -15,7 +15,12 @@ use proptest::{
use proptest_derive::Arbitrary;
use tower::{BoxError, Service};
use crate::{DatabaseRequest, DatabaseResponse, ExtendedBlockHeader, HardFork};
use cuprate_types::{
blockchain::{BCReadRequest, BCResponse},
ExtendedBlockHeader,
};
use crate::HardFork;
prop_compose! {
/// Generates an arbitrary full [`DummyDatabase`], it is not safe to do consensus checks on the returned database
@ -56,8 +61,8 @@ pub struct DummyBlockExtendedHeader {
impl From<DummyBlockExtendedHeader> for ExtendedBlockHeader {
fn from(value: DummyBlockExtendedHeader) -> Self {
ExtendedBlockHeader {
version: value.version.unwrap_or(HardFork::V1),
vote: value.vote.unwrap_or(HardFork::V1),
version: value.version.unwrap_or(HardFork::V1) as u8,
vote: value.vote.unwrap_or(HardFork::V1) as u8,
timestamp: value.timestamp.unwrap_or_default(),
cumulative_difficulty: value.cumulative_difficulty.unwrap_or_default(),
block_weight: value.block_weight.unwrap_or_default(),
@ -122,8 +127,8 @@ pub struct DummyDatabase {
dummy_height: Option<usize>,
}
impl Service<DatabaseRequest> for DummyDatabase {
type Response = DatabaseResponse;
impl Service<BCReadRequest> for DummyDatabase {
type Response = BCResponse;
type Error = BoxError;
type Future =
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
@ -132,13 +137,13 @@ impl Service<DatabaseRequest> for DummyDatabase {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: DatabaseRequest) -> Self::Future {
fn call(&mut self, req: BCReadRequest) -> Self::Future {
let blocks = self.blocks.clone();
let dummy_height = self.dummy_height;
async move {
Ok(match req {
DatabaseRequest::BlockExtendedHeader(id) => {
BCReadRequest::BlockExtendedHeader(id) => {
let mut id = usize::try_from(id).unwrap();
if let Some(dummy_height) = dummy_height {
let block_len = blocks.read().unwrap().len();
@ -146,7 +151,7 @@ impl Service<DatabaseRequest> for DummyDatabase {
id -= dummy_height - block_len;
}
DatabaseResponse::BlockExtendedHeader(
BCResponse::BlockExtendedHeader(
blocks
.read()
.unwrap()
@ -156,12 +161,12 @@ impl Service<DatabaseRequest> for DummyDatabase {
.ok_or("block not in database!")?,
)
}
DatabaseRequest::BlockHash(id) => {
BCReadRequest::BlockHash(id) => {
let mut hash = [0; 32];
hash[0..8].copy_from_slice(&id.to_le_bytes());
DatabaseResponse::BlockHash(hash)
BCResponse::BlockHash(hash)
}
DatabaseRequest::BlockExtendedHeaderInRange(range) => {
BCReadRequest::BlockExtendedHeaderInRange(range) => {
let mut end = usize::try_from(range.end).unwrap();
let mut start = usize::try_from(range.start).unwrap();
@ -172,7 +177,7 @@ impl Service<DatabaseRequest> for DummyDatabase {
start -= dummy_height - block_len;
}
DatabaseResponse::BlockExtendedHeaderInRange(
BCResponse::BlockExtendedHeaderInRange(
blocks
.read()
.unwrap()
@ -184,7 +189,7 @@ impl Service<DatabaseRequest> for DummyDatabase {
.collect(),
)
}
DatabaseRequest::ChainHeight => {
BCReadRequest::ChainHeight => {
let height: u64 = dummy_height
.unwrap_or(blocks.read().unwrap().len())
.try_into()
@ -193,9 +198,9 @@ impl Service<DatabaseRequest> for DummyDatabase {
let mut top_hash = [0; 32];
top_hash[0..8].copy_from_slice(&height.to_le_bytes());
DatabaseResponse::ChainHeight(height, top_hash)
BCResponse::ChainHeight(height, top_hash)
}
DatabaseRequest::GeneratedCoins => DatabaseResponse::GeneratedCoins(0),
BCReadRequest::GeneratedCoins => BCResponse::GeneratedCoins(0),
_ => unimplemented!("the context svc should not need these requests!"),
})
}

View file

@ -28,11 +28,12 @@ use cuprate_consensus_rules::{
ConsensusError, HardFork, TxVersion,
};
use cuprate_helper::asynch::rayon_spawn_async;
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
use crate::{
batch_verifier::MultiThreadedBatchVerifier,
transactions::contextual_data::{batch_get_decoy_info, batch_get_ring_member_info},
Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError,
Database, ExtendedConsensusError,
};
pub mod contextual_data;
@ -129,8 +130,8 @@ pub enum VerifyTxRequest {
/// Verifies a batch of prepared txs.
Prepped {
/// The transactions to verify.
// TODO: Can we use references to remove the outer `Arc`? probably wont play nicely with rayon_spawn_async though
txs: Arc<[Arc<TransactionVerificationData>]>,
// TODO: Can we use references to remove the Vec? wont play nicely with Service though
txs: Vec<Arc<TransactionVerificationData>>,
/// The current chain height.
current_chain_height: u64,
/// The top block hash.
@ -159,7 +160,7 @@ pub enum VerifyTxRequest {
/// A response from a verify transaction request.
#[derive(Debug)]
pub enum VerifyTxResponse {
OkPrepped(Arc<[Arc<TransactionVerificationData>]>),
OkPrepped(Vec<Arc<TransactionVerificationData>>),
Ok,
}
@ -227,7 +228,7 @@ where
} => {
verify_prepped_transactions(
database,
txs,
&txs,
current_chain_height,
top_hash,
time_for_time_lock,
@ -259,13 +260,13 @@ where
let txs = rayon_spawn_async(|| {
txs.into_par_iter()
.map(|tx| TransactionVerificationData::new(tx).map(Arc::new))
.collect::<Result<Arc<_>, _>>()
.collect::<Result<Vec<_>, _>>()
})
.await?;
verify_prepped_transactions(
database,
txs.clone(),
&txs,
current_chain_height,
top_hash,
time_for_time_lock,
@ -279,7 +280,7 @@ where
#[instrument(name = "verify_txs", skip_all, fields(amt = txs.len()) level = "info")]
async fn verify_prepped_transactions<D>(
mut database: D,
txs: Arc<[Arc<TransactionVerificationData>]>,
txs: &[Arc<TransactionVerificationData>],
current_chain_height: u64,
top_hash: [u8; 32],
time_for_time_lock: u64,
@ -307,10 +308,10 @@ where
})
})?;
let DatabaseResponse::KeyImagesSpent(kis_spent) = database
let BCResponse::KeyImagesSpent(kis_spent) = database
.ready()
.await?
.call(DatabaseRequest::KeyImagesSpent(spent_kis))
.call(BCReadRequest::KeyImagesSpent(spent_kis))
.await?
else {
panic!("Database sent incorrect response!");
@ -339,12 +340,10 @@ where
if !verified_at_block_hashes.is_empty() {
tracing::trace!("Filtering block hashes not in the main chain.");
let DatabaseResponse::FilteredHashes(known_hashes) = database
let BCResponse::FilterUnknownHashes(known_hashes) = database
.ready()
.await?
.call(DatabaseRequest::FilterUnknownHashes(
verified_at_block_hashes,
))
.call(BCReadRequest::FilterUnknownHashes(verified_at_block_hashes))
.await?
else {
panic!("Database returned wrong response!");
@ -378,7 +377,7 @@ where
#[allow(clippy::type_complexity)] // I don't think the return is too complex
fn transactions_needing_verification(
txs: Arc<[Arc<TransactionVerificationData>]>,
txs: &[Arc<TransactionVerificationData>],
hashes_in_main_chain: HashSet<[u8; 32]>,
current_hf: &HardFork,
current_chain_height: u64,

View file

@ -15,21 +15,127 @@ use std::{
sync::Arc,
};
use monero_serai::transaction::Input;
use monero_serai::transaction::{Input, Timelock};
use tower::ServiceExt;
use tracing::instrument;
use cuprate_consensus_rules::{
transactions::{
get_ring_members_for_inputs, insert_ring_member_ids, DecoyInfo, TxRingMembersInfo,
get_absolute_offsets, insert_ring_member_ids, DecoyInfo, Rings, TransactionError,
TxRingMembersInfo,
},
ConsensusError, HardFork,
ConsensusError, HardFork, TxVersion,
};
use cuprate_types::{
blockchain::{BCReadRequest, BCResponse},
OutputOnChain,
};
use crate::{
transactions::TransactionVerificationData, Database, DatabaseRequest, DatabaseResponse,
ExtendedConsensusError,
};
use crate::{transactions::TransactionVerificationData, Database, ExtendedConsensusError};
/// Get the ring members for the inputs from the outputs on the chain.
///
/// Will error if `outputs` does not contain the outputs needed.
fn get_ring_members_for_inputs(
get_outputs: impl Fn(u64, u64) -> Option<OutputOnChain>,
inputs: &[Input],
) -> Result<Vec<Vec<OutputOnChain>>, TransactionError> {
inputs
.iter()
.map(|inp| match inp {
Input::ToKey {
amount,
key_offsets,
..
} => {
let offsets = get_absolute_offsets(key_offsets)?;
Ok(offsets
.iter()
.map(|offset| {
get_outputs(amount.unwrap_or(0), *offset)
.ok_or(TransactionError::RingMemberNotFoundOrInvalid)
})
.collect::<Result<_, TransactionError>>()?)
}
_ => Err(TransactionError::IncorrectInputType),
})
.collect::<Result<_, TransactionError>>()
}
/// Construct a [`TxRingMembersInfo`] struct.
///
/// The used outs must be all the ring members used in the transactions inputs.
pub fn new_ring_member_info(
used_outs: Vec<Vec<OutputOnChain>>,
decoy_info: Option<DecoyInfo>,
tx_version: TxVersion,
) -> Result<TxRingMembersInfo, TransactionError> {
Ok(TxRingMembersInfo {
youngest_used_out_height: used_outs
.iter()
.map(|inp_outs| {
inp_outs
.iter()
// the output with the highest height is the youngest
.map(|out| out.height)
.max()
.expect("Input must have ring members")
})
.max()
.expect("Tx must have inputs"),
time_locked_outs: used_outs
.iter()
.flat_map(|inp_outs| {
inp_outs
.iter()
.filter_map(|out| match out.time_lock {
Timelock::None => None,
lock => Some(lock),
})
.collect::<Vec<_>>()
})
.collect(),
rings: new_rings(used_outs, tx_version)?,
decoy_info,
})
}
/// Builds the [`Rings`] for the transaction inputs, from the given outputs.
fn new_rings(
outputs: Vec<Vec<OutputOnChain>>,
tx_version: TxVersion,
) -> Result<Rings, TransactionError> {
Ok(match tx_version {
TxVersion::RingSignatures => Rings::Legacy(
outputs
.into_iter()
.map(|inp_outs| {
inp_outs
.into_iter()
.map(|out| out.key.ok_or(TransactionError::RingMemberNotFoundOrInvalid))
.collect::<Result<Vec<_>, TransactionError>>()
})
.collect::<Result<Vec<_>, TransactionError>>()?,
),
TxVersion::RingCT => Rings::RingCT(
outputs
.into_iter()
.map(|inp_outs| {
inp_outs
.into_iter()
.map(|out| {
Ok([
out.key
.ok_or(TransactionError::RingMemberNotFoundOrInvalid)?,
out.commitment,
])
})
.collect::<Result<_, TransactionError>>()
})
.collect::<Result<_, _>>()?,
),
})
}
/// Retrieves the [`TxRingMembersInfo`] for the inputted [`TransactionVerificationData`].
///
@ -47,19 +153,19 @@ pub async fn batch_get_ring_member_info<D: Database>(
.map_err(ConsensusError::Transaction)?;
}
let DatabaseResponse::Outputs(outputs) = database
let BCResponse::Outputs(outputs) = database
.ready()
.await?
.call(DatabaseRequest::Outputs(output_ids))
.call(BCReadRequest::Outputs(output_ids))
.await?
else {
panic!("Database sent incorrect response!")
};
let DatabaseResponse::NumberOutputsWithAmount(outputs_with_amount) = database
let BCResponse::NumberOutputsWithAmount(outputs_with_amount) = database
.ready()
.await?
.call(DatabaseRequest::NumberOutputsWithAmount(
.call(BCReadRequest::NumberOutputsWithAmount(
outputs.keys().copied().collect(),
))
.await?
@ -87,7 +193,7 @@ pub async fn batch_get_ring_member_info<D: Database>(
None
};
TxRingMembersInfo::new(ring_members_for_tx, decoy_info, tx_v_data.version)
new_ring_member_info(ring_members_for_tx, decoy_info, tx_v_data.version)
.map_err(ConsensusError::Transaction)
})
.collect::<Result<_, _>>()?)
@ -128,10 +234,10 @@ pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>(
unique_input_amounts.len()
);
let DatabaseResponse::NumberOutputsWithAmount(outputs_with_amount) = database
let BCResponse::NumberOutputsWithAmount(outputs_with_amount) = database
.ready()
.await?
.call(DatabaseRequest::NumberOutputsWithAmount(
.call(BCReadRequest::NumberOutputsWithAmount(
unique_input_amounts.into_iter().collect(),
))
.await?
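
These helpers moved here from `cuprate-consensus-rules` largely unchanged: they turn the per-input `OutputOnChain` lookups into `Rings`, where a RingCT ring member collapses to its `[key, commitment]` pair. A hypothetical single output as the verifier receives it from the database; field values are placeholders, and `OutputOnChain` now lives in `cuprate-types`:

```rust
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, EdwardsPoint};
use monero_serai::transaction::Timelock;

use cuprate_types::OutputOnChain;

fn main() {
    // One ring member as returned by `BCReadRequest::Outputs`.
    let out = OutputOnChain {
        height: 123,
        time_lock: Timelock::None,
        // A `None` key is treated as an invalid ring member (see `new_rings` above).
        key: Some(ED25519_BASEPOINT_POINT),
        commitment: ED25519_BASEPOINT_POINT,
    };

    // For a RingCT input this becomes the `[key, commitment]` ring entry.
    let _ring_entry: [EdwardsPoint; 2] = [out.key.unwrap(), out.commitment];
}
```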

View file

@ -9,23 +9,26 @@ use monero_serai::transaction::{Timelock, Transaction};
use tower::{service_fn, Service, ServiceExt};
use cuprate_consensus::{
Database, DatabaseRequest, DatabaseResponse, TxVerifierService, VerifyTxRequest,
VerifyTxResponse,
TxVerifierService, VerifyTxRequest, VerifyTxResponse, __private::Database,
};
use cuprate_types::{
blockchain::{BCReadRequest, BCResponse},
OutputOnChain,
};
use cuprate_consensus_rules::{transactions::OutputOnChain, HardFork};
use cuprate_consensus_rules::HardFork;
use cuprate_test_utils::data::TX_E2D393;
fn dummy_database(outputs: BTreeMap<u64, OutputOnChain>) -> impl Database + Clone {
let outputs = Arc::new(outputs);
service_fn(move |req: DatabaseRequest| {
service_fn(move |req: BCReadRequest| {
ready(Ok(match req {
DatabaseRequest::NumberOutputsWithAmount(_) => {
DatabaseResponse::NumberOutputsWithAmount(HashMap::new())
BCReadRequest::NumberOutputsWithAmount(_) => {
BCResponse::NumberOutputsWithAmount(HashMap::new())
}
DatabaseRequest::Outputs(outs) => {
BCReadRequest::Outputs(outs) => {
let idxs = outs.get(&0).unwrap();
let mut ret = HashMap::new();
@ -37,9 +40,9 @@ fn dummy_database(outputs: BTreeMap<u64, OutputOnChain>) -> impl Database + Clon
.collect::<HashMap<_, _>>(),
);
DatabaseResponse::Outputs(ret)
BCResponse::Outputs(ret)
}
DatabaseRequest::KeyImagesSpent(_) => DatabaseResponse::KeyImagesSpent(false),
BCReadRequest::KeyImagesSpent(_) => BCResponse::KeyImagesSpent(false),
_ => panic!("Database request not needed for this test"),
}))
})

View file

@ -26,7 +26,7 @@ cfg-if = { workspace = true }
# We only need the `thread` feature if `service` is enabled.
# Figure out how to enable features of an already pulled in dependency conditionally.
cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] }
cuprate-types = { path = "../../types", features = ["service"] }
cuprate-types = { path = "../../types", features = ["blockchain"] }
curve25519-dalek = { workspace = true }
monero-pruning = { path = "../../pruning" }
monero-serai = { workspace = true, features = ["std"] }

View file

@ -50,11 +50,11 @@
//! This channel can be `.await`ed upon to (eventually) receive
//! the corresponding `Response` to your `Request`.
//!
//! [req_r]: cuprate_types::service::ReadRequest
//! [req_r]: cuprate_types::blockchain::BCReadRequest
//!
//! [req_w]: cuprate_types::service::WriteRequest
//! [req_w]: cuprate_types::blockchain::BCWriteRequest
//!
//! [resp]: cuprate_types::service::Response
//! [resp]: cuprate_types::blockchain::BCResponse
//!
//! # Example
//! Simple usage of `service`.
@ -63,7 +63,7 @@
//! use hex_literal::hex;
//! use tower::{Service, ServiceExt};
//!
//! use cuprate_types::service::{ReadRequest, WriteRequest, Response};
//! use cuprate_types::blockchain::{BCReadRequest, BCWriteRequest, BCResponse};
//! use cuprate_test_utils::data::block_v16_tx0;
//!
//! use cuprate_blockchain::{ConcreteEnv, config::ConfigBuilder, Env};
@ -82,7 +82,7 @@
//! // Prepare a request to write block.
//! let mut block = block_v16_tx0().clone();
//! # block.height = 0 as u64; // must be 0th height or panic in `add_block()`
//! let request = WriteRequest::WriteBlock(block);
//! let request = BCWriteRequest::WriteBlock(block);
//!
//! // Send the request.
//! // We receive back an `async` channel that will
@ -92,16 +92,16 @@
//!
//! // Block write was OK.
//! let response = response_channel.await?;
//! assert_eq!(response, Response::WriteBlockOk);
//! assert_eq!(response, BCResponse::WriteBlockOk);
//!
//! // Now, let's try getting the block hash
//! // of the block we just wrote.
//! let request = ReadRequest::BlockHash(0);
//! let request = BCReadRequest::BlockHash(0);
//! let response_channel = read_handle.ready().await?.call(request);
//! let response = response_channel.await?;
//! assert_eq!(
//! response,
//! Response::BlockHash(
//! BCResponse::BlockHash(
//! hex!("43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428")
//! )
//! );

View file

@ -15,13 +15,14 @@ use tokio_util::sync::PollSemaphore;
use cuprate_helper::asynch::InfallibleOneshotReceiver;
use cuprate_types::{
service::{ReadRequest, Response},
blockchain::{BCReadRequest, BCResponse},
ExtendedBlockHeader, OutputOnChain,
};
use crate::{
config::ReaderThreads,
error::RuntimeError,
ops::block::block_exists,
ops::{
block::{get_block_extended_header_from_height, get_block_info},
blockchain::{cumulative_generated_coins, top_block_height},
@ -30,6 +31,7 @@ use crate::{
},
service::types::{ResponseReceiver, ResponseResult, ResponseSender},
tables::{BlockHeights, BlockInfos, Tables},
types::BlockHash,
types::{Amount, AmountIndex, BlockHeight, KeyImage, PreRctOutputId},
ConcreteEnv, DatabaseRo, Env, EnvInner,
};
@ -40,9 +42,9 @@ use crate::{
/// This is cheaply [`Clone`]able handle that
/// allows `async`hronously reading from the database.
///
/// Calling [`tower::Service::call`] with a [`DatabaseReadHandle`] & [`ReadRequest`]
/// Calling [`tower::Service::call`] with a [`DatabaseReadHandle`] & [`BCReadRequest`]
/// will return an `async`hronous channel that can be `.await`ed upon
/// to receive the corresponding [`Response`].
/// to receive the corresponding [`BCResponse`].
pub struct DatabaseReadHandle {
/// Handle to the custom `rayon` DB reader thread-pool.
///
@ -131,8 +133,8 @@ impl DatabaseReadHandle {
}
}
impl tower::Service<ReadRequest> for DatabaseReadHandle {
type Response = Response;
impl tower::Service<BCReadRequest> for DatabaseReadHandle {
type Response = BCResponse;
type Error = RuntimeError;
type Future = ResponseReceiver;
@ -152,7 +154,7 @@ impl tower::Service<ReadRequest> for DatabaseReadHandle {
}
#[inline]
fn call(&mut self, request: ReadRequest) -> Self::Future {
fn call(&mut self, request: BCReadRequest) -> Self::Future {
let permit = self
.permit
.take()
@ -189,25 +191,26 @@ impl tower::Service<ReadRequest> for DatabaseReadHandle {
/// The basic structure is:
/// 1. `Request` is mapped to a handler function
/// 2. Handler function is called
/// 3. [`Response`] is sent
/// 3. [`BCResponse`] is sent
fn map_request(
env: &ConcreteEnv, // Access to the database
request: ReadRequest, // The request we must fulfill
request: BCReadRequest, // The request we must fulfill
response_sender: ResponseSender, // The channel we must send the response back to
) {
use ReadRequest as R;
use BCReadRequest as R;
/* SOMEDAY: pre-request handling, run some code for each request? */
let response = match request {
R::BlockExtendedHeader(block) => block_extended_header(env, block),
R::BlockHash(block) => block_hash(env, block),
R::FilterUnknownHashes(hashes) => filter_unknown_hahses(env, hashes),
R::BlockExtendedHeaderInRange(range) => block_extended_header_in_range(env, range),
R::ChainHeight => chain_height(env),
R::GeneratedCoins => generated_coins(env),
R::Outputs(map) => outputs(env, map),
R::NumberOutputsWithAmount(vec) => number_outputs_with_amount(env, vec),
R::CheckKIsNotSpent(set) => check_k_is_not_spent(env, set),
R::KeyImagesSpent(set) => key_images_spent(env, set),
};
if let Err(e) = response_sender.send(response) {
@ -286,7 +289,10 @@ macro_rules! get_tables {
// FIXME: implement multi-transaction read atomicity.
// <https://github.com/Cuprate/cuprate/pull/113#discussion_r1576874589>.
/// [`ReadRequest::BlockExtendedHeader`].
// TODO: The overhead of parallelism may be too much for every request, perfomace test to find optimal
// amount of parallelism.
/// [`BCReadRequest::BlockExtendedHeader`].
#[inline]
fn block_extended_header(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult {
// Single-threaded, no `ThreadLocal` required.
@ -294,12 +300,12 @@ fn block_extended_header(env: &ConcreteEnv, block_height: BlockHeight) -> Respon
let tx_ro = env_inner.tx_ro()?;
let tables = env_inner.open_tables(&tx_ro)?;
Ok(Response::BlockExtendedHeader(
Ok(BCResponse::BlockExtendedHeader(
get_block_extended_header_from_height(&block_height, &tables)?,
))
}
/// [`ReadRequest::BlockHash`].
/// [`BCReadRequest::BlockHash`].
#[inline]
fn block_hash(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult {
// Single-threaded, no `ThreadLocal` required.
@ -307,12 +313,40 @@ fn block_hash(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult {
let tx_ro = env_inner.tx_ro()?;
let table_block_infos = env_inner.open_db_ro::<BlockInfos>(&tx_ro)?;
Ok(Response::BlockHash(
Ok(BCResponse::BlockHash(
get_block_info(&block_height, &table_block_infos)?.block_hash,
))
}
/// [`ReadRequest::BlockExtendedHeaderInRange`].
/// [`BCReadRequest::FilterUnknownHashes`].
#[inline]
fn filter_unknown_hahses(env: &ConcreteEnv, mut hashes: HashSet<BlockHash>) -> ResponseResult {
// Single-threaded, no `ThreadLocal` required.
let env_inner = env.env_inner();
let tx_ro = env_inner.tx_ro()?;
let table_block_heights = env_inner.open_db_ro::<BlockHeights>(&tx_ro)?;
let mut err = None;
hashes.retain(
|block_hash| match block_exists(block_hash, &table_block_heights) {
Ok(exists) => exists,
Err(e) => {
err.get_or_insert(e);
false
}
},
);
if let Some(e) = err {
Err(e)
} else {
Ok(BCResponse::FilterUnknownHashes(hashes))
}
}
/// [`BCReadRequest::BlockExtendedHeaderInRange`].
#[inline]
fn block_extended_header_in_range(
env: &ConcreteEnv,
@ -333,10 +367,10 @@ fn block_extended_header_in_range(
})
.collect::<Result<Vec<ExtendedBlockHeader>, RuntimeError>>()?;
Ok(Response::BlockExtendedHeaderInRange(vec))
Ok(BCResponse::BlockExtendedHeaderInRange(vec))
}
/// [`ReadRequest::ChainHeight`].
/// [`BCReadRequest::ChainHeight`].
#[inline]
fn chain_height(env: &ConcreteEnv) -> ResponseResult {
// Single-threaded, no `ThreadLocal` required.
@ -349,10 +383,10 @@ fn chain_height(env: &ConcreteEnv) -> ResponseResult {
let block_hash =
get_block_info(&chain_height.saturating_sub(1), &table_block_infos)?.block_hash;
Ok(Response::ChainHeight(chain_height, block_hash))
Ok(BCResponse::ChainHeight(chain_height, block_hash))
}
/// [`ReadRequest::GeneratedCoins`].
/// [`BCReadRequest::GeneratedCoins`].
#[inline]
fn generated_coins(env: &ConcreteEnv) -> ResponseResult {
// Single-threaded, no `ThreadLocal` required.
@ -363,13 +397,13 @@ fn generated_coins(env: &ConcreteEnv) -> ResponseResult {
let top_height = top_block_height(&table_block_heights)?;
Ok(Response::GeneratedCoins(cumulative_generated_coins(
Ok(BCResponse::GeneratedCoins(cumulative_generated_coins(
&top_height,
&table_block_infos,
)?))
}
/// [`ReadRequest::Outputs`].
/// [`BCReadRequest::Outputs`].
#[inline]
fn outputs(env: &ConcreteEnv, outputs: HashMap<Amount, HashSet<AmountIndex>>) -> ResponseResult {
// Prepare tx/tables in `ThreadLocal`.
@ -407,10 +441,10 @@ fn outputs(env: &ConcreteEnv, outputs: HashMap<Amount, HashSet<AmountIndex>>) ->
})
.collect::<Result<HashMap<Amount, HashMap<AmountIndex, OutputOnChain>>, RuntimeError>>()?;
Ok(Response::Outputs(map))
Ok(BCResponse::Outputs(map))
}
/// [`ReadRequest::NumberOutputsWithAmount`].
/// [`BCReadRequest::NumberOutputsWithAmount`].
#[inline]
fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec<Amount>) -> ResponseResult {
// Prepare tx/tables in `ThreadLocal`.
@ -452,12 +486,12 @@ fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec<Amount>) -> Respon
})
.collect::<Result<HashMap<Amount, usize>, RuntimeError>>()?;
Ok(Response::NumberOutputsWithAmount(map))
Ok(BCResponse::NumberOutputsWithAmount(map))
}
/// [`ReadRequest::CheckKIsNotSpent`].
/// [`BCReadRequest::KeyImagesSpent`].
#[inline]
fn check_k_is_not_spent(env: &ConcreteEnv, key_images: HashSet<KeyImage>) -> ResponseResult {
fn key_images_spent(env: &ConcreteEnv, key_images: HashSet<KeyImage>) -> ResponseResult {
// Prepare tx/tables in `ThreadLocal`.
let env_inner = env.env_inner();
let tx_ro = thread_local(env);
@ -486,8 +520,8 @@ fn check_k_is_not_spent(env: &ConcreteEnv, key_images: HashSet<KeyImage>) -> Res
// Else, `Ok(false)` will continue the iterator.
.find_any(|result| !matches!(result, Ok(false)))
{
None | Some(Ok(false)) => Ok(Response::CheckKIsNotSpent(true)), // Key image was NOT found.
Some(Ok(true)) => Ok(Response::CheckKIsNotSpent(false)), // Key image was found.
None | Some(Ok(false)) => Ok(BCResponse::KeyImagesSpent(false)), // Key image was NOT found.
Some(Ok(true)) => Ok(BCResponse::KeyImagesSpent(true)), // Key image was found.
Some(Err(e)) => Err(e), // A database error occurred.
}
}
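
Note that the rename also flips the boolean: `CheckKIsNotSpent(true)` used to mean "none of these key images are spent", whereas `KeyImagesSpent(true)` now means "at least one is spent". A small caller-side sketch against any service meeting the read bounds (the helper name is made up):

```rust
use std::collections::HashSet;

use tower::{Service, ServiceExt};

use cuprate_types::blockchain::{BCReadRequest, BCResponse};

/// Returns `Ok(true)` if any of the given key images is already spent.
async fn any_key_image_spent<D>(
    database: &mut D,
    key_images: HashSet<[u8; 32]>,
) -> Result<bool, D::Error>
where
    D: Service<BCReadRequest, Response = BCResponse>,
{
    match database
        .ready()
        .await?
        .call(BCReadRequest::KeyImagesSpent(key_images))
        .await?
    {
        // `true` here means a double spend was found.
        BCResponse::KeyImagesSpent(spent) => Ok(spent),
        _ => unreachable!("database sent incorrect response"),
    }
}
```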

View file

@ -16,7 +16,7 @@ use tower::{Service, ServiceExt};
use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3};
use cuprate_types::{
service::{ReadRequest, Response, WriteRequest},
blockchain::{BCReadRequest, BCResponse, BCWriteRequest},
OutputOnChain, VerifiedBlockInformation,
};
@ -81,10 +81,10 @@ async fn test_template(
block.height = i as u64;
// Request a block to be written, assert it was written.
let request = WriteRequest::WriteBlock(block);
let request = BCWriteRequest::WriteBlock(block);
let response_channel = writer.call(request);
let response = response_channel.await.unwrap();
assert_eq!(response, Response::WriteBlockOk);
assert_eq!(response, BCResponse::WriteBlockOk);
}
//----------------------------------------------------------------------- Reset the transaction
@ -100,36 +100,36 @@ async fn test_template(
// Next few lines are just for preparing the expected responses,
// see further below for usage.
let extended_block_header_0 = Ok(Response::BlockExtendedHeader(
let extended_block_header_0 = Ok(BCResponse::BlockExtendedHeader(
get_block_extended_header_from_height(&0, &tables).unwrap(),
));
let extended_block_header_1 = if block_fns.len() > 1 {
Ok(Response::BlockExtendedHeader(
Ok(BCResponse::BlockExtendedHeader(
get_block_extended_header_from_height(&1, &tables).unwrap(),
))
} else {
Err(RuntimeError::KeyNotFound)
};
let block_hash_0 = Ok(Response::BlockHash(
let block_hash_0 = Ok(BCResponse::BlockHash(
get_block_info(&0, tables.block_infos()).unwrap().block_hash,
));
let block_hash_1 = if block_fns.len() > 1 {
Ok(Response::BlockHash(
Ok(BCResponse::BlockHash(
get_block_info(&1, tables.block_infos()).unwrap().block_hash,
))
} else {
Err(RuntimeError::KeyNotFound)
};
let range_0_1 = Ok(Response::BlockExtendedHeaderInRange(vec![
let range_0_1 = Ok(BCResponse::BlockExtendedHeaderInRange(vec![
get_block_extended_header_from_height(&0, &tables).unwrap(),
]));
let range_0_2 = if block_fns.len() >= 2 {
Ok(Response::BlockExtendedHeaderInRange(vec![
Ok(BCResponse::BlockExtendedHeaderInRange(vec![
get_block_extended_header_from_height(&0, &tables).unwrap(),
get_block_extended_header_from_height(&1, &tables).unwrap(),
]))
@ -140,10 +140,10 @@ async fn test_template(
let chain_height = {
let height = chain_height(tables.block_heights()).unwrap();
let block_info = get_block_info(&height.saturating_sub(1), tables.block_infos()).unwrap();
Ok(Response::ChainHeight(height, block_info.block_hash))
Ok(BCResponse::ChainHeight(height, block_info.block_hash))
};
let cumulative_generated_coins = Ok(Response::GeneratedCoins(cumulative_generated_coins));
let cumulative_generated_coins = Ok(BCResponse::GeneratedCoins(cumulative_generated_coins));
let num_req = tables
.outputs_iter()
@ -153,7 +153,7 @@ async fn test_template(
.map(|key| key.amount)
.collect::<Vec<Amount>>();
let num_resp = Ok(Response::NumberOutputsWithAmount(
let num_resp = Ok(BCResponse::NumberOutputsWithAmount(
num_req
.iter()
.map(|amount| match tables.num_outputs().get(amount) {
@ -168,21 +168,27 @@ async fn test_template(
// Contains a fake non-spent key-image.
let ki_req = HashSet::from([[0; 32]]);
let ki_resp = Ok(Response::CheckKIsNotSpent(true));
let ki_resp = Ok(BCResponse::KeyImagesSpent(false));
//----------------------------------------------------------------------- Assert expected response
// Assert read requests lead to the expected responses.
for (request, expected_response) in [
(ReadRequest::BlockExtendedHeader(0), extended_block_header_0),
(ReadRequest::BlockExtendedHeader(1), extended_block_header_1),
(ReadRequest::BlockHash(0), block_hash_0),
(ReadRequest::BlockHash(1), block_hash_1),
(ReadRequest::BlockExtendedHeaderInRange(0..1), range_0_1),
(ReadRequest::BlockExtendedHeaderInRange(0..2), range_0_2),
(ReadRequest::ChainHeight, chain_height),
(ReadRequest::GeneratedCoins, cumulative_generated_coins),
(ReadRequest::NumberOutputsWithAmount(num_req), num_resp),
(ReadRequest::CheckKIsNotSpent(ki_req), ki_resp),
(
BCReadRequest::BlockExtendedHeader(0),
extended_block_header_0,
),
(
BCReadRequest::BlockExtendedHeader(1),
extended_block_header_1,
),
(BCReadRequest::BlockHash(0), block_hash_0),
(BCReadRequest::BlockHash(1), block_hash_1),
(BCReadRequest::BlockExtendedHeaderInRange(0..1), range_0_1),
(BCReadRequest::BlockExtendedHeaderInRange(0..2), range_0_2),
(BCReadRequest::ChainHeight, chain_height),
(BCReadRequest::GeneratedCoins, cumulative_generated_coins),
(BCReadRequest::NumberOutputsWithAmount(num_req), num_resp),
(BCReadRequest::KeyImagesSpent(ki_req), ki_resp),
] {
let response = reader.clone().oneshot(request).await;
println!("response: {response:#?}, expected_response: {expected_response:#?}");
@ -196,10 +202,10 @@ async fn test_template(
// Assert each key image we inserted comes back as "spent".
for key_image in tables.key_images_iter().keys().unwrap() {
let key_image = key_image.unwrap();
let request = ReadRequest::CheckKIsNotSpent(HashSet::from([key_image]));
let request = BCReadRequest::KeyImagesSpent(HashSet::from([key_image]));
let response = reader.clone().oneshot(request).await;
println!("response: {response:#?}, key_image: {key_image:#?}");
assert_eq!(response.unwrap(), Response::CheckKIsNotSpent(false));
assert_eq!(response.unwrap(), BCResponse::KeyImagesSpent(true));
}
//----------------------------------------------------------------------- Output checks
@ -260,10 +266,10 @@ async fn test_template(
.collect::<Vec<OutputOnChain>>();
// Send a request for every output we inserted before.
let request = ReadRequest::Outputs(map.clone());
let request = BCReadRequest::Outputs(map.clone());
let response = reader.clone().oneshot(request).await;
println!("Response::Outputs response: {response:#?}");
let Ok(Response::Outputs(response)) = response else {
let Ok(BCResponse::Outputs(response)) = response else {
panic!("{response:#?}")
};

View file

@ -6,15 +6,15 @@
use futures::channel::oneshot::Sender;
use cuprate_helper::asynch::InfallibleOneshotReceiver;
use cuprate_types::service::Response;
use cuprate_types::blockchain::BCResponse;
use crate::error::RuntimeError;
//---------------------------------------------------------------------------------------------------- Types
/// The actual type of the response.
///
/// Either our [`Response`], or a database error occurred.
pub(super) type ResponseResult = Result<Response, RuntimeError>;
/// Either our [`BCResponse`], or a database error occurred.
pub(super) type ResponseResult = Result<BCResponse, RuntimeError>;
/// The `Receiver` channel that receives the read response.
///

View file

@ -10,7 +10,7 @@ use futures::channel::oneshot;
use cuprate_helper::asynch::InfallibleOneshotReceiver;
use cuprate_types::{
service::{Response, WriteRequest},
blockchain::{BCResponse, BCWriteRequest},
VerifiedBlockInformation,
};
@ -33,15 +33,15 @@ const WRITER_THREAD_NAME: &str = concat!(module_path!(), "::DatabaseWriter");
/// it is not [`Clone`]able as there is only ever 1 place within Cuprate
/// that writes.
///
/// Calling [`tower::Service::call`] with a [`DatabaseWriteHandle`] & [`WriteRequest`]
/// Calling [`tower::Service::call`] with a [`DatabaseWriteHandle`] & [`BCWriteRequest`]
/// will return an `async`hronous channel that can be `.await`ed upon
/// to receive the corresponding [`Response`].
/// to receive the corresponding [`BCResponse`].
#[derive(Debug)]
pub struct DatabaseWriteHandle {
/// Sender channel to the database write thread-pool.
///
/// We provide the response channel for the thread-pool.
pub(super) sender: crossbeam::channel::Sender<(WriteRequest, ResponseSender)>,
pub(super) sender: crossbeam::channel::Sender<(BCWriteRequest, ResponseSender)>,
}
impl DatabaseWriteHandle {
@ -65,8 +65,8 @@ impl DatabaseWriteHandle {
}
}
impl tower::Service<WriteRequest> for DatabaseWriteHandle {
type Response = Response;
impl tower::Service<BCWriteRequest> for DatabaseWriteHandle {
type Response = BCResponse;
type Error = RuntimeError;
type Future = ResponseReceiver;
@ -76,7 +76,7 @@ impl tower::Service<WriteRequest> for DatabaseWriteHandle {
}
#[inline]
fn call(&mut self, request: WriteRequest) -> Self::Future {
fn call(&mut self, request: BCWriteRequest) -> Self::Future {
// Response channel we `.await` on.
let (response_sender, receiver) = oneshot::channel();
@ -95,7 +95,7 @@ pub(super) struct DatabaseWriter {
/// Any caller can send some requests to this channel.
/// They send them alongside another `Response` channel,
/// which we will eventually send to.
receiver: crossbeam::channel::Receiver<(WriteRequest, ResponseSender)>,
receiver: crossbeam::channel::Receiver<(BCWriteRequest, ResponseSender)>,
/// Access to the database.
env: Arc<ConcreteEnv>,
@ -153,7 +153,7 @@ impl DatabaseWriter {
// FIXME: will there be more than 1 write request?
// this won't have to be an enum.
let response = match &request {
WriteRequest::WriteBlock(block) => write_block(&self.env, block),
BCWriteRequest::WriteBlock(block) => write_block(&self.env, block),
};
// If the database needs to resize, do so.
@ -218,7 +218,7 @@ impl DatabaseWriter {
// Each function will return the [`Response`] that we
// should send back to the caller in [`map_request()`].
/// [`WriteRequest::WriteBlock`].
/// [`BCWriteRequest::WriteBlock`].
#[inline]
fn write_block(env: &ConcreteEnv, block: &VerifiedBlockInformation) -> ResponseResult {
let env_inner = env.env_inner();
@ -232,7 +232,7 @@ fn write_block(env: &ConcreteEnv, block: &VerifiedBlockInformation) -> ResponseR
match result {
Ok(()) => {
TxRw::commit(tx_rw)?;
Ok(Response::WriteBlockOk)
Ok(BCResponse::WriteBlockOk)
}
Err(e) => {
// INVARIANT: ensure database atomicity by aborting
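
A rough caller-side sketch of the write handle documented in this file, for orientation only (not part of this commit): `call` is handed a `BCWriteRequest` and the returned channel is `.await`ed for the matching `BCResponse`. The helper function below is hypothetical and assumes the items already imported by the writer module.

```rust
use tower::{Service, ServiceExt};

// Hypothetical helper, not in the diff: submit one verified block and wait
// for the writer thread to acknowledge it.
async fn submit_block(
    handle: &mut DatabaseWriteHandle,
    block: VerifiedBlockInformation,
) -> Result<(), RuntimeError> {
    // `ready()` waits until the writer can accept a request; `call()` hands
    // the request to the write thread and returns the oneshot receiver that
    // is `.await`ed for the response.
    match handle
        .ready()
        .await?
        .call(BCWriteRequest::WriteBlock(block))
        .await?
    {
        BCResponse::WriteBlockOk => Ok(()),
        // Any other variant would indicate a bug in the writer.
        other => unreachable!("unexpected response: {other:?}"),
    }
}
```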


@ -6,13 +6,13 @@
)]
//---------------------------------------------------------------------------------------------------- Import
use std::sync::{Arc, OnceLock};
use std::sync::OnceLock;
use hex_literal::hex;
use monero_serai::{block::Block, transaction::Transaction};
use cuprate_helper::map::combine_low_high_bits_to_u128;
use cuprate_types::{TransactionVerificationData, VerifiedBlockInformation};
use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
use crate::data::constants::{
BLOCK_43BD1F, BLOCK_5ECB7E, BLOCK_F91043, TX_2180A8, TX_3BC7FF, TX_84D48D, TX_9E3F73,
@ -20,14 +20,14 @@ use crate::data::constants::{
};
//---------------------------------------------------------------------------------------------------- Conversion
/// Converts `monero_serai`'s `Block` into a
/// `cuprate_types::VerifiedBlockInformation` (superset).
/// Converts [`monero_serai::Block`] into a
/// [`VerifiedBlockInformation`] (superset).
///
/// To prevent pulling other code in order to actually calculate things
/// (e.g. `pow_hash`), some information must be provided statically;
/// this struct represents the data that must be provided.
///
/// Consider using `cuprate_test_utils::rpc` to get this data easily.
/// Consider using [`cuprate_test_utils::rpc`] to get this data easily.
struct VerifiedBlockMap {
block_blob: &'static [u8],
pow_hash: [u8; 32],
@ -43,7 +43,7 @@ struct VerifiedBlockMap {
}
impl VerifiedBlockMap {
/// Turn the various static data bits in `self` into a `VerifiedBlockInformation`.
/// Turn the various static data bits in `self` into a [`VerifiedBlockInformation`].
///
/// Transactions are verified to at least match the block's,
/// although the correctness of data (whether this block actually existed or not)
@ -64,11 +64,7 @@ impl VerifiedBlockMap {
let block_blob = block_blob.to_vec();
let block = Block::read(&mut block_blob.as_slice()).unwrap();
let txs: Vec<Arc<TransactionVerificationData>> = txs
.iter()
.map(to_tx_verification_data)
.map(Arc::new)
.collect();
let txs = txs.iter().map(to_tx_verification_data).collect::<Vec<_>>();
assert_eq!(
txs.len(),
@ -101,11 +97,11 @@ impl VerifiedBlockMap {
}
}
// Same as [`VerifiedBlockMap`] but for [`TransactionVerificationData`].
fn to_tx_verification_data(tx_blob: impl AsRef<[u8]>) -> TransactionVerificationData {
// Same as [`VerifiedBlockMap`] but for [`VerifiedTransactionInformation`].
fn to_tx_verification_data(tx_blob: impl AsRef<[u8]>) -> VerifiedTransactionInformation {
let tx_blob = tx_blob.as_ref().to_vec();
let tx = Transaction::read(&mut tx_blob.as_slice()).unwrap();
TransactionVerificationData {
VerifiedTransactionInformation {
tx_weight: tx.weight(),
fee: tx.rct_signatures.base.fee,
tx_hash: tx.hash(),
@ -239,7 +235,7 @@ verified_block_information_fn! {
//---------------------------------------------------------------------------------------------------- Transactions
/// Generate a transaction accessor function with this signature:
/// `fn() -> &'static TransactionVerificationData`
/// `fn() -> &'static VerifiedTransactionInformation`
///
/// Same as [`verified_block_information_fn`] but for transactions.
macro_rules! transaction_verification_data_fn {
@ -249,7 +245,7 @@ macro_rules! transaction_verification_data_fn {
weight: $weight:literal, // Transaction weight
hash: $hash:literal, // Transaction hash as a string literal
) => {
#[doc = concat!("Return [`", stringify!($tx_blob), "`] as a [`TransactionVerificationData`].")]
#[doc = concat!("Return [`", stringify!($tx_blob), "`] as a [`VerifiedTransactionInformation`].")]
///
/// ```rust
#[doc = "# use cuprate_test_utils::data::*;"]
@ -261,8 +257,8 @@ macro_rules! transaction_verification_data_fn {
#[doc = concat!("assert_eq!(tx.tx_hash, hex!(\"", $hash, "\"));")]
#[doc = "assert_eq!(tx.fee, tx.tx.rct_signatures.base.fee);"]
/// ```
pub fn $fn_name() -> &'static TransactionVerificationData {
static TX: OnceLock<TransactionVerificationData> = OnceLock::new();
pub fn $fn_name() -> &'static VerifiedTransactionInformation {
static TX: OnceLock<VerifiedTransactionInformation> = OnceLock::new();
TX.get_or_init(|| to_tx_verification_data($tx_blob))
}
};
@ -319,8 +315,8 @@ mod tests {
let mut txs = [block_v1_tx2(), block_v9_tx3(), block_v16_tx0()]
.into_iter()
.flat_map(|block| block.txs.iter().map(|arc| (**arc).clone()))
.collect::<Vec<TransactionVerificationData>>();
.flat_map(|block| block.txs.iter().cloned())
.collect::<Vec<VerifiedTransactionInformation>>();
txs.extend([
tx_v1_sig0().clone(),
@ -333,7 +329,7 @@ mod tests {
let tx_rpc = rpc
.get_transaction_verification_data(&[tx.tx_hash])
.await
.collect::<Vec<TransactionVerificationData>>()
.collect::<Vec<VerifiedTransactionInformation>>()
.pop()
.unwrap();
assert_eq!(tx, tx_rpc);


@ -19,10 +19,10 @@
//! The free functions provide access to typed data found in `cuprate_types`:
//! ```rust
//! # use cuprate_test_utils::data::*;
//! use cuprate_types::{VerifiedBlockInformation, TransactionVerificationData};
//! use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
//!
//! let block: VerifiedBlockInformation = block_v16_tx0().clone();
//! let tx: TransactionVerificationData = tx_v1_sig0().clone();
//! let tx: VerifiedTransactionInformation = tx_v1_sig0().clone();
//! ```
mod constants;


@ -1,8 +1,6 @@
//! HTTP RPC client.
//---------------------------------------------------------------------------------------------------- Use
use std::sync::Arc;
use serde::Deserialize;
use serde_json::json;
use tokio::task::spawn_blocking;
@ -12,7 +10,7 @@ use monero_serai::{
rpc::{HttpRpc, Rpc},
};
use cuprate_types::{TransactionVerificationData, VerifiedBlockInformation};
use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
use crate::rpc::constants::LOCALHOST_RPC_URL;
@ -110,10 +108,9 @@ impl HttpRpcClient {
.await
.unwrap();
let txs: Vec<Arc<TransactionVerificationData>> = self
let txs: Vec<VerifiedTransactionInformation> = self
.get_transaction_verification_data(&block.txs)
.await
.map(Arc::new)
.collect();
let block_header = result.block_header;
@ -152,7 +149,7 @@ impl HttpRpcClient {
}
}
/// Request data and map the response to a [`TransactionVerificationData`].
/// Request data and map the response to a [`VerifiedTransactionInformation`].
///
/// # Panics
/// This function will panic at any error point, e.g.,
@ -160,7 +157,7 @@ impl HttpRpcClient {
pub async fn get_transaction_verification_data<'a>(
&self,
tx_hashes: &'a [[u8; 32]],
) -> impl Iterator<Item = TransactionVerificationData> + 'a {
) -> impl Iterator<Item = VerifiedTransactionInformation> + 'a {
self.rpc
.get_transactions(tx_hashes)
.await
@ -170,7 +167,7 @@ impl HttpRpcClient {
.map(|(i, tx)| {
let tx_hash = tx.hash();
assert_eq!(tx_hash, tx_hashes[i]);
TransactionVerificationData {
VerifiedTransactionInformation {
tx_blob: tx.serialize(),
tx_weight: tx.weight(),
tx_hash,
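
A hypothetical test-side usage sketch of the RPC helper above (not part of this commit): fetch one transaction by hash and sanity-check it. It assumes a reachable `monerod` behind `HttpRpcClient` and uses only the method shown in this file.

```rust
// Hypothetical helper, not in the diff.
async fn fetch_one(
    client: &HttpRpcClient,
    tx_hash: [u8; 32],
) -> VerifiedTransactionInformation {
    let tx = client
        .get_transaction_verification_data(&[tx_hash])
        .await
        .next()
        .expect("RPC returned no transaction");
    // The helper already asserts hashes match; check again on the caller side.
    assert_eq!(tx.tx_hash, tx_hash);
    tx
}
```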


@ -9,14 +9,11 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/types"
keywords = ["cuprate", "types"]
[features]
default = ["service"]
service = []
default = ["blockchain"]
blockchain = []
[dependencies]
borsh = { workspace = true, optional = true }
cfg-if = { workspace = true }
curve25519-dalek = { workspace = true }
monero-serai = { workspace = true }
serde = { workspace = true, optional = true }
[dev-dependencies]


@ -1,10 +1,7 @@
//! Database [`ReadRequest`]s, [`WriteRequest`]s, and [`Response`]s.
//!
//! See [`cuprate_database`](https://github.com/Cuprate/cuprate/blob/00c3692eac6b2669e74cfd8c9b41c7e704c779ad/database/src/service/mod.rs#L1-L59)'s
//! `service` module for more usage/documentation.
//! Database [`BCReadRequest`]s, [`BCWriteRequest`]s, and [`BCResponse`]s.
//!
//! Tests that assert particular requests lead to particular
//! responses are also tested in `cuprate_database`.
//! responses are also tested in Cuprate's blockchain database crate.
//---------------------------------------------------------------------------------------------------- Import
use std::{
@ -20,18 +17,16 @@ use serde::{Deserialize, Serialize};
use crate::types::{ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation};
//---------------------------------------------------------------------------------------------------- ReadRequest
/// A read request to the database.
/// A read request to the blockchain database.
///
/// This pairs with [`Response`], where each variant here
/// matches in name with a `Response` variant. For example,
/// the proper response for a [`ReadRequest::BlockHash`]
/// would be a [`Response::BlockHash`].
/// This pairs with [`BCResponse`], where each variant here
/// matches in name with a [`BCResponse`] variant. For example,
/// the proper response for a [`BCReadRequest::BlockHash`]
/// would be a [`BCResponse::BlockHash`].
///
/// See `Response` for the expected responses per `Request`.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
pub enum ReadRequest {
pub enum BCReadRequest {
/// Request a block's extended header.
///
/// The input is the block's height.
@ -42,6 +37,11 @@ pub enum ReadRequest {
/// The input is the block's height.
BlockHash(u64),
/// Removes the block hashes that are not in the _main_ chain.
///
/// This should filter (remove) hashes in alt-blocks as well.
FilterUnknownHashes(HashSet<[u8; 32]>),
/// Request a range of block extended headers.
///
/// The input is a range of block heights.
@ -86,18 +86,17 @@ pub enum ReadRequest {
/// Check that all key images within a set are not spent.
///
/// Input is a set of key images.
CheckKIsNotSpent(HashSet<[u8; 32]>),
KeyImagesSpent(HashSet<[u8; 32]>),
}
//---------------------------------------------------------------------------------------------------- WriteRequest
/// A write request to the database.
/// A write request to the blockchain database.
///
/// There is currently only 1 write request to the database,
/// as such, the only valid [`Response`] to this request is
/// the proper response for a [`Response::WriteBlockOk`].
/// as such, the only valid [`BCResponse`] to this request is
/// the proper response for a [`BCResponse::WriteBlockOk`].
#[derive(Debug, Clone, PartialEq, Eq)]
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
pub enum WriteRequest {
pub enum BCWriteRequest {
/// Request that a block be written to the database.
///
/// Input is an already verified block.
@ -109,60 +108,64 @@ pub enum WriteRequest {
///
/// These are the data types returned when sending a `Request`.
///
/// This pairs with [`ReadRequest`] and [`WriteRequest`],
/// This pairs with [`BCReadRequest`] and [`BCWriteRequest`],
/// see those two for more info.
#[derive(Debug, Clone, PartialEq, Eq)]
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
pub enum Response {
pub enum BCResponse {
//------------------------------------------------------ Reads
/// Response to [`ReadRequest::BlockExtendedHeader`].
/// Response to [`BCReadRequest::BlockExtendedHeader`].
///
/// Inner value is the extended header of the requested block.
BlockExtendedHeader(ExtendedBlockHeader),
/// Response to [`ReadRequest::BlockHash`].
/// Response to [`BCReadRequest::BlockHash`].
///
/// Inner value is the hash of the requested block.
BlockHash([u8; 32]),
/// Response to [`ReadRequest::BlockExtendedHeaderInRange`].
/// Response to [`BCReadRequest::FilterUnknownHashes`].
///
/// Inner value is the list of hashes that were in the main chain.
FilterUnknownHashes(HashSet<[u8; 32]>),
/// Response to [`BCReadRequest::BlockExtendedHeaderInRange`].
///
/// Inner value is the list of extended header(s) of the requested block(s).
BlockExtendedHeaderInRange(Vec<ExtendedBlockHeader>),
/// Response to [`ReadRequest::ChainHeight`].
/// Response to [`BCReadRequest::ChainHeight`].
///
/// Inner value is the chain height, and the top block's hash.
ChainHeight(u64, [u8; 32]),
/// Response to [`ReadRequest::GeneratedCoins`].
/// Response to [`BCReadRequest::GeneratedCoins`].
///
/// Inner value is the total amount of generated coins so far, in atomic units.
GeneratedCoins(u64),
/// Response to [`ReadRequest::Outputs`].
/// Response to [`BCReadRequest::Outputs`].
///
/// Inner value is all the outputs requested,
/// associated with their amount and amount index.
Outputs(HashMap<u64, HashMap<u64, OutputOnChain>>),
/// Response to [`ReadRequest::NumberOutputsWithAmount`].
/// Response to [`BCReadRequest::NumberOutputsWithAmount`].
///
/// Inner value is a `HashMap` of all the outputs requested where:
/// - Key = output amount
/// - Value = count of outputs with the same amount
NumberOutputsWithAmount(HashMap<u64, usize>),
/// Response to [`ReadRequest::CheckKIsNotSpent`].
/// Response to [`BCReadRequest::KeyImagesSpent`].
///
/// The inner value is `true` if _any_ of the key images
/// were spent (exited in the database already).
/// were spent (existed in the database already).
///
/// The inner value is `false` if _none_ of the key images were spent.
CheckKIsNotSpent(bool),
KeyImagesSpent(bool),
//------------------------------------------------------ Writes
/// Response to [`WriteRequest::WriteBlock`].
/// Response to [`BCWriteRequest::WriteBlock`].
///
/// This response indicates that the requested block has
/// successfully been written to the database without error.
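
To illustrate the request/response pairing documented above, a minimal sketch (not part of this commit) of a caller checking key images against any `tower::Service` that speaks these types; the helper name and service bound are assumptions.

```rust
use std::collections::HashSet;
use tower::{Service, ServiceExt};

// Hypothetical helper, not in the diff: returns `true` if *any* of the given
// key images is already spent. Per the docs above, `BCReadRequest::KeyImagesSpent`
// must be answered with `BCResponse::KeyImagesSpent`.
async fn any_key_image_spent<S>(
    mut reader: S,
    key_images: HashSet<[u8; 32]>,
) -> Result<bool, S::Error>
where
    S: Service<BCReadRequest, Response = BCResponse>,
{
    match reader
        .ready()
        .await?
        .call(BCReadRequest::KeyImagesSpent(key_images))
        .await?
    {
        BCResponse::KeyImagesSpent(spent) => Ok(spent),
        other => unreachable!("mismatched response variant: {other:?}"),
    }
}
```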


@ -3,8 +3,8 @@
//! This crate is a kitchen-sink for data types that are shared across `Cuprate`.
//!
//! # Features flags
//! The `service` module, containing `cuprate_database` request/response
//! types, must be enabled with the `service` feature (on by default).
//! The [`blockchain`] module, containing the blockchain database request/response
//! types, must be enabled with the `blockchain` feature (on by default).
//---------------------------------------------------------------------------------------------------- Lints
// Forbid lints.
@ -88,14 +88,11 @@
mod types;
pub use types::{
ExtendedBlockHeader, OutputOnChain, TransactionVerificationData, VerifiedBlockInformation,
ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation, VerifiedTransactionInformation,
};
//---------------------------------------------------------------------------------------------------- Feature-gated
cfg_if::cfg_if! {
if #[cfg(feature = "service")] {
pub mod service;
}
}
#[cfg(feature = "blockchain")]
pub mod blockchain;
//---------------------------------------------------------------------------------------------------- Private


@ -1,28 +1,17 @@
//! Various shared data types in Cuprate.
//---------------------------------------------------------------------------------------------------- Import
use std::sync::Arc;
use curve25519_dalek::edwards::EdwardsPoint;
use monero_serai::{
block::Block,
transaction::{Timelock, Transaction},
};
#[cfg(feature = "borsh")]
use borsh::{BorshDeserialize, BorshSerialize};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
//---------------------------------------------------------------------------------------------------- ExtendedBlockHeader
/// Extended header data of a block.
///
/// This contains various metadata of a block, but not the block blob itself.
///
/// For more definitions, see also: <https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_last_block_header>.
#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
pub struct ExtendedBlockHeader {
/// The block's major version.
///
@ -46,15 +35,12 @@ pub struct ExtendedBlockHeader {
pub long_term_weight: usize,
}
//---------------------------------------------------------------------------------------------------- TransactionVerificationData
/// Data needed to verify a transaction.
//---------------------------------------------------------------------------------------------------- VerifiedTransactionInformation
/// Verified information of a transaction.
///
/// This represents data that allows verification of a transaction,
/// although it doesn't mean it _has_ been verified.
/// This represents a transaction in a valid block.
#[derive(Clone, Debug, PartialEq, Eq)]
// #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] // FIXME: monero_serai
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
pub struct TransactionVerificationData {
pub struct VerifiedTransactionInformation {
/// The transaction itself.
pub tx: Transaction,
/// The serialized byte form of [`Self::tx`].
@ -77,11 +63,7 @@ pub struct TransactionVerificationData {
/// Verified information of a block.
///
/// This represents a block that has already been verified to be correct.
///
/// For more definitions, see also: <https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_block>.
#[derive(Clone, Debug, PartialEq, Eq)]
// #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] // FIXME: monero_serai
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
pub struct VerifiedBlockInformation {
/// The block itself.
pub block: Block,
@ -90,7 +72,7 @@ pub struct VerifiedBlockInformation {
/// [`Block::serialize`].
pub block_blob: Vec<u8>,
/// All the transactions in the block, excluding the [`Block::miner_tx`].
pub txs: Vec<Arc<TransactionVerificationData>>,
pub txs: Vec<VerifiedTransactionInformation>,
/// The block's hash.
///
/// [`Block::hash`].
@ -111,9 +93,7 @@ pub struct VerifiedBlockInformation {
//---------------------------------------------------------------------------------------------------- OutputOnChain
/// An already existing transaction output.
#[derive(Clone, Debug, PartialEq, Eq)]
// #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] // FIXME: monero_serai
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct OutputOnChain {
/// The block height this output belongs to.
pub height: u64,