diff --git a/Cargo.lock b/Cargo.lock
index d50402d0..fd0e6d47 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -499,6 +499,7 @@ version = "0.1.0"
 dependencies = [
  "chrono",
  "crossbeam",
+ "curve25519-dalek",
  "dirs",
  "futures",
  "libc",
diff --git a/consensus/rules/src/decomposed_amount.rs b/consensus/rules/src/decomposed_amount.rs
index 59348149..64ea3146 100644
--- a/consensus/rules/src/decomposed_amount.rs
+++ b/consensus/rules/src/decomposed_amount.rs
@@ -2,35 +2,29 @@ use std::sync::OnceLock;
 
 /// Decomposed amount table.
 ///
-static DECOMPOSED_AMOUNTS: OnceLock<[u64; 172]> = OnceLock::new();
-
 #[rustfmt::skip]
-pub fn decomposed_amounts() -> &'static [u64; 172] {
-    DECOMPOSED_AMOUNTS.get_or_init(|| {
-        [
-            1, 2, 3, 4, 5, 6, 7, 8, 9,
-            10, 20, 30, 40, 50, 60, 70, 80, 90,
-            100, 200, 300, 400, 500, 600, 700, 800, 900,
-            1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000,
-            10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000,
-            100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000, 900000,
-            1000000, 2000000, 3000000, 4000000, 5000000, 6000000, 7000000, 8000000, 9000000,
-            10000000, 20000000, 30000000, 40000000, 50000000, 60000000, 70000000, 80000000, 90000000,
-            100000000, 200000000, 300000000, 400000000, 500000000, 600000000, 700000000, 800000000, 900000000,
-            1000000000, 2000000000, 3000000000, 4000000000, 5000000000, 6000000000, 7000000000, 8000000000, 9000000000,
-            10000000000, 20000000000, 30000000000, 40000000000, 50000000000, 60000000000, 70000000000, 80000000000, 90000000000,
-            100000000000, 200000000000, 300000000000, 400000000000, 500000000000, 600000000000, 700000000000, 800000000000, 900000000000,
-            1000000000000, 2000000000000, 3000000000000, 4000000000000, 5000000000000, 6000000000000, 7000000000000, 8000000000000, 9000000000000,
-            10000000000000, 20000000000000, 30000000000000, 40000000000000, 50000000000000, 60000000000000, 70000000000000, 80000000000000, 90000000000000,
-            100000000000000, 200000000000000, 300000000000000, 400000000000000, 500000000000000, 600000000000000, 700000000000000, 800000000000000, 900000000000000,
-            1000000000000000, 2000000000000000, 3000000000000000, 4000000000000000, 5000000000000000, 6000000000000000, 7000000000000000, 8000000000000000, 9000000000000000,
-            10000000000000000, 20000000000000000, 30000000000000000, 40000000000000000, 50000000000000000, 60000000000000000, 70000000000000000, 80000000000000000, 90000000000000000,
-            100000000000000000, 200000000000000000, 300000000000000000, 400000000000000000, 500000000000000000, 600000000000000000, 700000000000000000, 800000000000000000, 900000000000000000,
-            1000000000000000000, 2000000000000000000, 3000000000000000000, 4000000000000000000, 5000000000000000000, 6000000000000000000, 7000000000000000000, 8000000000000000000, 9000000000000000000,
-            10000000000000000000
-        ]
-    })
-}
+const DECOMPOSED_AMOUNT: [u64; 172] = [
+    1, 2, 3, 4, 5, 6, 7, 8, 9,
+    10, 20, 30, 40, 50, 60, 70, 80, 90,
+    100, 200, 300, 400, 500, 600, 700, 800, 900,
+    1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000,
+    10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000,
+    100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000, 900000,
+    1000000, 2000000, 3000000, 4000000, 5000000, 6000000, 7000000, 8000000, 9000000,
+    10000000, 20000000, 30000000, 40000000, 50000000, 60000000, 70000000, 80000000, 90000000,
+    100000000, 200000000, 300000000, 400000000, 500000000, 600000000, 700000000, 800000000, 900000000,
+    1000000000, 2000000000, 3000000000, 4000000000, 5000000000, 6000000000, 7000000000, 8000000000, 9000000000,
+    10000000000, 20000000000, 30000000000, 40000000000, 50000000000, 60000000000, 70000000000, 80000000000, 90000000000,
+    100000000000, 200000000000, 300000000000, 400000000000, 500000000000, 600000000000, 700000000000, 800000000000, 900000000000,
+    1000000000000, 2000000000000, 3000000000000, 4000000000000, 5000000000000, 6000000000000, 7000000000000, 8000000000000, 9000000000000,
+    10000000000000, 20000000000000, 30000000000000, 40000000000000, 50000000000000, 60000000000000, 70000000000000, 80000000000000, 90000000000000,
+    100000000000000, 200000000000000, 300000000000000, 400000000000000, 500000000000000, 600000000000000, 700000000000000, 800000000000000, 900000000000000,
+    1000000000000000, 2000000000000000, 3000000000000000, 4000000000000000, 5000000000000000, 6000000000000000, 7000000000000000, 8000000000000000, 9000000000000000,
+    10000000000000000, 20000000000000000, 30000000000000000, 40000000000000000, 50000000000000000, 60000000000000000, 70000000000000000, 80000000000000000, 90000000000000000,
+    100000000000000000, 200000000000000000, 300000000000000000, 400000000000000000, 500000000000000000, 600000000000000000, 700000000000000000, 800000000000000000, 900000000000000000,
+    1000000000000000000, 2000000000000000000, 3000000000000000000, 4000000000000000000, 5000000000000000000, 6000000000000000000, 7000000000000000000, 8000000000000000000, 9000000000000000000,
+    10000000000000000000
+];
 
 /// Checks that an output amount is decomposed.
 ///
@@ -40,7 +34,7 @@ pub fn decomposed_amounts() -> &'static [u64; 172] {
 /// ref:
 #[inline]
 pub fn is_decomposed_amount(amount: &u64) -> bool {
-    decomposed_amounts().binary_search(amount).is_ok()
+    DECOMPOSED_AMOUNT.binary_search(amount).is_ok()
 }
 
 #[cfg(test)]
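Since the table is sorted ascending, membership is a binary search; `is_decomposed_amount` accepts exactly the 172 canonical values. A minimal sketch of the behaviour (editor's illustration, not part of the patch; the `cuprate_consensus_rules::decomposed_amount` module path is assumed from the file location):

    use cuprate_consensus_rules::decomposed_amount::is_decomposed_amount;

    fn main() {
        // 20000 is one of the 172 canonical decomposed amounts...
        assert!(is_decomposed_amount(&20_000));
        // ...while 20001 is not, so an output with that amount fails the rule.
        assert!(!is_decomposed_amount(&20_001));
    }
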
diff --git a/consensus/src/block.rs b/consensus/src/block.rs
index 99b21c99..f2bfc0dd 100644
--- a/consensus/src/block.rs
+++ b/consensus/src/block.rs
@@ -9,22 +9,71 @@ use std::{
 use cuprate_helper::asynch::rayon_spawn_async;
 use futures::FutureExt;
-use monero_serai::{block::Block, transaction::Input};
+use monero_serai::{
+    block::Block,
+    transaction::{Input, Transaction},
+};
+use rayon::prelude::*;
 use tower::{Service, ServiceExt};
 
+use cuprate_consensus_rules::blocks::randomx_seed_height;
 use cuprate_consensus_rules::{
-    blocks::{calculate_pow_hash, check_block, check_block_pow, BlockError, RandomX},
+    blocks::{
+        calculate_pow_hash, check_block, check_block_pow, is_randomx_seed_height, BlockError,
+        RandomX,
+    },
     miner_tx::MinerTxError,
     ConsensusError, HardFork,
 };
 use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
 
+use crate::context::rx_vms::RandomXVM;
+use crate::context::RawBlockChainContext;
 use crate::{
     context::{BlockChainContextRequest, BlockChainContextResponse},
     transactions::{TransactionVerificationData, VerifyTxRequest, VerifyTxResponse},
     Database, ExtendedConsensusError,
 };
 
+#[derive(Debug)]
+pub struct PrePreparedBlockExPOW {
+    pub block: Block,
+    pub block_blob: Vec<u8>,
+
+    pub hf_vote: HardFork,
+    pub hf_version: HardFork,
+
+    pub block_hash: [u8; 32],
+    pub height: u64,
+
+    pub miner_tx_weight: usize,
+}
+
+impl PrePreparedBlockExPOW {
+    pub fn new(block: Block) -> Result<PrePreparedBlockExPOW, ConsensusError> {
+        let (hf_version, hf_vote) =
+            HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?;
+
+        let Some(Input::Gen(height)) = block.miner_tx.prefix.inputs.first() else {
+            Err(ConsensusError::Block(BlockError::MinerTxError(
+                MinerTxError::InputNotOfTypeGen,
+            )))?
+        };
+
+        Ok(PrePreparedBlockExPOW {
+            block_blob: block.serialize(),
+            hf_vote,
+            hf_version,
+
+            block_hash: block.hash(),
+            height: *height,
+
+            miner_tx_weight: block.miner_tx.weight(),
+            block,
+        })
+    }
+}
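`PrePreparedBlockExPOW` holds everything derivable from a block without touching proof-of-work: the serialized blob, block hash, hard-fork version/vote, and miner-tx weight. That split is what lets a whole batch be hashed on the rayon pool before any RandomX VM (and therefore any difficulty lookup) is needed. A rough sketch of the intended two-stage flow, assuming a `randomx_vm` for the block's seed height (`new_prepped` is the internal helper added just below):

    // Stage 1: cheap, PoW-free preparation; safe to run in parallel per block.
    let ex_pow = PrePreparedBlockExPOW::new(block)?;

    // Stage 2: PoW hashing against the RandomX VM for this height's seed.
    let prepped = PrePreparedBlock::new_prepped(ex_pow, Some(&randomx_vm))?;
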
+
 /// A pre-prepared block with all data needed to verify it.
 #[derive(Debug)]
 pub struct PrePreparedBlock {
@@ -82,6 +131,34 @@ impl PrePreparedBlock {
             block,
         })
     }
+
+    fn new_prepped<R: RandomX>(
+        block: PrePreparedBlockExPOW,
+        randomx_vm: Option<&R>,
+    ) -> Result<PrePreparedBlock, ConsensusError> {
+        let Some(Input::Gen(height)) = block.block.miner_tx.prefix.inputs.first() else {
+            Err(ConsensusError::Block(BlockError::MinerTxError(
+                MinerTxError::InputNotOfTypeGen,
+            )))?
+        };
+
+        Ok(PrePreparedBlock {
+            block_blob: block.block_blob,
+            hf_vote: block.hf_vote,
+            hf_version: block.hf_version,
+
+            block_hash: block.block_hash,
+            pow_hash: calculate_pow_hash(
+                randomx_vm,
+                &block.block.serialize_hashable(),
+                *height,
+                &block.hf_version,
+            )?,
+
+            miner_tx_weight: block.block.miner_tx.weight(),
+            block: block.block,
+        })
+    }
 }
 
 /// A request to verify a block.
@@ -91,12 +168,20 @@ pub enum VerifyBlockRequest {
         block: Block,
         prepared_txs: HashMap<[u8; 32], TransactionVerificationData>,
     },
+    MainChainPrepped {
+        block: PrePreparedBlock,
+        txs: Vec<Arc<TransactionVerificationData>>,
+    },
+    MainChainBatchPrepareBlocks {
+        blocks: Vec<(Block, Vec<Transaction>)>,
+    },
 }
 
 /// A response from a verify block request.
 pub enum VerifyBlockResponse {
     /// This block is valid.
     MainChain(VerifiedBlockInformation),
+    MainChainBatchPrepped(Vec<(PrePreparedBlock, Vec<Arc<TransactionVerificationData>>)>),
 }
 
 /// The block verifier service.
@@ -178,18 +263,162 @@ where
             } => {
                 verify_main_chain_block(block, prepared_txs, context_svc, tx_verifier_svc).await
             }
+            VerifyBlockRequest::MainChainBatchPrepareBlocks { blocks } => {
+                batch_prepare_main_chain_block(blocks, context_svc).await
+            }
+            VerifyBlockRequest::MainChainPrepped { block, txs } => {
+                verify_prepped_main_chain_block(block, txs, context_svc, tx_verifier_svc, None)
+                    .await
+            }
         }
         .boxed()
     }
 }
 
-/// Verifies a prepared block.
-async fn verify_main_chain_block<C, TxV>(
-    block: Block,
-    mut txs: HashMap<[u8; 32], TransactionVerificationData>,
+async fn batch_prepare_main_chain_block<C>(
+    blocks: Vec<(Block, Vec<Transaction>)>,
+    mut context_svc: C,
+) -> Result<VerifyBlockResponse, ExtendedConsensusError>
+where
+    C: Service<
+            BlockChainContextRequest,
+            Response = BlockChainContextResponse,
+            Error = tower::BoxError,
+        > + Send
+        + 'static,
+    C::Future: Send + 'static,
+{
+    let (blocks, txs): (Vec<_>, Vec<_>) = blocks.into_iter().unzip();
+
+    tracing::debug!("Calculating block hashes.");
+    let blocks: Vec<PrePreparedBlockExPOW> = rayon_spawn_async(|| {
+        blocks
+            .into_iter()
+            .map(PrePreparedBlockExPOW::new)
+            .collect::<Result<Vec<_>, _>>()
+    })
+    .await?;
+
+    let mut timestamps_hfs = Vec::with_capacity(blocks.len());
+    let mut new_rx_vm = None;
+
+    for window in blocks.windows(2) {
+        if window[0].block_hash != window[1].block.header.previous
+            || window[0].height != window[1].height - 1
+        {
+            Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
+        }
+
+        if is_randomx_seed_height(window[0].height) {
+            new_rx_vm = Some((window[0].height, window[0].block_hash));
+        }
+
+        timestamps_hfs.push((window[0].block.header.timestamp, window[0].hf_version));
+    }
+
+    tracing::debug!("getting blockchain context");
+    let BlockChainContextResponse::Context(checked_context) = context_svc
+        .ready()
+        .await?
+        .call(BlockChainContextRequest::GetContext)
+        .await
+        .map_err(Into::<ExtendedConsensusError>::into)?
+    else {
+        panic!("Context service returned wrong response!");
+    };
+
+    let BlockChainContextResponse::BatchDifficulties(difficulties) = context_svc
+        .ready()
+        .await?
+        .call(BlockChainContextRequest::BatchGetDifficulties(
+            timestamps_hfs,
+        ))
+        .await
+        .map_err(Into::<ExtendedConsensusError>::into)?
+    else {
+        panic!("Context service returned wrong response!");
+    };
+
+    let context = checked_context.unchecked_blockchain_context().clone();
+
+    if context.chain_height != blocks[0].height {
+        Err(ConsensusError::Block(BlockError::MinerTxError(
+            MinerTxError::InputsHeightIncorrect,
+        )))?;
+    }
+
+    if context.top_hash != blocks[0].block.header.previous {
+        Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
+    }
+
+    let mut rx_vms = context.rx_vms;
+
+    if let Some((new_vm_height, new_vm_seed)) = new_rx_vm {
+        let new_vm = rayon_spawn_async(move || {
+            Arc::new(RandomXVM::new(&new_vm_seed).expect("RandomX VM gave an error on set up!"))
+        })
+        .await;
+
+        context_svc
+            .ready()
+            .await?
+            .call(BlockChainContextRequest::NewRXVM((
+                new_vm_seed,
+                new_vm.clone(),
+            )))
+            .await
+            .map_err(Into::<ExtendedConsensusError>::into)?;
+
+        rx_vms.insert(new_vm_height, new_vm);
+    }
+
+    let blocks = rayon_spawn_async(move || {
+        blocks
+            .into_par_iter()
+            .zip(difficulties)
+            .zip(txs)
+            .map(|((block, difficulty), txs)| {
+                let height = block.height;
+                let block = PrePreparedBlock::new_prepped(
+                    block,
+                    rx_vms.get(&randomx_seed_height(height)).map(AsRef::as_ref),
+                )?;
+
+                check_block_pow(&block.pow_hash, difficulty).map_err(ConsensusError::Block)?;
+
+                let mut txs = txs
+                    .into_par_iter()
+                    .map(|tx| {
+                        let tx = TransactionVerificationData::new(tx)?;
+                        Ok::<_, ConsensusError>((tx.tx_hash, tx))
+                    })
+                    .collect::<Result<HashMap<_, _>, _>>()?;
+
+                let mut ordered_txs = Vec::with_capacity(txs.len());
+
+                for tx_hash in &block.block.txs {
+                    let tx = txs
+                        .remove(tx_hash)
+                        .ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;
+                    ordered_txs.push(Arc::new(tx));
+                }
+
+                Ok((block, ordered_txs))
+            })
+            .collect::<Result<Vec<_>, ExtendedConsensusError>>()
+    })
+    .await?;
+
+    Ok(VerifyBlockResponse::MainChainBatchPrepped(blocks))
+}
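The `windows(2)` pass near the top of `batch_prepare_main_chain_block` is what lets the rest of the function assume the batch forms one contiguous chain segment: each block must commit to its predecessor's hash and sit exactly one height above it. The invariant, restated as a standalone predicate (editor's sketch over the fields shown above):

    fn batch_is_contiguous(blocks: &[PrePreparedBlockExPOW]) -> bool {
        blocks.windows(2).all(|w| {
            // The child must name the parent's hash and the next height.
            w[1].block.header.previous == w[0].block_hash && w[1].height == w[0].height + 1
        })
    }
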
+
+async fn verify_prepped_main_chain_block<C, TxV>(
+    prepped_block: PrePreparedBlock,
+    txs: Vec<Arc<TransactionVerificationData>>,
     context_svc: C,
     tx_verifier_svc: TxV,
+    cached_context: Option<RawBlockChainContext>,
 ) -> Result<VerifyBlockResponse, ExtendedConsensusError>
 where
     C: Service<
@@ -201,54 +430,43 @@ where
     C::Future: Send + 'static,
     TxV: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ExtendedConsensusError>,
 {
-    tracing::debug!("getting blockchain context");
+    let context = if let Some(context) = cached_context {
+        context
+    } else {
+        let BlockChainContextResponse::Context(checked_context) = context_svc
+            .oneshot(BlockChainContextRequest::GetContext)
+            .await
+            .map_err(Into::<ExtendedConsensusError>::into)?
+        else {
+            panic!("Context service returned wrong response!");
+        };
 
-    let BlockChainContextResponse::Context(checked_context) = context_svc
-        .oneshot(BlockChainContextRequest::GetContext)
-        .await
-        .map_err(Into::<ExtendedConsensusError>::into)?
-    else {
-        panic!("Context service returned wrong response!");
+        let context = checked_context.unchecked_blockchain_context().clone();
+
+        tracing::debug!("got blockchain context: {:?}", context);
+
+        context
     };
 
-    let context = checked_context.unchecked_blockchain_context().clone();
-    tracing::debug!("got blockchain context: {:?}", context);
-
-    // Set up the block and just pass it to [`verify_main_chain_block_prepared`]
-
-    let rx_vms = context.rx_vms.clone();
-
-    let height = context.chain_height;
-    let prepped_block = rayon_spawn_async(move || {
-        PrePreparedBlock::new(block, rx_vms.get(&height).map(AsRef::as_ref))
-    })
-    .await?;
-
     tracing::debug!("verifying block: {}", hex::encode(prepped_block.block_hash));
 
     check_block_pow(&prepped_block.pow_hash, context.next_difficulty)
         .map_err(ConsensusError::Block)?;
 
-    // Check that the txs included are what we need and that there are not any extra.
-
-    let mut ordered_txs = Vec::with_capacity(txs.len());
-
-    tracing::debug!("Checking we have correct transactions for block.");
+    if prepped_block.block.txs.len() != txs.len() {
+        return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
+    }
 
     if !prepped_block.block.txs.is_empty() {
-        for tx_hash in &prepped_block.block.txs {
-            let tx = txs
-                .remove(tx_hash)
-                .ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;
-            ordered_txs.push(Arc::new(tx));
+        for (expected_tx_hash, tx) in prepped_block.block.txs.iter().zip(txs.iter()) {
+            if expected_tx_hash != &tx.tx_hash {
+                return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
+            }
         }
-        drop(txs);
-
-        tracing::debug!("Verifying transactions for block.");
 
         tx_verifier_svc
             .oneshot(VerifyTxRequest::Prepped {
-                txs: ordered_txs.clone(),
+                txs: txs.clone(),
                 current_chain_height: context.chain_height,
                 top_hash: context.top_hash,
                 time_for_time_lock: context.current_adjusted_timestamp_for_time_lock(),
@@ -258,8 +476,8 @@ where
     }
 
     let block_weight =
-        prepped_block.miner_tx_weight + ordered_txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
-    let total_fees = ordered_txs.iter().map(|tx| tx.fee).sum::<u64>();
+        prepped_block.miner_tx_weight + txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
+    let total_fees = txs.iter().map(|tx| tx.fee).sum::<u64>();
 
     tracing::debug!("Verifying block header.");
     let (_, generated_coins) = check_block(
@@ -275,7 +493,7 @@ where
         block_hash: prepped_block.block_hash,
         block: prepped_block.block,
         block_blob: prepped_block.block_blob,
-        txs: ordered_txs
+        txs: txs
             .into_iter()
             .map(|tx| {
                 // Note: it would be possible for the transaction verification service to hold onto the tx after the call
@@ -301,3 +519,76 @@ where
         cumulative_difficulty: context.cumulative_difficulty + context.next_difficulty,
     }))
 }
+
+/// Verifies a prepared block.
+async fn verify_main_chain_block<C, TxV>(
+    block: Block,
+    mut txs: HashMap<[u8; 32], TransactionVerificationData>,
+    mut context_svc: C,
+    tx_verifier_svc: TxV,
+) -> Result<VerifyBlockResponse, ExtendedConsensusError>
+where
+    C: Service<
+            BlockChainContextRequest,
+            Response = BlockChainContextResponse,
+            Error = tower::BoxError,
+        > + Send
+        + 'static,
+    C::Future: Send + 'static,
+    TxV: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ExtendedConsensusError>,
+{
+    let BlockChainContextResponse::Context(checked_context) = context_svc
+        .ready()
+        .await?
+        .call(BlockChainContextRequest::GetContext)
+        .await?
+    else {
+        panic!("Context service returned wrong response!");
+    };
+
+    let context = checked_context.unchecked_blockchain_context().clone();
+    tracing::debug!("got blockchain context: {:?}", context);
+
+    // Set up the block and just pass it to [`verify_prepped_main_chain_block`]
+
+    let rx_vms = context.rx_vms.clone();
+
+    let height = context.chain_height;
+    let prepped_block = rayon_spawn_async(move || {
+        PrePreparedBlock::new(
+            block,
+            rx_vms.get(&randomx_seed_height(height)).map(AsRef::as_ref),
+        )
+    })
+    .await?;
+
+    tracing::debug!("verifying block: {}", hex::encode(prepped_block.block_hash));
+
+    check_block_pow(&prepped_block.pow_hash, context.next_difficulty)
+        .map_err(ConsensusError::Block)?;
+
+    // Check that the txs included are what we need and that there are not any extra.
+
+    let mut ordered_txs = Vec::with_capacity(txs.len());
+
+    tracing::debug!("Checking we have correct transactions for block.");
+
+    if !prepped_block.block.txs.is_empty() {
+        for tx_hash in &prepped_block.block.txs {
+            let tx = txs
+                .remove(tx_hash)
+                .ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;
+            ordered_txs.push(Arc::new(tx));
+        }
+        drop(txs);
+    }
+
+    verify_prepped_main_chain_block(
+        prepped_block,
+        ordered_txs,
+        context_svc,
+        tx_verifier_svc,
+        Some(context),
+    )
+    .await
+}
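Callers drive the two new request variants as a pipeline: one `MainChainBatchPrepareBlocks` call per downloaded batch (hashing, PoW, and tx preparation all happen there in parallel), then one `MainChainPrepped` call per block, in order. A condensed sketch of that flow (error handling elided; the same pattern appears in `test_init.rs` further down):

    let VerifyBlockResponse::MainChainBatchPrepped(prepped) = block_verifier
        .ready()
        .await?
        .call(VerifyBlockRequest::MainChainBatchPrepareBlocks { blocks })
        .await?
    else {
        unreachable!("unexpected response variant");
    };

    for (block, txs) in prepped {
        let response = block_verifier
            .ready()
            .await?
            .call(VerifyBlockRequest::MainChainPrepped { block, txs })
            .await?;
    }
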
diff --git a/consensus/src/transactions.rs b/consensus/src/transactions.rs
index 6be0ad08..0e9d5357 100644
--- a/consensus/src/transactions.rs
+++ b/consensus/src/transactions.rs
@@ -289,7 +289,7 @@ async fn verify_prepped_transactions(
 where
     D: Database + Clone + Sync + Send + 'static,
 {
-    tracing::debug!("Verifying transactions");
+    tracing::debug!("Verifying {} transactions", txs.len());
 
     tracing::trace!("Checking for duplicate key images");
 
@@ -462,7 +462,7 @@ async fn verify_transactions_decoy_info(
 where
     D: Database + Clone + Sync + Send + 'static,
 {
-    if hf == HardFork::V1 {
+    if hf == HardFork::V1 || txs.is_empty() {
         return Ok(());
     }
 
@@ -484,6 +484,10 @@ async fn verify_transactions(
 where
     D: Database + Clone + Sync + Send + 'static,
 {
+    if txs.is_empty() {
+        return Ok(());
+    }
+
     let txs_ring_member_info =
         batch_get_ring_member_info(txs.iter().map(|(tx, _)| tx), &hf, database).await?;
 
diff --git a/helper/Cargo.toml b/helper/Cargo.toml
index 59e4e71d..4cf3e4c5 100644
--- a/helper/Cargo.toml
+++ b/helper/Cargo.toml
@@ -15,6 +15,7 @@ std = []
 atomic = ["dep:crossbeam"]
 asynch = ["dep:futures", "dep:rayon"]
 constants = []
+crypto = ["dep:curve25519-dalek"]
 fs = ["dep:dirs"]
 num = []
 map = ["dep:monero-serai"]
@@ -22,6 +23,7 @@ time = ["dep:chrono", "std"]
 thread = ["std", "dep:target_os_lib"]
 
 [dependencies]
+curve25519-dalek = { workspace = true, optional = true }
 crossbeam = { workspace = true, optional = true }
 chrono = { workspace = true, optional = true, features = ["std", "clock"] }
 dirs = { workspace = true, optional = true }
diff --git a/helper/src/commitment.rs b/helper/src/commitment.rs
new file mode 100644
index 00000000..a9fdb71f
--- /dev/null
+++ b/helper/src/commitment.rs
@@ -0,0 +1,62 @@
+use curve25519_dalek::constants::ED25519_BASEPOINT_POINT;
+use std::{collections::HashMap, sync::OnceLock};
+
+use curve25519_dalek::edwards::{CompressedEdwardsY, VartimeEdwardsPrecomputation};
+use curve25519_dalek::traits::VartimePrecomputedMultiscalarMul;
+use curve25519_dalek::{EdwardsPoint, Scalar};
+use monero_serai::H;
+
+static H_PRECOMP: OnceLock<VartimeEdwardsPrecomputation> = OnceLock::new();
+
+static PRECOMPUTED_COMMITMENTS: OnceLock<HashMap<u64, EdwardsPoint>> = OnceLock::new();
+
+#[rustfmt::skip]
+const DECOMPOSED_AMOUNT: [u64; 172] = [
+    1, 2, 3, 4, 5, 6, 7, 8, 9,
+    10, 20, 30, 40, 50, 60, 70, 80, 90,
+    100, 200, 300, 400, 500, 600, 700, 800, 900,
+    1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000,
+    10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000,
+    100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000, 900000,
+    1000000, 2000000, 3000000, 4000000, 5000000, 6000000, 7000000, 8000000, 9000000,
+    10000000, 20000000, 30000000, 40000000, 50000000, 60000000, 70000000, 80000000, 90000000,
+    100000000, 200000000, 300000000, 400000000, 500000000, 600000000, 700000000, 800000000, 900000000,
+    1000000000, 2000000000, 3000000000, 4000000000, 5000000000, 6000000000, 7000000000, 8000000000, 9000000000,
+    10000000000, 20000000000, 30000000000, 40000000000, 50000000000, 60000000000, 70000000000, 80000000000, 90000000000,
+    100000000000, 200000000000, 300000000000, 400000000000, 500000000000, 600000000000, 700000000000, 800000000000, 900000000000,
+    1000000000000, 2000000000000, 3000000000000, 4000000000000, 5000000000000, 6000000000000, 7000000000000, 8000000000000, 9000000000000,
+    10000000000000, 20000000000000, 30000000000000, 40000000000000, 50000000000000, 60000000000000, 70000000000000, 80000000000000, 90000000000000,
+    100000000000000, 200000000000000, 300000000000000, 400000000000000, 500000000000000, 600000000000000, 700000000000000, 800000000000000, 900000000000000,
+    1000000000000000, 2000000000000000, 3000000000000000, 4000000000000000, 5000000000000000, 6000000000000000, 7000000000000000, 8000000000000000, 9000000000000000,
+    10000000000000000, 20000000000000000, 30000000000000000, 40000000000000000, 50000000000000000, 60000000000000000, 70000000000000000, 80000000000000000, 90000000000000000,
+    100000000000000000, 200000000000000000, 300000000000000000, 400000000000000000, 500000000000000000, 600000000000000000, 700000000000000000, 800000000000000000, 900000000000000000,
+    1000000000000000000, 2000000000000000000, 3000000000000000000, 4000000000000000000, 5000000000000000000, 6000000000000000000, 7000000000000000000, 8000000000000000000, 9000000000000000000,
+    10000000000000000000
+];
+
+fn h_precomp() -> &'static VartimeEdwardsPrecomputation {
+    H_PRECOMP.get_or_init(|| VartimeEdwardsPrecomputation::new([H(), ED25519_BASEPOINT_POINT]))
+}
+
+fn precomputed_commitments() -> &'static HashMap<u64, EdwardsPoint> {
+    PRECOMPUTED_COMMITMENTS.get_or_init(|| {
+        DECOMPOSED_AMOUNT
+            .iter()
+            .map(|&amount| {
+                (
+                    amount,
+                    (ED25519_BASEPOINT_POINT + H() * Scalar::from(amount)),
+                )
+            })
+            .collect()
+    })
+}
+
+pub fn compute_zero_commitment(amount: u64) -> EdwardsPoint {
+    precomputed_commitments()
+        .get(&amount)
+        .copied()
+        .unwrap_or_else(|| {
+            h_precomp().vartime_multiscalar_mul([Scalar::from(amount), Scalar::from(1_u8)])
+        })
+}
diff --git a/helper/src/lib.rs b/helper/src/lib.rs
index 90f420d6..da049009 100644
--- a/helper/src/lib.rs
+++ b/helper/src/lib.rs
@@ -1,7 +1,7 @@
 #![doc = include_str!("../README.md")]
 //---------------------------------------------------------------------------------------------------- Lints
 #![allow(clippy::len_zero, clippy::type_complexity, clippy::module_inception)]
-#![deny(nonstandard_style, deprecated, missing_docs, unused_mut)]
+#![deny(nonstandard_style, deprecated, unused_mut)]
 #![forbid(
     unused_unsafe,
     future_incompatible,
@@ -54,9 +54,10 @@ pub mod num;
 #[cfg(feature = "map")]
 pub mod map;
 
+#[cfg(feature = "crypto")]
+pub mod commitment;
 #[cfg(feature = "thread")]
 pub mod thread;
-
 #[cfg(feature = "time")]
 pub mod time;
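`compute_zero_commitment` returns the Pedersen commitment to `amount` with a blinding factor of one, C = G + amount * H, so the `HashMap` fast path for the 172 decomposed amounts and the precomputed-multiscalar slow path must agree. A sanity check along these lines (editor's sketch, using the paths shown above):

    use cuprate_helper::commitment::compute_zero_commitment;
    use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, Scalar};
    use monero_serai::H;

    fn main() {
        for amount in [20_000u64, 20_001] {
            // Naive formula: C = 1 * G + amount * H.
            let naive = ED25519_BASEPOINT_POINT + H() * Scalar::from(amount);
            // 20000 hits the precomputed table; 20001 takes the multiscalar path.
            assert_eq!(compute_zero_commitment(amount), naive);
        }
    }
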
diff --git a/p2p/cuprate-p2p/src/bin/test_init.rs b/p2p/cuprate-p2p/src/bin/test_init.rs
index ebef12ff..66047a6e 100644
--- a/p2p/cuprate-p2p/src/bin/test_init.rs
+++ b/p2p/cuprate-p2p/src/bin/test_init.rs
@@ -147,7 +147,7 @@ async fn main() {
     let (database_read, mut database_write) = cuprate_blockchain::service::init(
         cuprate_blockchain::config::ConfigBuilder::new()
             .fast()
-            .reader_threads(ReaderThreads::Number(4))
+            .reader_threads(ReaderThreads::Number(8))
             .build(),
     )
     .unwrap();
@@ -240,32 +240,32 @@ async fn main() {
             entry.blocks.len()
         );
 
-        for (block, txs) in entry.blocks {
-            let txs = if txs.is_empty() {
-                HashMap::new()
-            } else {
-                rayon_spawn_async(|| {
-                    txs.into_par_iter()
-                        .map(|tx| {
-                            let tx = TransactionVerificationData::new(tx).unwrap();
+        tracing::info!("Prepping {} blocks for verification", entry.blocks.len());
 
-                            (tx.tx_hash, tx)
-                        })
-                        .collect::<HashMap<_, _>>()
-                })
-                .await
-            };
+        let VerifyBlockResponse::MainChainBatchPrepped(blocks) = block_verifier
+            .ready()
+            .await
+            .unwrap()
+            .call(VerifyBlockRequest::MainChainBatchPrepareBlocks {
+                blocks: entry.blocks,
+            })
+            .await
+            .unwrap()
+        else {
+            panic!()
+        };
 
+        for (block, txs) in blocks {
             let VerifyBlockResponse::MainChain(block_info) = block_verifier
                 .ready()
                 .await
                 .unwrap()
-                .call(VerifyBlockRequest::MainChain {
-                    block,
-                    prepared_txs: txs,
-                })
+                .call(VerifyBlockRequest::MainChainPrepped { block, txs })
                 .await
-                .unwrap();
+                .unwrap()
+            else {
+                panic!()
+            };
 
             let height = block_info.height;
diff --git a/storage/cuprate-blockchain/Cargo.toml b/storage/cuprate-blockchain/Cargo.toml
index 515f133a..39774c3b 100644
--- a/storage/cuprate-blockchain/Cargo.toml
+++ b/storage/cuprate-blockchain/Cargo.toml
@@ -25,7 +25,7 @@ cfg-if = { workspace = true }
 # FIXME:
 # We only need the `thread` feature if `service` is enabled.
 # Figure out how to enable features of an already pulled in dependency conditionally.
-cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] }
+cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map", "crypto"] }
 cuprate-types = { path = "../../types", features = ["blockchain"] }
 curve25519-dalek = { workspace = true }
 monero-pruning = { path = "../../pruning" }
diff --git a/storage/cuprate-blockchain/src/backend/redb/env.rs b/storage/cuprate-blockchain/src/backend/redb/env.rs
index 67e430f8..5c13384d 100644
--- a/storage/cuprate-blockchain/src/backend/redb/env.rs
+++ b/storage/cuprate-blockchain/src/backend/redb/env.rs
@@ -57,8 +57,7 @@ impl Env for ConcreteEnv {
             //
             // should we use that instead of Immediate?
             SyncMode::Safe => redb::Durability::Immediate,
-            SyncMode::Async => redb::Durability::Eventual,
-            SyncMode::Fast => redb::Durability::None,
+            SyncMode::Async | SyncMode::Fast => redb::Durability::Eventual,
             // SOMEDAY: dynamic syncs are not implemented.
             SyncMode::FastThenSafe | SyncMode::Threshold(_) => unimplemented!(),
         };
diff --git a/storage/cuprate-blockchain/src/ops/block.rs b/storage/cuprate-blockchain/src/ops/block.rs
index 4f16cfde..6bae6a00 100644
--- a/storage/cuprate-blockchain/src/ops/block.rs
+++ b/storage/cuprate-blockchain/src/ops/block.rs
@@ -1,5 +1,6 @@
 //! Blocks functions.
 
+use std::time::Instant;
 //---------------------------------------------------------------------------------------------------- Import
 use bytemuck::TransparentWrapper;
 use monero_serai::block::Block;
@@ -43,6 +44,8 @@ pub fn add_block(
     block: &VerifiedBlockInformation,
     tables: &mut impl TablesMut,
 ) -> Result<(), RuntimeError> {
+    let time = Instant::now();
+
     //------------------------------------------------------ Check preconditions first
 
     // Cast height to `u32` for storage (handled at top of function).
@@ -62,6 +65,7 @@ pub fn add_block(
     );
 
     // Expensive checks - debug only.
+    /*
     #[cfg(debug_assertions)]
     {
         assert_eq!(block.block.serialize(), block.block_blob);
@@ -72,6 +76,8 @@ pub fn add_block(
         }
     }
 
+    */
+
     //------------------------------------------------------ Transaction / Outputs / Key Images
     // Add the miner transaction first.
     {
@@ -122,6 +128,8 @@ pub fn add_block(
         .block_heights_mut()
         .put(&block.block_hash, &block.height)?;
 
+    println!("time to add block: {}", time.elapsed().as_nanos());
+
     Ok(())
 }
 
diff --git a/storage/cuprate-blockchain/src/ops/output.rs b/storage/cuprate-blockchain/src/ops/output.rs
index 5b7620e4..8849a17a 100644
--- a/storage/cuprate-blockchain/src/ops/output.rs
+++ b/storage/cuprate-blockchain/src/ops/output.rs
@@ -1,6 +1,7 @@
 //! Output functions.
 
 //---------------------------------------------------------------------------------------------------- Import
+use cuprate_helper::commitment::compute_zero_commitment;
 use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, Scalar};
 use monero_serai::{transaction::Timelock, H};
 
@@ -156,7 +157,7 @@ pub fn output_to_output_on_chain(
 ) -> Result<OutputOnChain, RuntimeError> {
     // FIXME: implement lookup table for common values:
     //
-    let commitment = ED25519_BASEPOINT_POINT + H() * Scalar::from(amount);
+    let commitment = compute_zero_commitment(amount);
 
     let time_lock = if output
         .output_flags
diff --git a/storage/cuprate-blockchain/src/ops/tx.rs b/storage/cuprate-blockchain/src/ops/tx.rs
index b4f2984b..9e6794a6 100644
--- a/storage/cuprate-blockchain/src/ops/tx.rs
+++ b/storage/cuprate-blockchain/src/ops/tx.rs
@@ -2,6 +2,7 @@
 
 //---------------------------------------------------------------------------------------------------- Import
 use bytemuck::TransparentWrapper;
+use cuprate_helper::commitment::compute_zero_commitment;
 use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, Scalar};
 use monero_serai::transaction::{Input, Timelock, Transaction};
 
@@ -125,10 +126,7 @@ pub fn add_tx(
                     //
                     // FIXME: implement lookup table for common values:
                     //
-                    let commitment = (ED25519_BASEPOINT_POINT
-                        + monero_serai::H() * Scalar::from(amount))
-                    .compress()
-                    .to_bytes();
+                    let commitment = compute_zero_commitment(amount).compress().to_bytes();
 
                     add_rct_output(
                         &RctOutput {
diff --git a/storage/cuprate-blockchain/src/service/write.rs b/storage/cuprate-blockchain/src/service/write.rs
index 8c2cc91e..cfcec501 100644
--- a/storage/cuprate-blockchain/src/service/write.rs
+++ b/storage/cuprate-blockchain/src/service/write.rs
@@ -1,6 +1,7 @@
 //! Database writer thread definitions and logic.
 
 //---------------------------------------------------------------------------------------------------- Import
+use std::time::Instant;
 use std::{
     sync::Arc,
     task::{Context, Poll},
@@ -221,11 +222,14 @@ impl DatabaseWriter {
 /// [`BCWriteRequest::WriteBlock`].
 #[inline]
 fn write_block(env: &ConcreteEnv, block: &VerifiedBlockInformation) -> ResponseResult {
+    let time = Instant::now();
     let env_inner = env.env_inner();
     let tx_rw = env_inner.tx_rw()?;
 
     let result = {
         let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?;
+        println!("time to open table: {}", time.elapsed().as_nanos());
+
         crate::ops::block::add_block(block, &mut tables_mut)
     };
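The `Instant`/`println!` pairs here and in `add_block` above are ad-hoc benchmarking probes. Were they to be kept, the same measurement expressed through the `tracing` macros already used throughout the codebase would look roughly like this (a sketch, not part of the patch):

    let time = std::time::Instant::now();
    let env_inner = env.env_inner();
    let tx_rw = env_inner.tx_rw()?;
    // Nanosecond resolution, emitted at `trace` so normal runs can filter it out.
    tracing::trace!("opened write tx in {}ns", time.elapsed().as_nanos());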