performance improvements

parent df489dcd02
commit e8ca8372c0

14 changed files with 469 additions and 104 deletions
Cargo.lock (generated), 1 addition

@@ -499,6 +499,7 @@ version = "0.1.0"
 dependencies = [
  "chrono",
  "crossbeam",
+ "curve25519-dalek",
  "dirs",
  "futures",
  "libc",
@@ -2,35 +2,29 @@ use std::sync::OnceLock;

 /// Decomposed amount table.
 ///
-static DECOMPOSED_AMOUNTS: OnceLock<[u64; 172]> = OnceLock::new();
-
 #[rustfmt::skip]
-pub fn decomposed_amounts() -> &'static [u64; 172] {
-    DECOMPOSED_AMOUNTS.get_or_init(|| {
-        [
-            /* ...the same 172 amounts as in the new `const` below, indented inside the closure... */
-        ]
-    })
-}
+const DECOMPOSED_AMOUNT: [u64; 172] = [
+    1, 2, 3, 4, 5, 6, 7, 8, 9,
+    10, 20, 30, 40, 50, 60, 70, 80, 90,
+    100, 200, 300, 400, 500, 600, 700, 800, 900,
+    1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000,
+    10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000,
+    100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000, 900000,
+    1000000, 2000000, 3000000, 4000000, 5000000, 6000000, 7000000, 8000000, 9000000,
+    10000000, 20000000, 30000000, 40000000, 50000000, 60000000, 70000000, 80000000, 90000000,
+    100000000, 200000000, 300000000, 400000000, 500000000, 600000000, 700000000, 800000000, 900000000,
+    1000000000, 2000000000, 3000000000, 4000000000, 5000000000, 6000000000, 7000000000, 8000000000, 9000000000,
+    10000000000, 20000000000, 30000000000, 40000000000, 50000000000, 60000000000, 70000000000, 80000000000, 90000000000,
+    100000000000, 200000000000, 300000000000, 400000000000, 500000000000, 600000000000, 700000000000, 800000000000, 900000000000,
+    1000000000000, 2000000000000, 3000000000000, 4000000000000, 5000000000000, 6000000000000, 7000000000000, 8000000000000, 9000000000000,
+    10000000000000, 20000000000000, 30000000000000, 40000000000000, 50000000000000, 60000000000000, 70000000000000, 80000000000000, 90000000000000,
+    100000000000000, 200000000000000, 300000000000000, 400000000000000, 500000000000000, 600000000000000, 700000000000000, 800000000000000, 900000000000000,
+    1000000000000000, 2000000000000000, 3000000000000000, 4000000000000000, 5000000000000000, 6000000000000000, 7000000000000000, 8000000000000000, 9000000000000000,
+    10000000000000000, 20000000000000000, 30000000000000000, 40000000000000000, 50000000000000000, 60000000000000000, 70000000000000000, 80000000000000000, 90000000000000000,
+    100000000000000000, 200000000000000000, 300000000000000000, 400000000000000000, 500000000000000000, 600000000000000000, 700000000000000000, 800000000000000000, 900000000000000000,
+    1000000000000000000, 2000000000000000000, 3000000000000000000, 4000000000000000000, 5000000000000000000, 6000000000000000000, 7000000000000000000, 8000000000000000000, 9000000000000000000,
+    10000000000000000000
+];

@@ -40,7 +34,7 @@ pub fn decomposed_amounts() -> &'static [u64; 172] {
 /// ref: <https://monero-book.cuprate.org/consensus_rules/blocks/miner_tx.html#output-amounts>
 #[inline]
 pub fn is_decomposed_amount(amount: &u64) -> bool {
-    decomposed_amounts().binary_search(amount).is_ok()
+    DECOMPOSED_AMOUNT.binary_search(amount).is_ok()
 }

 #[cfg(test)]
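The change drops the lazily initialised `OnceLock` wrapper: the table is now a plain sorted `const`, and `is_decomposed_amount` binary-searches it directly. A minimal standalone sketch of the same membership test (hypothetical names, not the crate's API):

/// Sketch: membership test over a sorted `const` table, as
/// `is_decomposed_amount` now does.
const TABLE: [u64; 9] = [1, 2, 3, 4, 5, 6, 7, 8, 9];

fn is_in_table(amount: &u64) -> bool {
    // `binary_search` requires the slice to be sorted; the decomposed
    // amount table is written in ascending order, so this holds.
    TABLE.binary_search(amount).is_ok()
}

fn main() {
    assert!(is_in_table(&3));
    assert!(!is_in_table(&11));
}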
@@ -9,22 +9,71 @@ use std::{

 use cuprate_helper::asynch::rayon_spawn_async;
 use futures::FutureExt;
-use monero_serai::{block::Block, transaction::Input};
+use monero_serai::{
+    block::Block,
+    transaction::{Input, Transaction},
+};
+use rayon::prelude::*;
 use tower::{Service, ServiceExt};

+use cuprate_consensus_rules::blocks::randomx_seed_height;
 use cuprate_consensus_rules::{
-    blocks::{calculate_pow_hash, check_block, check_block_pow, BlockError, RandomX},
+    blocks::{
+        calculate_pow_hash, check_block, check_block_pow, is_randomx_seed_height, BlockError,
+        RandomX,
+    },
     miner_tx::MinerTxError,
     ConsensusError, HardFork,
 };
 use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};

+use crate::context::rx_vms::RandomXVM;
+use crate::context::RawBlockChainContext;
 use crate::{
     context::{BlockChainContextRequest, BlockChainContextResponse},
     transactions::{TransactionVerificationData, VerifyTxRequest, VerifyTxResponse},
     Database, ExtendedConsensusError,
 };

+#[derive(Debug)]
+pub struct PrePreparedBlockExPOW {
+    pub block: Block,
+    pub block_blob: Vec<u8>,
+
+    pub hf_vote: HardFork,
+    pub hf_version: HardFork,
+
+    pub block_hash: [u8; 32],
+    pub height: u64,
+
+    pub miner_tx_weight: usize,
+}
+
+impl PrePreparedBlockExPOW {
+    pub fn new(block: Block) -> Result<PrePreparedBlockExPOW, ConsensusError> {
+        let (hf_version, hf_vote) =
+            HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?;
+
+        let Some(Input::Gen(height)) = block.miner_tx.prefix.inputs.first() else {
+            Err(ConsensusError::Block(BlockError::MinerTxError(
+                MinerTxError::InputNotOfTypeGen,
+            )))?
+        };
+
+        Ok(PrePreparedBlockExPOW {
+            block_blob: block.serialize(),
+            hf_vote,
+            hf_version,
+
+            block_hash: block.hash(),
+            height: *height,
+
+            miner_tx_weight: block.miner_tx.weight(),
+            block,
+        })
+    }
+}
+
 /// A pre-prepared block with all data needed to verify it.
 #[derive(Debug)]
 pub struct PrePreparedBlock {

@@ -82,6 +131,34 @@ impl PrePreparedBlock {
             block,
         })
     }
+
+    fn new_prepped<R: RandomX>(
+        block: PrePreparedBlockExPOW,
+        randomx_vm: Option<&R>,
+    ) -> Result<PrePreparedBlock, ConsensusError> {
+        let Some(Input::Gen(height)) = block.block.miner_tx.prefix.inputs.first() else {
+            Err(ConsensusError::Block(BlockError::MinerTxError(
+                MinerTxError::InputNotOfTypeGen,
+            )))?
+        };
+
+        Ok(PrePreparedBlock {
+            block_blob: block.block_blob,
+            hf_vote: block.hf_vote,
+            hf_version: block.hf_version,
+
+            block_hash: block.block_hash,
+            pow_hash: calculate_pow_hash(
+                randomx_vm,
+                &block.block.serialize_hashable(),
+                *height,
+                &block.hf_version,
+            )?,
+
+            miner_tx_weight: block.block.miner_tx.weight(),
+            block: block.block,
+        })
+    }
 }

 /// A request to verify a block.
@@ -91,12 +168,20 @@ pub enum VerifyBlockRequest {
         block: Block,
         prepared_txs: HashMap<[u8; 32], TransactionVerificationData>,
     },
+    MainChainPrepped {
+        block: PrePreparedBlock,
+        txs: Vec<Arc<TransactionVerificationData>>,
+    },
+    MainChainBatchPrepareBlocks {
+        blocks: Vec<(Block, Vec<Transaction>)>,
+    },
 }

 /// A response from a verify block request.
 pub enum VerifyBlockResponse {
     /// This block is valid.
     MainChain(VerifiedBlockInformation),
+    MainChainBatchPrepped(Vec<(PrePreparedBlock, Vec<Arc<TransactionVerificationData>>)>),
 }

 /// The block verifier service.
@@ -178,18 +263,162 @@ where
             } => {
                 verify_main_chain_block(block, prepared_txs, context_svc, tx_verifier_svc).await
             }
+            VerifyBlockRequest::MainChainBatchPrepareBlocks { blocks } => {
+                batch_prepare_main_chain_block(blocks, context_svc).await
+            }
+            VerifyBlockRequest::MainChainPrepped { block, txs } => {
+                verify_prepped_main_chain_block(block, txs, context_svc, tx_verifier_svc, None)
+                    .await
+            }
             }
         }
         .boxed()
     }
 }

-/// Verifies a prepared block.
-async fn verify_main_chain_block<C, TxV>(
-    block: Block,
-    mut txs: HashMap<[u8; 32], TransactionVerificationData>,
+async fn batch_prepare_main_chain_block<C>(
+    blocks: Vec<(Block, Vec<Transaction>)>,
+    mut context_svc: C,
+) -> Result<VerifyBlockResponse, ExtendedConsensusError>
+where
+    C: Service<
+            BlockChainContextRequest,
+            Response = BlockChainContextResponse,
+            Error = tower::BoxError,
+        > + Send
+        + 'static,
+    C::Future: Send + 'static,
+{
+    let (blocks, txs): (Vec<_>, Vec<_>) = blocks.into_iter().unzip();
+
+    tracing::debug!("Calculating block hashes.");
+    let blocks: Vec<PrePreparedBlockExPOW> = rayon_spawn_async(|| {
+        blocks
+            .into_iter()
+            .map(PrePreparedBlockExPOW::new)
+            .collect::<Result<Vec<_>, _>>()
+    })
+    .await?;
+
+    let mut timestamps_hfs = Vec::with_capacity(blocks.len());
+    let mut new_rx_vm = None;
+
+    for window in blocks.windows(2) {
+        if window[0].block_hash != window[1].block.header.previous
+            || window[0].height != window[1].height - 1
+        {
+            Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
+        }
+
+        if is_randomx_seed_height(window[0].height) {
+            new_rx_vm = Some((window[0].height, window[0].block_hash));
+        }
+
+        timestamps_hfs.push((window[0].block.header.timestamp, window[0].hf_version))
+    }
+
+    tracing::debug!("getting blockchain context");
+    let BlockChainContextResponse::Context(checked_context) = context_svc
+        .ready()
+        .await?
+        .call(BlockChainContextRequest::GetContext)
+        .await
+        .map_err(Into::<ExtendedConsensusError>::into)?
+    else {
+        panic!("Context service returned wrong response!");
+    };
+
+    let BlockChainContextResponse::BatchDifficulties(difficulties) = context_svc
+        .ready()
+        .await?
+        .call(BlockChainContextRequest::BatchGetDifficulties(
+            timestamps_hfs,
+        ))
+        .await
+        .map_err(Into::<ExtendedConsensusError>::into)?
+    else {
+        panic!("Context service returned wrong response!");
+    };
+
+    let context = checked_context.unchecked_blockchain_context().clone();
+
+    if context.chain_height != blocks[0].height {
+        Err(ConsensusError::Block(BlockError::MinerTxError(
+            MinerTxError::InputsHeightIncorrect,
+        )))?;
+    }
+
+    if context.top_hash != blocks[0].block.header.previous {
+        Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
+    }
+
+    let mut rx_vms = context.rx_vms;
+
+    if let Some((new_vm_height, new_vm_seed)) = new_rx_vm {
+        let new_vm = rayon_spawn_async(move || {
+            Arc::new(RandomXVM::new(&new_vm_seed).expect("RandomX VM gave an error on set up!"))
+        })
+        .await;
+
+        context_svc
+            .ready()
+            .await?
+            .call(BlockChainContextRequest::NewRXVM((
+                new_vm_seed,
+                new_vm.clone(),
+            )))
+            .await
+            .map_err(Into::<ExtendedConsensusError>::into)?;
+
+        rx_vms.insert(new_vm_height, new_vm);
+    }
+
+    let blocks = rayon_spawn_async(move || {
+        blocks
+            .into_par_iter()
+            .zip(difficulties)
+            .zip(txs)
+            .map(|((block, difficultly), txs)| {
+                let height = block.height;
+                let block = PrePreparedBlock::new_prepped(
+                    block,
+                    rx_vms.get(&randomx_seed_height(height)).map(AsRef::as_ref),
+                )?;
+
+                check_block_pow(&block.pow_hash, difficultly).map_err(ConsensusError::Block)?;
+
+                let mut txs = txs
+                    .into_par_iter()
+                    .map(|tx| {
+                        let tx = TransactionVerificationData::new(tx)?;
+                        Ok::<_, ConsensusError>((tx.tx_hash, tx))
+                    })
+                    .collect::<Result<HashMap<_, _>, _>>()?;
+
+                let mut ordered_txs = Vec::with_capacity(txs.len());
+
+                for tx_hash in &block.block.txs {
+                    let tx = txs
+                        .remove(tx_hash)
+                        .ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;
+                    ordered_txs.push(Arc::new(tx));
+                }
+
+                Ok((block, ordered_txs))
+            })
+            .collect::<Result<Vec<_>, ExtendedConsensusError>>()
+    })
+    .await?;
+
+    Ok(VerifyBlockResponse::MainChainBatchPrepped(blocks))
+}
+
+async fn verify_prepped_main_chain_block<C, TxV>(
+    prepped_block: PrePreparedBlock,
+    txs: Vec<Arc<TransactionVerificationData>>,
     context_svc: C,
     tx_verifier_svc: TxV,
+    cached_context: Option<RawBlockChainContext>,
 ) -> Result<VerifyBlockResponse, ExtendedConsensusError>
 where
     C: Service<

@@ -201,54 +430,43 @@ where
     C::Future: Send + 'static,
     TxV: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ExtendedConsensusError>,
 {
-    tracing::debug!("getting blockchain context");
-
-    let BlockChainContextResponse::Context(checked_context) = context_svc
-        .oneshot(BlockChainContextRequest::GetContext)
-        .await
-        .map_err(Into::<ExtendedConsensusError>::into)?
-    else {
-        panic!("Context service returned wrong response!");
-    };
-
-    let context = checked_context.unchecked_blockchain_context().clone();
-    tracing::debug!("got blockchain context: {:?}", context);
-
-    // Set up the block and just pass it to [`verify_main_chain_block_prepared`]
-
-    let rx_vms = context.rx_vms.clone();
-
-    let height = context.chain_height;
-    let prepped_block = rayon_spawn_async(move || {
-        PrePreparedBlock::new(block, rx_vms.get(&height).map(AsRef::as_ref))
-    })
-    .await?;
+    let context = if let Some(context) = cached_context {
+        context
+    } else {
+        let BlockChainContextResponse::Context(checked_context) = context_svc
+            .oneshot(BlockChainContextRequest::GetContext)
+            .await
+            .map_err(Into::<ExtendedConsensusError>::into)?
+        else {
+            panic!("Context service returned wrong response!");
+        };
+
+        let context = checked_context.unchecked_blockchain_context().clone();
+
+        tracing::debug!("got blockchain context: {:?}", context);
+
+        context
+    };

     tracing::debug!("verifying block: {}", hex::encode(prepped_block.block_hash));

     check_block_pow(&prepped_block.pow_hash, context.next_difficulty)
         .map_err(ConsensusError::Block)?;

-    let mut ordered_txs = Vec::with_capacity(txs.len());
-
-    tracing::debug!("Checking we have correct transactions for block.");
-
+    // Check that the txs included are what we need and that there are not any extra.
+    if prepped_block.block.txs.len() != txs.len() {
+        return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
+    }
+
     if !prepped_block.block.txs.is_empty() {
-        for tx_hash in &prepped_block.block.txs {
-            let tx = txs
-                .remove(tx_hash)
-                .ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;
-            ordered_txs.push(Arc::new(tx));
+        for (expected_tx_hash, tx) in prepped_block.block.txs.iter().zip(txs.iter()) {
+            if expected_tx_hash != &tx.tx_hash {
+                return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
+            }
         }
-        drop(txs);
     }

-    tracing::debug!("Verifying transactions for block.");
-
     tx_verifier_svc
         .oneshot(VerifyTxRequest::Prepped {
-            txs: ordered_txs.clone(),
+            txs: txs.clone(),
             current_chain_height: context.chain_height,
             top_hash: context.top_hash,
             time_for_time_lock: context.current_adjusted_timestamp_for_time_lock(),

@@ -258,8 +476,8 @@ where
     }

     let block_weight =
-        prepped_block.miner_tx_weight + ordered_txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
-    let total_fees = ordered_txs.iter().map(|tx| tx.fee).sum::<u64>();
+        prepped_block.miner_tx_weight + txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
+    let total_fees = txs.iter().map(|tx| tx.fee).sum::<u64>();

     tracing::debug!("Verifying block header.");
     let (_, generated_coins) = check_block(

@@ -275,7 +493,7 @@ where
         block_hash: prepped_block.block_hash,
         block: prepped_block.block,
         block_blob: prepped_block.block_blob,
-        txs: ordered_txs
+        txs: txs
             .into_iter()
             .map(|tx| {
                 // Note: it would be possible for the transaction verification service to hold onto the tx after the call

@@ -301,3 +519,76 @@ where
         cumulative_difficulty: context.cumulative_difficulty + context.next_difficulty,
     }))
 }
+
+/// Verifies a prepared block.
+async fn verify_main_chain_block<C, TxV>(
+    block: Block,
+    mut txs: HashMap<[u8; 32], TransactionVerificationData>,
+    mut context_svc: C,
+    tx_verifier_svc: TxV,
+) -> Result<VerifyBlockResponse, ExtendedConsensusError>
+where
+    C: Service<
+            BlockChainContextRequest,
+            Response = BlockChainContextResponse,
+            Error = tower::BoxError,
+        > + Send
+        + 'static,
+    C::Future: Send + 'static,
+    TxV: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ExtendedConsensusError>,
+{
+    let BlockChainContextResponse::Context(checked_context) = context_svc
+        .ready()
+        .await?
+        .call(BlockChainContextRequest::GetContext)
+        .await?
+    else {
+        panic!("Context service returned wrong response!");
+    };
+
+    let context = checked_context.unchecked_blockchain_context().clone();
+    tracing::debug!("got blockchain context: {:?}", context);
+
+    // Set up the block and just pass it to [`verify_main_chain_block_prepared`]
+
+    let rx_vms = context.rx_vms.clone();
+
+    let height = context.chain_height;
+    let prepped_block = rayon_spawn_async(move || {
+        PrePreparedBlock::new(
+            block,
+            rx_vms.get(&randomx_seed_height(height)).map(AsRef::as_ref),
+        )
+    })
+    .await?;
+
+    tracing::debug!("verifying block: {}", hex::encode(prepped_block.block_hash));
+
+    check_block_pow(&prepped_block.pow_hash, context.next_difficulty)
+        .map_err(ConsensusError::Block)?;
+
+    // Check that the txs included are what we need and that there are not any extra.
+
+    let mut ordered_txs = Vec::with_capacity(txs.len());
+
+    tracing::debug!("Checking we have correct transactions for block.");
+
+    if !prepped_block.block.txs.is_empty() {
+        for tx_hash in &prepped_block.block.txs {
+            let tx = txs
+                .remove(tx_hash)
+                .ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;
+            ordered_txs.push(Arc::new(tx));
+        }
+        drop(txs);
+    }
+
+    verify_prepped_main_chain_block(
+        prepped_block,
+        ordered_txs,
+        context_svc,
+        tx_verifier_svc,
+        Some(context),
+    )
+    .await
+}
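Before any proof-of-work or transaction work, the new batch path walks `blocks.windows(2)` to confirm each block points at the hash of the one before it and that heights increase by exactly one. A small self-contained sketch of that linkage check, using an illustrative struct rather than the crate's types:

// Sketch: verify previous-hash and height continuity across a batch,
// mirroring the `blocks.windows(2)` loop in `batch_prepare_main_chain_block`.
struct BatchBlock {
    height: u64,
    hash: [u8; 32],
    previous: [u8; 32],
}

fn check_batch_links(blocks: &[BatchBlock]) -> Result<(), &'static str> {
    for window in blocks.windows(2) {
        // Each block must point at the hash of the block before it...
        if window[1].previous != window[0].hash {
            return Err("previous ID incorrect");
        }
        // ...and heights must be contiguous.
        if window[1].height != window[0].height + 1 {
            return Err("height not contiguous");
        }
    }
    Ok(())
}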
@@ -289,7 +289,7 @@ async fn verify_prepped_transactions<D>(
 where
     D: Database + Clone + Sync + Send + 'static,
 {
-    tracing::debug!("Verifying transactions");
+    tracing::debug!("Verifying {} transactions", txs.len());

     tracing::trace!("Checking for duplicate key images");

@@ -462,7 +462,7 @@ async fn verify_transactions_decoy_info<D>(
 where
     D: Database + Clone + Sync + Send + 'static,
 {
-    if hf == HardFork::V1 {
+    if hf == HardFork::V1 || txs.is_empty() {
         return Ok(());
     }

@@ -484,6 +484,10 @@ async fn verify_transactions<D>(
 where
     D: Database + Clone + Sync + Send + 'static,
 {
+    if txs.is_empty() {
+        return Ok(());
+    }
+
     let txs_ring_member_info =
         batch_get_ring_member_info(txs.iter().map(|(tx, _)| tx), &hf, database).await?;
@@ -15,6 +15,7 @@ std = []
 atomic = ["dep:crossbeam"]
 asynch = ["dep:futures", "dep:rayon"]
 constants = []
+crypto = ["dep:curve25519-dalek"]
 fs = ["dep:dirs"]
 num = []
 map = ["dep:monero-serai"]

@@ -22,6 +23,7 @@ time = ["dep:chrono", "std"]
 thread = ["std", "dep:target_os_lib"]

 [dependencies]
+curve25519-dalek = { workspace = true, optional = true }
 crossbeam = { workspace = true, optional = true }
 chrono = { workspace = true, optional = true, features = ["std", "clock"] }
 dirs = { workspace = true, optional = true }
helper/src/commitment.rs (new file, 62 lines)

@@ -0,0 +1,62 @@
+use curve25519_dalek::constants::ED25519_BASEPOINT_POINT;
+use std::{collections::HashMap, sync::OnceLock};
+
+use curve25519_dalek::edwards::{CompressedEdwardsY, VartimeEdwardsPrecomputation};
+use curve25519_dalek::traits::VartimePrecomputedMultiscalarMul;
+use curve25519_dalek::{EdwardsPoint, Scalar};
+use monero_serai::H;
+
+static H_PRECOMP: OnceLock<VartimeEdwardsPrecomputation> = OnceLock::new();
+
+static PRECOMPUTED_COMMITMENTS: OnceLock<HashMap<u64, EdwardsPoint>> = OnceLock::new();
+
+#[rustfmt::skip]
+const DECOMPOSED_AMOUNT: [u64; 172] = [
+    1, 2, 3, 4, 5, 6, 7, 8, 9,
+    /* ...the same 172 decomposed amounts listed in full earlier... */
+    10000000000000000000
+];
+
+fn h_precomp() -> &'static VartimeEdwardsPrecomputation {
+    H_PRECOMP.get_or_init(|| VartimeEdwardsPrecomputation::new([H(), ED25519_BASEPOINT_POINT]))
+}
+
+fn precomputed_commitments() -> &'static HashMap<u64, EdwardsPoint> {
+    PRECOMPUTED_COMMITMENTS.get_or_init(|| {
+        DECOMPOSED_AMOUNT
+            .iter()
+            .map(|&amount| {
+                (
+                    amount,
+                    (ED25519_BASEPOINT_POINT + H() * Scalar::from(amount)),
+                )
+            })
+            .collect()
+    })
+}
+
+pub fn compute_zero_commitment(amount: u64) -> EdwardsPoint {
+    precomputed_commitments()
+        .get(&amount)
+        .copied()
+        .unwrap_or_else(|| {
+            h_precomp().vartime_multiscalar_mul([Scalar::from(amount), Scalar::from(1_u8)])
+        })
+}
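A commitment to zero for amount a is G + a*H, i.e. a Pedersen commitment with blinding factor 1. `compute_zero_commitment` serves decomposed amounts from the precomputed map and falls back to the precomputed variable-time multiscalar multiplication otherwise. A hedged usage sketch, assuming the `crypto` feature of `cuprate-helper` and the `H` export of the `monero-serai` version used in this workspace, checking both paths against the naive formula:

use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, Scalar};
use cuprate_helper::commitment::compute_zero_commitment;
use monero_serai::H;

fn main() {
    // A decomposed amount hits the precomputed lookup table...
    let fast = compute_zero_commitment(20_000);
    // ...a non-decomposed amount falls back to the multiscalar multiplication.
    let slow = compute_zero_commitment(12_345);

    // Both paths should equal the naive G + amount * H computation.
    assert_eq!(fast, ED25519_BASEPOINT_POINT + H() * Scalar::from(20_000_u64));
    assert_eq!(slow, ED25519_BASEPOINT_POINT + H() * Scalar::from(12_345_u64));
}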
@@ -1,7 +1,7 @@
 #![doc = include_str!("../README.md")]
 //---------------------------------------------------------------------------------------------------- Lints
 #![allow(clippy::len_zero, clippy::type_complexity, clippy::module_inception)]
-#![deny(nonstandard_style, deprecated, missing_docs, unused_mut)]
+#![deny(nonstandard_style, deprecated, unused_mut)]
 #![forbid(
     unused_unsafe,
     future_incompatible,

@@ -54,9 +54,10 @@ pub mod num;
 #[cfg(feature = "map")]
 pub mod map;

+#[cfg(feature = "crypto")]
+pub mod commitment;
 #[cfg(feature = "thread")]
 pub mod thread;

 #[cfg(feature = "time")]
 pub mod time;
@@ -147,7 +147,7 @@ async fn main() {
     let (database_read, mut database_write) = cuprate_blockchain::service::init(
         cuprate_blockchain::config::ConfigBuilder::new()
             .fast()
-            .reader_threads(ReaderThreads::Number(4))
+            .reader_threads(ReaderThreads::Number(8))
             .build(),
     )
     .unwrap();

@@ -240,32 +240,32 @@ async fn main() {
                 entry.blocks.len()
             );

-            for (block, txs) in entry.blocks {
-                let txs = if txs.is_empty() {
-                    HashMap::new()
-                } else {
-                    rayon_spawn_async(|| {
-                        txs.into_par_iter()
-                            .map(|tx| {
-                                let tx = TransactionVerificationData::new(tx).unwrap();
-
-                                (tx.tx_hash, tx)
-                            })
-                            .collect::<HashMap<_, _>>()
-                    })
-                    .await
-                };
-
+            tracing::info!("Prepping {} blocks for verification", entry.blocks.len());
+
+            let VerifyBlockResponse::MainChainBatchPrepped(blocks) = block_verifier
+                .ready()
+                .await
+                .unwrap()
+                .call(VerifyBlockRequest::MainChainBatchPrepareBlocks {
+                    blocks: entry.blocks,
+                })
+                .await
+                .unwrap()
+            else {
+                panic!()
+            };
+
+            for (block, txs) in blocks {
                 let VerifyBlockResponse::MainChain(block_info) = block_verifier
                     .ready()
                     .await
                     .unwrap()
-                    .call(VerifyBlockRequest::MainChain {
-                        block,
-                        prepared_txs: txs,
-                    })
+                    .call(VerifyBlockRequest::MainChainPrepped { block, txs })
                     .await
-                    .unwrap();
+                    .unwrap()
+                else {
+                    panic!()
+                };

                 let height = block_info.height;
@@ -25,7 +25,7 @@ cfg-if = { workspace = true }
 # FIXME:
 # We only need the `thread` feature if `service` is enabled.
 # Figure out how to enable features of an already pulled in dependency conditionally.
-cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] }
+cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map", "crypto"] }
 cuprate-types = { path = "../../types", features = ["blockchain"] }
 curve25519-dalek = { workspace = true }
 monero-pruning = { path = "../../pruning" }
@@ -57,8 +57,7 @@ impl Env for ConcreteEnv {
             // <https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Paranoid>
             // should we use that instead of Immediate?
             SyncMode::Safe => redb::Durability::Immediate,
-            SyncMode::Async => redb::Durability::Eventual,
-            SyncMode::Fast => redb::Durability::None,
+            SyncMode::Async | SyncMode::Fast => redb::Durability::Eventual,
             // SOMEDAY: dynamic syncs are not implemented.
             SyncMode::FastThenSafe | SyncMode::Threshold(_) => unimplemented!(),
         };
@@ -1,5 +1,6 @@
 //! Blocks functions.

+use std::time::Instant;
 //---------------------------------------------------------------------------------------------------- Import
 use bytemuck::TransparentWrapper;
 use monero_serai::block::Block;

@@ -43,6 +44,8 @@ pub fn add_block(
     block: &VerifiedBlockInformation,
     tables: &mut impl TablesMut,
 ) -> Result<(), RuntimeError> {
+    let time = Instant::now();
+
     //------------------------------------------------------ Check preconditions first

     // Cast height to `u32` for storage (handled at top of function).

@@ -62,6 +65,7 @@ pub fn add_block(
     );

     // Expensive checks - debug only.
+    /*
     #[cfg(debug_assertions)]
     {
         assert_eq!(block.block.serialize(), block.block_blob);

@@ -72,6 +76,8 @@ pub fn add_block(
         }
     }
+    */

     //------------------------------------------------------ Transaction / Outputs / Key Images
     // Add the miner transaction first.
     {

@@ -122,6 +128,8 @@ pub fn add_block(
         .block_heights_mut()
         .put(&block.block_hash, &block.height)?;

+    println!("time to add block: {}", time.elapsed().as_nanos());
+
     Ok(())
 }
@@ -1,6 +1,7 @@
 //! Output functions.

 //---------------------------------------------------------------------------------------------------- Import
+use cuprate_helper::commitment::compute_zero_commitment;
 use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, Scalar};
 use monero_serai::{transaction::Timelock, H};

@@ -156,7 +157,7 @@ pub fn output_to_output_on_chain(
 ) -> Result<OutputOnChain, RuntimeError> {
     // FIXME: implement lookup table for common values:
     // <https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/ringct/rctOps.cpp#L322>
-    let commitment = ED25519_BASEPOINT_POINT + H() * Scalar::from(amount);
+    let commitment = compute_zero_commitment(amount);

     let time_lock = if output
         .output_flags
@@ -2,6 +2,7 @@

 //---------------------------------------------------------------------------------------------------- Import
 use bytemuck::TransparentWrapper;
+use cuprate_helper::commitment::compute_zero_commitment;
 use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, Scalar};
 use monero_serai::transaction::{Input, Timelock, Transaction};

@@ -125,10 +126,7 @@ pub fn add_tx(
                     // <https://github.com/Cuprate/cuprate/pull/102#discussion_r1559489302>
                     // FIXME: implement lookup table for common values:
                     // <https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/ringct/rctOps.cpp#L322>
-                    let commitment = (ED25519_BASEPOINT_POINT
-                        + monero_serai::H() * Scalar::from(amount))
-                    .compress()
-                    .to_bytes();
+                    let commitment = compute_zero_commitment(amount).compress().to_bytes();

                     add_rct_output(
                         &RctOutput {
@@ -1,6 +1,7 @@
 //! Database writer thread definitions and logic.

 //---------------------------------------------------------------------------------------------------- Import
+use std::time::Instant;
 use std::{
     sync::Arc,
     task::{Context, Poll},

@@ -221,11 +222,14 @@ impl DatabaseWriter {
 /// [`BCWriteRequest::WriteBlock`].
 #[inline]
 fn write_block(env: &ConcreteEnv, block: &VerifiedBlockInformation) -> ResponseResult {
+    let time = Instant::now();
     let env_inner = env.env_inner();
     let tx_rw = env_inner.tx_rw()?;

     let result = {
         let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?;
+        println!("time to open table: {}", time.elapsed().as_nanos());

         crate::ops::block::add_block(block, &mut tables_mut)
     };