performance improvements

Boog900 2024-06-05 02:29:06 +01:00
parent df489dcd02
commit e8ca8372c0
GPG key ID: 42AB1287CB0041C2
14 changed files with 469 additions and 104 deletions

Cargo.lock (generated)

@@ -499,6 +499,7 @@ version = "0.1.0"
dependencies = [
"chrono",
"crossbeam",
"curve25519-dalek",
"dirs",
"futures",
"libc",


@@ -2,12 +2,8 @@ use std::sync::OnceLock;
/// Decomposed amount table.
///
static DECOMPOSED_AMOUNTS: OnceLock<[u64; 172]> = OnceLock::new();
#[rustfmt::skip]
pub fn decomposed_amounts() -> &'static [u64; 172] {
DECOMPOSED_AMOUNTS.get_or_init(|| {
[
const DECOMPOSED_AMOUNT: [u64; 172] = [
1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 20, 30, 40, 50, 60, 70, 80, 90,
100, 200, 300, 400, 500, 600, 700, 800, 900,
@@ -28,9 +24,7 @@ pub fn decomposed_amounts() -> &'static [u64; 172] {
100000000000000000, 200000000000000000, 300000000000000000, 400000000000000000, 500000000000000000, 600000000000000000, 700000000000000000, 800000000000000000, 900000000000000000,
1000000000000000000, 2000000000000000000, 3000000000000000000, 4000000000000000000, 5000000000000000000, 6000000000000000000, 7000000000000000000, 8000000000000000000, 9000000000000000000,
10000000000000000000
]
})
}
];
/// Checks that an output amount is decomposed.
///
@@ -40,7 +34,7 @@ pub fn decomposed_amounts() -> &'static [u64; 172] {
/// ref: <https://monero-book.cuprate.org/consensus_rules/blocks/miner_tx.html#output-amounts>
#[inline]
pub fn is_decomposed_amount(amount: &u64) -> bool {
decomposed_amounts().binary_search(amount).is_ok()
DECOMPOSED_AMOUNT.binary_search(amount).is_ok()
}
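// A minimal lookup sketch (editorial, not part of this commit): the
// `binary_search` above is only valid because `DECOMPOSED_AMOUNT` stays
// sorted ascending, which the table is.
#[cfg(test)]
#[test]
fn decomposed_amount_lookup_sketch() {
    // 500_000 is one of the table's "digits"; 500_001 is not.
    assert!(is_decomposed_amount(&500_000));
    assert!(!is_decomposed_amount(&500_001));
    // Sortedness invariant the binary search relies on.
    assert!(DECOMPOSED_AMOUNT.windows(2).all(|w| w[0] < w[1]));
}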
#[cfg(test)]


@@ -9,22 +9,71 @@ use std::{
use cuprate_helper::asynch::rayon_spawn_async;
use futures::FutureExt;
use monero_serai::{block::Block, transaction::Input};
use monero_serai::{
block::Block,
transaction::{Input, Transaction},
};
use rayon::prelude::*;
use tower::{Service, ServiceExt};
use cuprate_consensus_rules::blocks::randomx_seed_height;
use cuprate_consensus_rules::{
blocks::{calculate_pow_hash, check_block, check_block_pow, BlockError, RandomX},
blocks::{
calculate_pow_hash, check_block, check_block_pow, is_randomx_seed_height, BlockError,
RandomX,
},
miner_tx::MinerTxError,
ConsensusError, HardFork,
};
use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
use crate::context::rx_vms::RandomXVM;
use crate::context::RawBlockChainContext;
use crate::{
context::{BlockChainContextRequest, BlockChainContextResponse},
transactions::{TransactionVerificationData, VerifyTxRequest, VerifyTxResponse},
Database, ExtendedConsensusError,
};
#[derive(Debug)]
pub struct PrePreparedBlockExPOW {
pub block: Block,
pub block_blob: Vec<u8>,
pub hf_vote: HardFork,
pub hf_version: HardFork,
pub block_hash: [u8; 32],
pub height: u64,
pub miner_tx_weight: usize,
}
impl PrePreparedBlockExPOW {
pub fn new(block: Block) -> Result<PrePreparedBlockExPOW, ConsensusError> {
let (hf_version, hf_vote) =
HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?;
let Some(Input::Gen(height)) = block.miner_tx.prefix.inputs.first() else {
Err(ConsensusError::Block(BlockError::MinerTxError(
MinerTxError::InputNotOfTypeGen,
)))?
};
Ok(PrePreparedBlockExPOW {
block_blob: block.serialize(),
hf_vote,
hf_version,
block_hash: block.hash(),
height: *height,
miner_tx_weight: block.miner_tx.weight(),
block,
})
}
}
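// Note (editorial, inferred from the code below): `PrePreparedBlockExPOW`
// carries everything *except* the PoW hash, so a batch of blocks can be
// hashed and linked cheaply first; `PrePreparedBlock::new_prepped` fills in
// `pow_hash` once the RandomX VM for the block's seed height is known.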
/// A pre-prepared block with all data needed to verify it.
#[derive(Debug)]
pub struct PrePreparedBlock {
@@ -82,6 +131,34 @@ impl PrePreparedBlock {
block,
})
}
fn new_prepped<R: RandomX>(
block: PrePreparedBlockExPOW,
randomx_vm: Option<&R>,
) -> Result<PrePreparedBlock, ConsensusError> {
let Some(Input::Gen(height)) = block.block.miner_tx.prefix.inputs.first() else {
Err(ConsensusError::Block(BlockError::MinerTxError(
MinerTxError::InputNotOfTypeGen,
)))?
};
Ok(PrePreparedBlock {
block_blob: block.block_blob,
hf_vote: block.hf_vote,
hf_version: block.hf_version,
block_hash: block.block_hash,
pow_hash: calculate_pow_hash(
randomx_vm,
&block.block.serialize_hashable(),
*height,
&block.hf_version,
)?,
miner_tx_weight: block.block.miner_tx.weight(),
block: block.block,
})
}
}
/// A request to verify a block.
@@ -91,12 +168,20 @@ pub enum VerifyBlockRequest {
block: Block,
prepared_txs: HashMap<[u8; 32], TransactionVerificationData>,
},
MainChainPrepped {
block: PrePreparedBlock,
txs: Vec<Arc<TransactionVerificationData>>,
},
MainChainBatchPrepareBlocks {
blocks: Vec<(Block, Vec<Transaction>)>,
},
}
/// A response from a verify block request.
pub enum VerifyBlockResponse {
/// This block is valid.
MainChain(VerifiedBlockInformation),
MainChainBatchPrepped(Vec<(PrePreparedBlock, Vec<Arc<TransactionVerificationData>>)>),
}
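// Sketch of the intended two-phase flow (editorial, not part of this commit;
// the scanner change further down drives it for real): a
// `MainChainBatchPrepareBlocks { blocks }` request answers with
// `MainChainBatchPrepped(prepped)`, doing hashing, chain-link checks, PoW and
// tx preparation in bulk on rayon; each resulting `(block, txs)` pair is then
// fed back through `MainChainPrepped { block, txs }` for the remaining
// contextual checks.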
/// The block verifier service.
@@ -178,18 +263,162 @@ where
} => {
verify_main_chain_block(block, prepared_txs, context_svc, tx_verifier_svc).await
}
VerifyBlockRequest::MainChainBatchPrepareBlocks { blocks } => {
batch_prepare_main_chain_block(blocks, context_svc).await
}
VerifyBlockRequest::MainChainPrepped { block, txs } => {
verify_prepped_main_chain_block(block, txs, context_svc, tx_verifier_svc, None)
.await
}
}
}
.boxed()
}
}
/// Verifies a prepared block.
async fn verify_main_chain_block<C, TxV>(
block: Block,
mut txs: HashMap<[u8; 32], TransactionVerificationData>,
async fn batch_prepare_main_chain_block<C>(
blocks: Vec<(Block, Vec<Transaction>)>,
mut context_svc: C,
) -> Result<VerifyBlockResponse, ExtendedConsensusError>
where
C: Service<
BlockChainContextRequest,
Response = BlockChainContextResponse,
Error = tower::BoxError,
> + Send
+ 'static,
C::Future: Send + 'static,
{
let (blocks, txs): (Vec<_>, Vec<_>) = blocks.into_iter().unzip();
tracing::debug!("Calculating block hashes.");
let blocks: Vec<PrePreparedBlockExPOW> = rayon_spawn_async(|| {
blocks
.into_iter()
.map(PrePreparedBlockExPOW::new)
.collect::<Result<Vec<_>, _>>()
})
.await?;
let mut timestamps_hfs = Vec::with_capacity(blocks.len());
let mut new_rx_vm = None;
for window in blocks.windows(2) {
if window[0].block_hash != window[1].block.header.previous
|| window[0].height != window[1].height - 1
{
Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
}
if is_randomx_seed_height(window[0].height) {
new_rx_vm = Some((window[0].height, window[0].block_hash));
}
timestamps_hfs.push((window[0].block.header.timestamp, window[0].hf_version));
}
tracing::debug!("getting blockchain context");
let BlockChainContextResponse::Context(checked_context) = context_svc
.ready()
.await?
.call(BlockChainContextRequest::GetContext)
.await
.map_err(Into::<ExtendedConsensusError>::into)?
else {
panic!("Context service returned wrong response!");
};
let BlockChainContextResponse::BatchDifficulties(difficulties) = context_svc
.ready()
.await?
.call(BlockChainContextRequest::BatchGetDifficulties(
timestamps_hfs,
))
.await
.map_err(Into::<ExtendedConsensusError>::into)?
else {
panic!("Context service returned wrong response!");
};
let context = checked_context.unchecked_blockchain_context().clone();
if context.chain_height != blocks[0].height {
Err(ConsensusError::Block(BlockError::MinerTxError(
MinerTxError::InputsHeightIncorrect,
)))?;
}
if context.top_hash != blocks[0].block.header.previous {
Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
}
let mut rx_vms = context.rx_vms;
if let Some((new_vm_height, new_vm_seed)) = new_rx_vm {
let new_vm = rayon_spawn_async(move || {
Arc::new(RandomXVM::new(&new_vm_seed).expect("RandomX VM gave an error on set up!"))
})
.await;
context_svc
.ready()
.await?
.call(BlockChainContextRequest::NewRXVM((
new_vm_seed,
new_vm.clone(),
)))
.await
.map_err(Into::<ExtendedConsensusError>::into)?;
rx_vms.insert(new_vm_height, new_vm);
}
let blocks = rayon_spawn_async(move || {
blocks
.into_par_iter()
.zip(difficulties)
.zip(txs)
.map(|((block, difficulty), txs)| {
let height = block.height;
let block = PrePreparedBlock::new_prepped(
block,
rx_vms.get(&randomx_seed_height(height)).map(AsRef::as_ref),
)?;
check_block_pow(&block.pow_hash, difficulty).map_err(ConsensusError::Block)?;
let mut txs = txs
.into_par_iter()
.map(|tx| {
let tx = TransactionVerificationData::new(tx)?;
Ok::<_, ConsensusError>((tx.tx_hash, tx))
})
.collect::<Result<HashMap<_, _>, _>>()?;
let mut ordered_txs = Vec::with_capacity(txs.len());
for tx_hash in &block.block.txs {
let tx = txs
.remove(tx_hash)
.ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;
ordered_txs.push(Arc::new(tx));
}
Ok((block, ordered_txs))
})
.collect::<Result<Vec<_>, ExtendedConsensusError>>()
})
.await?;
Ok(VerifyBlockResponse::MainChainBatchPrepped(blocks))
}
async fn verify_prepped_main_chain_block<C, TxV>(
prepped_block: PrePreparedBlock,
txs: Vec<Arc<TransactionVerificationData>>,
context_svc: C,
tx_verifier_svc: TxV,
cached_context: Option<RawBlockChainContext>,
) -> Result<VerifyBlockResponse, ExtendedConsensusError>
where
C: Service<
@@ -201,8 +430,9 @@ where
C::Future: Send + 'static,
TxV: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ExtendedConsensusError>,
{
tracing::debug!("getting blockchain context");
let context = if let Some(context) = cached_context {
context
} else {
let BlockChainContextResponse::Context(checked_context) = context_svc
.oneshot(BlockChainContextRequest::GetContext)
.await
@@ -212,43 +442,31 @@ where
};
let context = checked_context.unchecked_blockchain_context().clone();
tracing::debug!("got blockchain context: {:?}", context);
// Set up the block and just pass it to [`verify_main_chain_block_prepared`]
let rx_vms = context.rx_vms.clone();
let height = context.chain_height;
let prepped_block = rayon_spawn_async(move || {
PrePreparedBlock::new(block, rx_vms.get(&height).map(AsRef::as_ref))
})
.await?;
context
};
tracing::debug!("verifying block: {}", hex::encode(prepped_block.block_hash));
check_block_pow(&prepped_block.pow_hash, context.next_difficulty)
.map_err(ConsensusError::Block)?;
// Check that the txs included are what we need and that there are not any extra.
let mut ordered_txs = Vec::with_capacity(txs.len());
tracing::debug!("Checking we have correct transactions for block.");
if prepped_block.block.txs.len() != txs.len() {
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
}
if !prepped_block.block.txs.is_empty() {
for tx_hash in &prepped_block.block.txs {
let tx = txs
.remove(tx_hash)
.ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;
ordered_txs.push(Arc::new(tx));
for (expected_tx_hash, tx) in prepped_block.block.txs.iter().zip(txs.iter()) {
if expected_tx_hash != &tx.tx_hash {
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
}
}
drop(txs);
tracing::debug!("Verifying transactions for block.");
tx_verifier_svc
.oneshot(VerifyTxRequest::Prepped {
txs: ordered_txs.clone(),
txs: txs.clone(),
current_chain_height: context.chain_height,
top_hash: context.top_hash,
time_for_time_lock: context.current_adjusted_timestamp_for_time_lock(),
@@ -258,8 +476,8 @@
}
let block_weight =
prepped_block.miner_tx_weight + ordered_txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
let total_fees = ordered_txs.iter().map(|tx| tx.fee).sum::<u64>();
prepped_block.miner_tx_weight + txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
let total_fees = txs.iter().map(|tx| tx.fee).sum::<u64>();
tracing::debug!("Verifying block header.");
let (_, generated_coins) = check_block(
@@ -275,7 +493,7 @@
block_hash: prepped_block.block_hash,
block: prepped_block.block,
block_blob: prepped_block.block_blob,
txs: ordered_txs
txs: txs
.into_iter()
.map(|tx| {
// Note: it would be possible for the transaction verification service to hold onto the tx after the call
@@ -301,3 +519,76 @@ where
cumulative_difficulty: context.cumulative_difficulty + context.next_difficulty,
}))
}
/// Prepares a block, then verifies it as a main-chain block.
async fn verify_main_chain_block<C, TxV>(
block: Block,
mut txs: HashMap<[u8; 32], TransactionVerificationData>,
mut context_svc: C,
tx_verifier_svc: TxV,
) -> Result<VerifyBlockResponse, ExtendedConsensusError>
where
C: Service<
BlockChainContextRequest,
Response = BlockChainContextResponse,
Error = tower::BoxError,
> + Send
+ 'static,
C::Future: Send + 'static,
TxV: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ExtendedConsensusError>,
{
let BlockChainContextResponse::Context(checked_context) = context_svc
.ready()
.await?
.call(BlockChainContextRequest::GetContext)
.await?
else {
panic!("Context service returned wrong response!");
};
let context = checked_context.unchecked_blockchain_context().clone();
tracing::debug!("got blockchain context: {:?}", context);
// Set up the block and just pass it to [`verify_prepped_main_chain_block`]
let rx_vms = context.rx_vms.clone();
let height = context.chain_height;
let prepped_block = rayon_spawn_async(move || {
PrePreparedBlock::new(
block,
rx_vms.get(&randomx_seed_height(height)).map(AsRef::as_ref),
)
})
.await?;
tracing::debug!("verifying block: {}", hex::encode(prepped_block.block_hash));
check_block_pow(&prepped_block.pow_hash, context.next_difficulty)
.map_err(ConsensusError::Block)?;
// Check that the txs included are what we need and that there are not any extra.
let mut ordered_txs = Vec::with_capacity(txs.len());
tracing::debug!("Checking we have correct transactions for block.");
if !prepped_block.block.txs.is_empty() {
for tx_hash in &prepped_block.block.txs {
let tx = txs
.remove(tx_hash)
.ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;
ordered_txs.push(Arc::new(tx));
}
drop(txs);
}
verify_prepped_main_chain_block(
prepped_block,
ordered_txs,
context_svc,
tx_verifier_svc,
Some(context),
)
.await
}


@@ -289,7 +289,7 @@ async fn verify_prepped_transactions<D>(
where
D: Database + Clone + Sync + Send + 'static,
{
tracing::debug!("Verifying transactions");
tracing::debug!("Verifying {} transactions", txs.len());
tracing::trace!("Checking for duplicate key images");
@@ -462,7 +462,7 @@ async fn verify_transactions_decoy_info<D>(
where
D: Database + Clone + Sync + Send + 'static,
{
if hf == HardFork::V1 {
if hf == HardFork::V1 || txs.is_empty() {
return Ok(());
}
@@ -484,6 +484,10 @@ async fn verify_transactions<D>(
where
D: Database + Clone + Sync + Send + 'static,
{
if txs.is_empty() {
return Ok(());
}
let txs_ring_member_info =
batch_get_ring_member_info(txs.iter().map(|(tx, _)| tx), &hf, database).await?;


@@ -15,6 +15,7 @@ std = []
atomic = ["dep:crossbeam"]
asynch = ["dep:futures", "dep:rayon"]
constants = []
crypto = ["dep:curve25519-dalek"]
fs = ["dep:dirs"]
num = []
map = ["dep:monero-serai"]
@@ -22,6 +23,7 @@ time = ["dep:chrono", "std"]
thread = ["std", "dep:target_os_lib"]
[dependencies]
curve25519-dalek = { workspace = true, optional = true }
crossbeam = { workspace = true, optional = true }
chrono = { workspace = true, optional = true, features = ["std", "clock"] }
dirs = { workspace = true, optional = true }

helper/src/commitment.rs (new file)

@@ -0,0 +1,62 @@
use curve25519_dalek::constants::ED25519_BASEPOINT_POINT;
use std::{collections::HashMap, sync::OnceLock};
use curve25519_dalek::edwards::{CompressedEdwardsY, VartimeEdwardsPrecomputation};
use curve25519_dalek::traits::VartimePrecomputedMultiscalarMul;
use curve25519_dalek::{EdwardsPoint, Scalar};
use monero_serai::H;
static H_PRECOMP: OnceLock<VartimeEdwardsPrecomputation> = OnceLock::new();
static PRECOMPUTED_COMMITMENTS: OnceLock<HashMap<u64, EdwardsPoint>> = OnceLock::new();
#[rustfmt::skip]
const DECOMPOSED_AMOUNT: [u64; 172] = [
1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 20, 30, 40, 50, 60, 70, 80, 90,
100, 200, 300, 400, 500, 600, 700, 800, 900,
1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000,
10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000,
100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000, 900000,
1000000, 2000000, 3000000, 4000000, 5000000, 6000000, 7000000, 8000000, 9000000,
10000000, 20000000, 30000000, 40000000, 50000000, 60000000, 70000000, 80000000, 90000000,
100000000, 200000000, 300000000, 400000000, 500000000, 600000000, 700000000, 800000000, 900000000,
1000000000, 2000000000, 3000000000, 4000000000, 5000000000, 6000000000, 7000000000, 8000000000, 9000000000,
10000000000, 20000000000, 30000000000, 40000000000, 50000000000, 60000000000, 70000000000, 80000000000, 90000000000,
100000000000, 200000000000, 300000000000, 400000000000, 500000000000, 600000000000, 700000000000, 800000000000, 900000000000,
1000000000000, 2000000000000, 3000000000000, 4000000000000, 5000000000000, 6000000000000, 7000000000000, 8000000000000, 9000000000000,
10000000000000, 20000000000000, 30000000000000, 40000000000000, 50000000000000, 60000000000000, 70000000000000, 80000000000000, 90000000000000,
100000000000000, 200000000000000, 300000000000000, 400000000000000, 500000000000000, 600000000000000, 700000000000000, 800000000000000, 900000000000000,
1000000000000000, 2000000000000000, 3000000000000000, 4000000000000000, 5000000000000000, 6000000000000000, 7000000000000000, 8000000000000000, 9000000000000000,
10000000000000000, 20000000000000000, 30000000000000000, 40000000000000000, 50000000000000000, 60000000000000000, 70000000000000000, 80000000000000000, 90000000000000000,
100000000000000000, 200000000000000000, 300000000000000000, 400000000000000000, 500000000000000000, 600000000000000000, 700000000000000000, 800000000000000000, 900000000000000000,
1000000000000000000, 2000000000000000000, 3000000000000000000, 4000000000000000000, 5000000000000000000, 6000000000000000000, 7000000000000000000, 8000000000000000000, 9000000000000000000,
10000000000000000000
];
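// Note (editorial assumption, not stated in the commit): this table mirrors
// `DECOMPOSED_AMOUNT` in cuprate-consensus-rules; it appears to be duplicated
// here so `cuprate-helper` does not have to depend on the consensus crates.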
fn h_precomp() -> &'static VartimeEdwardsPrecomputation {
H_PRECOMP.get_or_init(|| VartimeEdwardsPrecomputation::new([H(), ED25519_BASEPOINT_POINT]))
}
fn precomputed_commitments() -> &'static HashMap<u64, EdwardsPoint> {
PRECOMPUTED_COMMITMENTS.get_or_init(|| {
DECOMPOSED_AMOUNT
.iter()
.map(|&amount| {
(
amount,
(ED25519_BASEPOINT_POINT + H() * Scalar::from(amount)),
)
})
.collect()
})
}
pub fn compute_zero_commitment(amount: u64) -> EdwardsPoint {
precomputed_commitments()
.get(&amount)
.copied()
.unwrap_or_else(|| {
h_precomp().vartime_multiscalar_mul([Scalar::from(amount), Scalar::from(1_u8)])
})
}
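// A minimal equivalence sketch (editorial, not part of this commit): both
// paths must produce the zero commitment C = 1*G + amount*H, whether `amount`
// hits the precomputed table or falls through to the vartime multiscalar
// fallback.
#[cfg(test)]
mod sketch {
    use super::*;

    #[test]
    fn zero_commitment_matches_naive() {
        // 10_000 is in `DECOMPOSED_AMOUNT` (table hit); 0 and 2_500_000_001
        // are not (fallback path).
        for amount in [0u64, 10_000, 2_500_000_001] {
            let naive = ED25519_BASEPOINT_POINT + H() * Scalar::from(amount);
            assert_eq!(compute_zero_commitment(amount), naive);
        }
    }
}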


@@ -1,7 +1,7 @@
#![doc = include_str!("../README.md")]
//---------------------------------------------------------------------------------------------------- Lints
#![allow(clippy::len_zero, clippy::type_complexity, clippy::module_inception)]
#![deny(nonstandard_style, deprecated, missing_docs, unused_mut)]
#![deny(nonstandard_style, deprecated, unused_mut)]
#![forbid(
unused_unsafe,
future_incompatible,
@@ -54,9 +54,10 @@ pub mod num;
#[cfg(feature = "map")]
pub mod map;
#[cfg(feature = "crypto")]
pub mod commitment;
#[cfg(feature = "thread")]
pub mod thread;
#[cfg(feature = "time")]
pub mod time;


@@ -147,7 +147,7 @@ async fn main() {
let (database_read, mut database_write) = cuprate_blockchain::service::init(
cuprate_blockchain::config::ConfigBuilder::new()
.fast()
.reader_threads(ReaderThreads::Number(4))
.reader_threads(ReaderThreads::Number(8))
.build(),
)
.unwrap();
@@ -240,32 +240,32 @@
entry.blocks.len()
);
for (block, txs) in entry.blocks {
let txs = if txs.is_empty() {
HashMap::new()
} else {
rayon_spawn_async(|| {
txs.into_par_iter()
.map(|tx| {
let tx = TransactionVerificationData::new(tx).unwrap();
(tx.tx_hash, tx)
})
.collect::<HashMap<_, _>>()
tracing::info!("Prepping {} blocks for verification", entry.blocks.len());
let VerifyBlockResponse::MainChainBatchPrepped(blocks) = block_verifier
.ready()
.await
.unwrap()
.call(VerifyBlockRequest::MainChainBatchPrepareBlocks {
blocks: entry.blocks,
})
.await
.unwrap()
else {
panic!()
};
for (block, txs) in blocks {
let VerifyBlockResponse::MainChain(block_info) = block_verifier
.ready()
.await
.unwrap()
.call(VerifyBlockRequest::MainChain {
block,
prepared_txs: txs,
})
.call(VerifyBlockRequest::MainChainPrepped { block, txs })
.await
.unwrap();
.unwrap()
else {
panic!()
};
let height = block_info.height;


@@ -25,7 +25,7 @@ cfg-if = { workspace = true }
# FIXME:
# We only need the `thread` feature if `service` is enabled.
# Figure out how to enable features of an already pulled in dependency conditionally.
cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] }
cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map", "crypto"] }
cuprate-types = { path = "../../types", features = ["blockchain"] }
curve25519-dalek = { workspace = true }
monero-pruning = { path = "../../pruning" }


@@ -57,8 +57,7 @@ impl Env for ConcreteEnv {
// <https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Paranoid>
// should we use that instead of Immediate?
SyncMode::Safe => redb::Durability::Immediate,
SyncMode::Async => redb::Durability::Eventual,
SyncMode::Fast => redb::Durability::None,
SyncMode::Async | SyncMode::Fast => redb::Durability::Eventual,
// SOMEDAY: dynamic syncs are not implemented.
SyncMode::FastThenSafe | SyncMode::Threshold(_) => unimplemented!(),
};


@@ -1,5 +1,6 @@
//! Blocks functions.
use std::time::Instant;
//---------------------------------------------------------------------------------------------------- Import
use bytemuck::TransparentWrapper;
use monero_serai::block::Block;
@@ -43,6 +44,8 @@ pub fn add_block(
block: &VerifiedBlockInformation,
tables: &mut impl TablesMut,
) -> Result<(), RuntimeError> {
let time = Instant::now();
//------------------------------------------------------ Check preconditions first
// Cast height to `u32` for storage (handled at top of function).
@@ -62,6 +65,7 @@
);
// Expensive checks - debug only.
/*
#[cfg(debug_assertions)]
{
assert_eq!(block.block.serialize(), block.block_blob);
@@ -72,6 +76,8 @@
}
}
*/
//------------------------------------------------------ Transaction / Outputs / Key Images
// Add the miner transaction first.
{
@@ -122,6 +128,8 @@
.block_heights_mut()
.put(&block.block_hash, &block.height)?;
println!("time to add block: {}", time.elapsed().as_nanos());
Ok(())
}


@@ -1,6 +1,7 @@
//! Output functions.
//---------------------------------------------------------------------------------------------------- Import
use cuprate_helper::commitment::compute_zero_commitment;
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, Scalar};
use monero_serai::{transaction::Timelock, H};
@@ -156,7 +157,7 @@ pub fn output_to_output_on_chain(
) -> Result<OutputOnChain, RuntimeError> {
// FIXME: implement lookup table for common values:
// <https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/ringct/rctOps.cpp#L322>
let commitment = ED25519_BASEPOINT_POINT + H() * Scalar::from(amount);
let commitment = compute_zero_commitment(amount);
let time_lock = if output
.output_flags


@@ -2,6 +2,7 @@
//---------------------------------------------------------------------------------------------------- Import
use bytemuck::TransparentWrapper;
use cuprate_helper::commitment::compute_zero_commitment;
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, Scalar};
use monero_serai::transaction::{Input, Timelock, Transaction};
@@ -125,10 +126,7 @@ pub fn add_tx(
// <https://github.com/Cuprate/cuprate/pull/102#discussion_r1559489302>
// FIXME: implement lookup table for common values:
// <https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/ringct/rctOps.cpp#L322>
let commitment = (ED25519_BASEPOINT_POINT
+ monero_serai::H() * Scalar::from(amount))
.compress()
.to_bytes();
let commitment = compute_zero_commitment(amount).compress().to_bytes();
add_rct_output(
&RctOutput {


@@ -1,6 +1,7 @@
//! Database writer thread definitions and logic.
//---------------------------------------------------------------------------------------------------- Import
use std::time::Instant;
use std::{
sync::Arc,
task::{Context, Poll},
@@ -221,11 +222,14 @@ impl DatabaseWriter {
/// [`BCWriteRequest::WriteBlock`].
#[inline]
fn write_block(env: &ConcreteEnv, block: &VerifiedBlockInformation) -> ResponseResult {
let time = Instant::now();
let env_inner = env.env_inner();
let tx_rw = env_inner.tx_rw()?;
let result = {
let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?;
println!("time to open table: {}", time.elapsed().as_nanos());
crate::ops::block::add_block(block, &mut tables_mut)
};