Make RX VM an option for calculate_pow_hash

This means we don't have to init the RandomX dataset if it's not needed.
Boog900 2024-01-09 22:39:29 +00:00
parent 730bc8fb42
commit 7cf7ea1693
6 changed files with 129 additions and 54 deletions

View file

@@ -67,9 +67,11 @@ pub fn randomx_seed_height(height: u64) -> u64 {
 
 /// Calculates the POW hash of this block.
 ///
+/// `randomx_vm` must be [`Some`] after hf 12.
+///
 /// ref: https://monero-book.cuprate.org/consensus_rules/blocks.html#pow-function
 pub fn calculate_pow_hash<R: RandomX>(
-    randomx_vm: &R,
+    randomx_vm: Option<&R>,
     buf: &[u8],
     height: u64,
     hf: &HardFork,
@@ -88,6 +90,7 @@ pub fn calculate_pow_hash<R: RandomX>(
         cryptonight_hash_r(buf, height)
     } else {
         randomx_vm
+            .expect("RandomX VM needed from hf 12")
             .calculate_hash(buf)
             .map_err(|_| BlockError::POWInvalid)?
     })
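A rough caller-side sketch of the new contract (only `calculate_pow_hash`, `RandomX`, `HardFork` and `BlockError` are taken from this diff; the helper name and the exact return type are assumptions): pre-RandomX blocks can now be hashed with `None`, so no dataset or VM ever has to be built for them, while hf 12+ callers must pass `Some`.

// Hypothetical helper, not part of this commit.
fn pow_hash_for<R: RandomX>(
    rx_vm: Option<&R>, // `None` is fine for pre-hf-12 blocks
    block_blob: &[u8],
    height: u64,
    hf: &HardFork,
) -> Result<[u8; 32], BlockError> {
    // Below hf 12 the CryptoNight variants run and the `Option` is never
    // unwrapped; from hf 12 `calculate_pow_hash` expects `Some`.
    calculate_pow_hash(rx_vm, block_blob, height, hf)
}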

View file

@@ -129,10 +129,20 @@ fn check_time_lock(time_lock: &Timelock, chain_height: u64) -> Result<(), MinerT
 /// Sums the outputs checking for overflow.
 ///
 /// ref: https://monero-book.cuprate.org/consensus_rules/blocks/miner_tx.html#output-amounts
-fn sum_outputs(outputs: &[Output], hf: &HardFork) -> Result<u64, MinerTxError> {
+/// && https://monero-book.cuprate.org/consensus_rules/blocks/miner_tx.html#zero-amount-v1-output
+fn sum_outputs(
+    outputs: &[Output],
+    hf: &HardFork,
+    tx_version: &TxVersion,
+) -> Result<u64, MinerTxError> {
     let mut sum: u64 = 0;
     for out in outputs {
         let amt = out.amount.unwrap_or(0);
+
+        if tx_version == &TxVersion::RingSignatures && amt == 0 {
+            return Err(MinerTxError::OutputAmountIncorrect);
+        }
+
         if hf == &HardFork::V3 && !is_decomposed_amount(&amt) {
             return Err(MinerTxError::OutputNotDecomposed);
         }
@@ -193,7 +203,7 @@ pub fn check_miner_tx(
     check_output_types(&tx.prefix.outputs, hf).map_err(|_| MinerTxError::InvalidOutputType)?;
 
     let reward = calculate_block_reward(block_weight, median_bw, already_generated_coins, hf);
-    let total_outs = sum_outputs(&tx.prefix.outputs, hf)?;
+    let total_outs = sum_outputs(&tx.prefix.outputs, hf, &tx_version)?;
 
     check_total_output_amt(total_outs, reward, total_fees, hf)
 }
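A standalone sketch of the rule the new `tx_version` check enforces (hypothetical helper, not the repo's `sum_outputs`): in a v1 (RingSignatures) miner transaction every output must carry a non-zero amount, and the running total must not overflow a `u64`.

// Hypothetical, self-contained version of the v1 miner-tx output rule.
fn sum_v1_miner_outputs(amounts: &[u64]) -> Result<u64, &'static str> {
    let mut sum: u64 = 0;
    for &amt in amounts {
        if amt == 0 {
            return Err("zero-amount output in a v1 miner tx");
        }
        sum = sum
            .checked_add(amt)
            .ok_or("miner tx output sum overflows u64")?;
    }
    Ok(sum)
}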

View file

@@ -36,7 +36,8 @@ use monero_consensus::{blocks::randomx_seed_height, HardFork};
 mod tx_pool;
 
-const MAX_BLOCKS_IN_RANGE: u64 = 200;
+const MAX_BLOCKS_IN_RANGE: u64 = 1000;
+const BATCHES_IN_REQUEST: u64 = 3;
 const MAX_BLOCKS_HEADERS_IN_RANGE: u64 = 1000;
 
 /// Calls for a batch of blocks, returning the response and the time it took.
@@ -100,19 +101,21 @@ where
     D::Future: Send + 'static,
 {
     let mut next_fut = tokio::spawn(call_batch(
-        start_height..(start_height + (MAX_BLOCKS_IN_RANGE * 4)).min(chain_height),
+        start_height..(start_height + (MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST)).min(chain_height),
         database.clone(),
     ));
 
     for next_batch_start in (start_height..chain_height)
-        .step_by((MAX_BLOCKS_IN_RANGE * 4) as usize)
+        .step_by((MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST) as usize)
        .skip(1)
     {
         // Call the next batch while we handle this batch.
         let current_fut = std::mem::replace(
             &mut next_fut,
             tokio::spawn(call_batch(
-                next_batch_start..(next_batch_start + (MAX_BLOCKS_IN_RANGE * 4)).min(chain_height),
+                next_batch_start
+                    ..(next_batch_start + (MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST))
+                        .min(chain_height),
                 database.clone(),
             )),
         );
@@ -123,7 +126,7 @@ where
         tracing::info!(
             "Got batch: {:?}, chain height: {}",
-            (next_batch_start - (MAX_BLOCKS_IN_RANGE * 4))..(next_batch_start),
+            (next_batch_start - (MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST))..(next_batch_start),
             chain_height
         );
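The batching arithmetic above is simple enough to show on its own. A hypothetical helper (the real code drives these ranges through `call_batch` and `tokio::spawn`): each request now covers `MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST` blocks, clamped to the chain height.

const MAX_BLOCKS_IN_RANGE: u64 = 1000;
const BATCHES_IN_REQUEST: u64 = 3;

// Hypothetical helper listing the ranges the loop above walks through.
fn batch_ranges(start_height: u64, chain_height: u64) -> Vec<std::ops::Range<u64>> {
    let step = MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST;
    (start_height..chain_height)
        .step_by(step as usize)
        .map(|batch_start| batch_start..(batch_start + step).min(chain_height))
        .collect()
}

// e.g. batch_ranges(0, 7_000) == vec![0..3_000, 3_000..6_000, 6_000..7_000]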
@@ -223,50 +226,62 @@ where
 
     tokio::spawn(async move {
         while let Some(blocks) = incoming_blocks.next().await {
-            let unwrapped_rx_vms = randomx_vms.as_mut().unwrap();
-
-            let blocks = rayon_spawn_async(move || {
-                blocks
-                    .into_iter()
-                    .map(move |block| PrePreparedBlockExPOW::new(block).unwrap())
-                    .collect::<Vec<_>>()
-            })
-            .await;
-
-            let seeds_needed = blocks
-                .iter()
-                .map(|block| {
-                    rx_seed_cache.new_block(block.block.number() as u64, &block.block_hash);
-                    randomx_seed_height(block.block.number() as u64)
-                })
-                .collect::<HashSet<_>>();
-
-            unwrapped_rx_vms.retain(|seed_height, _| seeds_needed.contains(seed_height));
-
-            for seed_height in seeds_needed {
-                unwrapped_rx_vms.entry(seed_height).or_insert_with(|| {
-                    RandomXVM::new(rx_seed_cache.get_seeds_hash(seed_height)).unwrap()
-                });
-            }
-
-            let arc_rx_vms = Arc::new(randomx_vms.take().unwrap());
-            let cloned_arc_rx_vms = arc_rx_vms.clone();
-
-            let blocks = rayon_spawn_async(move || {
-                blocks
-                    .into_iter()
-                    .map(move |block| {
-                        let rx_vm = arc_rx_vms
-                            .get(&randomx_seed_height(block.block.number() as u64))
-                            .unwrap();
-                        PrePreparedBlock::new(block, rx_vm).unwrap()
-                    })
-                    .collect::<Vec<_>>()
-            })
-            .await;
-
-            randomx_vms = Some(Arc::into_inner(cloned_arc_rx_vms).unwrap());
-
-            prepped_blocks_tx.send(blocks).await.unwrap();
+            if blocks.last().unwrap().header.major_version >= 12 {
+                let unwrapped_rx_vms = randomx_vms.as_mut().unwrap();
+
+                let blocks = rayon_spawn_async(move || {
+                    blocks
+                        .into_iter()
+                        .map(move |block| PrePreparedBlockExPOW::new(block).unwrap())
+                        .collect::<Vec<_>>()
+                })
+                .await;
+
+                let seeds_needed = blocks
+                    .iter()
+                    .map(|block| {
+                        rx_seed_cache.new_block(block.block.number() as u64, &block.block_hash);
+                        randomx_seed_height(block.block.number() as u64)
+                    })
+                    .collect::<HashSet<_>>();
+
+                unwrapped_rx_vms.retain(|seed_height, _| seeds_needed.contains(seed_height));
+
+                for seed_height in seeds_needed {
+                    unwrapped_rx_vms.entry(seed_height).or_insert_with(|| {
+                        RandomXVM::new(rx_seed_cache.get_seeds_hash(seed_height)).unwrap()
+                    });
+                }
+
+                let arc_rx_vms = Arc::new(randomx_vms.take().unwrap());
+                let cloned_arc_rx_vms = arc_rx_vms.clone();
+
+                let blocks = rayon_spawn_async(move || {
+                    blocks
+                        .into_iter()
+                        .map(move |block| {
+                            let rx_vm = arc_rx_vms
+                                .get(&randomx_seed_height(block.block.number() as u64))
+                                .unwrap();
+                            PrePreparedBlock::new_rx(block, rx_vm).unwrap()
+                        })
+                        .collect::<Vec<_>>()
+                })
+                .await;
+
+                randomx_vms = Some(Arc::into_inner(cloned_arc_rx_vms).unwrap());
+
+                prepped_blocks_tx.send(blocks).await.unwrap();
+            } else {
+                let blocks = rayon_spawn_async(move || {
+                    blocks
+                        .into_iter()
+                        .map(move |block| PrePreparedBlock::new(block).unwrap())
+                        .collect::<Vec<_>>()
+                })
+                .await;
+
+                prepped_blocks_tx.send(blocks).await.unwrap();
+            }
         }
     });

View file

@@ -68,7 +68,44 @@ pub struct PrePreparedBlock {
 }
 
 impl PrePreparedBlock {
-    pub fn new<R: RandomX>(
+    pub fn new(block: Block) -> Result<PrePreparedBlock, ConsensusError> {
+        struct DummyRX;
+
+        impl RandomX for DummyRX {
+            type Error = ();
+            fn calculate_hash(&self, _: &[u8]) -> Result<[u8; 32], Self::Error> {
+                panic!("DummyRX cant calculate hash")
+            }
+        }
+
+        let (hf_version, hf_vote) =
+            HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?;
+
+        let Some(Input::Gen(height)) = block.miner_tx.prefix.inputs.first() else {
+            Err(ConsensusError::Block(BlockError::MinerTxError(
+                MinerTxError::InputNotOfTypeGen,
+            )))?
+        };
+
+        Ok(PrePreparedBlock {
+            block_blob: block.serialize(),
+            hf_vote,
+            hf_version,
+
+            block_hash: block.hash(),
+            pow_hash: calculate_pow_hash::<DummyRX>(
+                None,
+                &block.serialize_hashable(),
+                *height,
+                &hf_version,
+            )?,
+
+            miner_tx_weight: block.miner_tx.weight(),
+            block,
+        })
+    }
+
+    pub fn new_rx<R: RandomX>(
         block: PrePreparedBlockExPOW,
         randomx_vm: &R,
     ) -> Result<PrePreparedBlock, ConsensusError> {
@@ -85,7 +122,7 @@ impl PrePreparedBlock {
             block_hash: block.block_hash,
             pow_hash: calculate_pow_hash(
-                randomx_vm,
+                Some(randomx_vm),
                 &block.block.serialize_hashable(),
                 *height,
                 &block.hf_version,
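The `DummyRX` trick above is a common way to call a generic function with `None`: a zero-sized type implements the trait purely so the type parameter can be named, and its methods are never reached. A minimal self-contained sketch of the pattern (the trait is redeclared here only so the example compiles on its own; it mirrors, but is not, the repo's `RandomX` trait):

trait RandomX {
    type Error;
    fn calculate_hash(&self, buf: &[u8]) -> Result<[u8; 32], Self::Error>;
}

// Zero-sized stand-in: exists only so `R` can be named when passing `None`.
struct DummyRX;

impl RandomX for DummyRX {
    type Error = ();
    fn calculate_hash(&self, _: &[u8]) -> Result<[u8; 32], Self::Error> {
        unreachable!("DummyRX is never actually asked to hash")
    }
}

fn pow<R: RandomX>(vm: Option<&R>, buf: &[u8]) -> [u8; 32] {
    match vm {
        Some(vm) => vm.calculate_hash(buf).unwrap_or([0; 32]),
        None => [0; 32], // stand-in for the CryptoNight path
    }
}

fn main() {
    // The turbofish pins `R` when there is no value to infer it from.
    let _ = pow::<DummyRX>(None, b"block blob");
}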

View file

@@ -61,8 +61,11 @@ impl RandomXSeed {
                }
            }
 
-            self.seeds.pop_back();
             self.seeds.push_front((height, *hash));
+
+            if self.seeds.len() > RX_SEEDS_CACHED {
+                self.seeds.pop_back();
+            }
         }
     }
 }
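The behavioural change here: the old code popped the oldest seed unconditionally before every insert, so the cache could never hold more seeds than it started with; the new code inserts first and only evicts once the deque is over capacity. A standalone sketch of the intended behaviour (the `RX_SEEDS_CACHED` value and the element type are assumptions):

use std::collections::VecDeque;

// Assumed value; the repo defines its own RX_SEEDS_CACHED.
const RX_SEEDS_CACHED: usize = 2;

// Newest seed goes to the front; evict the oldest only once over capacity.
fn insert_seed(seeds: &mut VecDeque<(u64, [u8; 32])>, height: u64, hash: [u8; 32]) {
    seeds.push_front((height, hash));
    if seeds.len() > RX_SEEDS_CACHED {
        seeds.pop_back();
    }
}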

View file

@@ -1,10 +1,12 @@
 extern crate cc;
 
+use std::env;
+
 use cc::Build;
 
 fn main() {
-    Build::new()
-        .include("c")
+    let mut cfg = Build::new();
+    cfg.include("c")
         .file("c/aesb.c")
         .file("c/blake256.c")
         .file("c/groestl.c")
@@ -20,8 +22,13 @@ fn main() {
         .file("c/slow-hash.c")
         .file("c/CryptonightR_JIT.c")
         .file("c/CryptonightR_template.S")
-        .flag("-maes")
         .flag("-O3")
-        .flag("-fexceptions")
-        .compile("cryptonight")
+        .flag("-fexceptions");
+
+    let target = env::var("TARGET").unwrap();
+    if target.contains("x86_64") {
+        cfg.flag("-maes").flag("-msse2");
+    }
+
+    cfg.compile("cryptonight")
 }