Mirror of https://github.com/Cuprate/cuprate.git, synced 2024-11-17 00:07:55 +00:00

Commit 3a52b346e1 (parent 2440ccbd8d): add tests to context sub-services + fix issues in other tests + fmt + clippy.

24 changed files with 636 additions and 262 deletions
@@ -57,3 +57,8 @@ dirs = {version="5.0", optional = true}
 # here to help cargo to pick a version - remove me
 syn = "2.0.37"
 
+
+[dev-dependencies]
+tokio = {version = "1", features = ["rt-multi-thread", "macros"]}
+proptest = "1"
+proptest-derive = "0.4.0"
@@ -2,14 +2,13 @@
 use std::path::Path;
 use std::{
     io::Read,
     ops::Range,
     path::PathBuf,
     sync::{Arc, RwLock},
 };
 
 use monero_serai::{block::Block, transaction::Transaction};
-use tower::{Service, ServiceExt};
+use tower::ServiceExt;
 use tracing::level_filters::LevelFilter;
 
 use cuprate_common::Network;
@@ -23,7 +22,7 @@ use monero_consensus::{
     VerifiedBlockInformation, VerifyBlockRequest, VerifyTxResponse,
 };
 
-const MAX_BLOCKS_IN_RANGE: u64 = 500;
+const MAX_BLOCKS_IN_RANGE: u64 = 300;
 const MAX_BLOCKS_HEADERS_IN_RANGE: u64 = 250;
 
 /// Calls for a batch of blocks, returning the response and the time it took.
@@ -45,6 +44,15 @@ fn simple_get_hf(height: u64) -> HardFork {
     }
 }
 
+fn get_hf_height(hf: &HardFork) -> u64 {
+    match hf {
+        HardFork::V1 => 0,
+        HardFork::V2 => 1009827,
+        HardFork::V3 => 1141317,
+        _ => todo!("rules past v3"),
+    }
+}
+
 async fn update_cache_and_context<Ctx>(
     cache: &RwLock<ScanningCache>,
     context_updater: &mut Ctx,
@@ -81,6 +89,7 @@ where
 /// Batches all transactions together when getting outs
 ///
 /// TODO: reduce the amount of parameters of this function
+#[allow(clippy::too_many_arguments)]
 async fn batch_txs_verify_blocks<Tx, Blk, Ctx>(
     cache: &RwLock<ScanningCache>,
     save_file: &Path,
@@ -144,50 +153,7 @@ where
-
-        update_cache_and_context(cache, context_updater, verified_block_info).await?;
-
-        if current_height + u64::try_from(block_id).unwrap() % 25000 == 0 {
-            tracing::info!("Saving cache to: {}", save_file.display());
-            cache.read().unwrap().save(save_file)?;
-        }
-    }
-
-    Ok(())
-}
-
-/// Batches only transactions per block together when getting outs
-///
-/// TODO: reduce the amount of parameters of this function
-async fn verify_blocks<Blk, Ctx>(
-    cache: &RwLock<ScanningCache>,
-    save_file: &Path,
-    txs: Vec<Vec<Transaction>>,
-    blocks: Vec<Block>,
-    block_verifier: &mut Blk,
-    context_updater: &mut Ctx,
-    current_height: u64,
-) -> Result<(), tower::BoxError>
-where
-    Blk: tower::Service<
-        VerifyBlockRequest,
-        Response = VerifiedBlockInformation,
-        Error = ConsensusError,
-    >,
-    Ctx: tower::Service<UpdateBlockchainCacheRequest, Response = (), Error = tower::BoxError>,
-{
-    for (block_id, (block, txs)) in blocks.into_iter().zip(txs.into_iter()).enumerate() {
-        let verified_block_info: VerifiedBlockInformation = block_verifier
-            .ready()
-            .await?
-            .call(VerifyBlockRequest::MainChainBatchSetupVerify(block, txs))
-            .await?;
-
-        tracing::info!(
-            "verified block: {}",
-            current_height + u64::try_from(block_id).unwrap()
-        );
 
         update_cache_and_context(cache, context_updater, verified_block_info).await?;
 
-        if current_height + u64::try_from(block_id).unwrap() % 25000 == 0 {
+        if (current_height + u64::try_from(block_id).unwrap()) % 25000 == 0 {
             tracing::info!("Saving cache to: {}", save_file.display());
             cache.read().unwrap().save(save_file)?;
         }
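The `if` change above is an operator-precedence fix: `%` binds tighter than `+` in Rust, so the unparenthesised condition tested `current_height + (block_id % 25000) == 0` instead of saving the cache every 25000 blocks. A standalone sketch (values are illustrative, not from the commit) showing the difference:

fn main() {
    let current_height: u64 = 20_000;
    let block_id: u64 = 5_000;

    // Unparenthesised: `%` is applied to `block_id` alone, so this is
    // current_height + (block_id % 25_000), which is almost never 0.
    assert_eq!(current_height + block_id % 25_000, 25_000);

    // Parenthesised, as in the fix: the whole sum is tested against the interval.
    assert_eq!((current_height + block_id) % 25_000, 0);
}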
@@ -200,7 +166,7 @@ async fn scan_chain<D>(
     cache: Arc<RwLock<ScanningCache>>,
     save_file: PathBuf,
     rpc_config: Arc<RwLock<RpcConfig>>,
-    mut database: D,
+    database: D,
 ) -> Result<(), tower::BoxError>
 where
     D: Database + Clone + Send + Sync + 'static,
@@ -259,7 +225,7 @@ where
         chain_height
     );
 
-    let (blocks, txs): (Vec<_>, Vec<_>) = blocks.into_iter().unzip();
+    let (mut blocks, mut txs): (Vec<_>, Vec<_>) = blocks.into_iter().unzip();
     let batch_len = u64::try_from(blocks.len()).unwrap();
 
     let hf_start_batch = simple_get_hf(current_height);
@@ -279,23 +245,46 @@ where
                 hf_start_batch,
             )
             .await?;
+            current_height += batch_len;
+            next_batch_start_height += batch_len;
         } else {
             tracing::warn!(
                 "Hard fork during batch, getting outputs per block this will take a while!"
             );
-            verify_blocks(
+            let end_hf_start = get_hf_height(&hf_end_batch);
+            let height_diff = (end_hf_start - current_height) as usize;
+
+            batch_txs_verify_blocks(
                 &cache,
                 &save_file,
-                txs,
-                blocks,
+                txs.drain(0..height_diff).collect(),
+                blocks.drain(0..height_diff).collect(),
+                &mut transaction_verifier,
                 &mut block_verifier,
                 &mut context_updater,
                 current_height,
+                hf_start_batch,
             )
             .await?;
+
+            current_height += height_diff as u64;
+            next_batch_start_height += height_diff as u64;
+
+            tracing::info!("Hard fork activating: {:?}", hf_end_batch);
+
+            batch_txs_verify_blocks(
+                &cache,
+                &save_file,
+                txs,
+                blocks,
+                &mut transaction_verifier,
+                &mut block_verifier,
+                &mut context_updater,
+                current_height,
+                hf_end_batch,
+            )
+            .await?;
+
+            current_height += batch_len - height_diff as u64;
+            next_batch_start_height += batch_len - height_diff as u64;
         }
-        current_height += batch_len;
-        next_batch_start_height += batch_len;
     }
 
     Ok(())
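The rewritten `else` branch splits a batch that straddles a hard fork: `drain(0..height_diff)` moves the blocks below the activation height out of the vectors (hence the new `mut` bindings in the earlier hunk) so the remainder can be verified under the new rules. A small standalone illustration of the drain semantics, with hypothetical heights:

fn main() {
    let current_height: u64 = 10;
    let end_hf_start: u64 = 13; // hypothetical activation height of the next fork
    let height_diff = (end_hf_start - current_height) as usize;

    let mut blocks = vec!["b10", "b11", "b12", "b13", "b14"];
    // Blocks still governed by the old rules are drained out first...
    let old_rules: Vec<_> = blocks.drain(0..height_diff).collect();
    assert_eq!(old_rules, ["b10", "b11", "b12"]);
    // ...leaving the blocks to be verified under the newly activated fork.
    assert_eq!(blocks, ["b13", "b14"]);
}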
@@ -102,7 +102,6 @@ where
             VerifyBlockRequest::MainChain(block, txs) => {
                 verify_main_chain_block(block, txs, context_svc, tx_verifier_svc).await
             }
             _ => todo!(),
         }
     }
     .boxed()
@@ -1,13 +1,7 @@
 use std::sync::{Arc, OnceLock};
 
-use crypto_bigint::{CheckedMul, U256};
-use futures::stream::{FuturesOrdered, StreamExt};
-use monero_serai::{
-    block::Block,
-    transaction::{Timelock, Transaction},
-};
+use monero_serai::block::Block;
 
-use crate::{helper::current_time, ConsensusError, Database, HardFork};
+use crate::{helper::current_time, ConsensusError};
 
 const BLOCK_SIZE_SANITY_LEEWAY: usize = 100;
 const BLOCK_FUTURE_TIME_LIMIT: u64 = 60 * 60 * 2;
@@ -21,8 +21,8 @@ use tower::{Service, ServiceExt};
 use crate::{helper::current_time, ConsensusError, Database, DatabaseRequest, DatabaseResponse};
 
 pub mod difficulty;
-mod hardforks;
-mod weight;
+pub mod hardforks;
+pub mod weight;
 
 pub use difficulty::DifficultyCacheConfig;
 pub use hardforks::{HardFork, HardForkConfig};
@@ -280,7 +280,7 @@ impl tower::Service<UpdateBlockchainCacheRequest> for BlockChainContextService {
     type Future =
         Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
 
-    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
         Poll::Ready(Ok(()))
     }
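The rename to `_` is an unused-variable cleanup: this service never registers a waker because it is unconditionally ready, so the task `Context` goes untouched. A minimal sketch of the pattern, assuming a tower-style service with no internal backpressure:

use std::task::{Context, Poll};

// An always-ready service ignores the task context entirely; naming the
// parameter `_` documents that instead of leaving an unused `cx` binding.
fn poll_ready(_: &mut Context<'_>) -> Poll<Result<(), tower::BoxError>> {
    Poll::Ready(Ok(()))
}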
@@ -299,13 +299,9 @@ impl tower::Service<UpdateBlockchainCacheRequest> for BlockChainContextService {
             already_generated_coins,
         } = internal_blockchain_context_lock.deref_mut();
 
-        difficulty_cache
-            .new_block(new.height, new.timestamp, new.cumulative_difficulty)
-            .await?;
+        difficulty_cache.new_block(new.height, new.timestamp, new.cumulative_difficulty);
 
-        weight_cache
-            .new_block(new.height, new.weight, new.long_term_weight)
-            .await?;
+        weight_cache.new_block(new.height, new.weight, new.long_term_weight);
 
         hardfork_state.new_block(new.vote, new.height).await?;
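With the `.await?` gone, both cache updates are plain synchronous calls: `new_block` on the difficulty and weight caches (see the signature changes further down) only mutates in-memory `VecDeque`s, so there is nothing to await and no error to return; only the hard-fork state keeps its async, fallible API. A toy stand-in showing the new call shape:

use std::collections::VecDeque;

// Simplified stand-in for the difficulty cache: pushing a block is pure
// in-memory bookkeeping, so the method is synchronous and infallible.
struct Cache {
    timestamps: VecDeque<u64>,
    last_accounted_height: u64,
}

impl Cache {
    fn new_block(&mut self, height: u64, timestamp: u64) {
        assert_eq!(self.last_accounted_height + 1, height);
        self.last_accounted_height += 1;
        self.timestamps.push_back(timestamp);
    }
}

fn main() {
    let mut cache = Cache { timestamps: VecDeque::new(), last_accounted_height: 0 };
    cache.new_block(1, 1_700_000_000); // no .await, no Result to unwrap
    assert_eq!(cache.timestamps.len(), 1);
}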
@@ -7,6 +7,9 @@ use crate::{
     helper::median, ConsensusError, Database, DatabaseRequest, DatabaseResponse, HardFork,
 };
 
+#[cfg(test)]
+mod tests;
+
 /// The amount of blocks we account for to calculate difficulty
 const DIFFICULTY_WINDOW: usize = 720;
 /// The proportion of blocks we remove from the [`DIFFICULTY_WINDOW`]. When the window
@@ -27,7 +30,7 @@ pub struct DifficultyCacheConfig {
 }
 
 impl DifficultyCacheConfig {
-    pub fn new(window: usize, cut: usize, lag: usize) -> DifficultyCacheConfig {
+    pub const fn new(window: usize, cut: usize, lag: usize) -> DifficultyCacheConfig {
         DifficultyCacheConfig { window, cut, lag }
     }
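Making `new` a `const fn` (here and for `HFInfo::new` and `BlockWeightsCacheConfig::new` below) is what lets the new test modules declare their configurations as `const` items. A minimal sketch using the window/cut/lag values from the difficulty tests:

struct DifficultyCacheConfig { window: usize, cut: usize, lag: usize }

impl DifficultyCacheConfig {
    // `const fn` can be evaluated in const context...
    const fn new(window: usize, cut: usize, lag: usize) -> DifficultyCacheConfig {
        DifficultyCacheConfig { window, cut, lag }
    }
}

// ...so a test fixture can be a plain `const` instead of a lazily built static.
const TEST_DIFFICULTY_CONFIG: DifficultyCacheConfig = DifficultyCacheConfig::new(72, 6, 2);

fn main() {
    assert_eq!(TEST_DIFFICULTY_CONFIG.window, 72);
}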
@@ -100,28 +103,23 @@ impl DifficultyCache {
         let (timestamps, cumulative_difficulties) =
             get_blocks_in_pow_info(database.clone(), block_start..chain_height).await?;
 
-        let mut diff = DifficultyCache {
+        tracing::info!(
+            "Current chain height: {}, accounting for {} blocks timestamps",
+            chain_height,
+            timestamps.len()
+        );
+
+        let diff = DifficultyCache {
             timestamps,
             cumulative_difficulties,
             last_accounted_height: chain_height - 1,
             config,
         };
 
-        tracing::info!(
-            "Current chain height: {}, accounting for {} blocks timestamps",
-            chain_height,
-            diff.timestamps.len()
-        );
-
         Ok(diff)
     }
 
-    pub async fn new_block(
-        &mut self,
-        height: u64,
-        timestamp: u64,
-        cumulative_difficulty: u128,
-    ) -> Result<(), ConsensusError> {
+    pub fn new_block(&mut self, height: u64, timestamp: u64, cumulative_difficulty: u128) {
         assert_eq!(self.last_accounted_height + 1, height);
         self.last_accounted_height += 1;
 
@@ -132,8 +130,6 @@ impl DifficultyCache {
             self.timestamps.pop_front();
             self.cumulative_difficulties.pop_front();
         }
-
-        Ok(())
     }
 
     /// Returns the required difficulty for the next block.
@@ -145,13 +141,16 @@ impl DifficultyCache {
         }
 
         let mut sorted_timestamps = self.timestamps.clone();
-        if sorted_timestamps.len() > DIFFICULTY_WINDOW {
-            sorted_timestamps.drain(DIFFICULTY_WINDOW..);
+        if sorted_timestamps.len() > self.config.window {
+            sorted_timestamps.drain(self.config.window..);
         };
         sorted_timestamps.make_contiguous().sort_unstable();
 
-        let (window_start, window_end) =
-            get_window_start_and_end(sorted_timestamps.len(), self.config.accounted_window_len());
+        let (window_start, window_end) = get_window_start_and_end(
+            sorted_timestamps.len(),
+            self.config.accounted_window_len(),
+            self.config.window,
+        );
 
         let mut time_span =
             u128::from(sorted_timestamps[window_end - 1] - sorted_timestamps[window_start]);
@@ -163,6 +162,7 @@ impl DifficultyCache {
             time_span = 1;
         }
 
+        // TODO: do checked operations here and unwrap so we don't silently overflow?
         (windowed_work * hf.block_time().as_secs() as u128 + time_span - 1) / time_span
     }
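The final expression (unchanged apart from the new TODO) is integer ceiling division via the `(x + d - 1) / d` idiom: the target cumulative work for the window divided by the observed time span, rounded up. A worked check with small numbers:

fn main() {
    let windowed_work: u128 = 1_000;
    let block_time: u128 = 120; // target seconds per block
    let time_span: u128 = 700;  // observed seconds across the window

    // (x + d - 1) / d rounds the quotient up instead of down.
    let next_difficulty = (windowed_work * block_time + time_span - 1) / time_span;
    assert_eq!(next_difficulty, 172); // 120_000 / 700 = 171.43..., rounded up
}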
@@ -203,9 +203,15 @@ impl DifficultyCache {
     }
 }
 
-fn get_window_start_and_end(window_len: usize, accounted_window: usize) -> (usize, usize) {
-    let window_len = if window_len > DIFFICULTY_WINDOW {
-        DIFFICULTY_WINDOW
+fn get_window_start_and_end(
+    window_len: usize,
+    accounted_window: usize,
+    window: usize,
+) -> (usize, usize) {
+    debug_assert!(window > accounted_window);
+
+    let window_len = if window_len > window {
+        window
     } else {
         window_len
     };
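Threading `window` through as a parameter makes the clamp depend only on the configured sizes, which is what lets the test config (window 72, cut 6) exercise it. Assuming the accounted window is `window - 2 * cut` (cutting outliers from both ends, as in Monero's difficulty algorithm) and the usual centred-cut formula for the start index (not shown in this hunk), a worked example with the test constants:

fn main() {
    let window = 72;
    let cut = 6;
    // Assumption: the accounted window removes `cut` entries from each end.
    let accounted_window = window - 2 * cut; // 60

    // The clamp above: never account for more than `window` timestamps.
    let available = 100;
    let window_len = if available > window { window } else { available }; // 72

    // Assumed centred cut: split the excess evenly around the middle.
    let window_start = (window_len - accounted_window + 1) / 2;
    let window_end = window_start + accounted_window;
    assert_eq!((window_start, window_end), (6, 66));
}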
consensus/src/context/difficulty/tests.rs (new file, 126 lines)
@@ -0,0 +1,126 @@
use std::collections::VecDeque;

use proptest::{arbitrary::any, prop_assert_eq, prop_compose, proptest};

use super::{DifficultyCache, DifficultyCacheConfig};
use crate::{helper::median, test_utils::mock_db::*, HardFork};

const TEST_WINDOW: usize = 72;
const TEST_CUT: usize = 6;
const TEST_LAG: usize = 2;

const TEST_TOTAL_ACCOUNTED_BLOCKS: usize = TEST_WINDOW + TEST_LAG;

const TEST_DIFFICULTY_CONFIG: DifficultyCacheConfig =
    DifficultyCacheConfig::new(TEST_WINDOW, TEST_CUT, TEST_LAG);

#[tokio::test]
async fn first_3_blocks_fixed_difficulty() -> Result<(), tower::BoxError> {
    let mut db_builder = DummyDatabaseBuilder::default();
    let genesis = DummyBlockExtendedHeader::default().with_difficulty_info(0, 1);
    db_builder.add_block(genesis);

    let mut difficulty_cache =
        DifficultyCache::init(TEST_DIFFICULTY_CONFIG, db_builder.finish()).await?;

    for height in 1..3 {
        assert_eq!(difficulty_cache.next_difficulty(&HardFork::V1), 1);
        difficulty_cache.new_block(height, 0, u128::MAX);
    }
    Ok(())
}

#[tokio::test]
async fn genesis_block_skipped() -> Result<(), tower::BoxError> {
    let mut db_builder = DummyDatabaseBuilder::default();
    let genesis = DummyBlockExtendedHeader::default().with_difficulty_info(0, 1);
    db_builder.add_block(genesis);
    let diff_cache = DifficultyCache::init(TEST_DIFFICULTY_CONFIG, db_builder.finish()).await?;
    assert!(diff_cache.cumulative_difficulties.is_empty());
    assert!(diff_cache.timestamps.is_empty());
    Ok(())
}

prop_compose! {
    /// Generates an arbitrary full difficulty cache.
    fn arb_full_difficulty_cache()(
        blocks in any::<[(u64, u64); TEST_TOTAL_ACCOUNTED_BLOCKS]>()
    ) -> DifficultyCache {
        let (timestamps, mut cumulative_difficulties): (Vec<_>, Vec<_>) = blocks.into_iter().unzip();
        cumulative_difficulties.sort_unstable();
        DifficultyCache {
            last_accounted_height: timestamps.len().try_into().unwrap(),
            config: TEST_DIFFICULTY_CONFIG,
            timestamps: timestamps.into(),
            // we generate cumulative_difficulties in range 0..u64::MAX as, if the generated
            // values are close to u128::MAX, it will cause overflows
            cumulative_difficulties: cumulative_difficulties.into_iter().map(u128::from).collect(),
        }
    }
}

proptest! {
    #[test]
    fn check_calculations_lag(
        mut diff_cache in arb_full_difficulty_cache(),
        timestamp in any::<u64>(),
        cumulative_difficulty in any::<u128>(),
        hf in any::<HardFork>()
    ) {
        // duplicate the cache and remove the lag
        let mut no_lag_cache = diff_cache.clone();
        no_lag_cache.config.lag = 0;

        for _ in 0..TEST_LAG {
            // now remove the blocks that are outside our window due to no lag
            no_lag_cache.timestamps.pop_front();
            no_lag_cache.cumulative_difficulties.pop_front();
        }
        // get the difficulty
        let next_diff_no_lag = no_lag_cache.next_difficulty(&hf);

        for _ in 0..TEST_LAG {
            // add new blocks to the lagged cache
            diff_cache.new_block(diff_cache.last_accounted_height + 1, timestamp, cumulative_difficulty);
        }
        // they both should now be the same
        prop_assert_eq!(diff_cache.next_difficulty(&hf), next_diff_no_lag)
    }

    #[test]
    fn next_difficulty_consistant(diff_cache in arb_full_difficulty_cache(), hf in any::<HardFork>()) {
        let first_call = diff_cache.next_difficulty(&hf);
        prop_assert_eq!(first_call, diff_cache.next_difficulty(&hf));
        prop_assert_eq!(first_call, diff_cache.next_difficulty(&hf));
        prop_assert_eq!(first_call, diff_cache.next_difficulty(&hf));
    }

    #[test]
    fn median_timestamp_adds_genesis(timestamps in any::<[u64; TEST_WINDOW - 1]>()) {
        let mut timestamps: VecDeque<u64> = timestamps.into();

        let diff_cache = DifficultyCache {
            last_accounted_height: (TEST_WINDOW - 1).try_into().unwrap(),
            config: TEST_DIFFICULTY_CONFIG,
            timestamps: timestamps.clone(),
            // we dont need cumulative_difficulties
            cumulative_difficulties: VecDeque::new(),
        };
        // add the genesis blocks timestamp (always 0)
        timestamps.push_front(0);
        timestamps.make_contiguous().sort_unstable();
        prop_assert_eq!(median(timestamps.make_contiguous()), diff_cache.median_timestamp(TEST_WINDOW).unwrap());
        // make sure adding the genesis block didn't persist
        prop_assert_eq!(diff_cache.timestamps.len(), TEST_WINDOW - 1);
    }

    #[test]
    fn window_size_kept_constant(mut diff_cache in arb_full_difficulty_cache(), new_blocks in any::<Vec<(u64, u128)>>()) {
        for (timestamp, cumulative_difficulty) in new_blocks.into_iter() {
            diff_cache.new_block(diff_cache.last_accounted_height + 1, timestamp, cumulative_difficulty);
            prop_assert_eq!(diff_cache.timestamps.len(), TEST_TOTAL_ACCOUNTED_BLOCKS);
            prop_assert_eq!(diff_cache.cumulative_difficulties.len(), TEST_TOTAL_ACCOUNTED_BLOCKS);
        }
    }
}
@@ -11,6 +11,9 @@ use tracing::instrument;
 
 use crate::{ConsensusError, Database, DatabaseRequest, DatabaseResponse};
 
+#[cfg(test)]
+mod tests;
+
 // https://cuprate.github.io/monero-docs/consensus_rules/hardforks.html#accepting-a-fork
 const DEFAULT_WINDOW_SIZE: u64 = 10080; // supermajority window check length - a week
 const BLOCK_TIME_V1: Duration = Duration::from_secs(60);
@@ -25,14 +28,14 @@ pub struct HFInfo {
     threshold: u64,
 }
 impl HFInfo {
-    pub fn new(height: u64, threshold: u64) -> HFInfo {
+    pub const fn new(height: u64, threshold: u64) -> HFInfo {
         HFInfo { height, threshold }
     }
 
     /// Returns the main-net hard-fork information.
     ///
     /// https://cuprate.github.io/monero-book/consensus_rules/hardforks.html#Mainnet-Hard-Forks
-    pub fn main_net() -> [HFInfo; NUMB_OF_HARD_FORKS] {
+    pub const fn main_net() -> [HFInfo; NUMB_OF_HARD_FORKS] {
         [
             HFInfo::new(0, 0),
             HFInfo::new(1009827, 0),
@@ -69,7 +72,7 @@ impl HardForkConfig {
         self.forks[*hf as usize - 1]
     }
 
-    pub fn main_net() -> HardForkConfig {
+    pub const fn main_net() -> HardForkConfig {
         Self {
             forks: HFInfo::main_net(),
             window: DEFAULT_WINDOW_SIZE,
@@ -79,6 +82,7 @@ impl HardForkConfig {
 
 /// An identifier for every hard-fork Monero has had.
 #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
+#[cfg_attr(test, derive(proptest_derive::Arbitrary))]
 #[repr(u8)]
 pub enum HardFork {
     V1 = 1,
consensus/src/context/hardforks/tests.rs (new file, 114 lines)
@@ -0,0 +1,114 @@
use std::convert::TryInto;

use proptest::{arbitrary::any, prop_assert_eq, prop_compose, proptest};

use super::{HFInfo, HFVotes, HardFork, HardForkConfig, HardForkState, NUMB_OF_HARD_FORKS};
use crate::test_utils::mock_db::*;

const TEST_WINDOW_SIZE: u64 = 25;

const TEST_HFS: [HFInfo; NUMB_OF_HARD_FORKS] = [
    HFInfo::new(0, 0),
    HFInfo::new(10, 0),
    HFInfo::new(20, 0),
    HFInfo::new(30, 0),
    HFInfo::new(40, 0),
    HFInfo::new(50, 0),
    HFInfo::new(60, 0),
    HFInfo::new(70, 0),
    HFInfo::new(80, 0),
    HFInfo::new(90, 0),
    HFInfo::new(100, 0),
    HFInfo::new(110, 0),
    HFInfo::new(120, 0),
    HFInfo::new(130, 0),
    HFInfo::new(140, 0),
    HFInfo::new(150, 0),
];

const TEST_HARD_FORK_CONFIG: HardForkConfig = HardForkConfig {
    window: TEST_WINDOW_SIZE,
    forks: TEST_HFS,
};

#[test]
fn next_hard_forks() {
    let mut prev = HardFork::V1;
    let mut next = HardFork::V2;
    for _ in 2..NUMB_OF_HARD_FORKS {
        assert!(prev < next);
        prev = next;
        next = next.next_fork().unwrap();
    }
}

#[test]
fn hard_forks_defined() {
    for fork in 1..=NUMB_OF_HARD_FORKS {
        HardFork::from_version(&fork.try_into().unwrap()).unwrap();
    }
}

#[tokio::test]
async fn hard_fork_set_depends_on_top_block() {
    let mut db_builder = DummyDatabaseBuilder::default();

    for _ in 0..TEST_WINDOW_SIZE {
        db_builder.add_block(
            DummyBlockExtendedHeader::default().with_hard_fork_info(HardFork::V13, HardFork::V16),
        );
    }
    db_builder.add_block(
        DummyBlockExtendedHeader::default().with_hard_fork_info(HardFork::V14, HardFork::V16),
    );

    let state = HardForkState::init(TEST_HARD_FORK_CONFIG, db_builder.finish())
        .await
        .unwrap();

    assert_eq!(state.current_hardfork, HardFork::V14);
}

prop_compose! {
    /// Generates an arbitrary full [`HFVotes`].
    fn arb_full_hf_votes()(
        // we can't use HardFork as for some reason it overflows the stack, so we use u8.
        votes in any::<[u8; TEST_WINDOW_SIZE as usize]>()
    ) -> HFVotes {
        let mut vote_count = HFVotes::new(TEST_WINDOW_SIZE as usize);
        for vote in votes {
            vote_count.add_vote_for_hf(&HardFork::from_vote(&(vote % 17)));
        }
        vote_count
    }
}

proptest! {
    #[test]
    fn hf_vote_counter_total_correct(hf_votes in arb_full_hf_votes()) {
        prop_assert_eq!(hf_votes.total_votes(), u64::try_from(hf_votes.vote_list.len()).unwrap());

        let mut votes = [0_u64; NUMB_OF_HARD_FORKS];
        for vote in hf_votes.vote_list.iter() {
            // manually go through the list of votes tallying
            votes[*vote as usize - 1] += 1;
        }

        prop_assert_eq!(votes, hf_votes.votes);
    }

    #[test]
    fn window_size_kept_constant(mut hf_votes in arb_full_hf_votes(), new_votes in any::<Vec<HardFork>>()) {
        for new_vote in new_votes.into_iter() {
            hf_votes.add_vote_for_hf(&new_vote);
            prop_assert_eq!(hf_votes.total_votes(), TEST_WINDOW_SIZE)
        }
    }

    #[test]
    fn votes_out_of_range(high_vote in (NUMB_OF_HARD_FORKS + 1).try_into().unwrap()..u8::MAX) {
        prop_assert_eq!(HardFork::from_vote(&0), HardFork::V1);
        prop_assert_eq!(HardFork::from_vote(&NUMB_OF_HARD_FORKS.try_into().unwrap()), HardFork::from_vote(&high_vote));
    }
}
@@ -21,6 +21,9 @@ use crate::{
     helper::median, ConsensusError, Database, DatabaseRequest, DatabaseResponse, HardFork,
 };
 
+#[cfg(test)]
+mod tests;
+
 const PENALTY_FREE_ZONE_1: usize = 20000;
 const PENALTY_FREE_ZONE_2: usize = 60000;
 const PENALTY_FREE_ZONE_5: usize = 300000;
@@ -28,12 +31,6 @@ const PENALTY_FREE_ZONE_5: usize = 300000;
 const SHORT_TERM_WINDOW: u64 = 100;
 const LONG_TERM_WINDOW: u64 = 100000;
 
-#[derive(Debug)]
-pub struct BlockWeightInfo {
-    pub block_weight: usize,
-    pub long_term_weight: usize,
-}
-
 /// Calculates the blocks weight.
 ///
 /// https://cuprate.github.io/monero-book/consensus_rules/blocks/weight_limit.html#blocks-weight
@@ -66,7 +63,7 @@ pub struct BlockWeightsCacheConfig {
 }
 
 impl BlockWeightsCacheConfig {
-    pub fn new(short_term_window: u64, long_term_window: u64) -> BlockWeightsCacheConfig {
+    pub const fn new(short_term_window: u64, long_term_window: u64) -> BlockWeightsCacheConfig {
         BlockWeightsCacheConfig {
             short_term_window,
             long_term_window,
@@ -151,12 +148,7 @@ impl BlockWeightsCache {
     ///
     /// The block_height **MUST** be one more than the last height the cache has
     /// seen.
-    pub async fn new_block(
-        &mut self,
-        block_height: u64,
-        block_weight: usize,
-        long_term_weight: usize,
-    ) -> Result<(), ConsensusError> {
+    pub fn new_block(&mut self, block_height: u64, block_weight: usize, long_term_weight: usize) {
         assert_eq!(self.tip_height + 1, block_height);
         self.tip_height += 1;
         tracing::debug!(
@@ -177,8 +169,6 @@ impl BlockWeightsCache {
         {
             self.short_term_block_weights.pop_front();
         }
-
-        Ok(())
     }
 
     /// Returns the median long term weight over the last [`LONG_TERM_WINDOW`] blocks, or custom amount of blocks in the config.
@@ -188,23 +178,22 @@ impl BlockWeightsCache {
         median(&sorted_long_term_weights)
     }
 
+    pub fn median_short_term_weight(&self) -> usize {
+        let mut sorted_short_term_block_weights: Vec<usize> =
+            self.short_term_block_weights.clone().into();
+        sorted_short_term_block_weights.sort_unstable();
+        median(&sorted_short_term_block_weights)
+    }
+
     /// Returns the effective median weight, used for block reward calculations and to calculate
     /// the block weight limit.
     ///
     /// See: https://cuprate.github.io/monero-book/consensus_rules/blocks/weight_limit.html#calculating-effective-median-weight
     pub fn effective_median_block_weight(&self, hf: &HardFork) -> usize {
-        let mut sorted_short_term_weights: Vec<usize> =
-            self.short_term_block_weights.clone().into();
-        sorted_short_term_weights.par_sort_unstable();
-
-        // TODO: this sometimes takes a while (>5s)
-        let mut sorted_long_term_weights: Vec<usize> = self.long_term_weights.clone().into();
-        sorted_long_term_weights.par_sort_unstable();
-
         calculate_effective_median_block_weight(
             hf,
-            &sorted_short_term_weights,
-            &sorted_long_term_weights,
+            self.median_short_term_weight(),
+            self.median_long_term_weight(),
         )
     }
@@ -213,10 +202,7 @@ impl BlockWeightsCache {
     /// https://cuprate.github.io/monero-book/consensus_rules/blocks/reward.html#calculating-block-reward
     pub fn median_for_block_reward(&self, hf: &HardFork) -> usize {
         if hf.in_range(&HardFork::V1, &HardFork::V12) {
-            let mut sorted_short_term_weights: Vec<usize> =
-                self.short_term_block_weights.clone().into();
-            sorted_short_term_weights.sort_unstable();
-            median(&sorted_short_term_weights)
+            self.median_short_term_weight()
         } else {
             self.effective_median_block_weight(hf)
         }
@@ -226,15 +212,15 @@ impl BlockWeightsCache {
 
 fn calculate_effective_median_block_weight(
     hf: &HardFork,
-    sorted_short_term_window: &[usize],
-    sorted_long_term_window: &[usize],
+    median_short_term_weight: usize,
+    median_long_term_weight: usize,
 ) -> usize {
     if hf.in_range(&HardFork::V1, &HardFork::V10) {
-        return median(sorted_short_term_window).max(penalty_free_zone(hf));
+        return median_short_term_weight.max(penalty_free_zone(hf));
     }
 
-    let long_term_median = median(sorted_long_term_window).max(PENALTY_FREE_ZONE_5);
-    let short_term_median = median(sorted_short_term_window);
+    let long_term_median = median_long_term_weight.max(PENALTY_FREE_ZONE_5);
+    let short_term_median = median_short_term_weight;
     let effective_median = if hf.in_range(&HardFork::V10, &HardFork::V15) {
         min(
             max(PENALTY_FREE_ZONE_5, short_term_median),
consensus/src/context/weight/tests.rs (new file, 53 lines)
@@ -0,0 +1,53 @@
use super::{BlockWeightsCache, BlockWeightsCacheConfig};
use crate::test_utils::mock_db::*;

const TEST_WEIGHT_CONFIG: BlockWeightsCacheConfig = BlockWeightsCacheConfig::new(100, 5000);

#[tokio::test]
async fn blocks_out_of_window_not_counted() -> Result<(), tower::BoxError> {
    let mut db_builder = DummyDatabaseBuilder::default();
    for weight in 1..=5000 {
        let block = DummyBlockExtendedHeader::default().with_weight_into(weight, weight);
        db_builder.add_block(block);
    }

    let mut weight_cache = BlockWeightsCache::init(TEST_WEIGHT_CONFIG, db_builder.finish()).await?;
    assert_eq!(weight_cache.median_long_term_weight(), 2500);
    assert_eq!(weight_cache.median_short_term_weight(), 4950);

    weight_cache.new_block(5000, 0, 0);
    weight_cache.new_block(5001, 0, 0);
    weight_cache.new_block(5002, 0, 0);

    // if blocks outside the window were not removed, adding the blocks above would have pulled the median down.
    assert_eq!(weight_cache.median_long_term_weight(), 2500);
    assert_eq!(weight_cache.median_short_term_weight(), 4950);

    Ok(())
}

#[tokio::test]
async fn weight_cache_calculates_correct_median() -> Result<(), tower::BoxError> {
    let mut db_builder = DummyDatabaseBuilder::default();
    // add an initial block as otherwise this will panic.
    let block = DummyBlockExtendedHeader::default().with_weight_into(0, 0);
    db_builder.add_block(block);

    let mut weight_cache = BlockWeightsCache::init(TEST_WEIGHT_CONFIG, db_builder.finish()).await?;

    for height in 1..=100 {
        weight_cache.new_block(height as u64, height, height);

        assert_eq!(weight_cache.median_short_term_weight(), height / 2);
        assert_eq!(weight_cache.median_long_term_weight(), height / 2);
    }

    for height in 101..=5000 {
        weight_cache.new_block(height as u64, height, height);

        assert_eq!(weight_cache.median_long_term_weight(), height / 2);
    }
    Ok(())
}

// TODO: proptests
@@ -1,28 +1,10 @@
 use std::{
-    io::{Cursor, Error, ErrorKind},
     ops::{Add, Div, Mul, Sub},
     time::{SystemTime, UNIX_EPOCH},
 };
 
-use curve25519_dalek::edwards::CompressedEdwardsY;
-
-/// Deserializes an object using the give `des` function, checking that all the bytes
-/// are consumed.
-pub(crate) fn size_check_decode<T>(
-    buf: &[u8],
-    des: impl Fn(&mut Cursor<&[u8]>) -> Result<T, Error>,
-) -> Result<T, Error> {
-    let mut cur = Cursor::new(buf);
-    let t = des(&mut cur)?;
-    if TryInto::<usize>::try_into(cur.position()).unwrap() != buf.len() {
-        return Err(Error::new(
-            ErrorKind::Other,
-            "Data not fully consumed while decoding!",
-        ));
-    }
-    Ok(t)
-}
-
 pub(crate) fn get_mid<T>(a: T, b: T) -> T
 where
     T: Add<Output = T> + Sub<Output = T> + Div<Output = T> + Mul<Output = T> + Copy + From<u8>,
@@ -33,6 +15,9 @@ where
     (a / two) + (b / two) + ((a - two * (a / two)) + (b - two * (b / two))) / two
 }
 
+/// Gets the median from a sorted slice.
+///
+/// If not sorted the output will be invalid.
 pub(crate) fn median<T>(array: &[T]) -> T
 where
     T: Add<Output = T> + Sub<Output = T> + Div<Output = T> + Mul<Output = T> + Copy + From<u8>,
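`get_mid` computes a midpoint as `a/2 + b/2` plus the carry from the two halved remainders, so the naive `(a + b) / 2` sum (which can overflow) is never formed; the kept `median` helper relies on it for even-length slices. A quick check, mirroring the generic helper for `u64`:

fn get_mid(a: u64, b: u64) -> u64 {
    let two: u64 = 2;
    // Same expression as the generic helper above, specialised to u64.
    (a / two) + (b / two) + ((a - two * (a / two)) + (b - two * (b / two))) / two
}

fn main() {
    assert_eq!(get_mid(3, 4), 3); // integer midpoint rounds down
    // (u64::MAX + u64::MAX) would overflow; the halved form stays exact.
    assert_eq!(get_mid(u64::MAX, u64::MAX), u64::MAX);
}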
@@ -6,6 +6,8 @@ pub mod genesis;
 mod helper;
 #[cfg(feature = "binaries")]
 pub mod rpc;
+#[cfg(test)]
+mod test_utils;
 pub mod transactions;
 
 pub use block::{VerifiedBlockInformation, VerifyBlockRequest};
@@ -88,7 +90,7 @@ pub struct OutputOnChain {
     height: u64,
     time_lock: monero_serai::transaction::Timelock,
     key: curve25519_dalek::EdwardsPoint,
-    mask: curve25519_dalek::EdwardsPoint,
+    //mask: curve25519_dalek::EdwardsPoint,
 }
 
 #[derive(Debug, Copy, Clone)]
@@ -115,7 +117,7 @@ pub enum DatabaseRequest {
 
     Outputs(HashMap<u64, HashSet<u64>>),
     NumberOutputsWithAmount(u64),
 
     CheckKIsNotSpent(HashSet<[u8; 32]>),
 
     #[cfg(feature = "binaries")]
@@ -95,7 +95,6 @@ pub fn init_rpc_load_balancer(
     let rpcs = tower::retry::Retry::new(Attempts(10), rpc_buffer);
 
     let discover = discover::RPCDiscover {
-        rpc: rpcs.clone(),
         initial_list: addresses,
         ok_channel: rpc_discoverer_tx,
         already_connected: Default::default(),
@@ -413,7 +412,7 @@ async fn get_outputs<R: RpcConnection>(
 struct OutputRes {
     height: u64,
     key: [u8; 32],
-    mask: [u8; 32],
+    // mask: [u8; 32],
     txid: [u8; 32],
 }
@@ -463,10 +462,13 @@ async fn get_outputs<R: RpcConnection>(
                         .unwrap()
                         .decompress()
                         .unwrap(),
+                    /*
                     mask: CompressedEdwardsY::from_slice(&out.mask)
                         .unwrap()
                         .decompress()
                         .unwrap(),
+
+                    */
                 },
             );
         }
@@ -498,7 +500,7 @@ async fn get_blocks_in_range<R: RpcConnection>(
     )
     .await?;
 
-    let blocks: Response = monero_epee_bin_serde::from_bytes(&res)?;
+    let blocks: Response = monero_epee_bin_serde::from_bytes(res)?;
 
     Ok(DatabaseResponse::BlockBatchInRange(
         blocks
@@ -15,7 +15,6 @@ use tower::{discover::Change, load::PeakEwma};
 use tracing::instrument;
 
 use super::{cache::ScanningCache, Rpc};
-use crate::Database;
 
 #[instrument(skip(cache))]
 async fn check_rpc(addr: String, cache: Arc<RwLock<ScanningCache>>) -> Option<Rpc<HttpRpc>> {
@@ -32,15 +31,14 @@ async fn check_rpc(addr: String, cache: Arc<RwLock<ScanningCache>>) -> Option<Rp
     Some(Rpc::new_http(addr, cache))
 }
 
-pub(crate) struct RPCDiscover<T> {
-    pub rpc: T,
+pub(crate) struct RPCDiscover {
     pub initial_list: Vec<String>,
     pub ok_channel: mpsc::Sender<Change<usize, PeakEwma<Rpc<HttpRpc>>>>,
     pub already_connected: HashSet<String>,
     pub cache: Arc<RwLock<ScanningCache>>,
 }
 
-impl<T: Database> RPCDiscover<T> {
+impl RPCDiscover {
     async fn found_rpc(&mut self, rpc: Rpc<HttpRpc>) -> Result<(), SendError> {
         //if self.already_connected.contains(&rpc.addr) {
         //    return Ok(());
consensus/src/test_utils.rs (new file, 1 line)
@@ -0,0 +1 @@
pub mod mock_db;

consensus/src/test_utils/mock_db.rs (new file, 151 lines)
@@ -0,0 +1,151 @@
use futures::FutureExt;
use std::{
    future::Future,
    pin::Pin,
    sync::{Arc, RwLock},
    task::{Context, Poll},
};

use cuprate_common::BlockID;
use tower::{BoxError, Service};

use crate::{DatabaseRequest, DatabaseResponse, ExtendedBlockHeader, HardFork};

#[derive(Default, Debug, Clone, Copy)]
pub struct DummyBlockExtendedHeader {
    pub version: Option<HardFork>,
    pub vote: Option<HardFork>,

    pub timestamp: Option<u64>,
    pub cumulative_difficulty: Option<u128>,

    pub block_weight: Option<usize>,
    pub long_term_weight: Option<usize>,
}

impl From<DummyBlockExtendedHeader> for ExtendedBlockHeader {
    fn from(value: DummyBlockExtendedHeader) -> Self {
        ExtendedBlockHeader {
            version: value.version.unwrap_or(HardFork::V1),
            vote: value.vote.unwrap_or(HardFork::V1),
            timestamp: value.timestamp.unwrap_or_default(),
            cumulative_difficulty: value.cumulative_difficulty.unwrap_or_default(),
            block_weight: value.block_weight.unwrap_or_default(),
            long_term_weight: value.long_term_weight.unwrap_or_default(),
        }
    }
}

impl DummyBlockExtendedHeader {
    pub fn with_weight_into(
        mut self,
        weight: usize,
        long_term_weight: usize,
    ) -> DummyBlockExtendedHeader {
        self.block_weight = Some(weight);
        self.long_term_weight = Some(long_term_weight);
        self
    }

    pub fn with_hard_fork_info(
        mut self,
        version: HardFork,
        vote: HardFork,
    ) -> DummyBlockExtendedHeader {
        self.vote = Some(vote);
        self.version = Some(version);
        self
    }

    pub fn with_difficulty_info(
        mut self,
        timestamp: u64,
        cumulative_difficulty: u128,
    ) -> DummyBlockExtendedHeader {
        self.timestamp = Some(timestamp);
        self.cumulative_difficulty = Some(cumulative_difficulty);
        self
    }
}

#[derive(Debug, Default)]
pub struct DummyDatabaseBuilder {
    blocks: Vec<DummyBlockExtendedHeader>,
}

impl DummyDatabaseBuilder {
    pub fn add_block(&mut self, block: DummyBlockExtendedHeader) {
        self.blocks.push(block);
    }

    pub fn finish(self) -> DummyDatabase {
        DummyDatabase {
            blocks: Arc::new(self.blocks.into()),
        }
    }
}

#[derive(Clone)]
pub struct DummyDatabase {
    blocks: Arc<RwLock<Vec<DummyBlockExtendedHeader>>>,
}

impl Service<DatabaseRequest> for DummyDatabase {
    type Response = DatabaseResponse;
    type Error = BoxError;
    type Future =
        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: DatabaseRequest) -> Self::Future {
        let blocks = self.blocks.clone();

        async move {
            Ok(match req {
                DatabaseRequest::BlockExtendedHeader(BlockID::Height(id)) => {
                    DatabaseResponse::BlockExtendedHeader(
                        blocks
                            .read()
                            .unwrap()
                            .get(usize::try_from(id).unwrap())
                            .copied()
                            .map(Into::into)
                            .ok_or("block not in database!")?,
                    )
                }
                DatabaseRequest::BlockHash(id) => {
                    let mut hash = [0; 32];
                    hash[0..8].copy_from_slice(&id.to_le_bytes());
                    DatabaseResponse::BlockHash(hash)
                }
                DatabaseRequest::BlockExtendedHeaderInRange(range) => {
                    DatabaseResponse::BlockExtendedHeaderInRange(
                        blocks
                            .read()
                            .unwrap()
                            .iter()
                            .take(usize::try_from(range.end).unwrap())
                            .skip(usize::try_from(range.start).unwrap())
                            .copied()
                            .map(Into::into)
                            .collect(),
                    )
                }
                DatabaseRequest::ChainHeight => {
                    let height = u64::try_from(blocks.read().unwrap().len()).unwrap();

                    let mut top_hash = [0; 32];
                    top_hash[0..8].copy_from_slice(&height.to_le_bytes());

                    DatabaseResponse::ChainHeight(height, top_hash)
                }
                DatabaseRequest::GeneratedCoins => DatabaseResponse::GeneratedCoins(0),
                _ => unimplemented!("the context svc should not need these requests!"),
            })
        }
        .boxed()
    }
}
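A sketch of how a test can drive this mock directly, assuming the crate's `Database` trait is satisfied by any tower `Service` over `DatabaseRequest` (the test files above go through `init(...)` instead; `oneshot` waits for readiness and issues a single call):

// Hypothetical test exercising the mock database directly.
#[tokio::test]
async fn mock_db_serves_chain_height() -> Result<(), tower::BoxError> {
    use tower::ServiceExt;

    let mut db_builder = DummyDatabaseBuilder::default();
    db_builder.add_block(DummyBlockExtendedHeader::default());

    match db_builder.finish().oneshot(DatabaseRequest::ChainHeight).await? {
        DatabaseResponse::ChainHeight(height, _top_hash) => assert_eq!(height, 1),
        _ => panic!("wrong response variant"),
    }
    Ok(())
}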
@@ -137,14 +137,9 @@ where
                 hf,
             )
             .boxed(),
-            VerifyTxRequest::BatchSetup {
-                txs,
-                hf
-            } => batch_setup_transactions(
-                database,
-                txs,
-                hf
-            ).boxed(),
+            VerifyTxRequest::BatchSetup { txs, hf } => {
+                batch_setup_transactions(database, txs, hf).boxed()
+            }
             VerifyTxRequest::BatchSetupVerifyBlock {
                 txs,
                 current_chain_height,
@@ -194,8 +189,8 @@ async fn batch_setup_transactions<D>(
     txs: Vec<Transaction>,
     hf: HardFork,
 ) -> Result<VerifyTxResponse, ConsensusError>
-    where
-        D: Database + Clone + Sync + Send + 'static,
+where
+    D: Database + Clone + Sync + Send + 'static,
 {
     // Move out of the async runtime and use rayon to parallelize the serialisation and hashing of the txs.
     let txs = tokio::task::spawn_blocking(|| {
@@ -203,8 +198,8 @@ async fn batch_setup_transactions<D>(
             .map(|tx| Ok(Arc::new(TransactionVerificationData::new(tx)?)))
             .collect::<Result<Vec<_>, ConsensusError>>()
     })
-        .await
-        .unwrap()?;
+    .await
+    .unwrap()?;
 
     set_missing_ring_members(database, &txs, &hf).await?;
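The reflowed `.await.unwrap()?` chain is rustfmt output, but the pattern it formats is the interesting part: serialising and hashing every transaction is CPU-bound, so it runs on tokio's blocking pool instead of stalling the async executor. A reduced sketch of the same shape (the closure body is a stand-in for the real hashing):

async fn batch_hash(txs: Vec<Vec<u8>>) -> Vec<[u8; 32]> {
    tokio::task::spawn_blocking(move || {
        // CPU-bound work runs on the blocking pool; the async task only
        // awaits the JoinHandle.
        txs.into_iter()
            .map(|tx| {
                let mut out = [0u8; 32]; // stand-in for a real tx hash
                out[0] = tx.len() as u8;
                out
            })
            .collect()
    })
    .await
    .expect("blocking task panicked") // mirrors the .unwrap() on the JoinError
}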
@@ -117,7 +117,7 @@ fn insert_ring_member_ids(
             ..
         } => output_ids
             .entry(amount.unwrap_or(0))
-            .or_insert_with(HashSet::new)
+            .or_default()
             .extend(get_absolute_offsets(key_offsets)?),
         // https://cuprate.github.io/monero-book/consensus_rules/transactions.html#input-type
         _ => {
@@ -132,11 +132,12 @@ fn insert_ring_member_ids(
 
 /// Represents the ring members of all the inputs.
 #[derive(Debug)]
+#[non_exhaustive]
 pub enum Rings {
     /// Legacy, pre-ringCT, rings.
     Legacy(Vec<Vec<EdwardsPoint>>),
-    /// TODO:
-    RingCT,
+    // TODO:
+    // RingCT,
 }
 
 impl Rings {
@@ -1,21 +1,9 @@
-use std::sync::Arc;
-
 use monero_serai::transaction::Transaction;
-use multiexp::BatchVerifier as CoreBatchVerifier;
 
 use crate::{transactions::ring::Rings, ConsensusError};
 
 mod ring_sigs;
 
-#[derive(Clone)]
-pub struct BatchVerifier {
-    batch_verifier: Arc<std::sync::Mutex<CoreBatchVerifier<u64, dalek_ff_group::EdwardsPoint>>>,
-}
-
-pub struct BatchVerifierHandle {
-    batch_verifier: BatchVerifier,
-}
-
 pub fn verify_signatures(tx: &Transaction, rings: &Rings) -> Result<(), ConsensusError> {
     match rings {
         Rings::Legacy(_) => ring_sigs::verify_inputs_signatures(
@@ -24,6 +12,6 @@ pub fn verify_signatures(tx: &Transaction, rings: &Rings) -> Result<(), Consensu
             rings,
             &tx.signature_hash(),
         ),
-        _ => panic!("TODO: RCT"),
+        //_ => panic!("TODO: RCT"),
     }
 }
@@ -47,8 +47,7 @@ pub fn verify_inputs_signatures(
                 }
                 Ok(())
             })?;
-        }
-        _ => panic!("tried to verify v1 tx with a non v1 ring"),
+        } // _ => panic!("tried to verify v1 tx with a non v1 ring"),
     }
     Ok(())
 }
@@ -9,23 +9,21 @@ use crate::NetworkAddress;
 pub(crate) struct TaggedNetworkAddress {
     #[serde(rename = "type")]
     ty: u8,
     #[serde(flatten)]
-    addr: RawNetworkAddress,
+    addr: AllFieldsNetworkAddress,
 }
 
 #[derive(Error, Debug)]
-#[error("Invalid network address tag")]
-pub(crate) struct InvalidNetworkAddressTag;
+#[error("Invalid network address")]
+pub(crate) struct InvalidNetworkAddress;
 
 impl TryFrom<TaggedNetworkAddress> for NetworkAddress {
-    type Error = InvalidNetworkAddressTag;
+    type Error = InvalidNetworkAddress;
 
     fn try_from(value: TaggedNetworkAddress) -> Result<Self, Self::Error> {
-        Ok(match (value.ty, value.addr) {
-            (1, RawNetworkAddress::IPv4(addr)) => NetworkAddress::IPv4(addr),
-            (2, RawNetworkAddress::IPv6(addr)) => NetworkAddress::IPv6(addr),
-            _ => return Err(InvalidNetworkAddressTag),
-        })
+        value
+            .addr
+            .try_into_network_address(value.ty)
+            .ok_or(InvalidNetworkAddress)
     }
 }
@@ -34,59 +32,45 @@ impl From<NetworkAddress> for TaggedNetworkAddress {
         match value {
             NetworkAddress::IPv4(addr) => TaggedNetworkAddress {
                 ty: 1,
-                addr: RawNetworkAddress::IPv4(addr),
+                addr: AllFieldsNetworkAddress {
+                    m_ip: Some(u32::from_be_bytes(addr.ip().octets())),
+                    m_port: Some(addr.port()),
+                    ..Default::default()
+                },
             },
             NetworkAddress::IPv6(addr) => TaggedNetworkAddress {
                 ty: 2,
-                addr: RawNetworkAddress::IPv6(addr),
+                addr: AllFieldsNetworkAddress {
+                    addr: Some(addr.ip().octets()),
+                    m_port: Some(addr.port()),
+                    ..Default::default()
+                },
             },
         }
     }
 }
 
-#[derive(Serialize, Deserialize)]
-#[serde(untagged)]
-pub(crate) enum RawNetworkAddress {
-    /// IPv4
-    IPv4(#[serde(with = "SocketAddrV4Def")] SocketAddrV4),
-    /// IPv6
-    IPv6(#[serde(with = "SocketAddrV6Def")] SocketAddrV6),
+#[derive(Serialize, Deserialize, Default)]
+struct AllFieldsNetworkAddress {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    m_ip: Option<u32>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    m_port: Option<u16>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    addr: Option<[u8; 16]>,
 }
 
-#[derive(Deserialize, Serialize)]
-#[serde(remote = "SocketAddrV4")]
-pub(crate) struct SocketAddrV4Def {
-    #[serde(getter = "get_ip_v4")]
-    m_ip: u32,
-    #[serde(getter = "SocketAddrV4::port")]
-    m_port: u16,
-}
-
-fn get_ip_v4(addr: &SocketAddrV4) -> u32 {
-    u32::from_be_bytes(addr.ip().octets())
-}
-
-impl From<SocketAddrV4Def> for SocketAddrV4 {
-    fn from(def: SocketAddrV4Def) -> SocketAddrV4 {
-        SocketAddrV4::new(Ipv4Addr::from(def.m_ip), def.m_port)
-    }
-}
-
-#[derive(Deserialize, Serialize)]
-#[serde(remote = "SocketAddrV6")]
-pub(crate) struct SocketAddrV6Def {
-    #[serde(getter = "get_ip_v6")]
-    addr: [u8; 16],
-    #[serde(getter = "SocketAddrV6::port")]
-    m_port: u16,
-}
-
-fn get_ip_v6(addr: &SocketAddrV6) -> [u8; 16] {
-    addr.ip().octets()
-}
-
-impl From<SocketAddrV6Def> for SocketAddrV6 {
-    fn from(def: SocketAddrV6Def) -> SocketAddrV6 {
-        SocketAddrV6::new(Ipv6Addr::from(def.addr), def.m_port, 0, 0)
+impl AllFieldsNetworkAddress {
+    fn try_into_network_address(self, ty: u8) -> Option<NetworkAddress> {
+        Some(match ty {
+            1 => NetworkAddress::IPv4(SocketAddrV4::new(Ipv4Addr::from(self.m_ip?), self.m_port?)),
+            2 => NetworkAddress::IPv6(SocketAddrV6::new(
+                Ipv6Addr::from(self.addr?),
+                self.m_port?,
+                0,
+                0,
+            )),
+            _ => return None,
+        })
     }
 }
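The rewrite decodes every address variant into one struct of optional fields and only interprets them after reading the `type` tag; `#[serde(flatten)]` keeps those fields in the same epee section as `ty`. A reduced sketch of the pattern, with serde_json standing in for the epee serializer:

use serde::Deserialize;

#[derive(Deserialize)]
struct Tagged {
    #[serde(rename = "type")]
    ty: u8,
    #[serde(flatten)]
    addr: AllFields,
}

#[derive(Deserialize, Default)]
struct AllFields {
    m_ip: Option<u32>,
    m_port: Option<u16>,
}

fn main() {
    // All fields live in one flat map; the tag decides which ones matter.
    let t: Tagged =
        serde_json::from_str(r#"{"type":1,"m_ip":16909060,"m_port":18080}"#).unwrap();
    assert_eq!(t.ty, 1);
    assert_eq!((t.addr.m_ip, t.addr.m_port), (Some(16909060), Some(18080)));
}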
@@ -101,7 +101,7 @@ mod tests {
             186, 15, 178, 70, 173, 170, 187, 31, 70, 50, 227, 11, 116, 111, 112, 95, 118, 101, 114,
             115, 105, 111, 110, 8, 1,
         ];
-        let handshake: HandshakeRequest = monero_epee_bin_serde::from_bytes(&bytes).unwrap();
+        let handshake: HandshakeRequest = monero_epee_bin_serde::from_bytes(bytes).unwrap();
         let basic_node_data = BasicNodeData {
             my_port: 0,
             network_id: [
@@ -130,7 +130,7 @@ mod tests {
 
         let encoded_bytes = monero_epee_bin_serde::to_bytes(&handshake).unwrap();
         let handshake_2: HandshakeRequest =
-            monero_epee_bin_serde::from_bytes(&encoded_bytes).unwrap();
+            monero_epee_bin_serde::from_bytes(encoded_bytes).unwrap();
 
         assert_eq!(handshake, handshake_2);
     }
@@ -906,7 +906,7 @@ mod tests {
             181, 216, 193, 135, 23, 186, 168, 207, 119, 86, 235, 11, 116, 111, 112, 95, 118, 101,
             114, 115, 105, 111, 110, 8, 16,
         ];
-        let handshake: HandshakeResponse = monero_epee_bin_serde::from_bytes(&bytes).unwrap();
+        let handshake: HandshakeResponse = monero_epee_bin_serde::from_bytes(bytes).unwrap();
 
         let basic_node_data = BasicNodeData {
             my_port: 18080,
@@ -937,7 +937,7 @@ mod tests {
 
         let encoded_bytes = monero_epee_bin_serde::to_bytes(&handshake).unwrap();
         let handshake_2: HandshakeResponse =
-            monero_epee_bin_serde::from_bytes(&encoded_bytes).unwrap();
+            monero_epee_bin_serde::from_bytes(encoded_bytes).unwrap();
 
         assert_eq!(handshake, handshake_2);
     }
@@ -19,6 +19,7 @@
 //! admin messages.
 
 use serde::{Deserialize, Serialize};
+use serde_bytes::ByteBuf;
 
 use super::common::BlockCompleteEntry;
 use crate::serde_helpers::*;
@@ -36,13 +37,13 @@ pub struct NewBlock {
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct NewTransactions {
     /// Tx Blobs
-    pub txs: Vec<Vec<u8>>,
+    pub txs: Vec<ByteBuf>,
     /// Dandelionpp true if fluff - backwards compatible mode is fluff
     #[serde(default = "default_true")]
     pub dandelionpp_fluff: bool,
     /// Padding
     #[serde(rename = "_")]
-    pub padding: Vec<u8>,
+    pub padding: ByteBuf,
 }
 
 /// A Request For Blocks
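The switch to `ByteBuf` matters because serde treats `Vec<u8>` as a generic sequence of integers, while `serde_bytes::ByteBuf` is (de)serialized as a single byte string, which is the epee wire type the tx blobs and padding actually use. A sketch of the changed fields in isolation:

use serde::{Deserialize, Serialize};
use serde_bytes::ByteBuf;

#[derive(Serialize, Deserialize)]
struct NewTransactionsSketch {
    // A sequence of byte strings (the epee type for tx blobs);
    // Vec<Vec<u8>> would instead be a sequence of integer sequences.
    txs: Vec<ByteBuf>,
    #[serde(rename = "_")]
    padding: ByteBuf,
}

fn main() {
    let msg = NewTransactionsSketch {
        txs: vec![ByteBuf::from(vec![0xde, 0xad]), ByteBuf::from(vec![0xbe, 0xef])],
        padding: ByteBuf::from(Vec::new()),
    };
    // Any serializer that distinguishes bytes from sequences (such as
    // monero-epee-bin-serde) now emits real byte strings for these fields.
    assert_eq!(msg.txs.len(), 2);
}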
@@ -669,19 +670,13 @@ mod tests {
             248, 248, 91, 110, 107, 144, 12, 175, 253, 21, 121, 28,
         ];
 
-        let now = std::time::Instant::now();
-        for _ in 0..1000 {
-            let _new_transactions: NewTransactions = epee_encoding::from_bytes(&bytes).unwrap();
-        }
-        println!("in: {}ms", now.elapsed().as_millis());
-
-        let new_transactions: NewTransactions = epee_encoding::from_bytes(&bytes).unwrap();
+        let new_transactions: NewTransactions = monero_epee_bin_serde::from_bytes(bytes).unwrap();
 
         assert_eq!(4, new_transactions.txs.len());
 
-        let encoded_bytes = epee_encoding::to_bytes(&new_transactions).unwrap();
+        let encoded_bytes = monero_epee_bin_serde::to_bytes(&new_transactions).unwrap();
         let new_transactions_2: NewTransactions =
-            epee_encoding::from_bytes(&encoded_bytes).unwrap();
+            monero_epee_bin_serde::from_bytes(encoded_bytes).unwrap();
 
         assert_eq!(new_transactions, new_transactions_2);
     }
@@ -1027,10 +1022,11 @@ mod tests {
             101, 110, 116, 95, 98, 108, 111, 99, 107, 99, 104, 97, 105, 110, 95, 104, 101, 105,
             103, 104, 116, 5, 209, 45, 42, 0, 0, 0, 0, 0,
         ];
-        let fluffy_block: NewFluffyBlock = epee_encoding::from_bytes(&bytes).unwrap();
+        let fluffy_block: NewFluffyBlock = monero_epee_bin_serde::from_bytes(bytes).unwrap();
 
-        let encoded_bytes = epee_encoding::to_bytes(&fluffy_block).unwrap();
-        let fluffy_block_2: NewFluffyBlock = epee_encoding::from_bytes(&encoded_bytes).unwrap();
+        let encoded_bytes = monero_epee_bin_serde::to_bytes(&fluffy_block).unwrap();
+        let fluffy_block_2: NewFluffyBlock =
+            monero_epee_bin_serde::from_bytes(encoded_bytes).unwrap();
 
         assert_eq!(fluffy_block, fluffy_block_2);
     }