keep track of blockchain context validity internally.

parent e1eaaf80d9
commit 34bb293f95

11 changed files with 216 additions and 104 deletions
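The change in a nutshell: every `BlockChainContext` handed out now carries a child `CancellationToken`; when a new block updates the internal caches, the service cancels and replaces the parent token, so every previously issued context can report itself as stale. A minimal, standalone sketch of that pattern (illustrative names only, not Cuprate's actual service wiring), assuming only the `tokio-util` crate:

    use tokio_util::sync::CancellationToken;

    struct ContextHandle {
        validity_token: CancellationToken,
    }

    impl ContextHandle {
        fn is_still_valid(&self) -> bool {
            !self.validity_token.is_cancelled()
        }
    }

    fn main() {
        // The service keeps the parent token internally.
        let mut current_validity_token = CancellationToken::new();

        // Each context handed out gets a child of the current parent.
        let handle = ContextHandle {
            validity_token: current_validity_token.child_token(),
        };
        assert!(handle.is_still_valid());

        // On a new block: cancel the old parent (invalidating every child)
        // and install a fresh token for future contexts.
        std::mem::replace(&mut current_validity_token, CancellationToken::new()).cancel();
        assert!(!handle.is_still_valid());
    }
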
@@ -45,6 +45,7 @@ cryptonight-cuprate = {path = "../cryptonight"}
 rayon = "1"
 tokio = "1"
+tokio-util = "0.7"

 # used in binaries
 monero-wire = {path="../net/monero-wire", optional = true}

@@ -122,11 +122,14 @@ where
     Tx: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ConsensusError>,
 {
     tracing::debug!("getting blockchain context");
-    let context = context_svc
+    let checked_context = context_svc
         .oneshot(BlockChainContextRequest)
         .await
         .map_err(Into::<ConsensusError>::into)?;

+    // TODO: should we unwrap here, we did just get the data so it should be ok.
+    let context = checked_context.blockchain_context().unwrap();
+
     tracing::debug!("got blockchain context: {:?}", context);

     let block_weight = block.miner_tx.weight() + txs.iter().map(|tx| tx.tx_weight).sum::<usize>();

@@ -208,11 +211,14 @@ where
     Tx: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ConsensusError>,
 {
     tracing::debug!("getting blockchain context");
-    let context = context_svc
+    let checked_context = context_svc
         .oneshot(BlockChainContextRequest)
         .await
         .map_err(Into::<ConsensusError>::into)?;

+    // TODO: should we unwrap here, we did just get the data so it should be ok.
+    let context = checked_context.blockchain_context().unwrap();
+
     tracing::debug!("got blockchain context: {:?}", context);

     // TODO: reorder these tests so we do the cheap tests first.

@@ -16,13 +16,17 @@ use std::{
 use futures::FutureExt;
 use tokio::sync::RwLock;
+use tokio_util::sync::CancellationToken;
 use tower::{Service, ServiceExt};

 use crate::{helper::current_time, ConsensusError, Database, DatabaseRequest, DatabaseResponse};

-pub mod difficulty;
-pub mod hardforks;
-pub mod weight;
+mod difficulty;
+mod hardforks;
+mod weight;
+
+#[cfg(test)]
+mod tests;

 pub use difficulty::DifficultyCacheConfig;
 pub use hardforks::{HardFork, HardForkConfig};

@@ -31,9 +35,9 @@ pub use weight::BlockWeightsCacheConfig;
 const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: u64 = 60;

 pub struct ContextConfig {
-    hard_fork_cfg: HardForkConfig,
-    difficulty_cfg: DifficultyCacheConfig,
-    weights_config: BlockWeightsCacheConfig,
+    pub hard_fork_cfg: HardForkConfig,
+    pub difficulty_cfg: DifficultyCacheConfig,
+    pub weights_config: BlockWeightsCacheConfig,
 }

 impl ContextConfig {

@@ -114,6 +118,7 @@ where
     let context_svc = BlockChainContextService {
         internal_blockchain_context: Arc::new(
             InternalBlockChainContext {
+                current_validity_token: CancellationToken::new(),
                 difficulty_cache: difficulty_cache_handle.await.unwrap()?,
                 weight_cache: weight_cache_handle.await.unwrap()?,
                 hardfork_state: hardfork_state_handle.await.unwrap()?,

@@ -130,8 +135,10 @@ where
     Ok((context_svc_update.clone(), context_svc_update))
 }

+/// Raw blockchain context, gotten from [`BlockChainContext`]. This data may turn invalid so is not ok to keep
+/// around. You should keep around [`BlockChainContext`] instead.
 #[derive(Debug, Clone, Copy)]
-pub struct BlockChainContext {
+pub struct RawBlockChainContext {
     /// The next blocks difficulty.
     pub next_difficulty: u128,
     /// The current cumulative difficulty.

@@ -156,7 +163,7 @@ pub struct BlockChainContext {
     pub current_hard_fork: HardFork,
 }

-impl BlockChainContext {
+impl RawBlockChainContext {
     /// Returns the timestamp the should be used when checking locked outputs.
     ///
     /// https://cuprate.github.io/monero-book/consensus_rules/transactions/unlock_time.html#getting-the-current-time

@@ -197,11 +204,44 @@ impl BlockChainContext {
     }
 }

+/// Blockchain context which keeps a token of validity so users will know when the data is no longer valid.
+#[derive(Debug, Clone)]
+pub struct BlockChainContext {
+    /// A token representing this data's validity.
+    validity_token: CancellationToken,
+    /// The actual block chain context.
+    raw: RawBlockChainContext,
+}
+
+#[derive(Debug, Clone, Copy, thiserror::Error)]
+#[error("data is no longer valid")]
+pub struct DataNoLongerValid;
+
+impl BlockChainContext {
+    /// Checks if the data is still valid.
+    pub fn is_still_valid(&self) -> bool {
+        !self.validity_token.is_cancelled()
+    }
+
+    /// Checks if the data is valid returning an Err if not and a reference to the blockchain context if
+    /// it is.
+    pub fn blockchain_context(&self) -> Result<RawBlockChainContext, DataNoLongerValid> {
+        if !self.is_still_valid() {
+            return Err(DataNoLongerValid);
+        }
+        Ok(self.raw)
+    }
+}
+
 #[derive(Debug, Clone)]
 pub struct BlockChainContextRequest;

 #[derive(Clone)]
 struct InternalBlockChainContext {
+    /// A token used to invalidate previous contexts when a new
+    /// block is added to the chain.
+    current_validity_token: CancellationToken,
+
     difficulty_cache: difficulty::DifficultyCache,
     weight_cache: weight::BlockWeightsCache,
     hardfork_state: hardforks::HardForkState,

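Given the API added above, callers are expected to check validity before trusting the copied data. A hedged sketch of a hypothetical caller (it uses the types from this hunk; the function itself is not part of the commit):

    // Hypothetical consumer of the new API: return an error (or re-query the
    // service) when the context has been invalidated by a new block.
    fn next_difficulty_if_fresh(
        context: &BlockChainContext,
    ) -> Result<u128, DataNoLongerValid> {
        // `blockchain_context()` only returns the copied RawBlockChainContext
        // while the validity token has not been cancelled.
        let raw = context.blockchain_context()?;
        Ok(raw.next_difficulty)
    }
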
@@ -233,6 +273,7 @@ impl Service<BlockChainContextRequest> for BlockChainContextService {
         let internal_blockchain_context_lock = internal_blockchain_context.read().await;

         let InternalBlockChainContext {
+            current_validity_token,
             difficulty_cache,
             weight_cache,
             hardfork_state,

@@ -244,18 +285,24 @@ impl Service<BlockChainContextRequest> for BlockChainContextService {
             let current_hf = hardfork_state.current_hardfork();

             Ok(BlockChainContext {
+                validity_token: current_validity_token.child_token(),
+                raw: RawBlockChainContext {
                     next_difficulty: difficulty_cache.next_difficulty(&current_hf),
                     cumulative_difficulty: difficulty_cache.cumulative_difficulty(),
-                    effective_median_weight: weight_cache.effective_median_block_weight(&current_hf),
+                    effective_median_weight: weight_cache
+                        .effective_median_block_weight(&current_hf),
                     median_long_term_weight: weight_cache.median_long_term_weight(),
-                    median_weight_for_block_reward: weight_cache.median_for_block_reward(&current_hf),
+                    median_weight_for_block_reward: weight_cache
+                        .median_for_block_reward(&current_hf),
                     already_generated_coins: *already_generated_coins,
                     top_block_timestamp: difficulty_cache.top_block_timestamp(),
-                    median_block_timestamp: difficulty_cache
-                        .median_timestamp(usize::try_from(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW).unwrap()),
+                    median_block_timestamp: difficulty_cache.median_timestamp(
+                        usize::try_from(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW).unwrap(),
+                    ),
                     chain_height: *chain_height,
                     top_hash: *top_block_hash,
                     current_hard_fork: current_hf,
+                },
             })
         }
         .boxed()

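The reason each response gets `child_token()` rather than a clone of the parent is that cancellation only propagates downwards: the service can invalidate every outstanding context at once, while no consumer can invalidate anyone else's. A standalone sketch of that property (plain `tokio-util`, nothing Cuprate-specific):

    use tokio_util::sync::CancellationToken;

    fn main() {
        let parent = CancellationToken::new();
        let child_a = parent.child_token();
        let child_b = parent.child_token();

        // Cancelling one child affects neither its siblings nor the parent.
        child_a.cancel();
        assert!(!parent.is_cancelled());
        assert!(!child_b.is_cancelled());

        // Cancelling the parent invalidates all remaining children at once.
        parent.cancel();
        assert!(child_b.is_cancelled());
    }
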
@@ -291,6 +338,7 @@ impl tower::Service<UpdateBlockchainCacheRequest> for BlockChainContextService {
         let mut internal_blockchain_context_lock = internal_blockchain_context.write().await;

         let InternalBlockChainContext {
+            current_validity_token,
             difficulty_cache,
             weight_cache,
             hardfork_state,

@@ -299,11 +347,14 @@ impl tower::Service<UpdateBlockchainCacheRequest> for BlockChainContextService {
             already_generated_coins,
         } = internal_blockchain_context_lock.deref_mut();

+        // Cancel the validity token and replace it with a new one.
+        std::mem::replace(current_validity_token, CancellationToken::new()).cancel();
+
         difficulty_cache.new_block(new.height, new.timestamp, new.cumulative_difficulty);

         weight_cache.new_block(new.height, new.weight, new.long_term_weight);

-        hardfork_state.new_block(new.vote, new.height).await?;
+        hardfork_state.new_block(new.vote, new.height);

         *chain_height = new.height + 1;
         *top_block_hash = new.new_top_hash;

@@ -8,7 +8,7 @@ use crate::{
 };

 #[cfg(test)]
-mod tests;
+pub(super) mod tests;

 /// The amount of blocks we account for to calculate difficulty
 const DIFFICULTY_WINDOW: usize = 720;

@@ -69,22 +69,6 @@ pub struct DifficultyCache {
 }

 impl DifficultyCache {
-    pub async fn init<D: Database + Clone>(
-        config: DifficultyCacheConfig,
-        mut database: D,
-    ) -> Result<Self, ConsensusError> {
-        let DatabaseResponse::ChainHeight(chain_height, _) = database
-            .ready()
-            .await?
-            .call(DatabaseRequest::ChainHeight)
-            .await?
-        else {
-            panic!("Database sent incorrect response")
-        };
-
-        DifficultyCache::init_from_chain_height(chain_height, config, database).await
-    }
-
     #[instrument(name = "init_difficulty_cache", level = "info", skip(database, config))]
     pub async fn init_from_chain_height<D: Database + Clone>(
         chain_height: u64,

@@ -11,7 +11,7 @@ const TEST_LAG: usize = 2;

 const TEST_TOTAL_ACCOUNTED_BLOCKS: usize = TEST_WINDOW + TEST_LAG;

-const TEST_DIFFICULTY_CONFIG: DifficultyCacheConfig =
+pub const TEST_DIFFICULTY_CONFIG: DifficultyCacheConfig =
     DifficultyCacheConfig::new(TEST_WINDOW, TEST_CUT, TEST_LAG);

 #[tokio::test]

@@ -21,7 +21,8 @@ async fn first_3_blocks_fixed_difficulty() -> Result<(), tower::BoxError> {
     db_builder.add_block(genesis);

     let mut difficulty_cache =
-        DifficultyCache::init(TEST_DIFFICULTY_CONFIG, db_builder.finish()).await?;
+        DifficultyCache::init_from_chain_height(1, TEST_DIFFICULTY_CONFIG, db_builder.finish())
+            .await?;

     for height in 1..3 {
         assert_eq!(difficulty_cache.next_difficulty(&HardFork::V1), 1);

@@ -35,7 +36,9 @@ async fn genesis_block_skipped() -> Result<(), tower::BoxError> {
     let mut db_builder = DummyDatabaseBuilder::default();
     let genesis = DummyBlockExtendedHeader::default().with_difficulty_info(0, 1);
     db_builder.add_block(genesis);
-    let diff_cache = DifficultyCache::init(TEST_DIFFICULTY_CONFIG, db_builder.finish()).await?;
+    let diff_cache =
+        DifficultyCache::init_from_chain_height(1, TEST_DIFFICULTY_CONFIG, db_builder.finish())
+            .await?;
     assert!(diff_cache.cumulative_difficulties.is_empty());
     assert!(diff_cache.timestamps.is_empty());
     Ok(())

@@ -12,7 +12,7 @@ use tracing::instrument;
 use crate::{ConsensusError, Database, DatabaseRequest, DatabaseResponse};

 #[cfg(test)]
-mod tests;
+pub(super) mod tests;

 // https://cuprate.github.io/monero-docs/consensus_rules/hardforks.html#accepting-a-fork
 const DEFAULT_WINDOW_SIZE: u64 = 10080; // supermajority window check length - a week

@@ -263,24 +263,6 @@ pub struct HardForkState {
 }

 impl HardForkState {
-    pub async fn init<D: Database + Clone>(
-        config: HardForkConfig,
-        mut database: D,
-    ) -> Result<Self, ConsensusError> {
-        let DatabaseResponse::ChainHeight(chain_height, _) = database
-            .ready()
-            .await?
-            .call(DatabaseRequest::ChainHeight)
-            .await?
-        else {
-            panic!("Database sent incorrect response")
-        };
-
-        let hfs = HardForkState::init_from_chain_height(chain_height, config, database).await?;
-
-        Ok(hfs)
-    }
-
     #[instrument(name = "init_hardfork_state", skip(config, database), level = "info")]
     pub async fn init_from_chain_height<D: Database + Clone>(
         chain_height: u64,

@@ -336,7 +318,7 @@ impl HardForkState {
         Ok(hfs)
     }

-    pub async fn new_block(&mut self, vote: HardFork, height: u64) -> Result<(), ConsensusError> {
+    pub fn new_block(&mut self, vote: HardFork, height: u64) {
         assert_eq!(self.last_height + 1, height);
         self.last_height += 1;

@@ -353,7 +335,6 @@ impl HardForkState {
         }

         self.check_set_new_hf();
-        Ok(())
     }

     /// Checks if the next hard-fork should be activated and activates it if it should.

@@ -26,7 +26,7 @@ const TEST_HFS: [HFInfo; NUMB_OF_HARD_FORKS] = [
     HFInfo::new(150, 0),
 ];

-const TEST_HARD_FORK_CONFIG: HardForkConfig = HardForkConfig {
+pub const TEST_HARD_FORK_CONFIG: HardForkConfig = HardForkConfig {
     window: TEST_WINDOW_SIZE,
     forks: TEST_HFS,
 };

@@ -62,7 +62,11 @@ async fn hard_fork_set_depends_on_top_block() {
         DummyBlockExtendedHeader::default().with_hard_fork_info(HardFork::V14, HardFork::V16),
     );

-    let state = HardForkState::init(TEST_HARD_FORK_CONFIG, db_builder.finish())
+    let state = HardForkState::init_from_chain_height(
+        TEST_WINDOW_SIZE + 1,
+        TEST_HARD_FORK_CONFIG,
+        db_builder.finish(),
+    )
     .await
     .unwrap();

consensus/src/context/tests.rs (new file, 74 lines)

@@ -0,0 +1,74 @@
+use proptest::strategy::ValueTree;
+use proptest::{strategy::Strategy, test_runner::TestRunner};
+use tower::ServiceExt;
+
+use super::{
+    difficulty::tests::TEST_DIFFICULTY_CONFIG, hardforks::tests::TEST_HARD_FORK_CONFIG,
+    initialize_blockchain_context, weight::tests::TEST_WEIGHT_CONFIG, BlockChainContextRequest,
+    ContextConfig, UpdateBlockchainCacheRequest,
+};
+use crate::{test_utils::mock_db::*, HardFork};
+
+const TEST_CONTEXT_CONFIG: ContextConfig = ContextConfig {
+    hard_fork_cfg: TEST_HARD_FORK_CONFIG,
+    difficulty_cfg: TEST_DIFFICULTY_CONFIG,
+    weights_config: TEST_WEIGHT_CONFIG,
+};
+
+#[tokio::test]
+async fn context_invalidated_on_new_block() -> Result<(), tower::BoxError> {
+    const BLOCKCHAIN_HEIGHT: u64 = 6000;
+
+    let mut runner = TestRunner::default();
+    let db = arb_dummy_database(BLOCKCHAIN_HEIGHT.try_into().unwrap())
+        .new_tree(&mut runner)
+        .unwrap()
+        .current();
+
+    let (ctx_svc, updater) = initialize_blockchain_context(TEST_CONTEXT_CONFIG, db).await?;
+
+    let context = ctx_svc.oneshot(BlockChainContextRequest).await?;
+
+    assert!(context.is_still_valid());
+    assert!(context.is_still_valid());
+    assert!(context.is_still_valid());
+
+    updater
+        .oneshot(UpdateBlockchainCacheRequest {
+            new_top_hash: [0; 32],
+            height: BLOCKCHAIN_HEIGHT,
+            timestamp: 0,
+            weight: 0,
+            long_term_weight: 0,
+            generated_coins: 0,
+            vote: HardFork::V1,
+            cumulative_difficulty: 0,
+        })
+        .await?;
+
+    assert!(!context.is_still_valid());
+
+    Ok(())
+}
+
+#[tokio::test]
+async fn context_height_correct() -> Result<(), tower::BoxError> {
+    const BLOCKCHAIN_HEIGHT: u64 = 6000;
+
+    let mut runner = TestRunner::default();
+    let db = arb_dummy_database(BLOCKCHAIN_HEIGHT.try_into().unwrap())
+        .new_tree(&mut runner)
+        .unwrap()
+        .current();
+
+    let (ctx_svc, _) = initialize_blockchain_context(TEST_CONTEXT_CONFIG, db).await?;
+
+    let context = ctx_svc.oneshot(BlockChainContextRequest).await?;
+
+    assert_eq!(
+        context.blockchain_context().unwrap().chain_height,
+        BLOCKCHAIN_HEIGHT
+    );
+
+    Ok(())
+}

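The test setup above pulls a single concrete database out of a proptest strategy instead of running it under the `proptest!` macro. A standalone sketch of that materialization pattern (generic strategy for illustration, not `arb_dummy_database`):

    use proptest::arbitrary::any;
    use proptest::strategy::{Strategy, ValueTree};
    use proptest::test_runner::TestRunner;

    fn main() {
        let mut runner = TestRunner::default();

        // Any strategy works here; the tests above use `arb_dummy_database(height)`.
        let value: Vec<u8> = proptest::collection::vec(any::<u8>(), 16)
            .new_tree(&mut runner) // sample one value tree from the strategy
            .unwrap()
            .current(); // take its current (un-shrunk) value

        assert_eq!(value.len(), 16);
    }
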
@@ -12,7 +12,6 @@ use std::{
     ops::Range,
 };

-use monero_serai::{block::Block, transaction::Transaction};
 use rayon::prelude::*;
 use tower::ServiceExt;
 use tracing::instrument;

@@ -22,7 +21,7 @@ use crate::{
 };

 #[cfg(test)]
-mod tests;
+pub(super) mod tests;

 const PENALTY_FREE_ZONE_1: usize = 20000;
 const PENALTY_FREE_ZONE_2: usize = 60000;

@@ -31,16 +30,6 @@ const PENALTY_FREE_ZONE_5: usize = 300000;
 const SHORT_TERM_WINDOW: u64 = 100;
 const LONG_TERM_WINDOW: u64 = 100000;

-/// Calculates the blocks weight.
-///
-/// https://cuprate.github.io/monero-book/consensus_rules/blocks/weight_limit.html#blocks-weight
-pub fn block_weight(block: &Block, txs: &[Transaction]) -> usize {
-    txs.iter()
-        .chain([&block.miner_tx])
-        .map(|tx| tx.weight())
-        .sum()
-}
-
 /// Returns the penalty free zone
 ///
 /// https://cuprate.github.io/monero-book/consensus_rules/blocks/weight_limit.html#penalty-free-zone

@@ -94,23 +83,6 @@ pub struct BlockWeightsCache {
 }

 impl BlockWeightsCache {
-    /// Initialize the [`BlockWeightsCache`] at the the height of the database.
-    pub async fn init<D: Database + Clone>(
-        config: BlockWeightsCacheConfig,
-        mut database: D,
-    ) -> Result<Self, ConsensusError> {
-        let DatabaseResponse::ChainHeight(chain_height, _) = database
-            .ready()
-            .await?
-            .call(DatabaseRequest::ChainHeight)
-            .await?
-        else {
-            panic!("Database sent incorrect response!");
-        };
-
-        Self::init_from_chain_height(chain_height, config, database).await
-    }
-
     /// Initialize the [`BlockWeightsCache`] at the the given chain height.
     #[instrument(name = "init_weight_cache", level = "info", skip(database, config))]
     pub async fn init_from_chain_height<D: Database + Clone>(

@@ -1,7 +1,7 @@
 use super::{BlockWeightsCache, BlockWeightsCacheConfig};
 use crate::test_utils::mock_db::*;

-const TEST_WEIGHT_CONFIG: BlockWeightsCacheConfig = BlockWeightsCacheConfig::new(100, 5000);
+pub const TEST_WEIGHT_CONFIG: BlockWeightsCacheConfig = BlockWeightsCacheConfig::new(100, 5000);

 #[tokio::test]
 async fn blocks_out_of_window_not_counted() -> Result<(), tower::BoxError> {

|
|||
db_builder.add_block(block);
|
||||
}
|
||||
|
||||
let mut weight_cache = BlockWeightsCache::init(TEST_WEIGHT_CONFIG, db_builder.finish()).await?;
|
||||
let mut weight_cache =
|
||||
BlockWeightsCache::init_from_chain_height(5000, TEST_WEIGHT_CONFIG, db_builder.finish())
|
||||
.await?;
|
||||
assert_eq!(weight_cache.median_long_term_weight(), 2500);
|
||||
assert_eq!(weight_cache.median_short_term_weight(), 4950);
|
||||
|
||||
|
@@ -33,7 +35,9 @@ async fn weight_cache_calculates_correct_median() -> Result<(), tower::BoxError>
     let block = DummyBlockExtendedHeader::default().with_weight_into(0, 0);
     db_builder.add_block(block);

-    let mut weight_cache = BlockWeightsCache::init(TEST_WEIGHT_CONFIG, db_builder.finish()).await?;
+    let mut weight_cache =
+        BlockWeightsCache::init_from_chain_height(1, TEST_WEIGHT_CONFIG, db_builder.finish())
+            .await?;

     for height in 1..=100 {
         weight_cache.new_block(height as u64, height, height);

@@ -1,4 +1,3 @@
-use futures::FutureExt;
 use std::{
     future::Future,
     pin::Pin,

@@ -6,20 +5,53 @@ use std::{
     task::{Context, Poll},
 };

-use cuprate_common::BlockID;
+use futures::FutureExt;
+use proptest::{
+    arbitrary::{any, any_with},
+    prop_compose,
+    sample::size_range,
+    strategy::Strategy,
+};
+use proptest_derive::Arbitrary;
 use tower::{BoxError, Service};

+use cuprate_common::BlockID;
+
 use crate::{DatabaseRequest, DatabaseResponse, ExtendedBlockHeader, HardFork};

-#[derive(Default, Debug, Clone, Copy)]
+prop_compose! {
+    /// Generates an arbitrary full [`DummyDatabase`], it is not safe to do consensus checks on the returned database
+    /// but is ok for testing certain parts of the code with.
+    pub fn arb_dummy_database(height: usize)
+        (
+            mut blocks in any_with::<Vec<DummyBlockExtendedHeader>>(size_range(height).lift())
+        ) -> DummyDatabase {
+        let mut builder = DummyDatabaseBuilder::default();
+
+        blocks.sort_by(|a, b| a.cumulative_difficulty.cmp(&b.cumulative_difficulty));
+
+        for block in blocks {
+            builder.add_block(block);
+        }
+        builder.finish()
+    }
+}
+
+#[derive(Default, Debug, Clone, Copy, Arbitrary)]
 pub struct DummyBlockExtendedHeader {
+    #[proptest(strategy = "any::<HardFork>().prop_map(Some)")]
     pub version: Option<HardFork>,
+    #[proptest(strategy = "any::<HardFork>().prop_map(Some)")]
     pub vote: Option<HardFork>,

+    #[proptest(strategy = "any::<u64>().prop_map(Some)")]
     pub timestamp: Option<u64>,
+    #[proptest(strategy = "any::<u128>().prop_map(|x| Some(x % u128::from(u64::MAX)))")]
     pub cumulative_difficulty: Option<u128>,

+    #[proptest(strategy = "any::<usize>().prop_map(|x| Some(x % 100_000_000))")]
     pub block_weight: Option<usize>,
+    #[proptest(strategy = "any::<usize>().prop_map(|x| Some(x % 100_000_000))")]
     pub long_term_weight: Option<usize>,
 }

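For reference, the two proptest features the mock_db changes lean on are `#[derive(Arbitrary)]` with per-field strategy overrides and the `prop_compose!` macro for building a named composite strategy. A small standalone example under assumed toy types (not Cuprate's):

    use proptest::prelude::*;
    use proptest_derive::Arbitrary;

    #[derive(Debug, Clone, Copy, Arbitrary)]
    struct Header {
        // Per-field strategy overrides, like those added to DummyBlockExtendedHeader.
        #[proptest(strategy = "any::<u64>().prop_map(Some)")]
        timestamp: Option<u64>,
        #[proptest(strategy = "any::<usize>().prop_map(|x| Some(x % 1_000))")]
        weight: Option<usize>,
    }

    prop_compose! {
        // A named strategy yielding `count` headers sorted by timestamp,
        // mirroring how arb_dummy_database sorts by cumulative difficulty.
        fn arb_headers(count: usize)
            (mut headers in proptest::collection::vec(any::<Header>(), count))
            -> Vec<Header>
        {
            headers.sort_by_key(|h| h.timestamp);
            headers
        }
    }

    proptest! {
        #[test]
        fn headers_are_sorted(headers in arb_headers(16)) {
            prop_assert!(headers.windows(2).all(|w| w[0].timestamp <= w[1].timestamp));
        }
    }
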
@@ -85,7 +117,7 @@ impl DummyDatabaseBuilder {
     }
 }

-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct DummyDatabase {
     blocks: Arc<RwLock<Vec<DummyBlockExtendedHeader>>>,
 }