add alt context cache

Boog900 2024-07-12 01:52:44 +01:00
parent b586ad6795
commit a1bdb78112
11 changed files with 87 additions and 28 deletions

Cargo.lock (generated)
View file

@ -533,6 +533,7 @@ dependencies = [
"multiexp",
"proptest",
"proptest-derive",
"rand",
"randomx-rs",
"rayon",
"thiserror",

View file

@ -29,6 +29,7 @@ tokio = { workspace = true, features = ["rt"] }
tokio-util = { workspace = true }
hex = { workspace = true }
rand = { workspace = true }
[dev-dependencies]
cuprate-test-utils = { path = "../test-utils" }

View file

@ -27,6 +27,7 @@ pub(crate) mod hardforks;
pub(crate) mod rx_vms;
pub(crate) mod weight;
mod alt_chains;
mod task;
mod tokens;

View file

@ -12,7 +12,7 @@ use tower::ServiceExt;
use tracing::instrument;
use cuprate_helper::num::median;
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
use cuprate_types::blockchain::{BCReadRequest, BCResponse, Chain};
use crate::{Database, ExtendedConsensusError, HardFork};
@ -87,6 +87,7 @@ impl DifficultyCache {
chain_height: u64,
config: DifficultyCacheConfig,
database: D,
chain: Chain,
) -> Result<Self, ExtendedConsensusError> {
tracing::info!("Initializing difficulty cache this may take a while.");
@ -98,7 +99,7 @@ impl DifficultyCache {
}
let (timestamps, cumulative_difficulties) =
get_blocks_in_pow_info(database.clone(), block_start..chain_height).await?;
get_blocks_in_pow_info(database.clone(), block_start..chain_height, chain).await?;
debug_assert_eq!(timestamps.len() as u64, chain_height - block_start);
@ -121,8 +122,12 @@ impl DifficultyCache {
/// Pop some blocks from the top of the cache.
///
/// The cache will be returned to the state it would have been in `numb_blocks` ago.
///
/// # Invariant
///
/// This _must_ only be used on a main-chain cache.
#[instrument(name = "pop_blocks_diff_cache", skip_all, fields(numb_blocks = numb_blocks))]
pub async fn pop_blocks<D: Database + Clone>(
pub async fn pop_blocks_main_chain<D: Database + Clone>(
&mut self,
numb_blocks: u64,
database: D,
@ -137,6 +142,7 @@ impl DifficultyCache {
self.last_accounted_height - numb_blocks + 1,
self.config,
database,
Chain::Main,
)
.await?;
@ -159,6 +165,7 @@ impl DifficultyCache {
new_start_height
// current_chain_height - self.timestamps.len() blocks are already in the cache.
..(current_chain_height - u64::try_from(self.timestamps.len()).unwrap()),
Chain::Main,
)
.await?;
@ -359,11 +366,15 @@ fn get_window_start_and_end(
async fn get_blocks_in_pow_info<D: Database + Clone>(
database: D,
block_heights: Range<u64>,
chain: Chain,
) -> Result<(VecDeque<u64>, VecDeque<u128>), ExtendedConsensusError> {
tracing::info!("Getting blocks timestamps");
let BCResponse::BlockExtendedHeaderInRange(ext_header) = database
.oneshot(BCReadRequest::BlockExtendedHeaderInRange(block_heights))
.oneshot(BCReadRequest::BlockExtendedHeaderInRange(
block_heights,
chain,
))
.await?
else {
panic!("Database sent incorrect response");

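The difficulty-cache hunks above boil down to two API changes: `init_from_chain_height` now takes the `Chain` the cache is being built for, and `pop_blocks` is renamed to `pop_blocks_main_chain` with an explicit main-chain-only invariant. Below is a minimal sketch of the resulting call shape, written as if inside this module and reusing the types shown in the diff (`Database`, `ExtendedConsensusError`, `DifficultyCacheConfig`, `Chain`); the helper name and parameters are illustrative, not part of the commit:

// Hypothetical helper, for illustration only.
async fn rebuild_main_chain_diff_cache<D: Database + Clone>(
    chain_height: u64,
    numb_blocks: u64,
    config: DifficultyCacheConfig,
    database: D,
) -> Result<DifficultyCache, ExtendedConsensusError> {
    let mut diff_cache = DifficultyCache::init_from_chain_height(
        chain_height,
        config,
        database.clone(),
        Chain::Main, // new argument: the chain this cache is built for
    )
    .await?;

    // Renamed from `pop_blocks`; the added invariant is that this is only
    // ever called on a main-chain cache.
    diff_cache
        .pop_blocks_main_chain(numb_blocks, database)
        .await?;

    Ok(diff_cache)
}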
View file

@ -4,7 +4,7 @@ use tower::ServiceExt;
use tracing::instrument;
use cuprate_consensus_rules::{HFVotes, HFsInfo, HardFork};
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
use cuprate_types::blockchain::{BCReadRequest, BCResponse, Chain};
use crate::{Database, ExtendedConsensusError};
@ -168,7 +168,10 @@ async fn get_votes_in_range<D: Database>(
let mut votes = HFVotes::new(window_size);
let BCResponse::BlockExtendedHeaderInRange(vote_list) = database
.oneshot(BCReadRequest::BlockExtendedHeaderInRange(block_heights))
.oneshot(BCReadRequest::BlockExtendedHeaderInRange(
block_heights,
Chain::Main,
))
.await?
else {
panic!("Database sent incorrect response!");

View file

@ -9,7 +9,7 @@ use tower::ServiceExt;
use tracing::Instrument;
use cuprate_consensus_rules::blocks::ContextToVerifyBlock;
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
use cuprate_types::blockchain::{BCReadRequest, BCResponse, Chain};
use super::{
difficulty, hardforks, rx_vms, weight, BlockChainContext, BlockChainContextRequest,
@ -95,14 +95,24 @@ impl ContextTask {
let db = database.clone();
let difficulty_cache_handle = tokio::spawn(async move {
difficulty::DifficultyCache::init_from_chain_height(chain_height, difficulty_cfg, db)
.await
difficulty::DifficultyCache::init_from_chain_height(
chain_height,
difficulty_cfg,
db,
Chain::Main,
)
.await
});
let db = database.clone();
let weight_cache_handle = tokio::spawn(async move {
weight::BlockWeightsCache::init_from_chain_height(chain_height, weights_config, db)
.await
weight::BlockWeightsCache::init_from_chain_height(
chain_height,
weights_config,
db,
Chain::Main,
)
.await
});
// Wait for the hardfork state to finish first as we need it to start the randomX VM cache.

View file

@ -16,7 +16,7 @@ use tracing::instrument;
use cuprate_consensus_rules::blocks::{penalty_free_zone, PENALTY_FREE_ZONE_5};
use cuprate_helper::{asynch::rayon_spawn_async, num::RollingMedian};
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
use cuprate_types::blockchain::{BCReadRequest, BCResponse, Chain};
use crate::{Database, ExtendedConsensusError, HardFork};
@ -66,7 +66,7 @@ pub struct BlockWeightsCache {
/// The height of the top block.
tip_height: u64,
config: BlockWeightsCacheConfig,
pub(crate) config: BlockWeightsCacheConfig,
}
impl BlockWeightsCache {
@ -76,18 +76,21 @@ impl BlockWeightsCache {
chain_height: u64,
config: BlockWeightsCacheConfig,
database: D,
chain: Chain,
) -> Result<Self, ExtendedConsensusError> {
tracing::info!("Initializing weight cache this may take a while.");
let long_term_weights = get_long_term_weight_in_range(
chain_height.saturating_sub(config.long_term_window)..chain_height,
database.clone(),
chain,
)
.await?;
let short_term_block_weights = get_blocks_weight_in_range(
chain_height.saturating_sub(config.short_term_window)..chain_height,
database,
chain,
)
.await?;
@ -128,6 +131,7 @@ impl BlockWeightsCache {
self.tip_height - numb_blocks + 1,
self.config,
database,
Chain::Main,
)
.await?;
@ -145,6 +149,7 @@ impl BlockWeightsCache {
// current_chain_height - self.long_term_weights.len() blocks are already in the cache.
..(chain_height - u64::try_from(self.long_term_weights.window_len()).unwrap()),
database.clone(),
Chain::Main,
)
.await?;
@ -157,6 +162,7 @@ impl BlockWeightsCache {
// current_chain_height - self.long_term_weights.len() blocks are already in the cache.
..(chain_height - u64::try_from(self.short_term_block_weights.window_len()).unwrap()),
database,
Chain::Main
)
.await?;
@ -283,11 +289,12 @@ pub fn calculate_block_long_term_weight(
async fn get_blocks_weight_in_range<D: Database + Clone>(
range: Range<u64>,
database: D,
chain: Chain,
) -> Result<Vec<usize>, ExtendedConsensusError> {
tracing::info!("getting block weights.");
let BCResponse::BlockExtendedHeaderInRange(ext_headers) = database
.oneshot(BCReadRequest::BlockExtendedHeaderInRange(range))
.oneshot(BCReadRequest::BlockExtendedHeaderInRange(range, chain))
.await?
else {
panic!("Database sent incorrect response!")
@ -304,11 +311,12 @@ async fn get_blocks_weight_in_range<D: Database + Clone>(
async fn get_long_term_weight_in_range<D: Database + Clone>(
range: Range<u64>,
database: D,
chain: Chain,
) -> Result<Vec<usize>, ExtendedConsensusError> {
tracing::info!("getting block long term weights.");
let BCResponse::BlockExtendedHeaderInRange(ext_headers) = database
.oneshot(BCReadRequest::BlockExtendedHeaderInRange(range))
.oneshot(BCReadRequest::BlockExtendedHeaderInRange(range, chain))
.await?
else {
panic!("Database sent incorrect response!")

View file

@ -225,7 +225,7 @@ proptest! {
new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty);
}
new_cache.pop_blocks(blocks_to_pop as u64, database).await?;
new_cache.pop_blocks_main_chain(blocks_to_pop as u64, database).await?;
prop_assert_eq!(new_cache, old_cache);
@ -249,7 +249,7 @@ proptest! {
new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty);
}
new_cache.pop_blocks(blocks_to_pop as u64, database).await?;
new_cache.pop_blocks_main_chain(blocks_to_pop as u64, database).await?;
prop_assert_eq!(new_cache, old_cache);

View file

@ -15,6 +15,7 @@ use tokio_util::sync::PollSemaphore;
use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError};
use cuprate_helper::{asynch::InfallibleOneshotReceiver, map::combine_low_high_bits_to_u128};
use cuprate_types::blockchain::Chain;
use cuprate_types::{
blockchain::{BCReadRequest, BCResponse},
ExtendedBlockHeader, OutputOnChain,
@ -207,8 +208,11 @@ fn map_request(
let response = match request {
R::BlockExtendedHeader(block) => block_extended_header(env, block),
R::BlockHash(block) => block_hash(env, block),
BCReadRequest::FindBlock(_) => todo!("Add alt blocks to DB"),
R::FilterUnknownHashes(hashes) => filter_unknown_hashes(env, hashes),
R::BlockExtendedHeaderInRange(range) => block_extended_header_in_range(env, range),
R::BlockExtendedHeaderInRange(range, chain) => {
block_extended_header_in_range(env, range, chain)
}
R::ChainHeight => chain_height(env),
R::GeneratedCoins => generated_coins(env),
R::Outputs(map) => outputs(env, map),
@ -356,6 +360,7 @@ fn filter_unknown_hashes(env: &ConcreteEnv, mut hashes: HashSet<BlockHash>) -> R
fn block_extended_header_in_range(
env: &ConcreteEnv,
range: std::ops::Range<BlockHeight>,
chain: Chain,
) -> ResponseResult {
// Prepare tx/tables in `ThreadLocal`.
let env_inner = env.env_inner();
@ -363,14 +368,17 @@ fn block_extended_header_in_range(
let tables = thread_local(env);
// Collect results using `rayon`.
let vec = range
.into_par_iter()
.map(|block_height| {
let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
get_block_extended_header_from_height(&block_height, tables)
})
.collect::<Result<Vec<ExtendedBlockHeader>, RuntimeError>>()?;
let vec = match chain {
Chain::Main => range
.into_par_iter()
.map(|block_height| {
let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
get_block_extended_header_from_height(&block_height, tables)
})
.collect::<Result<Vec<ExtendedBlockHeader>, RuntimeError>>()?,
Chain::Alt(_) => todo!("Add alt blocks to DB"),
};
Ok(BCResponse::BlockExtendedHeaderInRange(vec))
}

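With the read-service change above, only `Chain::Main` is actually served for ranged header reads; `Chain::Alt(_)` still hits the `todo!("Add alt blocks to DB")` branch. A rough sketch of the round trip the consensus helpers perform against this service follows; the generic bounds and error type are assumptions for illustration, while the request and response variants are the ones changed in this commit:

use tower::ServiceExt;
use cuprate_types::blockchain::{BCReadRequest, BCResponse, Chain};
use cuprate_types::ExtendedBlockHeader;

// `S` stands in for the read handle built in this file; the concrete
// error type is assumed to be `tower::BoxError` for this sketch.
async fn main_chain_headers<S>(database: S) -> Result<Vec<ExtendedBlockHeader>, tower::BoxError>
where
    S: tower::Service<BCReadRequest, Response = BCResponse, Error = tower::BoxError>,
{
    let BCResponse::BlockExtendedHeaderInRange(headers) = database
        .oneshot(BCReadRequest::BlockExtendedHeaderInRange(
            0..10,       // illustrative height range
            Chain::Main, // `Chain::Alt(_)` currently panics via `todo!`
        ))
        .await?
    else {
        panic!("Database sent incorrect response");
    };

    Ok(headers)
}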
View file

@ -11,6 +11,15 @@ use std::{
use crate::types::{ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation};
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub struct ChainID(pub u64);
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub enum Chain {
Main,
Alt(ChainID),
}
//---------------------------------------------------------------------------------------------------- ReadRequest
/// A read request to the blockchain database.
///
@ -32,6 +41,11 @@ pub enum BCReadRequest {
/// The input is the block's height.
BlockHash(u64),
/// Request to check if we have a block and where it is.
///
/// The input is the block's hash.
FindBlock([u8; 32]),
/// Removes the block hashes that are not in the _main_ chain.
///
/// This should filter (remove) hashes in alt-blocks as well.
@ -40,7 +54,7 @@ pub enum BCReadRequest {
/// Request a range of block extended headers.
///
/// The input is a range of block heights.
BlockExtendedHeaderInRange(Range<u64>),
BlockExtendedHeaderInRange(Range<u64>, Chain),
/// Request the current chain height.
///
@ -129,6 +143,8 @@ pub enum BCResponse {
/// Inner value is the hash of the requested block.
BlockHash([u8; 32]),
FindBlock(Option<(Chain, u64)>),
/// Response to [`BCReadRequest::FilterUnknownHashes`].
///
/// Inner value is the list of hashes that were in the main chain.

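The new `Chain` / `ChainID` types and the `FindBlock` request/response pair let callers ask which chain a block lives on. A small illustrative sketch of consuming the response; the helper is hypothetical, and reading the `u64` as the block's height on that chain is inferred from the request's doc comment:

use cuprate_types::blockchain::{BCResponse, Chain, ChainID};

// Hypothetical helper: the matching request would be
// `cuprate_types::blockchain::BCReadRequest::FindBlock(block_hash)`.
fn describe_find_block(response: BCResponse) -> String {
    match response {
        BCResponse::FindBlock(Some((Chain::Main, height))) => {
            format!("in the main chain at height {height}")
        }
        BCResponse::FindBlock(Some((Chain::Alt(ChainID(id)), height))) => {
            format!("in alt chain {id} at height {height}")
        }
        BCResponse::FindBlock(None) => "not in any known chain".to_string(),
        _ => unreachable!("a FindBlock request should always get a FindBlock response"),
    }
}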
View file

@ -58,7 +58,7 @@
clippy::nursery,
clippy::cargo,
unused_mut,
missing_docs,
//missing_docs,
deprecated,
unused_comparisons,
nonstandard_style