diff --git a/Cargo.lock b/Cargo.lock
index 68ccc3ae..d8628217 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -506,6 +506,7 @@ dependencies = [
  "monero-serai",
  "paste",
  "pretty_assertions",
+ "proptest",
  "rayon",
  "tempfile",
  "thread_local",
diff --git a/p2p/p2p/src/block_downloader.rs b/p2p/p2p/src/block_downloader.rs
index 7d0ab7e2..81640e90 100644
--- a/p2p/p2p/src/block_downloader.rs
+++ b/p2p/p2p/src/block_downloader.rs
@@ -121,7 +121,7 @@ pub enum ChainSvcResponse {
     /// The response for [`ChainSvcRequest::FindFirstUnknown`].
     ///
     /// Contains the index of the first unknown block and its expected height.
-    FindFirstUnknown(usize, u64),
+    FindFirstUnknown(Option<(usize, u64)>),
     /// The response for [`ChainSvcRequest::CumulativeDifficulty`].
     ///
     /// The current cumulative difficulty of our chain.
diff --git a/p2p/p2p/src/block_downloader/request_chain.rs b/p2p/p2p/src/block_downloader/request_chain.rs
index f8b53194..471635bf 100644
--- a/p2p/p2p/src/block_downloader/request_chain.rs
+++ b/p2p/p2p/src/block_downloader/request_chain.rs
@@ -198,7 +198,7 @@ where
     tracing::debug!("Highest chin entry contained {} block Ids", hashes.len());
 
     // Find the first unknown block in the batch.
-    let ChainSvcResponse::FindFirstUnknown(first_unknown, expected_height) = our_chain_svc
+    let ChainSvcResponse::FindFirstUnknown(first_unknown_ret) = our_chain_svc
         .ready()
         .await?
         .call(ChainSvcRequest::FindFirstUnknown(hashes.clone()))
@@ -207,18 +207,18 @@
         panic!("chain service sent wrong response.");
     };
 
+    // We know all the blocks already
+    // TODO: The peer could still be on a different chain, however the chain might just be too far split.
+    let Some((first_unknown, expected_height)) = first_unknown_ret else {
+        return Err(BlockDownloadError::FailedToFindAChainToFollow);
+    };
+
     // The peer must send at least one block we already know.
     if first_unknown == 0 {
         peer_handle.ban_peer(MEDIUM_BAN);
         return Err(BlockDownloadError::PeerSentNoOverlappingBlocks);
     }
 
-    // We know all the blocks already
-    // TODO: The peer could still be on a different chain, however the chain might just be too far split.
-    if first_unknown == hashes.len() {
-        return Err(BlockDownloadError::FailedToFindAChainToFollow);
-    }
-
     let previous_id = hashes[first_unknown - 1];
 
     let first_entry = ChainEntry {
diff --git a/p2p/p2p/src/block_downloader/tests.rs b/p2p/p2p/src/block_downloader/tests.rs
index 5d4225cd..bf342727 100644
--- a/p2p/p2p/src/block_downloader/tests.rs
+++ b/p2p/p2p/src/block_downloader/tests.rs
@@ -314,7 +314,9 @@ impl Service<ChainSvcRequest> for OurChainSvc {
                 block_ids: vec![genesis],
                 cumulative_difficulty: 1,
             },
-            ChainSvcRequest::FindFirstUnknown(_) => ChainSvcResponse::FindFirstUnknown(1, 1),
+            ChainSvcRequest::FindFirstUnknown(_) => {
+                ChainSvcResponse::FindFirstUnknown(Some((1, 1)))
+            }
             ChainSvcRequest::CumulativeDifficulty => ChainSvcResponse::CumulativeDifficulty(1),
         })
     }
diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml
index bab582d6..8a882142 100644
--- a/storage/blockchain/Cargo.toml
+++ b/storage/blockchain/Cargo.toml
@@ -45,8 +45,8 @@ rayon = { workspace = true, optional = true }
 cuprate-helper = { path = "../../helper", features = ["thread"] }
 cuprate-test-utils = { path = "../../test-utils" }
 
-bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] }
 tempfile = { version = "3.10.0" }
 pretty_assertions = { workspace = true }
+proptest = { workspace = true }
 hex = { workspace = true }
 hex-literal = { workspace = true }
diff --git a/storage/blockchain/src/service/free.rs b/storage/blockchain/src/service/free.rs
index 3ff8d6eb..3701f66f 100644
--- a/storage/blockchain/src/service/free.rs
+++ b/storage/blockchain/src/service/free.rs
@@ -33,8 +33,69 @@ pub fn init(config: Config) -> Result<(DatabaseReadHandle, DatabaseWriteHandle),
     Ok((readers, writer))
 }
 
-//---------------------------------------------------------------------------------------------------- Tests
-#[cfg(test)]
-mod test {
-    // use super::*;
+//---------------------------------------------------------------------------------------------------- Compact history
+/// Given a position in the compact history, returns the height offset that should be in that position.
+///
+/// The height offset is the difference between the top block's height and the block height that should be in that position.
+#[inline]
+pub(super) const fn compact_history_index_to_height_offset<const INITIAL_BLOCKS: u64>(
+    i: u64,
+) -> u64 {
+    // If the position is below the initial blocks just return the position back
+    if i <= INITIAL_BLOCKS {
+        i
+    } else {
+        // Otherwise we go with power of 2 offsets, the same as monerod.
+        // So (INITIAL_BLOCKS + 2), (INITIAL_BLOCKS + 2 + 4), (INITIAL_BLOCKS + 2 + 4 + 8)
+        // ref:
+        INITIAL_BLOCKS + (2 << (i - INITIAL_BLOCKS)) - 2
+    }
+}
+
+/// Returns if the genesis block was _NOT_ included when calculating the height offsets.
+///
+/// The genesis must always be included in the compact history.
+#[inline]
+pub(super) const fn compact_history_genesis_not_included<const INITIAL_BLOCKS: u64>(
+    top_block_height: u64,
+) -> bool {
+    // If the top block height is less than the initial blocks then it will always be included.
+    // Otherwise, we use the fact that to reach the genesis block this statement must be true (for a
+    // single `i`):
+    //
+    // `top_block_height - INITIAL_BLOCKS - 2^i + 2 == 0`
+    // which then means:
+    // `top_block_height - INITIAL_BLOCKS + 2 == 2^i`
+    // So if `top_block_height - INITIAL_BLOCKS + 2` is a power of 2 then the genesis block is in
+    // the compact history already.
+    top_block_height > INITIAL_BLOCKS && !(top_block_height - INITIAL_BLOCKS + 2).is_power_of_two()
+}
+
+//---------------------------------------------------------------------------------------------------- Tests
+
+#[cfg(test)]
+mod tests {
+    use proptest::prelude::*;
+
+    use super::*;
+
+    proptest! {
+        #[test]
+        fn compact_history(top_height in 0_u64..500_000_000) {
+            let mut heights = (0..)
+                .map(compact_history_index_to_height_offset::<11>)
+                .map_while(|i| top_height.checked_sub(i))
+                .collect::<Vec<_>>();
+
+            if compact_history_genesis_not_included::<11>(top_height) {
+                heights.push(0);
+            }
+
+            // Make sure the genesis and top block are always included.
+            assert_eq!(*heights.last().unwrap(), 0);
+            assert_eq!(*heights.first().unwrap(), top_height);
+
+            heights.windows(2).for_each(|window| assert_ne!(window[0], window[1]));
+        }
+    }
 }
diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs
index 20aebf9c..7f856ccd 100644
--- a/storage/blockchain/src/service/read.rs
+++ b/storage/blockchain/src/service/read.rs
@@ -14,7 +14,7 @@ use tokio::sync::{OwnedSemaphorePermit, Semaphore};
 use tokio_util::sync::PollSemaphore;
 
 use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError};
-use cuprate_helper::asynch::InfallibleOneshotReceiver;
+use cuprate_helper::{asynch::InfallibleOneshotReceiver, map::combine_low_high_bits_to_u128};
 use cuprate_types::{
     blockchain::{BCReadRequest, BCResponse},
     ExtendedBlockHeader, OutputOnChain,
@@ -23,17 +23,20 @@ use cuprate_types::{
 use crate::{
     config::ReaderThreads,
     open_tables::OpenTables,
-    ops::block::block_exists,
     ops::{
-        block::{get_block_extended_header_from_height, get_block_info},
+        block::{
+            block_exists, get_block_extended_header_from_height, get_block_height, get_block_info,
+        },
         blockchain::{cumulative_generated_coins, top_block_height},
         key_image::key_image_exists,
         output::id_to_output_on_chain,
     },
-    service::types::{ResponseReceiver, ResponseResult, ResponseSender},
+    service::{
+        free::{compact_history_genesis_not_included, compact_history_index_to_height_offset},
+        types::{ResponseReceiver, ResponseResult, ResponseSender},
+    },
     tables::{BlockHeights, BlockInfos, Tables},
-    types::BlockHash,
-    types::{Amount, AmountIndex, BlockHeight, KeyImage, PreRctOutputId},
+    types::{Amount, AmountIndex, BlockHash, BlockHeight, KeyImage, PreRctOutputId},
 };
 
 //---------------------------------------------------------------------------------------------------- DatabaseReadHandle
@@ -204,13 +207,15 @@ fn map_request(
     let response = match request {
         R::BlockExtendedHeader(block) => block_extended_header(env, block),
         R::BlockHash(block) => block_hash(env, block),
-        R::FilterUnknownHashes(hashes) => filter_unknown_hahses(env, hashes),
+        R::FilterUnknownHashes(hashes) => filter_unknown_hashes(env, hashes),
         R::BlockExtendedHeaderInRange(range) => block_extended_header_in_range(env, range),
         R::ChainHeight => chain_height(env),
         R::GeneratedCoins => generated_coins(env),
         R::Outputs(map) => outputs(env, map),
         R::NumberOutputsWithAmount(vec) => number_outputs_with_amount(env, vec),
         R::KeyImagesSpent(set) => key_images_spent(env, set),
+        R::CompactChainHistory => compact_chain_history(env),
+        R::FindFirstUnknown(block_ids) => find_first_unknown(env, &block_ids),
     };
 
     if let Err(e) = response_sender.send(response) {
@@ -320,7 +325,7 @@ fn block_hash(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult {
 
 /// [`BCReadRequest::FilterUnknownHashes`].
 #[inline]
-fn filter_unknown_hahses(env: &ConcreteEnv, mut hashes: HashSet<BlockHash>) -> ResponseResult {
+fn filter_unknown_hashes(env: &ConcreteEnv, mut hashes: HashSet<BlockHash>) -> ResponseResult {
     // Single-threaded, no `ThreadLocal` required.
     let env_inner = env.env_inner();
     let tx_ro = env_inner.tx_ro()?;
@@ -525,3 +530,81 @@ fn key_images_spent(env: &ConcreteEnv, key_images: HashSet<KeyImage>) -> Respons
         Some(Err(e)) => Err(e), // A database error occurred.
     }
 }
+
+/// [`BCReadRequest::CompactChainHistory`]
+fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult {
+    let env_inner = env.env_inner();
+    let tx_ro = env_inner.tx_ro()?;
+
+    let table_block_heights = env_inner.open_db_ro::<BlockHeights>(&tx_ro)?;
+    let table_block_infos = env_inner.open_db_ro::<BlockInfos>(&tx_ro)?;
+
+    let top_block_height = top_block_height(&table_block_heights)?;
+
+    let top_block_info = get_block_info(&top_block_height, &table_block_infos)?;
+    let cumulative_difficulty = combine_low_high_bits_to_u128(
+        top_block_info.cumulative_difficulty_low,
+        top_block_info.cumulative_difficulty_high,
+    );
+
+    /// The amount of top block IDs in the compact chain.
+    const INITIAL_BLOCKS: u64 = 11;
+
+    // rayon is not used here because the amount of block IDs is expected to be small.
+    let mut block_ids = (0..)
+        .map(compact_history_index_to_height_offset::<INITIAL_BLOCKS>)
+        .map_while(|i| top_block_height.checked_sub(i))
+        .map(|height| Ok(get_block_info(&height, &table_block_infos)?.block_hash))
+        .collect::<Result<Vec<_>, RuntimeError>>()?;
+
+    if compact_history_genesis_not_included::<INITIAL_BLOCKS>(top_block_height) {
+        block_ids.push(get_block_info(&0, &table_block_infos)?.block_hash);
+    }
+
+    Ok(BCResponse::CompactChainHistory {
+        cumulative_difficulty,
+        block_ids,
+    })
+}
+
+/// [`BCReadRequest::FindFirstUnknown`]
+///
+/// # Invariant
+/// `block_ids` must be sorted in chronological block order, or else
+/// the returned result is unspecified and meaningless, as this function
+/// performs a binary search.
+fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseResult {
+    let env_inner = env.env_inner();
+    let tx_ro = env_inner.tx_ro()?;
+
+    let table_block_heights = env_inner.open_db_ro::<BlockHeights>(&tx_ro)?;
+
+    let mut err = None;
+
+    // Do a binary search to find the first unknown block in the batch.
+    let idx =
+        block_ids.partition_point(
+            |block_id| match block_exists(block_id, &table_block_heights) {
+                Ok(exists) => exists,
+                Err(e) => {
+                    err.get_or_insert(e);
+                    // if this happens the search is scrapped, just return `false` back.
+                    false
+                }
+            },
+        );
+
+    if let Some(e) = err {
+        return Err(e);
+    }
+
+    Ok(if idx == block_ids.len() {
+        BCResponse::FindFirstUnknown(None)
+    } else if idx == 0 {
+        BCResponse::FindFirstUnknown(Some((0, 0)))
+    } else {
+        let last_known_height = get_block_height(&block_ids[idx - 1], &table_block_heights)?;
+
+        BCResponse::FindFirstUnknown(Some((idx, last_known_height + 1)))
+    })
+}
diff --git a/types/src/blockchain.rs b/types/src/blockchain.rs
index 42390f9d..5a09ca3d 100644
--- a/types/src/blockchain.rs
+++ b/types/src/blockchain.rs
@@ -83,10 +83,21 @@ pub enum BCReadRequest {
     /// The input is a list of output amounts.
     NumberOutputsWithAmount(Vec<u64>),
 
-    /// Check that all key images within a set arer not spent.
+    /// Check that all key images within a set are not spent.
     ///
     /// Input is a set of key images.
     KeyImagesSpent(HashSet<[u8; 32]>),
+
+    /// A request for the compact chain history.
+    CompactChainHistory,
+
+    /// A request to find the first unknown block ID in a list of block IDs.
+    ///
+    /// # Invariant
+    /// The [`Vec`] containing the block IDs must be sorted in chronological block
+    /// order, or else the returned response is unspecified and meaningless,
+    /// as this request performs a binary search.
+    FindFirstUnknown(Vec<[u8; 32]>),
 }
 
 //---------------------------------------------------------------------------------------------------- WriteRequest
@@ -164,6 +175,23 @@ pub enum BCResponse {
     /// The inner value is `false` if _none_ of the key images were spent.
     KeyImagesSpent(bool),
 
+    /// Response to [`BCReadRequest::CompactChainHistory`].
+    CompactChainHistory {
+        /// A list of block IDs in our chain, starting with the most recent block, all the way to the genesis block.
+        ///
+        /// These blocks should be in reverse chronological order; not every block is needed.
+        block_ids: Vec<[u8; 32]>,
+        /// The current cumulative difficulty of the chain.
+        cumulative_difficulty: u128,
+    },
+
+    /// The response for [`BCReadRequest::FindFirstUnknown`].
+    ///
+    /// Contains the index of the first unknown block and its expected height.
+    ///
+    /// This will be [`None`] if all blocks were known.
+    FindFirstUnknown(Option<(usize, u64)>),
+
     //------------------------------------------------------ Writes
     /// Response to [`BCWriteRequest::WriteBlock`].
     ///
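
The height-offset scheme and the binary search introduced above can be exercised outside the patch. Below is a minimal, self-contained Rust sketch, not part of the diff: it mirrors the two compact-history helpers from storage/blockchain/src/service/free.rs with the const generic fixed to 11 (the `INITIAL_BLOCKS` value used by `compact_chain_history`) and demonstrates the `slice::partition_point` behaviour that `find_first_unknown` relies on. The shortened helper names, the example top height, and the `known` set are illustrative stand-ins rather than code from the repository.

// A minimal sketch of the two ideas above, assuming INITIAL_BLOCKS = 11; the
// chain tip and the `known` set below are hypothetical example data.
use std::collections::HashSet;

const INITIAL_BLOCKS: u64 = 11;

/// Mirror of `compact_history_index_to_height_offset`: the first eleven positions
/// step back one block at a time, afterwards the spacing doubles (monerod-style).
const fn height_offset(i: u64) -> u64 {
    if i <= INITIAL_BLOCKS {
        i
    } else {
        INITIAL_BLOCKS + (2 << (i - INITIAL_BLOCKS)) - 2
    }
}

/// Mirror of `compact_history_genesis_not_included`: the doubling sequence only
/// lands exactly on height 0 when `top - INITIAL_BLOCKS + 2` is a power of two.
const fn genesis_not_included(top: u64) -> bool {
    top > INITIAL_BLOCKS && !(top - INITIAL_BLOCKS + 2).is_power_of_two()
}

fn main() {
    // 1. Compact chain history for a hypothetical chain tip.
    let top_height: u64 = 3_000_000;
    let mut heights: Vec<u64> = (0..)
        .map(height_offset)
        .map_while(|offset| top_height.checked_sub(offset))
        .collect();
    if genesis_not_included(top_height) {
        heights.push(0);
    }
    // Prints 3000000, 2999999, ..., 2999989, 2999987, 2999983, 2999975, ..., 0
    println!("{heights:?}");

    // 2. `find_first_unknown`: `partition_point` binary-searches for the first
    //    element where the predicate flips to `false`, i.e. the first unknown
    //    block, which is only meaningful when the known IDs form a prefix
    //    (hence the "sorted in chronological order" invariant).
    let known: HashSet<u64> = (0..=3).collect(); // stand-in for `block_exists`
    let block_ids = [1_u64, 2, 3, 4, 5];
    let idx = block_ids.partition_point(|id| known.contains(id));
    assert_eq!(idx, 3); // `block_ids[3]` (= 4) is the first unknown ID
}

The doubling spacing keeps the compact history logarithmic in chain height while the first eleven entries still pin down a recent fork point exactly, which is why the genesis block has to be appended manually whenever the doubling sequence skips over height 0.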