Storage: add blockchain history requests (#206)
Some checks failed
CI / fmt (push) Waiting to run
CI / typo (push) Waiting to run
CI / ci (macos-latest, stable, bash) (push) Waiting to run
CI / ci (ubuntu-latest, stable, bash) (push) Waiting to run
CI / ci (windows-latest, stable-x86_64-pc-windows-gnu, msys2 {0}) (push) Waiting to run
Audit / audit (push) Has been cancelled
Deny / audit (push) Has been cancelled

* Add database requests for chain history

* misc fixes

* review comments

* fix clippy

* add link and fix typo

* Apply suggestions from code review

Co-authored-by: hinto-janai <hinto.janai@protonmail.com>

* add comment

---------

Co-authored-by: hinto-janai <hinto.janai@protonmail.com>
This commit is contained in:
Boog900 2024-07-02 22:08:19 +00:00 committed by GitHub
parent 6ce177aeca
commit 7c8466f4ba
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
8 changed files with 198 additions and 23 deletions

1
Cargo.lock generated
View file

@ -506,6 +506,7 @@ dependencies = [
"monero-serai", "monero-serai",
"paste", "paste",
"pretty_assertions", "pretty_assertions",
"proptest",
"rayon", "rayon",
"tempfile", "tempfile",
"thread_local", "thread_local",

View file

@ -121,7 +121,7 @@ pub enum ChainSvcResponse {
/// The response for [`ChainSvcRequest::FindFirstUnknown`]. /// The response for [`ChainSvcRequest::FindFirstUnknown`].
/// ///
/// Contains the index of the first unknown block and its expected height. /// Contains the index of the first unknown block and its expected height.
FindFirstUnknown(usize, u64), FindFirstUnknown(Option<(usize, u64)>),
/// The response for [`ChainSvcRequest::CumulativeDifficulty`]. /// The response for [`ChainSvcRequest::CumulativeDifficulty`].
/// ///
/// The current cumulative difficulty of our chain. /// The current cumulative difficulty of our chain.

View file

@ -198,7 +198,7 @@ where
tracing::debug!("Highest chin entry contained {} block Ids", hashes.len()); tracing::debug!("Highest chin entry contained {} block Ids", hashes.len());
// Find the first unknown block in the batch. // Find the first unknown block in the batch.
let ChainSvcResponse::FindFirstUnknown(first_unknown, expected_height) = our_chain_svc let ChainSvcResponse::FindFirstUnknown(first_unknown_ret) = our_chain_svc
.ready() .ready()
.await? .await?
.call(ChainSvcRequest::FindFirstUnknown(hashes.clone())) .call(ChainSvcRequest::FindFirstUnknown(hashes.clone()))
@ -207,18 +207,18 @@ where
panic!("chain service sent wrong response."); panic!("chain service sent wrong response.");
}; };
// We know all the blocks already
// TODO: The peer could still be on a different chain, however the chain might just be too far split.
let Some((first_unknown, expected_height)) = first_unknown_ret else {
return Err(BlockDownloadError::FailedToFindAChainToFollow);
};
// The peer must send at least one block we already know. // The peer must send at least one block we already know.
if first_unknown == 0 { if first_unknown == 0 {
peer_handle.ban_peer(MEDIUM_BAN); peer_handle.ban_peer(MEDIUM_BAN);
return Err(BlockDownloadError::PeerSentNoOverlappingBlocks); return Err(BlockDownloadError::PeerSentNoOverlappingBlocks);
} }
// We know all the blocks already
// TODO: The peer could still be on a different chain, however the chain might just be too far split.
if first_unknown == hashes.len() {
return Err(BlockDownloadError::FailedToFindAChainToFollow);
}
let previous_id = hashes[first_unknown - 1]; let previous_id = hashes[first_unknown - 1];
let first_entry = ChainEntry { let first_entry = ChainEntry {

View file

@ -314,7 +314,9 @@ impl Service<ChainSvcRequest> for OurChainSvc {
block_ids: vec![genesis], block_ids: vec![genesis],
cumulative_difficulty: 1, cumulative_difficulty: 1,
}, },
ChainSvcRequest::FindFirstUnknown(_) => ChainSvcResponse::FindFirstUnknown(1, 1), ChainSvcRequest::FindFirstUnknown(_) => {
ChainSvcResponse::FindFirstUnknown(Some((1, 1)))
}
ChainSvcRequest::CumulativeDifficulty => ChainSvcResponse::CumulativeDifficulty(1), ChainSvcRequest::CumulativeDifficulty => ChainSvcResponse::CumulativeDifficulty(1),
}) })
} }

View file

@ -45,8 +45,8 @@ rayon = { workspace = true, optional = true }
cuprate-helper = { path = "../../helper", features = ["thread"] } cuprate-helper = { path = "../../helper", features = ["thread"] }
cuprate-test-utils = { path = "../../test-utils" } cuprate-test-utils = { path = "../../test-utils" }
bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] }
tempfile = { version = "3.10.0" } tempfile = { version = "3.10.0" }
pretty_assertions = { workspace = true } pretty_assertions = { workspace = true }
proptest = { workspace = true }
hex = { workspace = true } hex = { workspace = true }
hex-literal = { workspace = true } hex-literal = { workspace = true }

View file

@ -33,8 +33,69 @@ pub fn init(config: Config) -> Result<(DatabaseReadHandle, DatabaseWriteHandle),
Ok((readers, writer)) Ok((readers, writer))
} }
//---------------------------------------------------------------------------------------------------- Tests //---------------------------------------------------------------------------------------------------- Compact history
#[cfg(test)] /// Given a position in the compact history, returns the height offset that should be in that position.
mod test { ///
// use super::*; /// The height offset is the difference between the top block's height and the block height that should be in that position.
/// Maps a position in the compact history to the height *offset* (measured
/// down from the top block's height) of the block belonging in that position.
#[inline]
pub(super) const fn compact_history_index_to_height_offset<const INITIAL_BLOCKS: u64>(
    i: u64,
) -> u64 {
    // The first `INITIAL_BLOCKS + 1` positions are simply consecutive offsets.
    if i <= INITIAL_BLOCKS {
        return i;
    }

    // Past the initial window the offsets grow by powers of 2, matching monerod:
    // (INITIAL_BLOCKS + 2), (INITIAL_BLOCKS + 2 + 4), (INITIAL_BLOCKS + 2 + 4 + 8), ...
    // ref: <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/cryptonote_core/blockchain.cpp#L727>
    INITIAL_BLOCKS + (2 << (i - INITIAL_BLOCKS)) - 2
}
/// Returns `true` when the genesis block was _NOT_ reached by the
/// height-offset walk and must therefore be appended manually.
///
/// The genesis block must always be present in the compact history.
#[inline]
pub(super) const fn compact_history_genesis_not_included<const INITIAL_BLOCKS: u64>(
    top_block_height: u64,
) -> bool {
    // Within the initial window the walk always steps down to height 0.
    if top_block_height <= INITIAL_BLOCKS {
        return false;
    }

    // Past the window, the walk lands exactly on height 0 iff for some `i`:
    //
    //   `top_block_height - INITIAL_BLOCKS - 2^i + 2 == 0`
    //
    // which rearranges to:
    //
    //   `top_block_height - INITIAL_BLOCKS + 2 == 2^i`
    //
    // So the genesis block is already included precisely when
    // `top_block_height - INITIAL_BLOCKS + 2` is a power of 2.
    !(top_block_height - INITIAL_BLOCKS + 2).is_power_of_two()
}
//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
mod tests {
use proptest::prelude::*;
use super::*;
proptest! {
#[test]
fn compact_history(top_height in 0_u64..500_000_000) {
let mut heights = (0..)
.map(compact_history_index_to_height_offset::<11>)
.map_while(|i| top_height.checked_sub(i))
.collect::<Vec<_>>();
if compact_history_genesis_not_included::<11>(top_height) {
heights.push(0);
}
// Make sure the genesis and top block are always included.
assert_eq!(*heights.last().unwrap(), 0);
assert_eq!(*heights.first().unwrap(), top_height);
heights.windows(2).for_each(|window| assert_ne!(window[0], window[1]));
}
}
} }

View file

@ -14,7 +14,7 @@ use tokio::sync::{OwnedSemaphorePermit, Semaphore};
use tokio_util::sync::PollSemaphore; use tokio_util::sync::PollSemaphore;
use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError}; use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError};
use cuprate_helper::asynch::InfallibleOneshotReceiver; use cuprate_helper::{asynch::InfallibleOneshotReceiver, map::combine_low_high_bits_to_u128};
use cuprate_types::{ use cuprate_types::{
blockchain::{BCReadRequest, BCResponse}, blockchain::{BCReadRequest, BCResponse},
ExtendedBlockHeader, OutputOnChain, ExtendedBlockHeader, OutputOnChain,
@ -23,17 +23,20 @@ use cuprate_types::{
use crate::{ use crate::{
config::ReaderThreads, config::ReaderThreads,
open_tables::OpenTables, open_tables::OpenTables,
ops::block::block_exists,
ops::{ ops::{
block::{get_block_extended_header_from_height, get_block_info}, block::{
block_exists, get_block_extended_header_from_height, get_block_height, get_block_info,
},
blockchain::{cumulative_generated_coins, top_block_height}, blockchain::{cumulative_generated_coins, top_block_height},
key_image::key_image_exists, key_image::key_image_exists,
output::id_to_output_on_chain, output::id_to_output_on_chain,
}, },
service::types::{ResponseReceiver, ResponseResult, ResponseSender}, service::{
free::{compact_history_genesis_not_included, compact_history_index_to_height_offset},
types::{ResponseReceiver, ResponseResult, ResponseSender},
},
tables::{BlockHeights, BlockInfos, Tables}, tables::{BlockHeights, BlockInfos, Tables},
types::BlockHash, types::{Amount, AmountIndex, BlockHash, BlockHeight, KeyImage, PreRctOutputId},
types::{Amount, AmountIndex, BlockHeight, KeyImage, PreRctOutputId},
}; };
//---------------------------------------------------------------------------------------------------- DatabaseReadHandle //---------------------------------------------------------------------------------------------------- DatabaseReadHandle
@ -204,13 +207,15 @@ fn map_request(
let response = match request { let response = match request {
R::BlockExtendedHeader(block) => block_extended_header(env, block), R::BlockExtendedHeader(block) => block_extended_header(env, block),
R::BlockHash(block) => block_hash(env, block), R::BlockHash(block) => block_hash(env, block),
R::FilterUnknownHashes(hashes) => filter_unknown_hahses(env, hashes), R::FilterUnknownHashes(hashes) => filter_unknown_hashes(env, hashes),
R::BlockExtendedHeaderInRange(range) => block_extended_header_in_range(env, range), R::BlockExtendedHeaderInRange(range) => block_extended_header_in_range(env, range),
R::ChainHeight => chain_height(env), R::ChainHeight => chain_height(env),
R::GeneratedCoins => generated_coins(env), R::GeneratedCoins => generated_coins(env),
R::Outputs(map) => outputs(env, map), R::Outputs(map) => outputs(env, map),
R::NumberOutputsWithAmount(vec) => number_outputs_with_amount(env, vec), R::NumberOutputsWithAmount(vec) => number_outputs_with_amount(env, vec),
R::KeyImagesSpent(set) => key_images_spent(env, set), R::KeyImagesSpent(set) => key_images_spent(env, set),
R::CompactChainHistory => compact_chain_history(env),
R::FindFirstUnknown(block_ids) => find_first_unknown(env, &block_ids),
}; };
if let Err(e) = response_sender.send(response) { if let Err(e) = response_sender.send(response) {
@ -320,7 +325,7 @@ fn block_hash(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult {
/// [`BCReadRequest::FilterUnknownHashes`]. /// [`BCReadRequest::FilterUnknownHashes`].
#[inline] #[inline]
fn filter_unknown_hahses(env: &ConcreteEnv, mut hashes: HashSet<BlockHash>) -> ResponseResult { fn filter_unknown_hashes(env: &ConcreteEnv, mut hashes: HashSet<BlockHash>) -> ResponseResult {
// Single-threaded, no `ThreadLocal` required. // Single-threaded, no `ThreadLocal` required.
let env_inner = env.env_inner(); let env_inner = env.env_inner();
let tx_ro = env_inner.tx_ro()?; let tx_ro = env_inner.tx_ro()?;
@ -525,3 +530,81 @@ fn key_images_spent(env: &ConcreteEnv, key_images: HashSet<KeyImage>) -> Respons
Some(Err(e)) => Err(e), // A database error occurred. Some(Err(e)) => Err(e), // A database error occurred.
} }
} }
/// [`BCReadRequest::CompactChainHistory`]
fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult {
    /// How many of the newest block IDs are included consecutively
    /// before the power-of-2 offsets start.
    const INITIAL_BLOCKS: u64 = 11;

    let env_inner = env.env_inner();
    let tx_ro = env_inner.tx_ro()?;

    let table_block_heights = env_inner.open_db_ro::<BlockHeights>(&tx_ro)?;
    let table_block_infos = env_inner.open_db_ro::<BlockInfos>(&tx_ro)?;

    let top_height = top_block_height(&table_block_heights)?;
    let top_info = get_block_info(&top_height, &table_block_infos)?;

    // The difficulty is stored split into two 64-bit halves; rejoin them.
    let cumulative_difficulty = combine_low_high_bits_to_u128(
        top_info.cumulative_difficulty_low,
        top_info.cumulative_difficulty_high,
    );

    // rayon is not used here because the amount of block IDs is expected to be small.
    let mut block_ids = (0..)
        .map(compact_history_index_to_height_offset::<INITIAL_BLOCKS>)
        .map_while(|offset| top_height.checked_sub(offset))
        .map(|height| Ok(get_block_info(&height, &table_block_infos)?.block_hash))
        .collect::<Result<Vec<_>, RuntimeError>>()?;

    // The genesis block must always be present; append it
    // if the offset walk did not land exactly on height 0.
    if compact_history_genesis_not_included::<INITIAL_BLOCKS>(top_height) {
        block_ids.push(get_block_info(&0, &table_block_infos)?.block_hash);
    }

    Ok(BCResponse::CompactChainHistory {
        cumulative_difficulty,
        block_ids,
    })
}
/// [`BCReadRequest::FindFirstUnknown`]
///
/// # Invariant
/// `block_ids` must be sorted in chronological block order, or else
/// the returned result is unspecified and meaningless, as this function
/// performs a binary search.
fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseResult {
    let env_inner = env.env_inner();
    let tx_ro = env_inner.tx_ro()?;

    let table_block_heights = env_inner.open_db_ro::<BlockHeights>(&tx_ro)?;

    // `partition_point` cannot propagate a `RuntimeError`, so the first
    // one encountered is stashed here and checked after the search.
    let mut first_err = None;

    // Binary search: every ID before the partition point exists in our DB.
    let idx = block_ids.partition_point(|block_id| {
        match block_exists(block_id, &table_block_heights) {
            Ok(exists) => exists,
            Err(e) => {
                first_err.get_or_insert(e);
                // The search result is discarded on error; `false` is arbitrary.
                false
            }
        }
    });

    if let Some(e) = first_err {
        return Err(e);
    }

    let response = if idx == block_ids.len() {
        // Every ID in the batch was already known.
        BCResponse::FindFirstUnknown(None)
    } else if idx == 0 {
        // Not even the first ID was known.
        BCResponse::FindFirstUnknown(Some((0, 0)))
    } else {
        // The block before `idx` is the last known one, so the first
        // unknown block is expected at the height right after it.
        let last_known_height = get_block_height(&block_ids[idx - 1], &table_block_heights)?;

        BCResponse::FindFirstUnknown(Some((idx, last_known_height + 1)))
    };

    Ok(response)
}

View file

@ -83,10 +83,21 @@ pub enum BCReadRequest {
/// The input is a list of output amounts. /// The input is a list of output amounts.
NumberOutputsWithAmount(Vec<u64>), NumberOutputsWithAmount(Vec<u64>),
/// Check that all key images within a set arer not spent. /// Check that all key images within a set are not spent.
/// ///
/// Input is a set of key images. /// Input is a set of key images.
KeyImagesSpent(HashSet<[u8; 32]>), KeyImagesSpent(HashSet<[u8; 32]>),
/// A request for the compact chain history.
CompactChainHistory,
/// A request to find the first unknown block ID in a list of block IDs.
///
/// # Invariant
/// The [`Vec`] containing the block IDs must be sorted in chronological block
/// order, or else the returned response is unspecified and meaningless,
/// as this request performs a binary search.
FindFirstUnknown(Vec<[u8; 32]>),
} }
//---------------------------------------------------------------------------------------------------- WriteRequest //---------------------------------------------------------------------------------------------------- WriteRequest
@ -164,6 +175,23 @@ pub enum BCResponse {
/// The inner value is `false` if _none_ of the key images were spent. /// The inner value is `false` if _none_ of the key images were spent.
KeyImagesSpent(bool), KeyImagesSpent(bool),
/// Response to [`BCReadRequest::CompactChainHistory`].
CompactChainHistory {
/// A list of block IDs in our chain, starting with the most recent block, all the way to the genesis block.
///
/// These blocks should be in reverse chronological order; not every block is needed.
block_ids: Vec<[u8; 32]>,
/// The current cumulative difficulty of the chain.
cumulative_difficulty: u128,
},
/// The response for [`BCReadRequest::FindFirstUnknown`].
///
/// Contains the index of the first unknown block and its expected height.
///
/// This will be [`None`] if all blocks were known.
FindFirstUnknown(Option<(usize, u64)>),
//------------------------------------------------------ Writes //------------------------------------------------------ Writes
/// Response to [`BCWriteRequest::WriteBlock`]. /// Response to [`BCWriteRequest::WriteBlock`].
/// ///