Cuprated: Fix reorgs ()

* fix reorgs

* fix off by 1 and add a test

* commit file

* docs

* remove unneeded fn

* fix tests
Boog900 2025-03-21 17:52:10 +00:00 committed by GitHub
parent c5cbe51300
commit b97bbab593
12 changed files with 263 additions and 18 deletions
Cargo.lock
binaries/cuprated
consensus
p2p/p2p/src
storage/blockchain/src

Cargo.lock generated

@@ -1109,6 +1109,7 @@ dependencies = [
"serde",
"serde_bytes",
"serde_json",
"tempfile",
"thiserror",
"thread_local",
"tokio",


@@ -78,5 +78,8 @@ tracing-appender = { workspace = true }
tracing-subscriber = { workspace = true, features = ["std", "fmt", "default"] }
tracing = { workspace = true, features = ["default"] }
[dev-dependencies]
tempfile = { workspace = true }
[lints]
workspace = true


@@ -33,6 +33,9 @@ use crate::{
mod commands;
mod handler;
#[cfg(test)]
mod tests;
pub use commands::{BlockchainManagerCommand, IncomingBlockOk};
/// Initialize the blockchain manager.


@@ -396,7 +396,7 @@ impl super::BlockchainManager {
.await
.expect(PANIC_CRITICAL_SERVICE_ERROR)
.call(BlockchainWriteRequest::PopBlocks(
current_main_chain_height - split_height + 1,
current_main_chain_height - split_height,
))
.await
.expect(PANIC_CRITICAL_SERVICE_ERROR)
@@ -409,7 +409,7 @@
.await
.expect(PANIC_CRITICAL_SERVICE_ERROR)
.call(BlockChainContextRequest::PopBlocks {
numb_blocks: current_main_chain_height - split_height + 1,
numb_blocks: current_main_chain_height - split_height,
})
.await
.expect(PANIC_CRITICAL_SERVICE_ERROR);
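
The dropped `+ 1` is the off-by-one called out in the commit message. A worked example, assuming (as the names suggest) that `current_main_chain_height` counts blocks (top height + 1) and `split_height` is the height of the first block the main chain and the alt chain no longer share:

// Editor sketch, hypothetical values not taken from the diff:
// main chain holds blocks 0..=9   -> current_main_chain_height = 10
// chains diverge at height 7      -> blocks 7, 8 and 9 must be popped
let (current_main_chain_height, split_height) = (10_u64, 7);
assert_eq!(current_main_chain_height - split_height, 3); // with the old `+ 1`, block 6 would be popped as well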


@@ -0,0 +1,204 @@
use std::{collections::HashMap, env::temp_dir, path::PathBuf, sync::Arc};
use monero_serai::{
block::{Block, BlockHeader},
transaction::{Input, Output, Timelock, Transaction, TransactionPrefix},
};
use tokio::sync::{oneshot, watch};
use tower::BoxError;
use cuprate_consensus_context::{BlockchainContext, ContextConfig};
use cuprate_consensus_rules::{hard_forks::HFInfo, miner_tx::calculate_block_reward, HFsInfo};
use cuprate_helper::network::Network;
use cuprate_p2p::BroadcastSvc;
use crate::blockchain::{
check_add_genesis, manager::BlockchainManager, manager::BlockchainManagerCommand,
ConsensusBlockchainReadHandle,
};
async fn mock_manager(data_dir: PathBuf) -> BlockchainManager {
let blockchain_config = cuprate_blockchain::config::ConfigBuilder::new()
.data_directory(data_dir.clone())
.build();
let txpool_config = cuprate_txpool::config::ConfigBuilder::new()
.data_directory(data_dir)
.build();
let (mut blockchain_read_handle, mut blockchain_write_handle, _) =
cuprate_blockchain::service::init(blockchain_config).unwrap();
let (txpool_read_handle, txpool_write_handle, _) =
cuprate_txpool::service::init(txpool_config).unwrap();
check_add_genesis(
&mut blockchain_read_handle,
&mut blockchain_write_handle,
Network::Mainnet,
)
.await;
let mut context_config = ContextConfig::main_net();
context_config.difficulty_cfg.fixed_difficulty = Some(1);
context_config.hard_fork_cfg.info = HFsInfo::new([HFInfo::new(0, 0); 16]);
let blockchain_read_handle =
ConsensusBlockchainReadHandle::new(blockchain_read_handle, BoxError::from);
let blockchain_context_service = cuprate_consensus_context::initialize_blockchain_context(
context_config,
blockchain_read_handle.clone(),
)
.await
.unwrap();
BlockchainManager {
blockchain_write_handle,
blockchain_read_handle,
txpool_write_handle,
blockchain_context_service,
stop_current_block_downloader: Arc::new(Default::default()),
broadcast_svc: BroadcastSvc::mock(),
}
}
fn generate_block(context: &BlockchainContext) -> Block {
Block {
header: BlockHeader {
hardfork_version: 16,
hardfork_signal: 16,
timestamp: 1000,
previous: context.top_hash,
nonce: 0,
},
miner_transaction: Transaction::V2 {
prefix: TransactionPrefix {
additional_timelock: Timelock::Block(context.chain_height + 60),
inputs: vec![Input::Gen(context.chain_height)],
outputs: vec![Output {
// we can set the block weight to 1 as the true value won't get us into the penalty zone.
amount: Some(calculate_block_reward(
1,
context.median_weight_for_block_reward,
context.already_generated_coins,
context.current_hf,
)),
key: Default::default(),
view_tag: Some(1),
}],
extra: rand::random::<[u8; 32]>().to_vec(),
},
proofs: None,
},
transactions: vec![],
}
}
#[tokio::test]
async fn simple_reorg() {
// create 2 managers
let data_dir_1 = tempfile::tempdir().unwrap();
let mut manager_1 = mock_manager(data_dir_1.path().to_path_buf()).await;
let data_dir_2 = tempfile::tempdir().unwrap();
let mut manager_2 = mock_manager(data_dir_2.path().to_path_buf()).await;
// give both managers the same first non-genesis block
let block_1 = generate_block(manager_1.blockchain_context_service.blockchain_context());
manager_1
.handle_command(BlockchainManagerCommand::AddBlock {
block: block_1.clone(),
prepped_txs: HashMap::new(),
response_tx: oneshot::channel().0,
})
.await;
manager_2
.handle_command(BlockchainManagerCommand::AddBlock {
block: block_1,
prepped_txs: HashMap::new(),
response_tx: oneshot::channel().0,
})
.await;
assert_eq!(
manager_1.blockchain_context_service.blockchain_context(),
manager_2.blockchain_context_service.blockchain_context()
);
// give the managers different 2nd blocks
let block_2a = generate_block(manager_1.blockchain_context_service.blockchain_context());
let block_2b = generate_block(manager_2.blockchain_context_service.blockchain_context());
manager_1
.handle_command(BlockchainManagerCommand::AddBlock {
block: block_2a,
prepped_txs: HashMap::new(),
response_tx: oneshot::channel().0,
})
.await;
manager_2
.handle_command(BlockchainManagerCommand::AddBlock {
block: block_2b.clone(),
prepped_txs: HashMap::new(),
response_tx: oneshot::channel().0,
})
.await;
let manager_1_context = manager_1
.blockchain_context_service
.blockchain_context()
.clone();
assert_ne!(
&manager_1_context,
manager_2.blockchain_context_service.blockchain_context()
);
// give manager 1 the missing block
manager_1
.handle_command(BlockchainManagerCommand::AddBlock {
block: block_2b,
prepped_txs: HashMap::new(),
response_tx: oneshot::channel().0,
})
.await;
// make sure this didn't change the context
assert_eq!(
&manager_1_context,
manager_1.blockchain_context_service.blockchain_context()
);
// give both managers a new block (built off manager 2's chain)
let block_3 = generate_block(manager_2.blockchain_context_service.blockchain_context());
manager_1
.handle_command(BlockchainManagerCommand::AddBlock {
block: block_3.clone(),
prepped_txs: HashMap::new(),
response_tx: oneshot::channel().0,
})
.await;
manager_2
.handle_command(BlockchainManagerCommand::AddBlock {
block: block_3,
prepped_txs: HashMap::new(),
response_tx: oneshot::channel().0,
})
.await;
// make sure manager 1 reorged.
assert_eq!(
manager_1.blockchain_context_service.blockchain_context(),
manager_2.blockchain_context_service.blockchain_context()
);
assert_eq!(
manager_1
.blockchain_context_service
.blockchain_context()
.chain_height,
4
);
}
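
The test drops the receiving half of every oneshot channel it creates. As a sketch of how a caller could also assert on the manager's reply (the concrete type sent through `response_tx` is not visible in this diff, so it is left unnamed):

// Editor sketch, not part of the commit:
let (response_tx, response_rx) = oneshot::channel();
manager_1
    .handle_command(BlockchainManagerCommand::AddBlock {
        block: block_1.clone(),
        prepped_txs: HashMap::new(),
        response_tx,
    })
    .await;
// whatever the manager sends back (e.g. an IncomingBlockOk) can now be inspected
let _reply = response_rx.await.expect("manager dropped the response channel");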


@@ -52,9 +52,10 @@ impl AltChainContextCache {
block_weight: usize,
long_term_block_weight: usize,
timestamp: u64,
cumulative_difficulty: u128,
) {
if let Some(difficulty_cache) = &mut self.difficulty_cache {
difficulty_cache.new_block(height, timestamp, difficulty_cache.cumulative_difficulty());
difficulty_cache.new_block(height, timestamp, cumulative_difficulty);
}
if let Some(weight_cache) = &mut self.weight_cache {


@@ -36,17 +36,11 @@ pub struct DifficultyCacheConfig {
pub window: usize,
pub cut: usize,
pub lag: usize,
/// If [`Some`] the difficulty cache will always return this value as the current difficulty.
pub fixed_difficulty: Option<u128>,
}
impl DifficultyCacheConfig {
/// Create a new difficulty cache config.
///
/// # Notes
/// You probably do not need this, use [`DifficultyCacheConfig::main_net`] instead.
pub const fn new(window: usize, cut: usize, lag: usize) -> Self {
Self { window, cut, lag }
}
/// Returns the total amount of blocks we need to track to calculate difficulty
pub const fn total_block_count(&self) -> usize {
self.window + self.lag
@@ -64,6 +58,7 @@ impl DifficultyCacheConfig {
window: DIFFICULTY_WINDOW,
cut: DIFFICULTY_CUT,
lag: DIFFICULTY_LAG,
fixed_difficulty: None,
}
}
}
@@ -297,6 +292,10 @@ fn next_difficulty(
cumulative_difficulties: &VecDeque<u128>,
hf: HardFork,
) -> u128 {
if let Some(fixed_difficulty) = config.fixed_difficulty {
return fixed_difficulty;
}
if timestamps.len() <= 1 {
return 1;
}
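
The new `fixed_difficulty` escape hatch is what lets the reorg test above accept hand-built blocks without mining real proof-of-work; the field paths below are copied from that test's `mock_manager`:

// Pin the difficulty to 1 in tests so the proof-of-work check passes for any nonce.
let mut context_config = ContextConfig::main_net();
context_config.difficulty_cfg.fixed_difficulty = Some(1);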


@@ -173,6 +173,7 @@ where
block_info.weight,
block_info.long_term_weight,
block_info.block.header.timestamp,
cumulative_difficulty,
);
// Add this alt cache back to the context service.
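
For context on the bug the new argument fixes: the alt-chain cache previously recorded `difficulty_cache.cumulative_difficulty()`, its own running total from before the new block, so the cumulative difficulty reported for an alt chain stayed flat instead of growing with each block. The caller now threads through the block's own cumulative difficulty, which (roughly, as an assumption about code outside this hunk) amounts to:

// Editor sketch, not the actual call-site code:
// the value stored for the new alt block is the previous total plus this block's difficulty
let cumulative_difficulty = previous_cumulative_difficulty + block_difficulty;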


@@ -17,8 +17,12 @@ const TEST_LAG: usize = 2;
const TEST_TOTAL_ACCOUNTED_BLOCKS: usize = TEST_WINDOW + TEST_LAG;
pub(crate) const TEST_DIFFICULTY_CONFIG: DifficultyCacheConfig =
DifficultyCacheConfig::new(TEST_WINDOW, TEST_CUT, TEST_LAG);
pub(crate) const TEST_DIFFICULTY_CONFIG: DifficultyCacheConfig = DifficultyCacheConfig {
window: TEST_WINDOW,
cut: TEST_CUT,
lag: TEST_LAG,
fixed_difficulty: None,
};
#[tokio::test]
async fn first_3_blocks_fixed_difficulty() -> Result<(), tower::BoxError> {


@@ -159,6 +159,13 @@ pub struct BroadcastSvc<N: NetworkZone> {
tx_broadcast_channel_inbound: broadcast::Sender<BroadcastTxInfo<N>>,
}
impl<N: NetworkZone> BroadcastSvc<N> {
/// Create a mock [`BroadcastSvc`] that does nothing, useful for testing.
pub fn mock() -> Self {
init_broadcast_channels(BroadcastConfig::default()).0
}
}
impl<N: NetworkZone> Service<BroadcastRequest<N>> for BroadcastSvc<N> {
type Response = ();
type Error = std::convert::Infallible;
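
The new test's `mock_manager` plugs exactly this into the manager's `broadcast_svc` field. A usage sketch (the `ClearNet` zone is an assumption; use whichever `NetworkZone` the caller is generic over):

// Editor sketch: a do-nothing broadcast service for a test harness.
let broadcast_svc: BroadcastSvc<ClearNet> = BroadcastSvc::mock();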


@@ -4,8 +4,8 @@
use cuprate_database::{DatabaseRo, DbResult, RuntimeError};
use crate::{
ops::{block::block_exists, macros::doc_error},
tables::{BlockHeights, BlockInfos},
ops::{block, macros::doc_error},
tables::{AltBlockHeights, BlockHeights, BlockInfos},
types::{BlockHash, BlockHeight},
};
@@ -91,13 +91,21 @@ pub fn cumulative_generated_coins(
pub fn find_split_point(
block_ids: &[BlockHash],
chronological_order: bool,
include_alt_blocks: bool,
table_block_heights: &impl DatabaseRo<BlockHeights>,
table_alt_block_heights: &impl DatabaseRo<AltBlockHeights>,
) -> Result<usize, RuntimeError> {
let mut err = None;
let block_exists = |block_id| {
block::block_exists(&block_id, table_block_heights).and_then(|exists| {
Ok(exists | (include_alt_blocks & table_alt_block_heights.contains(&block_id)?))
})
};
// Do a binary search to find the first unknown/known block in the batch.
let idx = block_ids.partition_point(|block_id| {
match block_exists(block_id, table_block_heights) {
match block_exists(*block_id) {
Ok(exists) => exists == chronological_order,
Err(e) => {
err.get_or_insert(e);

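`find_split_point` leans on `partition_point`, so the `exists == chronological_order` predicate must flip at exactly one place in `block_ids`: with `chronological_order = true` the batch starts with known blocks and the returned index is the first unknown one, and with `false` (reverse order) it is the first known one. A minimal standalone sketch of that behaviour with toy data:

// Editor sketch: a chronologically ordered batch whose first three IDs are known.
let known = [true, true, true, false, false];
// partition_point returns the index of the first element failing the predicate,
// i.e. the first unknown block -- the split point.
assert_eq!(known.partition_point(|&is_known| is_known), 3);
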

@@ -649,9 +649,16 @@ fn next_chain_entry(
let tables = env_inner.open_tables(&tx_ro)?;
let table_block_heights = tables.block_heights();
let table_alt_block_heights = tables.alt_block_heights();
let table_block_infos = tables.block_infos_iter();
let idx = find_split_point(block_ids, false, table_block_heights)?;
let idx = find_split_point(
block_ids,
false,
false,
table_block_heights,
table_alt_block_heights,
)?;
// This will happen if we have a different genesis block.
if idx == block_ids.len() {
@@ -712,8 +719,15 @@ fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseRes
let tx_ro = env_inner.tx_ro()?;
let table_block_heights = env_inner.open_db_ro::<BlockHeights>(&tx_ro)?;
let table_alt_block_heights = env_inner.open_db_ro::<AltBlockHeights>(&tx_ro)?;
let idx = find_split_point(block_ids, true, &table_block_heights)?;
let idx = find_split_point(
block_ids,
true,
true,
&table_block_heights,
&table_alt_block_heights,
)?;
Ok(if idx == block_ids.len() {
BlockchainResponse::FindFirstUnknown(None)