diff --git a/binaries/cuprated/src/p2p/request_handler.rs b/binaries/cuprated/src/p2p/request_handler.rs
index 0f39ecec..c93f0580 100644
--- a/binaries/cuprated/src/p2p/request_handler.rs
+++ b/binaries/cuprated/src/p2p/request_handler.rs
@@ -25,7 +25,9 @@ use cuprate_helper::{
     cast::usize_to_u64,
     map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits},
 };
-use cuprate_p2p::constants::{MAX_BLOCK_BATCH_LEN, MAX_TRANSACTION_BLOB_SIZE, MEDIUM_BAN};
+use cuprate_p2p::constants::{
+    MAX_BLOCKS_IDS_IN_CHAIN_ENTRY, MAX_BLOCK_BATCH_LEN, MAX_TRANSACTION_BLOB_SIZE, MEDIUM_BAN,
+};
 use cuprate_p2p_core::client::InternalPeerID;
 use cuprate_p2p_core::{
     client::PeerInformation, NetZoneAddress, NetworkZone, ProtocolRequest, ProtocolResponse,
@@ -206,7 +208,7 @@ async fn get_chain(
     request: ChainRequest,
     mut blockchain_read_handle: BlockchainReadHandle,
 ) -> anyhow::Result<ProtocolResponse> {
-    if request.block_ids.len() > 25_000 {
+    if request.block_ids.len() > MAX_BLOCKS_IDS_IN_CHAIN_ENTRY {
         anyhow::bail!("Peer sent too many block hashes in chain request.")
     }
 
@@ -387,6 +389,7 @@ where
 
     let context = context.unchecked_blockchain_context();
 
+    // If we are more than 2 blocks behind the peer then ignore the txs - we are probably still syncing.
     if usize_to_u64(context.chain_height + 2)
         < peer_information
             .core_sync_data
@@ -405,15 +408,18 @@ where
         }
     };
 
-    drop(request.padding);
+    // Drop all the data except the stuff we still need.
+    let NewTransactions {
+        txs,
+        dandelionpp_fluff: _,
+        padding: _,
+    } = request;
+
     let res = incoming_tx_handler
         .ready()
         .await
         .expect(PANIC_CRITICAL_SERVICE_ERROR)
-        .call(IncomingTxs {
-            txs: request.txs,
-            state,
-        })
+        .call(IncomingTxs { txs, state })
         .await;
 
     match res {
diff --git a/p2p/p2p/src/constants.rs b/p2p/p2p/src/constants.rs
index 59c2e1a3..a81557c2 100644
--- a/p2p/p2p/src/constants.rs
+++ b/p2p/p2p/src/constants.rs
@@ -67,7 +67,7 @@ pub const MAX_TRANSACTION_BLOB_SIZE: usize = 1_000_000;
 ///
 /// ref:
 // TODO: link to the protocol book when this section is added.
-pub(crate) const MAX_BLOCKS_IDS_IN_CHAIN_ENTRY: usize = 25_000;
+pub const MAX_BLOCKS_IDS_IN_CHAIN_ENTRY: usize = 25_000;
 
 /// The amount of failures downloading a specific batch before we stop attempting to download it.
 pub(crate) const MAX_DOWNLOAD_FAILURES: usize = 5;