Merge branch 'main' into network-init

Boog900 2024-05-22 01:59:47 +01:00
commit 7f5e44f7b1
No known key found for this signature in database
GPG key ID: 42AB1287CB0041C2
5 changed files with 64 additions and 175 deletions

View file

@ -34,7 +34,7 @@ use crate::constants::{
};
/// The configuration for the [`BroadcastSvc`].
#[derive(Debug, Clone)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct BroadcastConfig {
/// The average number of seconds between diffusion flushes for outbound connections.
pub diffusion_flush_average_seconds_outbound: Duration,
@ -58,7 +58,7 @@ impl Default for BroadcastConfig {
/// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **outbound** peers.
/// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **inbound** peers.
pub fn init_broadcast_channels<N: NetworkZone>(
config: &BroadcastConfig,
config: BroadcastConfig,
) -> (
BroadcastSvc<N>,
impl Fn(InternalPeerID<N::Addr>) -> BroadcastMessageStream<N> + Clone + Send + 'static,
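For reference, the intended call pattern mirrors the unit tests further down this file (`TestNetZone`, `TEST_CONFIG` and `InternalPeerID::Unknown` are the crate's test helpers, not new API); a rough sketch:

// Sketch only, mirroring the tests below; TEST_CONFIG and TestNetZone are test helpers from this file.
let (broadcast_svc, outbound_mkr, inbound_mkr) =
    init_broadcast_channels::<TestNetZone<true, true, true>>(TEST_CONFIG);
// Each connection task builds a stream from its peer ID and listens on it for messages to broadcast.
let outbound_stream = outbound_mkr(InternalPeerID::Unknown(1));
let inbound_stream = inbound_mkr(InternalPeerID::Unknown(1));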
@ -95,9 +95,9 @@ pub fn init_broadcast_channels<N: NetworkZone>(
// Wrap the tx broadcast channels in a wrapper that impls Clone so the closures later on impl Clone.
let tx_channel_outbound_receiver_wrapped =
CloneableBroadcastRecover(tx_broadcast_channel_outbound_receiver);
CloneableBroadcastReceiver(tx_broadcast_channel_outbound_receiver);
let tx_channel_inbound_receiver_wrapped =
CloneableBroadcastRecover(tx_broadcast_channel_inbound_receiver);
CloneableBroadcastReceiver(tx_broadcast_channel_inbound_receiver);
// Create the closures that will be used to start the broadcast streams that the connection task will hold to listen
// for messages to broadcast.
@ -173,8 +173,7 @@ impl<N: NetworkZone> Service<BroadcastRequest<N>> for BroadcastSvc<N> {
current_blockchain_height,
} => {
tracing::debug!(
"queuing block at chain height {} for broadcast",
current_blockchain_height
"queuing block at chain height {current_blockchain_height} for broadcast"
);
self.new_block_watch.send_replace(NewBlockInfo {
@ -192,6 +191,7 @@ impl<N: NetworkZone> Service<BroadcastRequest<N>> for BroadcastSvc<N> {
received_from,
};
// An error here means _all_ receivers were dropped, which we assume will never happen.
let _ = match direction {
Some(ConnectionDirection::InBound) => {
self.tx_broadcast_channel_inbound.send(nex_tx_info)
@ -215,9 +215,9 @@ impl<N: NetworkZone> Service<BroadcastRequest<N>> for BroadcastSvc<N> {
///
/// The clone impl just calls [`Receiver::resubscribe`](broadcast::Receiver::resubscribe), which isn't _exactly_
/// a clone but is what we need for our use case.
struct CloneableBroadcastRecover<T: Clone>(broadcast::Receiver<T>);
struct CloneableBroadcastReceiver<T: Clone>(broadcast::Receiver<T>);
impl<T: Clone> Clone for CloneableBroadcastRecover<T> {
impl<T: Clone> Clone for CloneableBroadcastReceiver<T> {
fn clone(&self) -> Self {
Self(self.0.resubscribe())
}
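As a rough illustration of the `resubscribe` semantics this wrapper relies on (plain tokio here, not Cuprate code): a receiver created via `resubscribe` starts at the current tail of the channel, so it only sees values sent after the call.

use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    let (tx, mut rx1) = broadcast::channel::<u8>(4);
    tx.send(1).unwrap();

    // A "clone" made this way misses the `1` already sitting in the channel...
    let mut rx2 = rx1.resubscribe();
    tx.send(2).unwrap();

    // ...so the original receiver yields both values, the resubscribed one only the second.
    assert_eq!(rx1.recv().await.unwrap(), 1);
    assert_eq!(rx1.recv().await.unwrap(), 2);
    assert_eq!(rx2.recv().await.unwrap(), 2);
}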
@ -381,8 +381,7 @@ fn get_txs_to_broadcast<N: NetworkZone>(
}
TryRecvError::Lagged(lag) => {
tracing::debug!(
"{} transaction broadcast messages were missed, continuing.",
lag
"{lag} transaction broadcast messages were missed, continuing."
);
continue;
}
@ -413,7 +412,7 @@ mod tests {
#[tokio::test]
async fn tx_broadcast_direction_correct() {
let (mut brcst, outbound_mkr, inbound_mkr) =
init_broadcast_channels::<TestNetZone<true, true, true>>(&TEST_CONFIG);
init_broadcast_channels::<TestNetZone<true, true, true>>(TEST_CONFIG);
let mut outbound_stream = pin!(outbound_mkr(InternalPeerID::Unknown(1)));
let mut inbound_stream = pin!(inbound_mkr(InternalPeerID::Unknown(1)));
@ -472,7 +471,7 @@ mod tests {
#[tokio::test]
async fn block_broadcast_sent_to_all() {
let (mut brcst, outbound_mkr, inbound_mkr) =
init_broadcast_channels::<TestNetZone<true, true, true>>(&TEST_CONFIG);
init_broadcast_channels::<TestNetZone<true, true, true>>(TEST_CONFIG);
let mut outbound_stream = pin!(outbound_mkr(InternalPeerID::Unknown(1)));
let mut inbound_stream = pin!(inbound_mkr(InternalPeerID::Unknown(1)));
@ -498,7 +497,7 @@ mod tests {
#[tokio::test]
async fn tx_broadcast_skipped_for_received_from_peer() {
let (mut brcst, outbound_mkr, inbound_mkr) =
init_broadcast_channels::<TestNetZone<true, true, true>>(&TEST_CONFIG);
init_broadcast_channels::<TestNetZone<true, true, true>>(TEST_CONFIG);
let mut outbound_stream = pin!(outbound_mkr(InternalPeerID::Unknown(1)));
let mut outbound_stream_from = pin!(outbound_mkr(InternalPeerID::Unknown(0)));

View file

@ -1,7 +1,7 @@
use std::time::Duration;
/// The timeout we set on handshakes.
pub(crate) const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(15);
pub(crate) const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(30);
/// The maximum number of connections to make to seed nodes when we need peers.
pub(crate) const MAX_SEED_CONNECTIONS: usize = 3;
@ -9,29 +9,33 @@ pub(crate) const MAX_SEED_CONNECTIONS: usize = 3;
/// The timeout for when we fail to find a peer to connect to.
pub(crate) const OUTBOUND_CONNECTION_ATTEMPT_TIMEOUT: Duration = Duration::from_secs(5);
/// The duration of a short ban.
pub(crate) const SHORT_BAN: Duration = Duration::from_secs(60 * 10);
/// The default amount of time between inbound diffusion flushes.
pub(crate) const DIFFUSION_FLUSH_AVERAGE_SECONDS_INBOUND: Duration = Duration::from_secs(5);
/// The default amount of time between outbound diffusion flushes.
///
/// Shorter than inbound as we control these connections.
pub(crate) const DIFFUSION_FLUSH_AVERAGE_SECONDS_OUTBOUND: Duration = Duration::from_millis(2500);
/// The size limit on [`NewTransactions`](monero_wire::protocol::NewTransactions) messages that we create.
pub(crate) const SOFT_TX_MESSAGE_SIZE_SIZE_LIMIT: usize = 10 * 1024 * 1024;
/// The amount of transactions in the broadcast queue. When this value is hit old transactions will be dropped from
/// The amount of transactions in the broadcast queue. When this value is hit, old transactions will be dropped from
/// the queue.
///
/// Because of internal implementation details this value is _always_ hit, i.e. transactions will not be dropped until
/// 50 more after it are added.
/// Because of internal implementation details this value is _always_ hit, i.e. a transaction will not be dropped until
/// 50 more transactions after it are added to the queue.
pub(crate) const MAX_TXS_IN_BROADCAST_CHANNEL: usize = 50;
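A minimal, self-contained sketch of the drop behaviour described above (plain tokio, with a capacity of 4 instead of `MAX_TXS_IN_BROADCAST_CHANNEL` to keep the numbers small): the oldest message is only evicted once enough newer messages have been queued, and a lagging receiver is told how many it missed before continuing from the oldest one still held.

use tokio::sync::broadcast::{self, error::RecvError};

#[tokio::main]
async fn main() {
    let (tx, mut rx) = broadcast::channel::<u32>(4);
    for i in 0..5 {
        // Sending the 5th value evicts value `0` from the queue.
        tx.send(i).unwrap();
    }

    // The slow receiver is first told it missed one message...
    assert!(matches!(rx.recv().await, Err(RecvError::Lagged(1))));
    // ...then continues from the oldest message still held by the channel.
    assert_eq!(rx.recv().await.unwrap(), 1);
}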
/// The duration of a short ban.
pub(crate) const SHORT_BAN: Duration = Duration::from_secs(60 * 10);
#[cfg(test)]
mod tests {
use super::*;
/// The time to sleep after an inbound connection comes in.
///
/// This is a safety measure to prevent Cuprate from getting spammed with a load of inbound connections.
/// TODO: it might be a good idea to make this configurable.
pub(crate) const INBOUND_CONNECTION_COOL_DOWN: Duration = Duration::from_millis(750);
/// Outbound diffusion flushes should be shorter than
/// inbound ones as we control these connections.
#[test]
fn outbound_diffusion_flush_shorter_than_inbound() {
assert!(DIFFUSION_FLUSH_AVERAGE_SECONDS_OUTBOUND < DIFFUSION_FLUSH_AVERAGE_SECONDS_INBOUND);
}
}

View file

@ -1,126 +1,17 @@
//! Cuprate's P2P Crate.
//!
//! This crate contains a [`ClientPool`](client_pool::ClientPool) which holds connected peers on a single [`NetworkZone`](monero_p2p::NetworkZone).
//!
//! This crate also contains the different routing methods that control how messages should be sent, i.e. broadcast to all peers,
//! or sent to a single peer.
//!
#![allow(dead_code)]
use std::sync::Arc;
use tokio::sync::{mpsc, watch};
use tower::buffer::Buffer;
use tracing::{instrument, Instrument, Span};
use monero_p2p::{CoreSyncSvc, NetworkZone, PeerRequestHandler};
mod broadcast;
mod client_pool;
pub mod client_pool;
pub mod config;
pub mod connection_maintainer;
mod constants;
mod inbound_server;
mod sync_states;
use crate::connection_maintainer::MakeConnectionRequest;
pub use config::P2PConfig;
use monero_p2p::client::Connector;
/// Initializes the P2P [`NetworkInterface`] for a specific [`NetworkZone`].
///
/// This function starts all the tasks needed to maintain, accept and make connections.
///
/// To use this function you must provide a peer request handler, which is given to each connection, and a core sync
/// service, which keeps track of the sync state of our node.
#[instrument(level="debug", name="net", skip_all, fields(zone=N::NAME))]
pub async fn initialize_network<N, R, CS>(
peer_req_handler: R,
core_sync_svc: CS,
config: P2PConfig<N>,
) -> Result<NetworkInterface<N>, tower::BoxError>
where
N: NetworkZone,
R: PeerRequestHandler + Clone,
CS: CoreSyncSvc + Clone,
{
let address_book =
monero_address_book::init_address_book(config.address_book_config.clone()).await?;
let address_book = Buffer::new(
address_book,
config.max_inbound_connections + config.outbound_connections,
);
let (sync_states_svc, top_block_watch) = sync_states::PeerSyncSvc::new();
let sync_states_svc = Buffer::new(
sync_states_svc,
config.max_inbound_connections + config.outbound_connections,
);
// Use the default config. Changing the defaults affects tx fluff times, which could affect D++, so
// for now we don't allow changing this.
let (broadcast_svc, outbound_mkr, inbound_mkr) =
broadcast::init_broadcast_channels(&broadcast::BroadcastConfig::default());
let mut basic_node_data = config.basic_node_data();
if !N::CHECK_NODE_ID {
// TODO: make sure this is the value monerod sets for anon networks.
basic_node_data.peer_id = 1;
}
let outbound_handshaker = monero_p2p::client::HandShaker::new(
address_book.clone(),
sync_states_svc.clone(),
core_sync_svc.clone(),
peer_req_handler.clone(),
outbound_mkr,
basic_node_data.clone(),
);
let inbound_handshaker = monero_p2p::client::HandShaker::new(
address_book.clone(),
sync_states_svc,
core_sync_svc.clone(),
peer_req_handler,
inbound_mkr,
basic_node_data,
);
let client_pool = client_pool::ClientPool::new();
let (make_connection_tx, make_connection_rx) = mpsc::channel(3);
let outbound_connector = Connector::new(outbound_handshaker);
let outbound_connection_maintainer = connection_maintainer::OutboundConnectionKeeper::new(
config.clone(),
client_pool.clone(),
make_connection_rx,
address_book.clone(),
outbound_connector,
);
tokio::spawn(
outbound_connection_maintainer
.run()
.instrument(Span::current()),
);
tokio::spawn(
inbound_server::inbound_server(client_pool.clone(), inbound_handshaker, config)
.instrument(Span::current()),
);
Ok(NetworkInterface {
pool: client_pool,
broadcast_svc,
top_block_watch,
make_connection_tx,
})
}
/// The interface to Monero's P2P network on a certain [`NetworkZone`].
pub struct NetworkInterface<N: NetworkZone> {
/// A pool of free connected peers.
pool: Arc<client_pool::ClientPool<N>>,
/// A [`Service`](tower::Service) that allows broadcasting to all connected peers.
broadcast_svc: broadcast::BroadcastSvc<N>,
/// A [`watch`] channel that contains the highest seen cumulative difficulty and other info
/// on that claimed chain.
top_block_watch: watch::Receiver<sync_states::NewSyncInfo>,
/// A channel to request extra connections.
make_connection_tx: mpsc::Sender<MakeConnectionRequest>,
}

View file

@ -1,7 +1,7 @@
//! # Sync States
//!
//! This module contains a [`PeerSyncSvc`] which keeps track of connected peers claimed chain states,
//! to allow checking if we are behind and getting a list of peers who claim they are ahead.
//! This module contains a [`PeerSyncSvc`], which keeps track of the claimed chain states of connected peers.
//! This allows checking if we are behind and getting a list of peers who claim they are ahead.
use std::{
cmp::Ordering,
collections::{BTreeMap, HashMap, HashSet},
@ -28,26 +28,24 @@ use crate::{client_pool::disconnect_monitor::PeerDisconnectFut, constants::SHORT
#[derive(Debug)]
pub struct NewSyncInfo {
/// The peer's chain height.
pub chain_height: u64,
chain_height: u64,
/// The peer's top block's hash.
pub top_hash: [u8; 32],
top_hash: [u8; 32],
/// The peer's cumulative difficulty.
pub cumulative_difficulty: u128,
cumulative_difficulty: u128,
}
/// A service that keeps track of our peers' blockchains.
///
/// This is the service that handles finding out if we need to sync and giving the peers that should
/// be synced from to the requester.
/// This is the service that handles:
/// 1. Finding out if we need to sync
/// 1. Giving the peers that should be synced _from_, to the requester
pub struct PeerSyncSvc<N: NetworkZone> {
/// A map of cumulative difficulties to peers.
cumulative_difficulties: BTreeMap<u128, HashSet<InternalPeerID<N::Addr>>>,
/// A map of peers to cumulative difficulties.
peers: HashMap<InternalPeerID<N::Addr>, (u128, PruningSeed)>,
/// A watch channel for *a* top synced peer info.
///
/// This is guaranteed to hold the sync info of a peer with the highest cumulative difficulty seen;
/// it makes no guarantees about which peer will be chosen in case of a tie.
new_height_watcher: watch::Sender<NewSyncInfo>,
/// The handle to the peer that has data in `new_height_watcher`.
last_peer_in_watcher_handle: Option<ConnectionHandle>,
@ -57,7 +55,7 @@ pub struct PeerSyncSvc<N: NetworkZone> {
impl<N: NetworkZone> PeerSyncSvc<N> {
/// Creates a new [`PeerSyncSvc`] with a [`Receiver`](watch::Receiver) that will be updated with
/// the highest seen sync data.
/// the highest seen sync data; this makes no guarantees about which peer will be chosen in case of a tie.
pub fn new() -> (Self, watch::Receiver<NewSyncInfo>) {
let (watch_tx, mut watch_rx) = watch::channel(NewSyncInfo {
chain_height: 0,
@ -85,26 +83,26 @@ impl<N: NetworkZone> PeerSyncSvc<N> {
tracing::trace!("Peer {peer_id} disconnected, removing from the peer sync info service.");
let (peer_cum_diff, _) = self.peers.remove(&peer_id).unwrap();
let cum_dif_peers = self
let cum_diff_peers = self
.cumulative_difficulties
.get_mut(&peer_cum_diff)
.unwrap();
cum_dif_peers.remove(&peer_id);
if cum_dif_peers.is_empty() {
cum_diff_peers.remove(&peer_id);
if cum_diff_peers.is_empty() {
// If this was the last peer, remove the whole entry for this cumulative difficulty.
self.cumulative_difficulties.remove(&peer_cum_diff);
}
}
}
/// Returns a list of peers that claim to have a higher cumulative difficulty than `current_cum_dif`.
/// Returns a list of peers that claim to have a higher cumulative difficulty than `current_cum_diff`.
fn peers_to_sync_from(
&self,
current_cum_dif: u128,
current_cum_diff: u128,
block_needed: Option<u64>,
) -> Vec<InternalPeerID<N::Addr>> {
self.cumulative_difficulties
.range((current_cum_dif + 1)..)
.range((current_cum_diff + 1)..)
.flat_map(|(_, peers)| peers)
.filter(|peer| {
if let Some(block_needed) = block_needed {
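A self-contained sketch (std types only, hypothetical peer names) of the range query used here: keying a BTreeMap by cumulative difficulty lets the service collect every peer whose claimed difficulty is strictly greater than ours.

use std::collections::{BTreeMap, HashSet};

fn main() {
    let mut peers_by_difficulty: BTreeMap<u128, HashSet<&str>> = BTreeMap::new();
    peers_by_difficulty.entry(10).or_default().insert("peer_a");
    peers_by_difficulty.entry(25).or_default().insert("peer_b");
    peers_by_difficulty.entry(40).or_default().insert("peer_c");

    let current_cum_diff: u128 = 10;

    // Everything strictly above our cumulative difficulty, in ascending order.
    let ahead: Vec<_> = peers_by_difficulty
        .range((current_cum_diff + 1)..)
        .flat_map(|(_, peers)| peers)
        .collect();

    assert_eq!(ahead, vec![&"peer_b", &"peer_c"]);
}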
@ -137,8 +135,8 @@ impl<N: NetworkZone> PeerSyncSvc<N> {
let new_cumulative_difficulty = core_sync_data.cumulative_difficulty();
if let Some((old_cum_dif, _)) = self.peers.get_mut(&peer_id) {
match (*old_cum_dif).cmp(&new_cumulative_difficulty) {
if let Some((old_cum_diff, _)) = self.peers.get_mut(&peer_id) {
match (*old_cum_diff).cmp(&new_cumulative_difficulty) {
Ordering::Equal => {
// If the cumulative difficulty of the peer's chain hasn't changed then there is no need to update anything.
return Ok(());
@ -156,14 +154,14 @@ impl<N: NetworkZone> PeerSyncSvc<N> {
}
// Remove the old cumulative difficulty entry for this peer
let old_cum_dif_peers = self.cumulative_difficulties.get_mut(old_cum_dif).unwrap();
old_cum_dif_peers.remove(&peer_id);
if old_cum_dif_peers.is_empty() {
let old_cum_diff_peers = self.cumulative_difficulties.get_mut(old_cum_diff).unwrap();
old_cum_diff_peers.remove(&peer_id);
if old_cum_diff_peers.is_empty() {
// If this was the last peer, remove the whole entry for this cumulative difficulty.
self.cumulative_difficulties.remove(old_cum_dif);
self.cumulative_difficulties.remove(old_cum_diff);
}
// update the cumulative difficulty
*old_cum_dif = new_cumulative_difficulty;
*old_cum_diff = new_cumulative_difficulty;
} else {
// The peer is new, so add it to the list of peers.
self.peers.insert(
@ -195,7 +193,7 @@ impl<N: NetworkZone> PeerSyncSvc<N> {
.is_some_and(|handle| handle.is_closed())
{
tracing::debug!(
"Updating sync watcher channel with new highest seen cumulative difficulty."
"Updating sync watcher channel with new highest seen cumulative difficulty: {new_cumulative_difficulty}"
);
let _ = self.new_height_watcher.send(NewSyncInfo {
top_hash: core_sync_data.top_id,

View file

@ -126,7 +126,7 @@ impl PruningSeed {
}
}
/// Returns if a peer with this pruning seed should have a non-pruned version of a block.
/// Returns `true` if a peer with this pruning seed should have a non-pruned version of a block.
pub fn has_full_block(&self, height: u64, blockchain_height: u64) -> bool {
match self {
PruningSeed::NotPruned => true,
@ -311,15 +311,12 @@ impl DecompressedPruningSeed {
| ((self.stripe - 1) << PRUNING_SEED_STRIPE_SHIFT)
}
/// Returns if a peer with this pruning seed should have a non-pruned version of a block.
/// Returns `true` if a peer with this pruning seed should have a non-pruned version of a block.
pub fn has_full_block(&self, height: u64, blockchain_height: u64) -> bool {
let Some(block_stripe) =
get_block_pruning_stripe(height, blockchain_height, self.log_stripes)
else {
return true;
};
self.stripe == block_stripe
match get_block_pruning_stripe(height, blockchain_height, self.log_stripes) {
Some(block_stripe) => self.stripe == block_stripe,
None => true,
}
}
/// Gets the next unpruned block for a given `block_height` and `blockchain_height`