p2p changes (#38)

* start re-working p2p to work with the changed monero-wire
* start re-working p2p to work with the changed monero-wire; adds back some changes from #22
* change the peer module to use the new API + fix a couple of bugs
* remove peer set for now
* add try_from/from conversion between `Message` and `Request`/`Response`
* allow specifying other parameters in levin-cuprate
* add new `LevinCommand` enum and clean up monero-wire message de/encoding
* fix issues with merge
* start splitting up the p2p crate into smaller crates
* add monerod action from serai to test network code
* remove tracing in tests
parent 343e979e82 · commit 8557073c15

59 changed files with 5079 additions and 1902 deletions
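To make the re-worked decode path concrete, here is a minimal sketch (not code from this commit; the byte buffer and helper name are placeholders) of how a raw levin command id and a decoded bucket body become a typed monero-wire `Message` under the new API introduced below — `LevinCommand::from(u32)` maps the wire id to a typed command, and `LevinBody::decode_message` turns the body into a `Message`:

    use levin_cuprate::{LevinBody, MessageType};
    use monero_wire::{LevinCommand, Message};

    // Hypothetical helper for illustration only.
    fn parse_notification(body: &[u8], raw_command: u32) -> Result<Message, monero_wire::BucketError> {
        // 1001..=1007 are admin commands, 2001..=2010 are protocol notifications;
        // anything else becomes `LevinCommand::Unknown(_)`.
        let command = LevinCommand::from(raw_command);
        Message::decode_message(body, MessageType::Notification, command)
    }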
62  .github/actions/monerod-regtest/action.yml  (vendored, new file)
@@ -0,0 +1,62 @@
# MIT License
#
# Copyright (c) 2022-2023 Luke Parker
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# Initially taken from Serai Dex: https://github.com/serai-dex/serai/blob/b823413c9b7ae6747b9af99e18379cfc49f4271a/.github/actions/monero/action.yml.

name: monero-regtest
description: Spawns a regtest Monero daemon

inputs:
  version:
    description: "Version to download and run"
    required: false
    default: v0.18.2.0

runs:
  using: "composite"
  steps:
    - name: Monero Daemon Cache
      id: cache-monerod
      uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84
      with:
        path: monerod
        key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

    - name: Download the Monero Daemon
      if: steps.cache-monerod.outputs.cache-hit != 'true'
      # Calculates OS/ARCH to demonstrate it, yet then locks to linux-x64 due
      # to the contained folder not following the same naming scheme and
      # requiring further expansion not worth doing right now
      shell: bash
      run: |
        RUNNER_OS=${{ runner.os }}
        RUNNER_ARCH=${{ runner.arch }}

        RUNNER_OS=${RUNNER_OS,,}
        RUNNER_ARCH=${RUNNER_ARCH,,}

        RUNNER_OS=linux
        RUNNER_ARCH=x64

        FILE=monero-$RUNNER_OS-$RUNNER_ARCH-${{ inputs.version }}.tar.bz2
        wget https://downloads.getmonero.org/cli/$FILE
        tar -xvf $FILE

        mv monero-x86_64-linux-gnu-${{ inputs.version }}/monerod monerod

    - name: Monero Regtest Daemon
      shell: bash
      run: ./monerod --regtest --fixed-difficulty=1 --detach --out-peers 0
3  .github/workflows/ci.yml  (vendored)
@@ -35,6 +35,9 @@ jobs:
         path: target
         key: ${{ matrix.os }}

+    - name: Spawn monerod
+      uses: ./.github/actions/monerod-regtest
+
     - name: Install dependencies
       run: sudo apt install -y libboost-dev
@@ -9,8 +9,8 @@ members = [
 #	"database",
 	"net/levin",
 	"net/monero-wire",
-#	"p2p",
-#	"p2p/sync-states"
+	"p2p/monero-peer",
+	"test-utils"
 ]

 [profile.release]
@@ -50,6 +50,7 @@ pub enum PruningError {
 ///
 // Internally we use an Option<u32> to represent if a pruning seed is 0 (None)which means
 // no pruning will take place.
+#[derive(Debug, Clone, Copy)]
 pub struct PruningSeed(Option<u32>);

 impl PruningSeed {
@@ -10,11 +10,11 @@ use std::{
 use curve25519_dalek::edwards::CompressedEdwardsY;
 use futures::{
     channel::{mpsc, oneshot},
-    ready, FutureExt, SinkExt, StreamExt, TryStreamExt,
+    FutureExt, StreamExt,
 };
 use monero_serai::{
     block::Block,
-    rpc::{HttpRpc, Rpc, RpcError},
+    rpc::{HttpRpc, Rpc},
     transaction::Transaction,
 };
 use monero_wire::common::{BlockCompleteEntry, TransactionBlobs};

@@ -216,7 +216,7 @@ impl RpcConnection {
         let blocks: Response = monero_epee_bin_serde::from_bytes(res)?;

-        Ok(rayon_spawn_async(|| {
+        rayon_spawn_async(|| {
             blocks
                 .blocks
                 .into_par_iter()

@@ -237,7 +237,7 @@ impl RpcConnection {
                 })
                 .collect::<Result<_, tower::BoxError>>()
         })
-        .await?)
+        .await
     }

     async fn get_outputs(
@@ -22,36 +22,79 @@ use bytes::{Buf, BufMut, BytesMut};
 use tokio_util::codec::{Decoder, Encoder};

 use crate::{
-    Bucket, BucketBuilder, BucketError, BucketHead, LevinBody, MessageType,
-    LEVIN_DEFAULT_MAX_PACKET_SIZE,
+    Bucket, BucketBuilder, BucketError, BucketHead, LevinBody, LevinCommand, MessageType, Protocol,
 };

-/// The levin tokio-codec for decoding and encoding levin buckets
-#[derive(Default)]
-pub enum LevinCodec {
+#[derive(Debug, Clone)]
+pub enum LevinBucketState<C> {
     /// Waiting for the peer to send a header.
-    #[default]
     WaitingForHeader,
     /// Waiting for a peer to send a body.
-    WaitingForBody(BucketHead),
+    WaitingForBody(BucketHead<C>),
 }

-impl Decoder for LevinCodec {
-    type Item = Bucket;
+/// The levin tokio-codec for decoding and encoding raw levin buckets
+///
+#[derive(Debug, Clone)]
+pub struct LevinBucketCodec<C> {
+    state: LevinBucketState<C>,
+    protocol: Protocol,
+    handshake_message_seen: bool,
+}
+
+impl<C> Default for LevinBucketCodec<C> {
+    fn default() -> Self {
+        LevinBucketCodec {
+            state: LevinBucketState::WaitingForHeader,
+            protocol: Protocol::default(),
+            handshake_message_seen: false,
+        }
+    }
+}
+
+impl<C> LevinBucketCodec<C> {
+    pub fn new(protocol: Protocol) -> Self {
+        LevinBucketCodec {
+            state: LevinBucketState::WaitingForHeader,
+            protocol,
+            handshake_message_seen: false,
+        }
+    }
+}
+
+impl<C: LevinCommand> Decoder for LevinBucketCodec<C> {
+    type Item = Bucket<C>;
     type Error = BucketError;
     fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
         loop {
-            match self {
-                LevinCodec::WaitingForHeader => {
-                    if src.len() < BucketHead::SIZE {
+            match &self.state {
+                LevinBucketState::WaitingForHeader => {
+                    if src.len() < BucketHead::<C>::SIZE {
                         return Ok(None);
                     };

-                    let head = BucketHead::from_bytes(src)?;
-                    let _ = std::mem::replace(self, LevinCodec::WaitingForBody(head));
+                    let head = BucketHead::<C>::from_bytes(src);
+
+                    if head.size > self.protocol.max_packet_size
+                        || head.size > head.command.bucket_size_limit()
+                    {
+                        return Err(BucketError::BucketExceededMaxSize);
+                    }
+
+                    if !self.handshake_message_seen {
+                        if head.size > self.protocol.max_packet_size_before_handshake {
+                            return Err(BucketError::BucketExceededMaxSize);
+                        }
+
+                        if head.command.is_handshake() {
+                            self.handshake_message_seen = true;
+                        }
+                    }
+
+                    let _ =
+                        std::mem::replace(&mut self.state, LevinBucketState::WaitingForBody(head));
                 }
-                LevinCodec::WaitingForBody(head) => {
-                    // We size check header while decoding it.
+                LevinBucketState::WaitingForBody(head) => {
                     let body_len = head
                         .size
                         .try_into()

@@ -61,8 +104,8 @@ impl Decoder for LevinCodec {
                         return Ok(None);
                     }

-                    let LevinCodec::WaitingForBody(header) =
-                        std::mem::replace(self, LevinCodec::WaitingForHeader)
+                    let LevinBucketState::WaitingForBody(header) =
+                        std::mem::replace(&mut self.state, LevinBucketState::WaitingForHeader)
                     else {
                         unreachable!()
                     };

@@ -77,10 +120,10 @@ impl Decoder for LevinCodec {
     }
 }

-impl Encoder<Bucket> for LevinCodec {
+impl<C: LevinCommand> Encoder<Bucket<C>> for LevinBucketCodec<C> {
     type Error = BucketError;
-    fn encode(&mut self, item: Bucket, dst: &mut BytesMut) -> Result<(), Self::Error> {
-        if dst.capacity() < BucketHead::SIZE + item.body.len() {
+    fn encode(&mut self, item: Bucket<C>, dst: &mut BytesMut) -> Result<(), Self::Error> {
+        if dst.capacity() < BucketHead::<C>::SIZE + item.body.len() {
             return Err(BucketError::IO(std::io::Error::new(
                 ErrorKind::OutOfMemory,
                 "Not enough capacity to write the bucket",

@@ -92,19 +135,30 @@ impl Encoder<Bucket> for LevinCodec {
         }
     }

-#[derive(Default)]
-enum MessageState {
+#[derive(Default, Debug, Clone)]
+enum MessageState<C> {
     #[default]
     WaitingForBucket,
-    WaitingForRestOfFragment(Vec<u8>, MessageType, u32),
+    WaitingForRestOfFragment(Vec<u8>, MessageType, C),
 }

 /// A tokio-codec for levin messages or in other words the decoded body
 /// of a levin bucket.
-pub struct LevinMessageCodec<T> {
+#[derive(Debug, Clone)]
+pub struct LevinMessageCodec<T: LevinBody> {
     message_ty: PhantomData<T>,
-    bucket_codec: LevinCodec,
-    state: MessageState,
+    bucket_codec: LevinBucketCodec<T::Command>,
+    state: MessageState<T::Command>,
+}
+
+impl<T: LevinBody> Default for LevinMessageCodec<T> {
+    fn default() -> Self {
+        Self {
+            message_ty: Default::default(),
+            bucket_codec: Default::default(),
+            state: Default::default(),
+        }
+    }
 }

 impl<T: LevinBody> Decoder for LevinMessageCodec<T> {

@@ -118,23 +172,20 @@ impl<T: LevinBody> Decoder for LevinMessageCodec<T> {
                     return Ok(None);
                 };

-                let end_fragment = bucket.header.flags.end_fragment;
-                let start_fragment = bucket.header.flags.start_fragment;
-                let request = bucket.header.flags.request;
-                let response = bucket.header.flags.response;
+                let flags = &bucket.header.flags;

-                if start_fragment && end_fragment {
+                if flags.is_start_fragment() && flags.is_end_fragment() {
                     // Dummy message
                     return Ok(None);
                 };

-                if end_fragment {
+                if flags.is_end_fragment() {
                     return Err(BucketError::InvalidHeaderFlags(
                         "Flag end fragment received before a start fragment",
                     ));
                 };

-                if !request && !response {
+                if !flags.is_request() && !flags.is_response() {
                     return Err(BucketError::InvalidHeaderFlags(
                         "Request and response flags both not set",
                     ));

@@ -145,13 +196,13 @@ impl<T: LevinBody> Decoder for LevinMessageCodec<T> {
                     bucket.header.have_to_return_data,
                 )?;

-                if start_fragment {
+                if flags.is_start_fragment() {
                     let _ = std::mem::replace(
                         &mut self.state,
                         MessageState::WaitingForRestOfFragment(
                             bucket.body.to_vec(),
                             message_type,
-                            bucket.header.protocol_version,
+                            bucket.header.command,
                         ),
                     );

@@ -169,17 +220,14 @@ impl<T: LevinBody> Decoder for LevinMessageCodec<T> {
                     return Ok(None);
                 };

-                let end_fragment = bucket.header.flags.end_fragment;
-                let start_fragment = bucket.header.flags.start_fragment;
-                let request = bucket.header.flags.request;
-                let response = bucket.header.flags.response;
+                let flags = &bucket.header.flags;

-                if start_fragment && end_fragment {
+                if flags.is_start_fragment() && flags.is_end_fragment() {
                     // Dummy message
                     return Ok(None);
                 };

-                if !request && !response {
+                if !flags.is_request() && !flags.is_response() {
                     return Err(BucketError::InvalidHeaderFlags(
                         "Request and response flags both not set",
                     ));

@@ -198,12 +246,12 @@ impl<T: LevinBody> Decoder for LevinMessageCodec<T> {

                 if bucket.header.command != *command {
                     return Err(BucketError::InvalidFragmentedMessage(
-                        "Command not consistent across message",
+                        "Command not consistent across fragments",
                     ));
                 }

-                if bytes.len() + bucket.body.len()
-                    > LEVIN_DEFAULT_MAX_PACKET_SIZE.try_into().unwrap()
+                if bytes.len().saturating_add(bucket.body.len())
+                    > command.bucket_size_limit().try_into().unwrap()
                 {
                     return Err(BucketError::InvalidFragmentedMessage(
                         "Fragmented message exceeded maximum size",

@@ -212,7 +260,7 @@ impl<T: LevinBody> Decoder for LevinMessageCodec<T> {

                 bytes.append(&mut bucket.body.to_vec());

-                if end_fragment {
+                if flags.is_end_fragment() {
                     let MessageState::WaitingForRestOfFragment(bytes, ty, command) =
                         std::mem::replace(&mut self.state, MessageState::WaitingForBucket)
                     else {
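A minimal usage sketch (not part of this diff) of the split codecs: `LevinBucketCodec` frames raw buckets, while `LevinMessageCodec<T>` sits on top of it and yields decoded `T: LevinBody` values. Assuming the `MoneroWireCodec` alias from monero-wire's lib.rs (shown further down) and tokio-util's `FramedRead`, reading one peer message could look like this; the helper name is hypothetical:

    use futures::StreamExt;
    use tokio::net::TcpStream;
    use tokio_util::codec::FramedRead;

    use monero_wire::{Message, MoneroWireCodec};

    // Hypothetical helper, only to show how the codec is driven; real
    // connection handling lives in the p2p crates.
    async fn read_one_message(stream: TcpStream) -> Option<Result<Message, monero_wire::BucketError>> {
        // `MoneroWireCodec` is `LevinMessageCodec<Message>`; `Default` uses the
        // Monero `Protocol` parameters (signature, max packet sizes).
        let mut framed = FramedRead::new(stream, MoneroWireCodec::default());
        framed.next().await
    }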
@@ -13,13 +13,27 @@
 // copies or substantial portions of the Software.
 //

+// Rust Levin Library
+// Written in 2023 by
+// Cuprate Contributors
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+
 //! This module provides a struct BucketHead for the header of a levin protocol
 //! message.

-use crate::LEVIN_DEFAULT_MAX_PACKET_SIZE;
 use bytes::{Buf, BufMut, BytesMut};

-use super::{BucketError, LEVIN_SIGNATURE, PROTOCOL_VERSION};
+use crate::LevinCommand;

 const REQUEST: u32 = 0b0000_0001;
 const RESPONSE: u32 = 0b0000_0010;

@@ -28,57 +42,41 @@ const END_FRAGMENT: u32 = 0b0000_1000;

 /// Levin header flags
 #[derive(Debug, Default, PartialEq, Eq, Clone, Copy)]
-pub struct Flags {
-    /// Q bit
-    pub request: bool,
-    /// S bit
-    pub response: bool,
-    /// B bit
-    pub start_fragment: bool,
-    /// E bit
-    pub end_fragment: bool,
-}
+pub struct Flags(u32);

-impl TryFrom<u32> for Flags {
-    type Error = BucketError;
-    fn try_from(value: u32) -> Result<Self, Self::Error> {
-        let flags = Flags {
-            request: value & REQUEST > 0,
-            response: value & RESPONSE > 0,
-            start_fragment: value & START_FRAGMENT > 0,
-            end_fragment: value & END_FRAGMENT > 0,
-        };
-        if flags.request && flags.response {
-            return Err(BucketError::InvalidHeaderFlags(
-                "Request and Response bits set",
-            ));
-        };
-        Ok(flags)
+impl Flags {
+    pub const REQUEST: Flags = Flags(REQUEST);
+    pub const RESPONSE: Flags = Flags(RESPONSE);
+
+    pub fn is_request(&self) -> bool {
+        self.0 & REQUEST != 0
+    }
+    pub fn is_response(&self) -> bool {
+        self.0 & RESPONSE != 0
+    }
+    pub fn is_start_fragment(&self) -> bool {
+        self.0 & START_FRAGMENT != 0
+    }
+    pub fn is_end_fragment(&self) -> bool {
+        self.0 & END_FRAGMENT != 0
+    }
+}
+
+impl From<u32> for Flags {
+    fn from(value: u32) -> Self {
+        Flags(value)
     }
 }

 impl From<Flags> for u32 {
     fn from(value: Flags) -> Self {
-        let mut ret = 0;
-        if value.request {
-            ret |= REQUEST;
-        };
-        if value.response {
-            ret |= RESPONSE;
-        };
-        if value.start_fragment {
-            ret |= START_FRAGMENT;
-        };
-        if value.end_fragment {
-            ret |= END_FRAGMENT;
-        };
-        ret
+        value.0
     }
 }

 /// The Header of a Bucket. This contains
 #[derive(Debug, PartialEq, Eq, Clone, Copy)]
-pub struct BucketHead {
+pub struct BucketHead<C> {
     /// The network signature, should be `LEVIN_SIGNATURE` for Monero
     pub signature: u64,
     /// The size of the body

@@ -87,7 +85,7 @@ pub struct BucketHead {
     /// messages require responses but don't have this set (some notifications)
     pub have_to_return_data: bool,
     /// Command
-    pub command: u32,
+    pub command: C,
     /// Return Code - will be 0 for requests and >0 for ok responses otherwise will be
     /// a negative number corresponding to the error
     pub return_code: i32,

@@ -97,61 +95,36 @@ pub struct BucketHead {
     pub protocol_version: u32,
 }

-impl BucketHead {
+impl<C: LevinCommand> BucketHead<C> {
     /// The size of the header (in bytes)
     pub const SIZE: usize = 33;

-    /// Builds the header in a Monero specific way
-    pub fn build_monero(
-        payload_size: u64,
-        have_to_return_data: bool,
-        command: u32,
-        flags: Flags,
-        return_code: i32,
-    ) -> BucketHead {
-        BucketHead {
-            signature: LEVIN_SIGNATURE,
-            size: payload_size,
-            have_to_return_data,
-            command,
-            return_code,
-            flags,
-            protocol_version: PROTOCOL_VERSION,
-        }
-    }
-
     /// Builds the header from bytes, this function does not check any fields should
-    /// match the expected ones (signature, protocol_version)
+    /// match the expected ones.
     ///
     /// # Panics
     /// This function will panic if there aren't enough bytes to fill the header.
     /// Currently ['SIZE'](BucketHead::SIZE)
-    pub fn from_bytes(buf: &mut BytesMut) -> Result<BucketHead, BucketError> {
-        let header = BucketHead {
+    pub fn from_bytes(buf: &mut BytesMut) -> BucketHead<C> {
+        BucketHead {
             signature: buf.get_u64_le(),
             size: buf.get_u64_le(),
             have_to_return_data: buf.get_u8() != 0,
-            command: buf.get_u32_le(),
+            command: buf.get_u32_le().into(),
             return_code: buf.get_i32_le(),
-            flags: Flags::try_from(buf.get_u32_le())?,
+            flags: Flags::from(buf.get_u32_le()),
             protocol_version: buf.get_u32_le(),
-        };
-
-        if header.size > LEVIN_DEFAULT_MAX_PACKET_SIZE {
-            return Err(BucketError::BucketExceededMaxSize);
         }
-
-        Ok(header)
     }

     /// Serializes the header
     pub fn write_bytes(&self, dst: &mut BytesMut) {
-        dst.reserve(BucketHead::SIZE);
+        dst.reserve(Self::SIZE);

         dst.put_u64_le(self.signature);
         dst.put_u64_le(self.size);
         dst.put_u8(if self.have_to_return_data { 1 } else { 0 });
-        dst.put_u32_le(self.command);
+        dst.put_u32_le(self.command.clone().into());
         dst.put_i32_le(self.return_code);
         dst.put_u32_le(self.flags.into());
         dst.put_u32_le(self.protocol_version);
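A quick illustration (not from the repo, assuming the `levin_cuprate::header` module stays public) of the new bitflag-style `Flags`: a request header sets only the Q bit, and the raw u32 now round-trips losslessly because `From<u32>` no longer rejects unknown bit combinations:

    use levin_cuprate::header::Flags;

    fn flags_demo() {
        let flags = Flags::from(0b0000_0001);
        assert!(flags.is_request());
        assert!(!flags.is_response());
        // The conversion back to u32 preserves the original bits.
        assert_eq!(u32::from(flags), 0b0000_0001);
    }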
@@ -36,16 +36,17 @@
 pub mod codec;
 pub mod header;

-pub use codec::LevinCodec;
+pub use codec::*;
 pub use header::BucketHead;

 use std::fmt::Debug;

 use thiserror::Error;

-const PROTOCOL_VERSION: u32 = 1;
-const LEVIN_SIGNATURE: u64 = 0x0101010101012101;
-const LEVIN_DEFAULT_MAX_PACKET_SIZE: u64 = 100_000_000; // 100MB
+const MONERO_PROTOCOL_VERSION: u32 = 1;
+const MONERO_LEVIN_SIGNATURE: u64 = 0x0101010101012101;
+const MONERO_MAX_PACKET_SIZE_BEFORE_HANDSHAKE: u64 = 256 * 1000; // 256 KiB
+const MONERO_MAX_PACKET_SIZE: u64 = 100_000_000; // 100MB

 /// Possible Errors when working with levin buckets
 #[derive(Error, Debug)]

@@ -59,28 +60,53 @@ pub enum BucketError {
     /// Invalid Fragmented Message
     #[error("Levin fragmented message was invalid: {0}")]
     InvalidFragmentedMessage(&'static str),
+    /// The Header did not have the correct signature
+    #[error("Levin header had incorrect signature")]
+    InvalidHeaderSignature,
     /// Error decoding the body
-    #[error("Error decoding bucket body: {0}")]
-    BodyDecodingError(Box<dyn std::error::Error>),
-    /// The levin command is unknown
-    #[error("The levin command is unknown")]
+    #[error("Error decoding bucket body")]
+    BodyDecodingError(Box<dyn std::error::Error + Send + Sync>),
+    /// Unknown command ID
+    #[error("Unknown command ID")]
     UnknownCommand,
     /// I/O error
     #[error("I/O error: {0}")]
     IO(#[from] std::io::Error),
 }

+/// Levin protocol settings, allows setting custom parameters.
+///
+/// For Monero use [`Protocol::default()`]
+#[derive(Debug, Clone, Copy, Eq, PartialEq)]
+pub struct Protocol {
+    pub version: u32,
+    pub signature: u64,
+    pub max_packet_size_before_handshake: u64,
+    pub max_packet_size: u64,
+}
+
+impl Default for Protocol {
+    fn default() -> Self {
+        Protocol {
+            version: MONERO_PROTOCOL_VERSION,
+            signature: MONERO_LEVIN_SIGNATURE,
+            max_packet_size_before_handshake: MONERO_MAX_PACKET_SIZE_BEFORE_HANDSHAKE,
+            max_packet_size: MONERO_MAX_PACKET_SIZE,
+        }
+    }
+}
+
 /// A levin Bucket
 #[derive(Debug)]
-pub struct Bucket {
+pub struct Bucket<C> {
     /// The bucket header
-    pub header: BucketHead,
+    pub header: BucketHead<C>,
     /// The bucket body
     pub body: Vec<u8>,
 }

 /// An enum representing if the message is a request, response or notification.
-#[derive(Debug, Eq, PartialEq)]
+#[derive(Debug, Eq, PartialEq, Clone, Copy)]
 pub enum MessageType {
     /// Request
     Request,

@@ -104,11 +130,11 @@ impl MessageType {
         flags: header::Flags,
         have_to_return: bool,
     ) -> Result<Self, BucketError> {
-        if flags.request && have_to_return {
+        if flags.is_request() && have_to_return {
             Ok(MessageType::Request)
-        } else if flags.request {
+        } else if flags.is_request() {
             Ok(MessageType::Notification)
-        } else if flags.response && !have_to_return {
+        } else if flags.is_response() && !have_to_return {
             Ok(MessageType::Response)
         } else {
             Err(BucketError::InvalidHeaderFlags(

@@ -119,42 +145,36 @@ impl MessageType {
     pub fn as_flags(&self) -> header::Flags {
         match self {
-            MessageType::Request | MessageType::Notification => header::Flags {
-                request: true,
-                ..Default::default()
-            },
-            MessageType::Response => header::Flags {
-                response: true,
-                ..Default::default()
-            },
+            MessageType::Request | MessageType::Notification => header::Flags::REQUEST,
+            MessageType::Response => header::Flags::RESPONSE,
         }
     }
 }

 #[derive(Debug)]
-pub struct BucketBuilder {
+pub struct BucketBuilder<C> {
     signature: Option<u64>,
     ty: Option<MessageType>,
-    command: Option<u32>,
+    command: Option<C>,
     return_code: Option<i32>,
     protocol_version: Option<u32>,
     body: Option<Vec<u8>>,
 }

-impl Default for BucketBuilder {
+impl<C> Default for BucketBuilder<C> {
     fn default() -> Self {
         Self {
-            signature: Some(LEVIN_SIGNATURE),
+            signature: Some(MONERO_LEVIN_SIGNATURE),
             ty: None,
             command: None,
             return_code: None,
-            protocol_version: Some(PROTOCOL_VERSION),
+            protocol_version: Some(MONERO_PROTOCOL_VERSION),
             body: None,
         }
     }
 }

-impl BucketBuilder {
+impl<C: LevinCommand> BucketBuilder<C> {
     pub fn set_signature(&mut self, sig: u64) {
         self.signature = Some(sig)
     }

@@ -163,7 +183,7 @@ impl BucketBuilder {
         self.ty = Some(ty)
     }

-    pub fn set_command(&mut self, command: u32) {
+    pub fn set_command(&mut self, command: C) {
         self.command = Some(command)
     }

@@ -179,7 +199,7 @@ impl BucketBuilder {
         self.body = Some(body)
     }

-    pub fn finish(self) -> Bucket {
+    pub fn finish(self) -> Bucket<C> {
         let body = self.body.unwrap();
         let ty = self.ty.unwrap();
         Bucket {

@@ -199,9 +219,28 @@ impl BucketBuilder {
 /// A levin body
 pub trait LevinBody: Sized {
+    type Command: LevinCommand;
+
     /// Decodes the message from the data in the header
-    fn decode_message(body: &[u8], typ: MessageType, command: u32) -> Result<Self, BucketError>;
+    fn decode_message(
+        body: &[u8],
+        typ: MessageType,
+        command: Self::Command,
+    ) -> Result<Self, BucketError>;

     /// Encodes the message
-    fn encode(&self, builder: &mut BucketBuilder) -> Result<(), BucketError>;
+    fn encode(&self, builder: &mut BucketBuilder<Self::Command>) -> Result<(), BucketError>;
+}
+
+/// The levin commands.
+///
+/// Implementers should account for all possible u32 values, this means
+/// you will probably need some sort of `Unknown` variant.
+pub trait LevinCommand: From<u32> + Into<u32> + PartialEq + Clone {
+    /// Returns the size limit for this command.
+    ///
+    /// must be less than [`usize::MAX`]
+    fn bucket_size_limit(&self) -> u64;
+    /// Returns if this is a handshake
+    fn is_handshake(&self) -> bool;
 }
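A sketch (not from the repo) of what an implementor of the new `LevinCommand` trait looks like. The enum and the size limit below are made up for illustration; the real Monero implementation is in monero-wire's p2p module further down this diff:

    use levin_cuprate::LevinCommand;

    #[derive(Clone, PartialEq)]
    enum MyCommand {
        Hello,
        Unknown(u32), // needed so every u32 maps to *some* command
    }

    impl From<u32> for MyCommand {
        fn from(v: u32) -> Self {
            match v {
                1 => MyCommand::Hello,
                x => MyCommand::Unknown(x),
            }
        }
    }

    impl From<MyCommand> for u32 {
        fn from(c: MyCommand) -> u32 {
            match c {
                MyCommand::Hello => 1,
                MyCommand::Unknown(x) => x,
            }
        }
    }

    impl LevinCommand for MyCommand {
        fn bucket_size_limit(&self) -> u64 {
            1024 * 1024 // arbitrary 1 MB cap for this toy example
        }
        fn is_handshake(&self) -> bool {
            matches!(self, MyCommand::Hello)
        }
    }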
@@ -22,18 +22,12 @@
 //!
 //! This project is licensed under the MIT License.

-// Coding conventions
-#![forbid(unsafe_code)]
-#![deny(non_upper_case_globals)]
-#![deny(non_camel_case_types)]
-#![deny(unused_mut)]
-//#![deny(missing_docs)]
-
 pub mod network_address;
 pub mod p2p;
 mod serde_helpers;

-pub use network_address::NetworkAddress;
+pub use levin_cuprate::BucketError;
+pub use network_address::{NetZone, NetworkAddress};
 pub use p2p::*;

 pub type MoneroWireCodec = levin_cuprate::codec::LevinMessageCodec<Message>;
@@ -17,8 +17,7 @@
 //! Monero network. Core Monero has 4 main addresses: IPv4, IPv6, Tor,
 //! I2p. Currently this module only has IPv(4/6).
 //!
-use std::net::{SocketAddrV4, SocketAddrV6};
-use std::{hash::Hash, net};
+use std::{hash::Hash, net, net::SocketAddr};

 use serde::{Deserialize, Serialize};

@@ -38,16 +37,13 @@ pub enum NetZone {
 #[serde(try_from = "TaggedNetworkAddress")]
 #[serde(into = "TaggedNetworkAddress")]
 pub enum NetworkAddress {
-    /// IPv4
-    IPv4(SocketAddrV4),
-    /// IPv6
-    IPv6(SocketAddrV6),
+    Clear(SocketAddr),
 }

 impl NetworkAddress {
     pub fn get_zone(&self) -> NetZone {
         match self {
-            NetworkAddress::IPv4(_) | NetworkAddress::IPv6(_) => NetZone::Public,
+            NetworkAddress::Clear(_) => NetZone::Public,
         }
     }

@@ -63,29 +59,42 @@ impl NetworkAddress {
     pub fn port(&self) -> u16 {
         match self {
-            NetworkAddress::IPv4(ip) => ip.port(),
-            NetworkAddress::IPv6(ip) => ip.port(),
+            NetworkAddress::Clear(ip) => ip.port(),
         }
     }
 }

 impl From<net::SocketAddrV4> for NetworkAddress {
     fn from(value: net::SocketAddrV4) -> Self {
-        NetworkAddress::IPv4(value)
+        NetworkAddress::Clear(value.into())
     }
 }

 impl From<net::SocketAddrV6> for NetworkAddress {
     fn from(value: net::SocketAddrV6) -> Self {
-        NetworkAddress::IPv6(value)
+        NetworkAddress::Clear(value.into())
     }
 }

-impl From<net::SocketAddr> for NetworkAddress {
-    fn from(value: net::SocketAddr) -> Self {
+impl From<SocketAddr> for NetworkAddress {
+    fn from(value: SocketAddr) -> Self {
         match value {
-            net::SocketAddr::V4(v4) => v4.into(),
-            net::SocketAddr::V6(v6) => v6.into(),
+            SocketAddr::V4(v4) => v4.into(),
+            SocketAddr::V6(v6) => v6.into(),
         }
     }
 }

+#[derive(Debug, Copy, Clone, Eq, PartialEq, thiserror::Error)]
+#[error("Network address is not in the correct zone")]
+pub struct NetworkAddressIncorrectZone;
+
+impl TryFrom<NetworkAddress> for SocketAddr {
+    type Error = NetworkAddressIncorrectZone;
+    fn try_from(value: NetworkAddress) -> Result<Self, Self::Error> {
+        match value {
+            NetworkAddress::Clear(addr) => Ok(addr),
+            //_ => Err(NetworkAddressIncorrectZone)
+        }
+    }
+}
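A quick illustration (not from the repo) of the collapsed address type: IPv4 and IPv6 clearnet peers both become `NetworkAddress::Clear`, and converting back to a `SocketAddr` is fallible to leave room for future anonymity-network zones. The example address is arbitrary:

    use std::net::SocketAddr;
    use monero_wire::{NetZone, NetworkAddress};

    fn address_demo() {
        let sock: SocketAddr = "203.0.113.7:18080".parse().unwrap();
        let addr = NetworkAddress::from(sock);

        // Clearnet addresses all live in the public zone.
        assert!(matches!(addr.get_zone(), NetZone::Public));
        // Converting back succeeds for the clear zone.
        assert_eq!(SocketAddr::try_from(addr).unwrap(), sock);
    }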
@@ -1,4 +1,4 @@
-use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6};
+use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};

 use serde::{Deserialize, Serialize};
 use thiserror::Error;

@@ -30,20 +30,22 @@ impl TryFrom<TaggedNetworkAddress> for NetworkAddress {
 impl From<NetworkAddress> for TaggedNetworkAddress {
     fn from(value: NetworkAddress) -> Self {
         match value {
-            NetworkAddress::IPv4(addr) => TaggedNetworkAddress {
-                ty: 1,
-                addr: AllFieldsNetworkAddress {
-                    m_ip: Some(u32::from_be_bytes(addr.ip().octets())),
-                    m_port: Some(addr.port()),
-                    ..Default::default()
+            NetworkAddress::Clear(addr) => match addr {
+                SocketAddr::V4(addr) => TaggedNetworkAddress {
+                    ty: 1,
+                    addr: AllFieldsNetworkAddress {
+                        m_ip: Some(u32::from_be_bytes(addr.ip().octets())),
+                        m_port: Some(addr.port()),
+                        ..Default::default()
+                    },
                 },
-            },
-            NetworkAddress::IPv6(addr) => TaggedNetworkAddress {
-                ty: 2,
-                addr: AllFieldsNetworkAddress {
-                    addr: Some(addr.ip().octets()),
-                    m_port: Some(addr.port()),
-                    ..Default::default()
+                SocketAddr::V6(addr) => TaggedNetworkAddress {
+                    ty: 2,
+                    addr: AllFieldsNetworkAddress {
+                        addr: Some(addr.ip().octets()),
+                        m_port: Some(addr.port()),
+                        ..Default::default()
+                    },
                 },
             },
         }

@@ -63,8 +65,8 @@ struct AllFieldsNetworkAddress {
 impl AllFieldsNetworkAddress {
     fn try_into_network_address(self, ty: u8) -> Option<NetworkAddress> {
         Some(match ty {
-            1 => NetworkAddress::IPv4(SocketAddrV4::new(Ipv4Addr::from(self.m_ip?), self.m_port?)),
-            2 => NetworkAddress::IPv6(SocketAddrV6::new(
+            1 => NetworkAddress::from(SocketAddrV4::new(Ipv4Addr::from(self.m_ip?), self.m_port?)),
+            2 => NetworkAddress::from(SocketAddrV6::new(
                 Ipv6Addr::from(self.addr?),
                 self.m_port?,
                 0,
@ -16,7 +16,10 @@
|
||||||
//! This module defines a Monero `Message` enum which contains
|
//! This module defines a Monero `Message` enum which contains
|
||||||
//! every possible Monero network message (levin body)
|
//! every possible Monero network message (levin body)
|
||||||
|
|
||||||
use levin_cuprate::{BucketBuilder, BucketError, LevinBody, MessageType};
|
use levin_cuprate::{
|
||||||
|
BucketBuilder, BucketError, LevinBody, LevinCommand as LevinCommandTrait, MessageType,
|
||||||
|
};
|
||||||
|
use std::fmt::Formatter;
|
||||||
|
|
||||||
pub mod admin;
|
pub mod admin;
|
||||||
pub mod common;
|
pub mod common;
|
||||||
|
@ -26,6 +29,127 @@ use admin::*;
|
||||||
pub use common::{BasicNodeData, CoreSyncData, PeerListEntryBase};
|
pub use common::{BasicNodeData, CoreSyncData, PeerListEntryBase};
|
||||||
use protocol::*;
|
use protocol::*;
|
||||||
|
|
||||||
|
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
|
||||||
|
pub enum LevinCommand {
|
||||||
|
Handshake,
|
||||||
|
TimedSync,
|
||||||
|
Ping,
|
||||||
|
SupportFlags,
|
||||||
|
|
||||||
|
NewBlock,
|
||||||
|
NewTransactions,
|
||||||
|
GetObjectsRequest,
|
||||||
|
GetObjectsResponse,
|
||||||
|
ChainRequest,
|
||||||
|
ChainResponse,
|
||||||
|
NewFluffyBlock,
|
||||||
|
FluffyMissingTxsRequest,
|
||||||
|
GetTxPoolCompliment,
|
||||||
|
|
||||||
|
Unknown(u32),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::fmt::Display for LevinCommand {
|
||||||
|
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||||
|
if let LevinCommand::Unknown(id) = self {
|
||||||
|
return f.write_str(&format!("unknown id: {}", id));
|
||||||
|
}
|
||||||
|
|
||||||
|
f.write_str(match self {
|
||||||
|
LevinCommand::Handshake => "handshake",
|
||||||
|
LevinCommand::TimedSync => "timed sync",
|
||||||
|
LevinCommand::Ping => "ping",
|
||||||
|
LevinCommand::SupportFlags => "support flags",
|
||||||
|
|
||||||
|
LevinCommand::NewBlock => "new block",
|
||||||
|
LevinCommand::NewTransactions => "new transactions",
|
||||||
|
LevinCommand::GetObjectsRequest => "get objects request",
|
||||||
|
LevinCommand::GetObjectsResponse => "get objects response",
|
||||||
|
LevinCommand::ChainRequest => "chain request",
|
||||||
|
LevinCommand::ChainResponse => "chain response",
|
||||||
|
LevinCommand::NewFluffyBlock => "new fluffy block",
|
||||||
|
LevinCommand::FluffyMissingTxsRequest => "fluffy missing transaction request",
|
||||||
|
LevinCommand::GetTxPoolCompliment => "get transaction pool compliment",
|
||||||
|
|
||||||
|
LevinCommand::Unknown(_) => unreachable!(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl LevinCommandTrait for LevinCommand {
|
||||||
|
fn bucket_size_limit(&self) -> u64 {
|
||||||
|
// https://github.com/monero-project/monero/blob/00fd416a99686f0956361d1cd0337fe56e58d4a7/src/cryptonote_basic/connection_context.cpp#L37
|
||||||
|
match self {
|
||||||
|
LevinCommand::Handshake => 65536,
|
||||||
|
LevinCommand::TimedSync => 65536,
|
||||||
|
LevinCommand::Ping => 4096,
|
||||||
|
LevinCommand::SupportFlags => 4096,
|
||||||
|
|
||||||
|
LevinCommand::NewBlock => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though)
|
||||||
|
LevinCommand::NewTransactions => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though)
|
||||||
|
LevinCommand::GetObjectsRequest => 1024 * 1024 * 2, // 2 MB
|
||||||
|
LevinCommand::GetObjectsResponse => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though)
|
||||||
|
LevinCommand::ChainRequest => 512 * 1024, // 512 kB
|
||||||
|
LevinCommand::ChainResponse => 1024 * 1024 * 4, // 4 MB
|
||||||
|
LevinCommand::NewFluffyBlock => 1024 * 1024 * 4, // 4 MB
|
||||||
|
LevinCommand::FluffyMissingTxsRequest => 1024 * 1024, // 1 MB
|
||||||
|
LevinCommand::GetTxPoolCompliment => 1024 * 1024 * 4, // 4 MB
|
||||||
|
|
||||||
|
LevinCommand::Unknown(_) => usize::MAX.try_into().unwrap_or(u64::MAX),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn is_handshake(&self) -> bool {
|
||||||
|
matches!(self, LevinCommand::Handshake)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<u32> for LevinCommand {
|
||||||
|
fn from(value: u32) -> Self {
|
||||||
|
match value {
|
||||||
|
1001 => LevinCommand::Handshake,
|
||||||
|
1002 => LevinCommand::TimedSync,
|
||||||
|
1003 => LevinCommand::Ping,
|
||||||
|
1007 => LevinCommand::SupportFlags,
|
||||||
|
|
||||||
|
2001 => LevinCommand::NewBlock,
|
||||||
|
2002 => LevinCommand::NewTransactions,
|
||||||
|
2003 => LevinCommand::GetObjectsRequest,
|
||||||
|
2004 => LevinCommand::GetObjectsResponse,
|
||||||
|
2006 => LevinCommand::ChainRequest,
|
||||||
|
2007 => LevinCommand::ChainResponse,
|
||||||
|
2008 => LevinCommand::NewFluffyBlock,
|
||||||
|
2009 => LevinCommand::FluffyMissingTxsRequest,
|
||||||
|
2010 => LevinCommand::GetTxPoolCompliment,
|
||||||
|
|
||||||
|
x => LevinCommand::Unknown(x),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<LevinCommand> for u32 {
|
||||||
|
fn from(value: LevinCommand) -> Self {
|
||||||
|
match value {
|
||||||
|
LevinCommand::Handshake => 1001,
|
||||||
|
LevinCommand::TimedSync => 1002,
|
||||||
|
LevinCommand::Ping => 1003,
|
||||||
|
LevinCommand::SupportFlags => 1007,
|
||||||
|
|
||||||
|
LevinCommand::NewBlock => 2001,
|
||||||
|
LevinCommand::NewTransactions => 2002,
|
||||||
|
LevinCommand::GetObjectsRequest => 2003,
|
||||||
|
LevinCommand::GetObjectsResponse => 2004,
|
||||||
|
LevinCommand::ChainRequest => 2006,
|
||||||
|
LevinCommand::ChainResponse => 2007,
|
||||||
|
LevinCommand::NewFluffyBlock => 2008,
|
||||||
|
LevinCommand::FluffyMissingTxsRequest => 2009,
|
||||||
|
LevinCommand::GetTxPoolCompliment => 2010,
|
||||||
|
|
||||||
|
LevinCommand::Unknown(x) => x,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn decode_message<T: serde::de::DeserializeOwned, Ret>(
|
fn decode_message<T: serde::de::DeserializeOwned, Ret>(
|
||||||
ret: impl FnOnce(T) -> Ret,
|
ret: impl FnOnce(T) -> Ret,
|
||||||
buf: &[u8],
|
buf: &[u8],
|
||||||
|
@ -36,9 +160,9 @@ fn decode_message<T: serde::de::DeserializeOwned, Ret>(
|
||||||
}
|
}
|
||||||
|
|
||||||
fn build_message<T: serde::Serialize>(
|
fn build_message<T: serde::Serialize>(
|
||||||
id: u32,
|
id: LevinCommand,
|
||||||
val: &T,
|
val: &T,
|
||||||
builder: &mut BucketBuilder,
|
builder: &mut BucketBuilder<LevinCommand>,
|
||||||
) -> Result<(), BucketError> {
|
) -> Result<(), BucketError> {
|
||||||
builder.set_command(id);
|
builder.set_command(id);
|
||||||
builder.set_body(
|
builder.set_body(
|
||||||
|
@ -61,34 +185,66 @@ pub enum ProtocolMessage {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ProtocolMessage {
|
impl ProtocolMessage {
|
||||||
fn decode(buf: &[u8], command: u32) -> Result<Self, BucketError> {
|
pub fn command(&self) -> LevinCommand {
|
||||||
|
use LevinCommand as C;
|
||||||
|
|
||||||
|
match self {
|
||||||
|
ProtocolMessage::NewBlock(_) => C::NewBlock,
|
||||||
|
ProtocolMessage::NewFluffyBlock(_) => C::NewFluffyBlock,
|
||||||
|
ProtocolMessage::GetObjectsRequest(_) => C::GetObjectsRequest,
|
||||||
|
ProtocolMessage::GetObjectsResponse(_) => C::GetObjectsResponse,
|
||||||
|
ProtocolMessage::ChainRequest(_) => C::ChainRequest,
|
||||||
|
ProtocolMessage::ChainEntryResponse(_) => C::ChainResponse,
|
||||||
|
ProtocolMessage::NewTransactions(_) => C::NewTransactions,
|
||||||
|
ProtocolMessage::FluffyMissingTransactionsRequest(_) => C::FluffyMissingTxsRequest,
|
||||||
|
ProtocolMessage::GetTxPoolCompliment(_) => C::GetTxPoolCompliment,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn decode(buf: &[u8], command: LevinCommand) -> Result<Self, BucketError> {
|
||||||
|
use LevinCommand as C;
|
||||||
|
|
||||||
Ok(match command {
|
Ok(match command {
|
||||||
2001 => decode_message(ProtocolMessage::NewBlock, buf)?,
|
C::NewBlock => decode_message(ProtocolMessage::NewBlock, buf)?,
|
||||||
2002 => decode_message(ProtocolMessage::NewTransactions, buf)?,
|
C::NewTransactions => decode_message(ProtocolMessage::NewTransactions, buf)?,
|
||||||
2003 => decode_message(ProtocolMessage::GetObjectsRequest, buf)?,
|
C::GetObjectsRequest => decode_message(ProtocolMessage::GetObjectsRequest, buf)?,
|
||||||
2004 => decode_message(ProtocolMessage::GetObjectsResponse, buf)?,
|
C::GetObjectsResponse => decode_message(ProtocolMessage::GetObjectsResponse, buf)?,
|
||||||
2006 => decode_message(ProtocolMessage::ChainRequest, buf)?,
|
C::ChainRequest => decode_message(ProtocolMessage::ChainRequest, buf)?,
|
||||||
2007 => decode_message(ProtocolMessage::ChainEntryResponse, buf)?,
|
C::ChainResponse => decode_message(ProtocolMessage::ChainEntryResponse, buf)?,
|
||||||
2008 => decode_message(ProtocolMessage::NewFluffyBlock, buf)?,
|
C::NewFluffyBlock => decode_message(ProtocolMessage::NewFluffyBlock, buf)?,
|
||||||
2009 => decode_message(ProtocolMessage::FluffyMissingTransactionsRequest, buf)?,
|
C::FluffyMissingTxsRequest => {
|
||||||
2010 => decode_message(ProtocolMessage::GetTxPoolCompliment, buf)?,
|
decode_message(ProtocolMessage::FluffyMissingTransactionsRequest, buf)?
|
||||||
|
}
|
||||||
|
C::GetTxPoolCompliment => decode_message(ProtocolMessage::GetTxPoolCompliment, buf)?,
|
||||||
_ => return Err(BucketError::UnknownCommand),
|
_ => return Err(BucketError::UnknownCommand),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn build(&self, builder: &mut BucketBuilder) -> Result<(), BucketError> {
|
fn build(&self, builder: &mut BucketBuilder<LevinCommand>) -> Result<(), BucketError> {
|
||||||
|
use LevinCommand as C;
|
||||||
|
|
||||||
match self {
|
match self {
|
||||||
ProtocolMessage::NewBlock(val) => build_message(2001, val, builder)?,
|
ProtocolMessage::NewBlock(val) => build_message(C::NewBlock, val, builder)?,
|
||||||
ProtocolMessage::NewTransactions(val) => build_message(2002, val, builder)?,
|
ProtocolMessage::NewTransactions(val) => {
|
||||||
ProtocolMessage::GetObjectsRequest(val) => build_message(2003, val, builder)?,
|
build_message(C::NewTransactions, val, builder)?
|
||||||
ProtocolMessage::GetObjectsResponse(val) => build_message(2004, val, builder)?,
|
}
|
||||||
ProtocolMessage::ChainRequest(val) => build_message(2006, val, builder)?,
|
ProtocolMessage::GetObjectsRequest(val) => {
|
||||||
ProtocolMessage::ChainEntryResponse(val) => build_message(2007, &val, builder)?,
|
build_message(C::GetObjectsRequest, val, builder)?
|
||||||
ProtocolMessage::NewFluffyBlock(val) => build_message(2008, val, builder)?,
|
}
|
||||||
ProtocolMessage::FluffyMissingTransactionsRequest(val) => {
|
ProtocolMessage::GetObjectsResponse(val) => {
|
||||||
build_message(2009, val, builder)?
|
build_message(C::GetObjectsResponse, val, builder)?
|
||||||
|
}
|
||||||
|
ProtocolMessage::ChainRequest(val) => build_message(C::ChainRequest, val, builder)?,
|
||||||
|
ProtocolMessage::ChainEntryResponse(val) => {
|
||||||
|
build_message(C::ChainResponse, &val, builder)?
|
||||||
|
}
|
||||||
|
ProtocolMessage::NewFluffyBlock(val) => build_message(C::NewFluffyBlock, val, builder)?,
|
||||||
|
ProtocolMessage::FluffyMissingTransactionsRequest(val) => {
|
||||||
|
build_message(C::FluffyMissingTxsRequest, val, builder)?
|
||||||
|
}
|
||||||
|
ProtocolMessage::GetTxPoolCompliment(val) => {
|
||||||
|
build_message(C::GetTxPoolCompliment, val, builder)?
|
||||||
}
|
}
|
||||||
ProtocolMessage::GetTxPoolCompliment(val) => build_message(2010, val, builder)?,
|
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -102,26 +258,41 @@ pub enum RequestMessage {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl RequestMessage {
|
impl RequestMessage {
|
||||||
fn decode(buf: &[u8], command: u32) -> Result<Self, BucketError> {
|
pub fn command(&self) -> LevinCommand {
|
||||||
|
use LevinCommand as C;
|
||||||
|
|
||||||
|
match self {
|
||||||
|
RequestMessage::Handshake(_) => C::Handshake,
|
||||||
|
RequestMessage::Ping => C::Ping,
|
||||||
|
RequestMessage::SupportFlags => C::SupportFlags,
|
||||||
|
RequestMessage::TimedSync(_) => C::TimedSync,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn decode(buf: &[u8], command: LevinCommand) -> Result<Self, BucketError> {
|
||||||
|
use LevinCommand as C;
|
||||||
|
|
||||||
Ok(match command {
|
Ok(match command {
|
||||||
1001 => decode_message(RequestMessage::Handshake, buf)?,
|
C::Handshake => decode_message(RequestMessage::Handshake, buf)?,
|
||||||
1002 => decode_message(RequestMessage::TimedSync, buf)?,
|
C::TimedSync => decode_message(RequestMessage::TimedSync, buf)?,
|
||||||
1003 => RequestMessage::Ping,
|
C::Ping => RequestMessage::Ping,
|
||||||
1007 => RequestMessage::SupportFlags,
|
C::SupportFlags => RequestMessage::SupportFlags,
|
||||||
_ => return Err(BucketError::UnknownCommand),
|
_ => return Err(BucketError::UnknownCommand),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn build(&self, builder: &mut BucketBuilder) -> Result<(), BucketError> {
|
fn build(&self, builder: &mut BucketBuilder<LevinCommand>) -> Result<(), BucketError> {
|
||||||
|
use LevinCommand as C;
|
||||||
|
|
||||||
match self {
|
match self {
|
||||||
RequestMessage::Handshake(val) => build_message(1001, val, builder)?,
|
RequestMessage::Handshake(val) => build_message(C::Handshake, val, builder)?,
|
||||||
RequestMessage::TimedSync(val) => build_message(1002, val, builder)?,
|
RequestMessage::TimedSync(val) => build_message(C::TimedSync, val, builder)?,
|
||||||
RequestMessage::Ping => {
|
RequestMessage::Ping => {
|
||||||
builder.set_command(1003);
|
builder.set_command(C::Ping);
|
||||||
builder.set_body(Vec::new());
|
builder.set_body(Vec::new());
|
||||||
}
|
}
|
||||||
RequestMessage::SupportFlags => {
|
RequestMessage::SupportFlags => {
|
||||||
builder.set_command(1007);
|
builder.set_command(C::SupportFlags);
|
||||||
builder.set_body(Vec::new());
|
builder.set_body(Vec::new());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -137,22 +308,37 @@ pub enum ResponseMessage {
}

impl ResponseMessage {
-    fn decode(buf: &[u8], command: u32) -> Result<Self, BucketError> {
+    pub fn command(&self) -> LevinCommand {
+        use LevinCommand as C;
+
+        match self {
+            ResponseMessage::Handshake(_) => C::Handshake,
+            ResponseMessage::Ping(_) => C::Ping,
+            ResponseMessage::SupportFlags(_) => C::SupportFlags,
+            ResponseMessage::TimedSync(_) => C::TimedSync,
+        }
+    }
+
+    fn decode(buf: &[u8], command: LevinCommand) -> Result<Self, BucketError> {
+        use LevinCommand as C;
+
        Ok(match command {
-            1001 => decode_message(ResponseMessage::Handshake, buf)?,
+            C::Handshake => decode_message(ResponseMessage::Handshake, buf)?,
-            1002 => decode_message(ResponseMessage::TimedSync, buf)?,
+            C::TimedSync => decode_message(ResponseMessage::TimedSync, buf)?,
-            1003 => decode_message(ResponseMessage::Ping, buf)?,
+            C::Ping => decode_message(ResponseMessage::Ping, buf)?,
-            1007 => decode_message(ResponseMessage::SupportFlags, buf)?,
+            C::SupportFlags => decode_message(ResponseMessage::SupportFlags, buf)?,
            _ => return Err(BucketError::UnknownCommand),
        })
    }

-    fn build(&self, builder: &mut BucketBuilder) -> Result<(), BucketError> {
+    fn build(&self, builder: &mut BucketBuilder<LevinCommand>) -> Result<(), BucketError> {
+        use LevinCommand as C;

        match self {
-            ResponseMessage::Handshake(val) => build_message(1001, val, builder)?,
+            ResponseMessage::Handshake(val) => build_message(C::Handshake, val, builder)?,
-            ResponseMessage::TimedSync(val) => build_message(1002, val, builder)?,
+            ResponseMessage::TimedSync(val) => build_message(C::TimedSync, val, builder)?,
-            ResponseMessage::Ping(val) => build_message(1003, val, builder)?,
+            ResponseMessage::Ping(val) => build_message(C::Ping, val, builder)?,
-            ResponseMessage::SupportFlags(val) => build_message(1007, val, builder)?,
+            ResponseMessage::SupportFlags(val) => build_message(C::SupportFlags, val, builder)?,
        }
        Ok(())
    }
@@ -164,8 +350,36 @@ pub enum Message {
    Protocol(ProtocolMessage),
}

+impl Message {
+    pub fn is_request(&self) -> bool {
+        matches!(self, Message::Request(_))
+    }
+
+    pub fn is_response(&self) -> bool {
+        matches!(self, Message::Response(_))
+    }
+
+    pub fn is_protocol(&self) -> bool {
+        matches!(self, Message::Protocol(_))
+    }
+
+    pub fn command(&self) -> LevinCommand {
+        match self {
+            Message::Request(mes) => mes.command(),
+            Message::Response(mes) => mes.command(),
+            Message::Protocol(mes) => mes.command(),
+        }
+    }
+}
+
impl LevinBody for Message {
-    fn decode_message(body: &[u8], typ: MessageType, command: u32) -> Result<Self, BucketError> {
+    type Command = LevinCommand;
+
+    fn decode_message(
+        body: &[u8],
+        typ: MessageType,
+        command: LevinCommand,
+    ) -> Result<Self, BucketError> {
        Ok(match typ {
            MessageType::Request => Message::Request(RequestMessage::decode(body, command)?),
            MessageType::Response => Message::Response(ResponseMessage::decode(body, command)?),
@@ -173,7 +387,7 @@ impl LevinBody for Message {
        })
    }

-    fn encode(&self, builder: &mut BucketBuilder) -> Result<(), BucketError> {
+    fn encode(&self, builder: &mut BucketBuilder<LevinCommand>) -> Result<(), BucketError> {
        match self {
            Message::Protocol(pro) => {
                builder.set_message_type(MessageType::Notification);
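A minimal usage sketch (not part of this commit) of the new typed-command surface: `inspect` is a hypothetical helper, and it relies on the `Display` impl on `LevinCommand` that the rest of this change already uses for logging.

use monero_wire::{LevinCommand, Message};

// Hypothetical helper: look at a decoded `Message` without touching its body.
// `Message::command()` now returns the `LevinCommand` enum instead of a raw u32.
fn inspect(msg: &Message) {
    match msg.command() {
        LevinCommand::Handshake => println!("handshake-related message"),
        other => println!("message with command {other}"),
    }
}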
@@ -39,27 +39,14 @@ impl From<PeerSupportFlags> for u32 {
    }
}

-/*
impl PeerSupportFlags {
-    const FLUFFY_BLOCKS: u32 = 0b0000_0001;
+    //const FLUFFY_BLOCKS: u32 = 0b0000_0001;
-    /// checks if `self` has all the flags that `other` has
-    pub fn contains(&self, other: &PeerSupportFlags) -> bool {
-        self.0. & other.0 == other.0
-    }
-    pub fn supports_fluffy_blocks(&self) -> bool {
-        self.0 & Self::FLUFFY_BLOCKS == Self::FLUFFY_BLOCKS
-    }
-    pub fn get_support_flag_fluffy_blocks() -> Self {
-        PeerSupportFlags {
-            support_flags: Self::FLUFFY_BLOCKS,
-        }
-    }
-
    pub fn is_empty(&self) -> bool {
        self.0 == 0
    }
}
-*/

impl From<u8> for PeerSupportFlags {
    fn from(value: u8) -> Self {
        PeerSupportFlags(value.into())
@@ -1,5 +1,5 @@
[package]
-name = "cuprate-peer"
+name = "cuprate-p2p"
version = "0.1.0"
edition = "2021"
license = "AGPL-3.0-only"
@@ -12,8 +12,12 @@ thiserror = "1.0.39"
cuprate-common = {path = "../common"}
monero-wire = {path= "../net/monero-wire"}
futures = "0.3.26"
-tower = {version = "0.4.13", features = ["util", "steer"]}
+tower = {version = "0.4.13", features = ["util", "steer", "load", "discover", "load-shed", "buffer", "timeout"]}
-tokio = {version= "1.27", features=["rt", "time"]}
+tokio = {version= "1.27", features=["rt", "time", "net"]}
+tokio-util = {version = "0.7.8", features=["codec"]}
+tokio-stream = {version="0.1.14", features=["time"]}
async-trait = "0.1.68"
tracing = "0.1.37"
-rand = "0.8.5"
+tracing-error = "0.2.0"
+rand = "0.8.5"
+pin-project = "1.0.12"
p2p/monero-peer/Cargo.toml (new file, 28 lines)
@@ -0,0 +1,28 @@
[package]
name = "monero-peer"
version = "0.1.0"
edition = "2021"

[features]
default = []

[dependencies]
monero-wire = {path= "../../net/monero-wire"}
cuprate-common = {path = "../../common"}

tokio = {version= "1.34.0", default-features = false, features = ["net"]}
tokio-util = { version = "0.7.10", default-features = false, features = ["codec"] }
futures = "0.3.29"
async-trait = "0.1.74"

tower = { version= "0.4.13", features = ["util"] }
thiserror = "1.0.50"

tracing = "0.1.40"

[dev-dependencies]
cuprate-test-utils = {path = "../../test-utils"}

hex = "0.4.3"
tokio = {version= "1.34.0", default-features = false, features = ["net", "rt-multi-thread", "rt", "macros"]}
tracing-subscriber = "0.3"
p2p/monero-peer/src/client.rs (new file, 6 lines)
@@ -0,0 +1,6 @@
mod conector;
mod connection;
pub mod handshaker;

pub use conector::{ConnectRequest, Connector};
pub use handshaker::{DoHandshakeRequest, HandShaker, HandshakeError};
p2p/monero-peer/src/client/conector.rs (new file, 61 lines)
@@ -0,0 +1,61 @@
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

use futures::FutureExt;
use tower::{Service, ServiceExt};

use crate::{
    client::{DoHandshakeRequest, HandShaker, HandshakeError},
    AddressBook, ConnectionDirection, CoreSyncSvc, NetworkZone, PeerRequestHandler,
};

pub struct ConnectRequest<Z: NetworkZone> {
    pub addr: Z::Addr,
}

pub struct Connector<Z: NetworkZone, AdrBook, CSync, ReqHdlr> {
    handshaker: HandShaker<Z, AdrBook, CSync, ReqHdlr>,
}

impl<Z: NetworkZone, AdrBook, CSync, ReqHdlr> Connector<Z, AdrBook, CSync, ReqHdlr> {
    pub fn new(handshaker: HandShaker<Z, AdrBook, CSync, ReqHdlr>) -> Self {
        Self { handshaker }
    }
}

impl<Z: NetworkZone, AdrBook, CSync, ReqHdlr> Service<ConnectRequest<Z>>
    for Connector<Z, AdrBook, CSync, ReqHdlr>
where
    AdrBook: AddressBook<Z> + Clone,
    CSync: CoreSyncSvc + Clone,
    ReqHdlr: PeerRequestHandler + Clone,
{
    type Response = ();
    type Error = HandshakeError;
    type Future =
        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: ConnectRequest<Z>) -> Self::Future {
        tracing::debug!("Connecting to peer: {}", req.addr);
        let mut handshaker = self.handshaker.clone();

        async move {
            let (peer_stream, peer_sink) = Z::connect_to_peer(req.addr.clone()).await?;
            let req = DoHandshakeRequest {
                addr: req.addr,
                peer_stream,
                peer_sink,
                direction: ConnectionDirection::OutBound,
            };
            handshaker.ready().await?.call(req).await
        }
        .boxed()
    }
}
p2p/monero-peer/src/client/connection.rs (new file, 176 lines)
@@ -0,0 +1,176 @@
use futures::{
    channel::{mpsc, oneshot},
    stream::FusedStream,
    SinkExt, StreamExt,
};

use monero_wire::{LevinCommand, Message};

use crate::{MessageID, NetworkZone, PeerError, PeerRequest, PeerRequestHandler, PeerResponse};

pub struct ConnectionTaskRequest {
    request: PeerRequest,
    response_channel: oneshot::Sender<Result<PeerResponse, PeerError>>,
}

pub enum State {
    WaitingForRequest,
    WaitingForResponse {
        request_id: MessageID,
        tx: oneshot::Sender<Result<PeerResponse, PeerError>>,
    },
}

impl State {
    /// Returns if the [`LevinCommand`] is the correct response message for our request.
    ///
    /// e.g that we didn't get a block for a txs request.
    fn levin_command_response(&self, command: LevinCommand) -> bool {
        match self {
            State::WaitingForResponse { request_id, .. } => matches!(
                (request_id, command),
                (MessageID::Handshake, LevinCommand::Handshake)
                    | (MessageID::TimedSync, LevinCommand::TimedSync)
                    | (MessageID::Ping, LevinCommand::Ping)
                    | (MessageID::SupportFlags, LevinCommand::SupportFlags)
                    | (MessageID::GetObjects, LevinCommand::GetObjectsResponse)
                    | (MessageID::GetChain, LevinCommand::ChainResponse)
                    | (MessageID::FluffyMissingTxs, LevinCommand::NewFluffyBlock)
                    | (
                        MessageID::GetTxPoolCompliment,
                        LevinCommand::NewTransactions
                    )
            ),
            _ => false,
        }
    }
}

pub struct Connection<Z: NetworkZone, ReqHndlr> {
    peer_sink: Z::Sink,

    state: State,
    client_rx: mpsc::Receiver<ConnectionTaskRequest>,

    peer_request_handler: ReqHndlr,
}

impl<Z: NetworkZone, ReqHndlr> Connection<Z, ReqHndlr>
where
    ReqHndlr: PeerRequestHandler,
{
    pub fn new(
        peer_sink: Z::Sink,
        client_rx: mpsc::Receiver<ConnectionTaskRequest>,

        peer_request_handler: ReqHndlr,
    ) -> Connection<Z, ReqHndlr> {
        Connection {
            peer_sink,
            state: State::WaitingForRequest,
            client_rx,
            peer_request_handler,
        }
    }

    async fn handle_response(&mut self, res: PeerResponse) -> Result<(), PeerError> {
        let state = std::mem::replace(&mut self.state, State::WaitingForRequest);
        if let State::WaitingForResponse { request_id, tx } = state {
            if request_id != res.id() {
                // TODO: Fail here
                return Err(PeerError::PeerSentIncorrectResponse);
            }

            // TODO: do more tests here

            // response passed our tests we can send it to the requester
            let _ = tx.send(Ok(res));
            Ok(())
        } else {
            unreachable!("This will only be called when in state WaitingForResponse");
        }
    }

    async fn send_message_to_peer(&mut self, mes: impl Into<Message>) -> Result<(), PeerError> {
        Ok(self.peer_sink.send(mes.into()).await?)
    }

    async fn handle_peer_request(&mut self, _req: PeerRequest) -> Result<(), PeerError> {
        // we should check contents of peer requests for obvious errors like we do with responses
        todo!()
        /*
        let ready_svc = self.svc.ready().await?;
        let res = ready_svc.call(req).await?;
        self.send_message_to_peer(res).await
        */
    }

    async fn handle_client_request(&mut self, req: ConnectionTaskRequest) -> Result<(), PeerError> {
        if req.request.needs_response() {
            self.state = State::WaitingForResponse {
                request_id: req.request.id(),
                tx: req.response_channel,
            };
        }
        // TODO: send NA response to requester
        self.send_message_to_peer(req.request).await
    }

    async fn state_waiting_for_request<Str>(&mut self, stream: &mut Str) -> Result<(), PeerError>
    where
        Str: FusedStream<Item = Result<Message, monero_wire::BucketError>> + Unpin,
    {
        futures::select! {
            peer_message = stream.next() => {
                match peer_message.expect("MessageStream will never return None") {
                    Ok(message) => {
                        self.handle_peer_request(message.try_into().map_err(|_| PeerError::ResponseError(""))?).await
                    },
                    Err(e) => Err(e.into()),
                }
            },
            client_req = self.client_rx.next() => {
                self.handle_client_request(client_req.ok_or(PeerError::ClientChannelClosed)?).await
            },
        }
    }

    async fn state_waiting_for_response<Str>(&mut self, stream: &mut Str) -> Result<(), PeerError>
    where
        Str: FusedStream<Item = Result<Message, monero_wire::BucketError>> + Unpin,
    {
        // put a timeout on this
        let peer_message = stream
            .next()
            .await
            .expect("MessageStream will never return None")?;

        if !peer_message.is_request() && self.state.levin_command_response(peer_message.command()) {
            if let Ok(res) = peer_message.try_into() {
                Ok(self.handle_response(res).await?)
            } else {
                // im almost certain this is impossible to hit, but im not certain enough to use unreachable!()
                Err(PeerError::ResponseError("Peer sent incorrect response"))
            }
        } else if let Ok(req) = peer_message.try_into() {
            self.handle_peer_request(req).await
        } else {
            // this can be hit if the peer sends an incorrect response message
            Err(PeerError::ResponseError("Peer sent incorrect response"))
        }
    }

    pub async fn run<Str>(mut self, mut stream: Str)
    where
        Str: FusedStream<Item = Result<Message, monero_wire::BucketError>> + Unpin,
    {
        loop {
            let _res = match self.state {
                State::WaitingForRequest => self.state_waiting_for_request(&mut stream).await,
                State::WaitingForResponse { .. } => {
                    self.state_waiting_for_response(&mut stream).await
                }
            };
        }
    }
}
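A small sketch (not part of the diff) of how a client half might talk to the connection task above. It assumes the helper sits inside this `connection.rs` module, since the `ConnectionTaskRequest` fields are private; `send_request` is a hypothetical name.

use futures::{
    channel::{mpsc, oneshot},
    SinkExt,
};

use crate::{PeerError, PeerRequest, PeerResponse};

// Hypothetical helper: queue a request for the connection task and wait for
// the paired oneshot to deliver the peer's response (or an error).
async fn send_request(
    client_tx: &mut mpsc::Sender<ConnectionTaskRequest>,
    request: PeerRequest,
) -> Result<PeerResponse, PeerError> {
    let (tx, rx) = oneshot::channel();
    client_tx
        .send(ConnectionTaskRequest {
            request,
            response_channel: tx,
        })
        .await
        .map_err(|_| PeerError::ClientChannelClosed)?;
    // The connection task drops the sender if it dies before replying.
    rx.await.map_err(|_| PeerError::ClientChannelClosed)?
}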
494
p2p/monero-peer/src/client/handshaker.rs
Normal file
494
p2p/monero-peer/src/client/handshaker.rs
Normal file
|
@ -0,0 +1,494 @@
|
||||||
|
use std::{
|
||||||
|
future::Future,
|
||||||
|
marker::PhantomData,
|
||||||
|
pin::Pin,
|
||||||
|
task::{Context, Poll},
|
||||||
|
};
|
||||||
|
|
||||||
|
use futures::{FutureExt, SinkExt, StreamExt};
|
||||||
|
use tower::{Service, ServiceExt};
|
||||||
|
use tracing::Instrument;
|
||||||
|
|
||||||
|
use monero_wire::{
|
||||||
|
admin::{
|
||||||
|
HandshakeRequest, HandshakeResponse, PingResponse, SupportFlagsResponse,
|
||||||
|
PING_OK_RESPONSE_STATUS_TEXT,
|
||||||
|
},
|
||||||
|
common::PeerSupportFlags,
|
||||||
|
BasicNodeData, BucketError, CoreSyncData, Message, RequestMessage, ResponseMessage,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
AddressBook, AddressBookRequest, AddressBookResponse, ConnectionDirection, CoreSyncDataRequest,
|
||||||
|
CoreSyncDataResponse, CoreSyncSvc, NetworkZone, PeerRequestHandler,
|
||||||
|
MAX_PEERS_IN_PEER_LIST_MESSAGE,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error)]
|
||||||
|
pub enum HandshakeError {
|
||||||
|
#[error("peer has the same node ID as us")]
|
||||||
|
PeerHasSameNodeID,
|
||||||
|
#[error("peer is on a different network")]
|
||||||
|
IncorrectNetwork,
|
||||||
|
#[error("peer sent a peer list with peers from different zones")]
|
||||||
|
PeerSentIncorrectZonePeerList(#[from] crate::NetworkAddressIncorrectZone),
|
||||||
|
#[error("peer sent invalid message: {0}")]
|
||||||
|
PeerSentInvalidMessage(&'static str),
|
||||||
|
#[error("Levin bucket error: {0}")]
|
||||||
|
LevinBucketError(#[from] BucketError),
|
||||||
|
#[error("Internal service error: {0}")]
|
||||||
|
InternalSvcErr(#[from] tower::BoxError),
|
||||||
|
#[error("i/o error: {0}")]
|
||||||
|
IO(#[from] std::io::Error),
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct DoHandshakeRequest<Z: NetworkZone> {
|
||||||
|
pub addr: Z::Addr,
|
||||||
|
pub peer_stream: Z::Stream,
|
||||||
|
pub peer_sink: Z::Sink,
|
||||||
|
pub direction: ConnectionDirection,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct HandShaker<Z: NetworkZone, AdrBook, CSync, ReqHdlr> {
|
||||||
|
address_book: AdrBook,
|
||||||
|
core_sync_svc: CSync,
|
||||||
|
peer_request_svc: ReqHdlr,
|
||||||
|
|
||||||
|
our_basic_node_data: BasicNodeData,
|
||||||
|
|
||||||
|
_zone: PhantomData<Z>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Z: NetworkZone, AdrBook, CSync, ReqHdlr> HandShaker<Z, AdrBook, CSync, ReqHdlr> {
|
||||||
|
pub fn new(
|
||||||
|
address_book: AdrBook,
|
||||||
|
core_sync_svc: CSync,
|
||||||
|
peer_request_svc: ReqHdlr,
|
||||||
|
|
||||||
|
our_basic_node_data: BasicNodeData,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
address_book,
|
||||||
|
core_sync_svc,
|
||||||
|
peer_request_svc,
|
||||||
|
our_basic_node_data,
|
||||||
|
_zone: PhantomData,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Z: NetworkZone, AdrBook, CSync, ReqHdlr> Service<DoHandshakeRequest<Z>>
|
||||||
|
for HandShaker<Z, AdrBook, CSync, ReqHdlr>
|
||||||
|
where
|
||||||
|
AdrBook: AddressBook<Z> + Clone,
|
||||||
|
CSync: CoreSyncSvc + Clone,
|
||||||
|
ReqHdlr: PeerRequestHandler + Clone,
|
||||||
|
{
|
||||||
|
type Response = ();
|
||||||
|
type Error = HandshakeError;
|
||||||
|
type Future =
|
||||||
|
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||||
|
|
||||||
|
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||||
|
Poll::Ready(Ok(()))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn call(&mut self, req: DoHandshakeRequest<Z>) -> Self::Future {
|
||||||
|
let DoHandshakeRequest {
|
||||||
|
addr,
|
||||||
|
peer_stream,
|
||||||
|
peer_sink,
|
||||||
|
direction,
|
||||||
|
} = req;
|
||||||
|
|
||||||
|
let address_book = self.address_book.clone();
|
||||||
|
let peer_request_svc = self.peer_request_svc.clone();
|
||||||
|
let core_sync_svc = self.core_sync_svc.clone();
|
||||||
|
let our_basic_node_data = self.our_basic_node_data.clone();
|
||||||
|
|
||||||
|
let span = tracing::info_span!(parent: &tracing::Span::current(), "handshaker", %addr);
|
||||||
|
|
||||||
|
let state_machine = HandshakeStateMachine::<Z, _, _, _> {
|
||||||
|
addr,
|
||||||
|
peer_stream,
|
||||||
|
peer_sink,
|
||||||
|
direction,
|
||||||
|
address_book,
|
||||||
|
core_sync_svc,
|
||||||
|
peer_request_svc,
|
||||||
|
our_basic_node_data,
|
||||||
|
state: HandshakeState::Start,
|
||||||
|
eager_protocol_messages: vec![],
|
||||||
|
};
|
||||||
|
|
||||||
|
async move {
|
||||||
|
// TODO: timeouts
|
||||||
|
state_machine.do_handshake().await
|
||||||
|
}
|
||||||
|
.instrument(span)
|
||||||
|
.boxed()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The states a handshake can be in.
|
||||||
|
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||||
|
enum HandshakeState {
|
||||||
|
/// The initial state.
|
||||||
|
///
|
||||||
|
/// If this is an inbound handshake then this state means we
|
||||||
|
/// are waiting for a [`HandshakeRequest`].
|
||||||
|
Start,
|
||||||
|
/// Waiting for a [`HandshakeResponse`].
|
||||||
|
WaitingForHandshakeResponse,
|
||||||
|
/// Waiting for a [`SupportFlagsResponse`]
|
||||||
|
/// This contains the peers node data.
|
||||||
|
WaitingForSupportFlagResponse(BasicNodeData, CoreSyncData),
|
||||||
|
/// The handshake is complete.
|
||||||
|
/// This contains the peers node data.
|
||||||
|
Complete(BasicNodeData, CoreSyncData),
|
||||||
|
/// An invalid state, the handshake SM should not be in this state.
|
||||||
|
Invalid,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl HandshakeState {
|
||||||
|
/// Returns true if the handshake is completed.
|
||||||
|
pub fn is_complete(&self) -> bool {
|
||||||
|
matches!(self, Self::Complete(..))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// returns the peers [`BasicNodeData`] and [`CoreSyncData`] if the peer
|
||||||
|
/// is in state [`HandshakeState::Complete`].
|
||||||
|
pub fn peer_data(self) -> Option<(BasicNodeData, CoreSyncData)> {
|
||||||
|
match self {
|
||||||
|
HandshakeState::Complete(bnd, coresync) => Some((bnd, coresync)),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct HandshakeStateMachine<Z: NetworkZone, AdrBook, CSync, ReqHdlr> {
|
||||||
|
addr: Z::Addr,
|
||||||
|
|
||||||
|
peer_stream: Z::Stream,
|
||||||
|
peer_sink: Z::Sink,
|
||||||
|
|
||||||
|
direction: ConnectionDirection,
|
||||||
|
|
||||||
|
address_book: AdrBook,
|
||||||
|
core_sync_svc: CSync,
|
||||||
|
peer_request_svc: ReqHdlr,
|
||||||
|
|
||||||
|
our_basic_node_data: BasicNodeData,
|
||||||
|
|
||||||
|
state: HandshakeState,
|
||||||
|
|
||||||
|
/// Monero allows protocol messages to be sent before a handshake response, so we have to
|
||||||
|
/// keep track of them here. For saftey we only keep a Max of 2 messages.
|
||||||
|
eager_protocol_messages: Vec<monero_wire::ProtocolMessage>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Z: NetworkZone, AdrBook, CSync, ReqHdlr> HandshakeStateMachine<Z, AdrBook, CSync, ReqHdlr>
|
||||||
|
where
|
||||||
|
AdrBook: AddressBook<Z>,
|
||||||
|
CSync: CoreSyncSvc,
|
||||||
|
ReqHdlr: PeerRequestHandler,
|
||||||
|
{
|
||||||
|
async fn send_handshake_request(&mut self) -> Result<(), HandshakeError> {
|
||||||
|
let CoreSyncDataResponse::Ours(our_core_sync_data) = self
|
||||||
|
.core_sync_svc
|
||||||
|
.ready()
|
||||||
|
.await?
|
||||||
|
.call(CoreSyncDataRequest::Ours)
|
||||||
|
.await?
|
||||||
|
else {
|
||||||
|
panic!("core sync service returned wrong response!");
|
||||||
|
};
|
||||||
|
|
||||||
|
let req = HandshakeRequest {
|
||||||
|
node_data: self.our_basic_node_data.clone(),
|
||||||
|
payload_data: our_core_sync_data,
|
||||||
|
};
|
||||||
|
|
||||||
|
tracing::debug!("Sending handshake request.");
|
||||||
|
|
||||||
|
self.peer_sink
|
||||||
|
.send(Message::Request(RequestMessage::Handshake(req)))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn send_handshake_response(&mut self) -> Result<(), HandshakeError> {
|
||||||
|
let CoreSyncDataResponse::Ours(our_core_sync_data) = self
|
||||||
|
.core_sync_svc
|
||||||
|
.ready()
|
||||||
|
.await?
|
||||||
|
.call(CoreSyncDataRequest::Ours)
|
||||||
|
.await?
|
||||||
|
else {
|
||||||
|
panic!("core sync service returned wrong response!");
|
||||||
|
};
|
||||||
|
|
||||||
|
let AddressBookResponse::Peers(our_peer_list) = self
|
||||||
|
.address_book
|
||||||
|
.ready()
|
||||||
|
.await?
|
||||||
|
.call(AddressBookRequest::GetPeers(MAX_PEERS_IN_PEER_LIST_MESSAGE))
|
||||||
|
.await?
|
||||||
|
else {
|
||||||
|
panic!("Address book sent incorrect response");
|
||||||
|
};
|
||||||
|
|
||||||
|
let res = HandshakeResponse {
|
||||||
|
node_data: self.our_basic_node_data.clone(),
|
||||||
|
payload_data: our_core_sync_data,
|
||||||
|
local_peerlist_new: our_peer_list.into_iter().map(Into::into).collect(),
|
||||||
|
};
|
||||||
|
|
||||||
|
tracing::debug!("Sending handshake response.");
|
||||||
|
|
||||||
|
self.peer_sink
|
||||||
|
.send(Message::Response(ResponseMessage::Handshake(res)))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn send_support_flags(&mut self) -> Result<(), HandshakeError> {
|
||||||
|
let res = SupportFlagsResponse {
|
||||||
|
support_flags: self.our_basic_node_data.support_flags,
|
||||||
|
};
|
||||||
|
|
||||||
|
tracing::debug!("Sending support flag response.");
|
||||||
|
|
||||||
|
self.peer_sink
|
||||||
|
.send(Message::Response(ResponseMessage::SupportFlags(res)))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn check_request_support_flags(
|
||||||
|
&mut self,
|
||||||
|
support_flags: &PeerSupportFlags,
|
||||||
|
) -> Result<bool, HandshakeError> {
|
||||||
|
Ok(if support_flags.is_empty() {
|
||||||
|
tracing::debug!(
|
||||||
|
"Peer didn't send support flags or has no features, sending request to make sure."
|
||||||
|
);
|
||||||
|
self.peer_sink
|
||||||
|
.send(Message::Request(RequestMessage::SupportFlags))
|
||||||
|
.await?;
|
||||||
|
true
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_handshake_response(
|
||||||
|
&mut self,
|
||||||
|
response: HandshakeResponse,
|
||||||
|
) -> Result<(), HandshakeError> {
|
||||||
|
if response.local_peerlist_new.len() > MAX_PEERS_IN_PEER_LIST_MESSAGE {
|
||||||
|
tracing::debug!("peer sent too many peers in response, cancelling handshake");
|
||||||
|
|
||||||
|
return Err(HandshakeError::PeerSentInvalidMessage(
|
||||||
|
"Too many peers in peer list message (>250)",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.node_data.network_id != self.our_basic_node_data.network_id {
|
||||||
|
return Err(HandshakeError::IncorrectNetwork);
|
||||||
|
}
|
||||||
|
|
||||||
|
if Z::CHECK_NODE_ID && response.node_data.peer_id == self.our_basic_node_data.peer_id {
|
||||||
|
return Err(HandshakeError::PeerHasSameNodeID);
|
||||||
|
}
|
||||||
|
|
||||||
|
tracing::debug!(
|
||||||
|
"Telling address book about new peers, len: {}",
|
||||||
|
response.local_peerlist_new.len()
|
||||||
|
);
|
||||||
|
|
||||||
|
self.address_book
|
||||||
|
.ready()
|
||||||
|
.await?
|
||||||
|
.call(AddressBookRequest::IncomingPeerList(
|
||||||
|
response
|
||||||
|
.local_peerlist_new
|
||||||
|
.into_iter()
|
||||||
|
.map(TryInto::try_into)
|
||||||
|
.collect::<Result<_, _>>()?,
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
if self
|
||||||
|
.check_request_support_flags(&response.node_data.support_flags)
|
||||||
|
.await?
|
||||||
|
{
|
||||||
|
self.state = HandshakeState::WaitingForSupportFlagResponse(
|
||||||
|
response.node_data,
|
||||||
|
response.payload_data,
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
self.state = HandshakeState::Complete(response.node_data, response.payload_data);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_handshake_request(
|
||||||
|
&mut self,
|
||||||
|
request: HandshakeRequest,
|
||||||
|
) -> Result<(), HandshakeError> {
|
||||||
|
// We don't respond here as if we did the other peer could accept the handshake before responding to a
|
||||||
|
// support flag request which then means we could recive other requests while waiting for the support
|
||||||
|
// flags.
|
||||||
|
|
||||||
|
if request.node_data.network_id != self.our_basic_node_data.network_id {
|
||||||
|
return Err(HandshakeError::IncorrectNetwork);
|
||||||
|
}
|
||||||
|
|
||||||
|
if Z::CHECK_NODE_ID && request.node_data.peer_id == self.our_basic_node_data.peer_id {
|
||||||
|
return Err(HandshakeError::PeerHasSameNodeID);
|
||||||
|
}
|
||||||
|
|
||||||
|
if self
|
||||||
|
.check_request_support_flags(&request.node_data.support_flags)
|
||||||
|
.await?
|
||||||
|
{
|
||||||
|
self.state = HandshakeState::WaitingForSupportFlagResponse(
|
||||||
|
request.node_data,
|
||||||
|
request.payload_data,
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
self.state = HandshakeState::Complete(request.node_data, request.payload_data);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_incoming_message(&mut self, message: Message) -> Result<(), HandshakeError> {
|
||||||
|
tracing::debug!("Received message from peer: {}", message.command());
|
||||||
|
|
||||||
|
if let Message::Protocol(protocol_message) = message {
|
||||||
|
if self.eager_protocol_messages.len() == 2 {
|
||||||
|
tracing::debug!("Peer sent too many protocl messages before a handshake response.");
|
||||||
|
return Err(HandshakeError::PeerSentInvalidMessage(
|
||||||
|
"Peer sent too many protocol messages",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
tracing::debug!(
|
||||||
|
"Protocol message getting added to queue for when handshake is complete."
|
||||||
|
);
|
||||||
|
self.eager_protocol_messages.push(protocol_message);
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
match std::mem::replace(&mut self.state, HandshakeState::Invalid) {
|
||||||
|
HandshakeState::Start => match message {
|
||||||
|
Message::Request(RequestMessage::Ping) => {
|
||||||
|
// Set the state back to what it was before.
|
||||||
|
self.state = HandshakeState::Start;
|
||||||
|
Ok(self
|
||||||
|
.peer_sink
|
||||||
|
.send(Message::Response(ResponseMessage::Ping(PingResponse {
|
||||||
|
status: PING_OK_RESPONSE_STATUS_TEXT.to_string(),
|
||||||
|
peer_id: self.our_basic_node_data.peer_id,
|
||||||
|
})))
|
||||||
|
.await?)
|
||||||
|
}
|
||||||
|
Message::Request(RequestMessage::Handshake(handshake_req)) => {
|
||||||
|
self.handle_handshake_request(handshake_req).await
|
||||||
|
}
|
||||||
|
_ => Err(HandshakeError::PeerSentInvalidMessage(
|
||||||
|
"Peer didn't send handshake request.",
|
||||||
|
)),
|
||||||
|
},
|
||||||
|
HandshakeState::WaitingForHandshakeResponse => match message {
|
||||||
|
// TODO: only allow 1 support flag request.
|
||||||
|
Message::Request(RequestMessage::SupportFlags) => {
|
||||||
|
// Set the state back to what it was before.
|
||||||
|
self.state = HandshakeState::WaitingForHandshakeResponse;
|
||||||
|
self.send_support_flags().await
|
||||||
|
}
|
||||||
|
Message::Response(ResponseMessage::Handshake(res)) => {
|
||||||
|
self.handle_handshake_response(res).await
|
||||||
|
}
|
||||||
|
_ => Err(HandshakeError::PeerSentInvalidMessage(
|
||||||
|
"Peer didn't send handshake response.",
|
||||||
|
)),
|
||||||
|
},
|
||||||
|
HandshakeState::WaitingForSupportFlagResponse(mut peer_node_data, peer_core_sync) => {
|
||||||
|
let Message::Response(ResponseMessage::SupportFlags(support_flags)) = message
|
||||||
|
else {
|
||||||
|
return Err(HandshakeError::PeerSentInvalidMessage(
|
||||||
|
"Peer didn't send support flags response.",
|
||||||
|
));
|
||||||
|
};
|
||||||
|
peer_node_data.support_flags = support_flags.support_flags;
|
||||||
|
self.state = HandshakeState::Complete(peer_node_data, peer_core_sync);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
HandshakeState::Complete(..) => {
|
||||||
|
panic!("Handshake is complete messages should no longer be handled here!")
|
||||||
|
}
|
||||||
|
HandshakeState::Invalid => panic!("Handshake state machine stayed in invalid state!"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn advance_machine(&mut self) -> Result<(), HandshakeError> {
|
||||||
|
while !self.state.is_complete() {
|
||||||
|
tracing::debug!("Waiting for message from peer.");
|
||||||
|
|
||||||
|
match self.peer_stream.next().await {
|
||||||
|
Some(message) => self.handle_incoming_message(message?).await?,
|
||||||
|
None => Err(BucketError::IO(std::io::Error::new(
|
||||||
|
std::io::ErrorKind::ConnectionAborted,
|
||||||
|
"The peer stream returned None",
|
||||||
|
)))?,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn do_outbound_handshake(&mut self) -> Result<(), HandshakeError> {
|
||||||
|
self.send_handshake_request().await?;
|
||||||
|
self.state = HandshakeState::WaitingForHandshakeResponse;
|
||||||
|
|
||||||
|
self.advance_machine().await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn do_inbound_handshake(&mut self) -> Result<(), HandshakeError> {
|
||||||
|
self.advance_machine().await?;
|
||||||
|
|
||||||
|
debug_assert!(self.state.is_complete());
|
||||||
|
|
||||||
|
self.send_handshake_response().await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn do_handshake(mut self) -> Result<(), HandshakeError> {
|
||||||
|
tracing::debug!("Beginning handshake.");
|
||||||
|
|
||||||
|
match self.direction {
|
||||||
|
ConnectionDirection::OutBound => self.do_outbound_handshake().await?,
|
||||||
|
ConnectionDirection::InBound => self.do_inbound_handshake().await?,
|
||||||
|
}
|
||||||
|
|
||||||
|
let HandshakeState::Complete(peer_node_data, peer_core_sync) = self.state else {
|
||||||
|
panic!("Hanshake completed not in complete state!");
|
||||||
|
};
|
||||||
|
|
||||||
|
self.core_sync_svc
|
||||||
|
.ready()
|
||||||
|
.await?
|
||||||
|
.call(CoreSyncDataRequest::HandleIncoming(peer_core_sync))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
tracing::debug!("Handshake complete.");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
p2p/monero-peer/src/error.rs (new file, 15 lines)
@@ -0,0 +1,15 @@
#[derive(Debug, thiserror::Error)]
pub enum PeerError {
    #[error("The connection tasks client channel was closed")]
    ClientChannelClosed,
    #[error("error with peer response: {0}")]
    ResponseError(&'static str),
    #[error("the peer sent an incorrect response to our request")]
    PeerSentIncorrectResponse,
    #[error("bucket error")]
    BucketError(#[from] monero_wire::BucketError),
    #[error("handshake error: {0}")]
    Handshake(#[from] crate::client::HandshakeError),
    #[error("i/o error: {0}")]
    IO(#[from] std::io::Error),
}
157
p2p/monero-peer/src/lib.rs
Normal file
157
p2p/monero-peer/src/lib.rs
Normal file
|
@ -0,0 +1,157 @@
|
||||||
|
#![allow(unused)]
|
||||||
|
|
||||||
|
use std::{future::Future, pin::Pin};
|
||||||
|
|
||||||
|
use futures::{Sink, Stream};
|
||||||
|
|
||||||
|
use monero_wire::{
|
||||||
|
network_address::NetworkAddressIncorrectZone, BucketError, Message, NetworkAddress,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub mod client;
|
||||||
|
pub mod error;
|
||||||
|
pub mod network_zones;
|
||||||
|
pub mod protocol;
|
||||||
|
pub mod services;
|
||||||
|
|
||||||
|
pub use error::*;
|
||||||
|
pub use protocol::*;
|
||||||
|
use services::*;
|
||||||
|
|
||||||
|
const MAX_PEERS_IN_PEER_LIST_MESSAGE: usize = 250;
|
||||||
|
|
||||||
|
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
|
||||||
|
pub enum ConnectionDirection {
|
||||||
|
InBound,
|
||||||
|
OutBound,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An abstraction over a network zone (tor/i2p/clear)
|
||||||
|
#[async_trait::async_trait]
|
||||||
|
pub trait NetworkZone: Clone + Send + 'static {
|
||||||
|
/// Allow syncing over this network.
|
||||||
|
///
|
||||||
|
/// Not recommended for anonymity networks.
|
||||||
|
const ALLOW_SYNC: bool;
|
||||||
|
/// Enable dandelion++ for this network.
|
||||||
|
///
|
||||||
|
/// This is unneeded on anonymity networks.
|
||||||
|
const DANDELION_PP: bool;
|
||||||
|
/// Check if our node ID matches the incoming peers node ID for this network.
|
||||||
|
///
|
||||||
|
/// This has privacy implications on an anonymity network if true so should be set
|
||||||
|
/// to false.
|
||||||
|
const CHECK_NODE_ID: bool;
|
||||||
|
|
||||||
|
/// The address type of this network.
|
||||||
|
type Addr: TryFrom<NetworkAddress, Error = NetworkAddressIncorrectZone>
|
||||||
|
+ Into<NetworkAddress>
|
||||||
|
+ std::fmt::Display
|
||||||
|
+ Clone
|
||||||
|
+ Send
|
||||||
|
+ 'static;
|
||||||
|
/// The stream (incoming data) type for this network.
|
||||||
|
type Stream: Stream<Item = Result<Message, BucketError>> + Unpin + Send + 'static;
|
||||||
|
/// The sink (outgoing data) type for this network.
|
||||||
|
type Sink: Sink<Message, Error = BucketError> + Unpin + Send + 'static;
|
||||||
|
/// Config used to start a server which listens for incoming connections.
|
||||||
|
type ServerCfg;
|
||||||
|
|
||||||
|
async fn connect_to_peer(
|
||||||
|
addr: Self::Addr,
|
||||||
|
) -> Result<(Self::Stream, Self::Sink), std::io::Error>;
|
||||||
|
|
||||||
|
async fn incoming_connection_listener(config: Self::ServerCfg) -> ();
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) trait AddressBook<Z: NetworkZone>:
|
||||||
|
tower::Service<
|
||||||
|
AddressBookRequest<Z>,
|
||||||
|
Response = AddressBookResponse<Z>,
|
||||||
|
Error = tower::BoxError,
|
||||||
|
Future = Pin<
|
||||||
|
Box<
|
||||||
|
dyn Future<Output = Result<AddressBookResponse<Z>, tower::BoxError>>
|
||||||
|
+ Send
|
||||||
|
+ 'static,
|
||||||
|
>,
|
||||||
|
>,
|
||||||
|
> + Send
|
||||||
|
+ 'static
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T, Z: NetworkZone> AddressBook<Z> for T where
|
||||||
|
T: tower::Service<
|
||||||
|
AddressBookRequest<Z>,
|
||||||
|
Response = AddressBookResponse<Z>,
|
||||||
|
Error = tower::BoxError,
|
||||||
|
Future = Pin<
|
||||||
|
Box<
|
||||||
|
dyn Future<Output = Result<AddressBookResponse<Z>, tower::BoxError>>
|
||||||
|
+ Send
|
||||||
|
+ 'static,
|
||||||
|
>,
|
||||||
|
>,
|
||||||
|
> + Send
|
||||||
|
+ 'static
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) trait CoreSyncSvc:
|
||||||
|
tower::Service<
|
||||||
|
CoreSyncDataRequest,
|
||||||
|
Response = CoreSyncDataResponse,
|
||||||
|
Error = tower::BoxError,
|
||||||
|
Future = Pin<
|
||||||
|
Box<
|
||||||
|
dyn Future<Output = Result<CoreSyncDataResponse, tower::BoxError>> + Send + 'static,
|
||||||
|
>,
|
||||||
|
>,
|
||||||
|
> + Send
|
||||||
|
+ 'static
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> CoreSyncSvc for T where
|
||||||
|
T: tower::Service<
|
||||||
|
CoreSyncDataRequest,
|
||||||
|
Response = CoreSyncDataResponse,
|
||||||
|
Error = tower::BoxError,
|
||||||
|
Future = Pin<
|
||||||
|
Box<
|
||||||
|
dyn Future<Output = Result<CoreSyncDataResponse, tower::BoxError>>
|
||||||
|
+ Send
|
||||||
|
+ 'static,
|
||||||
|
>,
|
||||||
|
>,
|
||||||
|
> + Send
|
||||||
|
+ 'static
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) trait PeerRequestHandler:
|
||||||
|
tower::Service<
|
||||||
|
PeerRequest,
|
||||||
|
Response = PeerResponse,
|
||||||
|
Error = tower::BoxError,
|
||||||
|
Future = Pin<
|
||||||
|
Box<dyn Future<Output = Result<PeerResponse, tower::BoxError>> + Send + 'static>,
|
||||||
|
>,
|
||||||
|
> + Send
|
||||||
|
+ 'static
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> PeerRequestHandler for T where
|
||||||
|
T: tower::Service<
|
||||||
|
PeerRequest,
|
||||||
|
Response = PeerResponse,
|
||||||
|
Error = tower::BoxError,
|
||||||
|
Future = Pin<
|
||||||
|
Box<dyn Future<Output = Result<PeerResponse, tower::BoxError>> + Send + 'static>,
|
||||||
|
>,
|
||||||
|
> + Send
|
||||||
|
+ 'static
|
||||||
|
{
|
||||||
|
}
|
p2p/monero-peer/src/network_zones.rs (new file, 3 lines)
@@ -0,0 +1,3 @@
mod clear;

pub use clear::{ClearNet, ClearNetServerCfg};

p2p/monero-peer/src/network_zones/clear.rs (new file, 43 lines)
@@ -0,0 +1,43 @@
use std::net::SocketAddr;

use monero_wire::MoneroWireCodec;

use tokio::net::{
    tcp::{OwnedReadHalf, OwnedWriteHalf},
    TcpStream,
};
use tokio_util::codec::{FramedRead, FramedWrite};

use crate::NetworkZone;

#[derive(Clone)]
pub struct ClearNet;

pub struct ClearNetServerCfg {}

#[async_trait::async_trait]
impl NetworkZone for ClearNet {
    const ALLOW_SYNC: bool = true;
    const DANDELION_PP: bool = true;
    const CHECK_NODE_ID: bool = true;

    type Addr = SocketAddr;
    type Stream = FramedRead<OwnedReadHalf, MoneroWireCodec>;
    type Sink = FramedWrite<OwnedWriteHalf, MoneroWireCodec>;

    type ServerCfg = ();

    async fn connect_to_peer(
        addr: Self::Addr,
    ) -> Result<(Self::Stream, Self::Sink), std::io::Error> {
        let (read, write) = TcpStream::connect(addr).await?.into_split();
        Ok((
            FramedRead::new(read, MoneroWireCodec::default()),
            FramedWrite::new(write, MoneroWireCodec::default()),
        ))
    }

    async fn incoming_connection_listener(config: Self::ServerCfg) -> () {
        todo!()
    }
}
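A usage sketch (not part of the diff): opening a clear-net connection through the `NetworkZone` impl above and pushing one message through the framed sink. `ping_peer` is a hypothetical helper and the error boxing is only for brevity.

use std::net::SocketAddr;

use futures::SinkExt;
use monero_wire::{Message, RequestMessage};

use crate::{network_zones::ClearNet, NetworkZone};

// Hypothetical helper: connect, then send a levin `Ping` request.
async fn ping_peer(addr: SocketAddr) -> Result<(), Box<dyn std::error::Error>> {
    // `connect_to_peer` splits the TCP stream into a framed read half and write half.
    let (_peer_stream, mut peer_sink) = ClearNet::connect_to_peer(addr).await?;
    peer_sink.send(Message::Request(RequestMessage::Ping)).await?;
    Ok(())
}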
p2p/monero-peer/src/protocol.rs (new file, 130 lines)
@@ -0,0 +1,130 @@
/// This module defines InternalRequests and InternalResponses. Cuprate's P2P works by translating network messages into an internal
/// request/ response, this is easy for levin "requests" and "responses" (admin messages) but takes a bit more work with "notifications"
/// (protocol messages).
///
/// Some notifications are easy to translate, like `GetObjectsRequest` is obviously a request but others like `NewFluffyBlock` are a
/// bit tricker. To translate a `NewFluffyBlock` into a request/ response we will have to look to see if we asked for `FluffyMissingTransactionsRequest`
/// if we have we interpret `NewFluffyBlock` as a response if not its a request that doesn't require a response.
///
/// Here is every P2P request/ response. *note admin messages are already request/ response so "Handshake" is actually made of a HandshakeRequest & HandshakeResponse
///
/// Admin:
///     Handshake,
///     TimedSync,
///     Ping,
///     SupportFlags
/// Protocol:
///     Request: GetObjectsRequest,                Response: GetObjectsResponse,
///     Request: ChainRequest,                     Response: ChainResponse,
///     Request: FluffyMissingTransactionsRequest, Response: NewFluffyBlock,  <- these 2 could be requests or responses
///     Request: GetTxPoolCompliment,              Response: NewTransactions, <-
///     Request: NewBlock,                         Response: None,
///     Request: NewFluffyBlock,                   Response: None,
///     Request: NewTransactions,                  Response: None
///
///
use monero_wire::{
    admin::{
        HandshakeRequest, HandshakeResponse, PingResponse, SupportFlagsResponse, TimedSyncRequest,
        TimedSyncResponse,
    },
    protocol::{
        ChainRequest, ChainResponse, FluffyMissingTransactionsRequest, GetObjectsRequest,
        GetObjectsResponse, GetTxPoolCompliment, NewBlock, NewFluffyBlock, NewTransactions,
    },
};

mod try_from;

/// An enum representing a request/ response combination, so a handshake request
/// and response would have the same [`MessageID`]. This allows associating the
/// correct response to a request.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum MessageID {
    Handshake,
    TimedSync,
    Ping,
    SupportFlags,

    GetObjects,
    GetChain,
    FluffyMissingTxs,
    GetTxPoolCompliment,
    NewBlock,
    NewFluffyBlock,
    NewTransactions,
}

pub enum PeerRequest {
    Handshake(HandshakeRequest),
    TimedSync(TimedSyncRequest),
    Ping,
    SupportFlags,

    GetObjects(GetObjectsRequest),
    GetChain(ChainRequest),
    FluffyMissingTxs(FluffyMissingTransactionsRequest),
    GetTxPoolCompliment(GetTxPoolCompliment),
    NewBlock(NewBlock),
    NewFluffyBlock(NewFluffyBlock),
    NewTransactions(NewTransactions),
}

impl PeerRequest {
    pub fn id(&self) -> MessageID {
        match self {
            PeerRequest::Handshake(_) => MessageID::Handshake,
            PeerRequest::TimedSync(_) => MessageID::TimedSync,
            PeerRequest::Ping => MessageID::Ping,
            PeerRequest::SupportFlags => MessageID::SupportFlags,

            PeerRequest::GetObjects(_) => MessageID::GetObjects,
            PeerRequest::GetChain(_) => MessageID::GetChain,
            PeerRequest::FluffyMissingTxs(_) => MessageID::FluffyMissingTxs,
            PeerRequest::GetTxPoolCompliment(_) => MessageID::GetTxPoolCompliment,
            PeerRequest::NewBlock(_) => MessageID::NewBlock,
            PeerRequest::NewFluffyBlock(_) => MessageID::NewFluffyBlock,
            PeerRequest::NewTransactions(_) => MessageID::NewTransactions,
        }
    }

    pub fn needs_response(&self) -> bool {
        !matches!(
            self,
            PeerRequest::NewBlock(_)
                | PeerRequest::NewFluffyBlock(_)
                | PeerRequest::NewTransactions(_)
        )
    }
}

pub enum PeerResponse {
    Handshake(HandshakeResponse),
    TimedSync(TimedSyncResponse),
    Ping(PingResponse),
    SupportFlags(SupportFlagsResponse),

    GetObjects(GetObjectsResponse),
    GetChain(ChainResponse),
    NewFluffyBlock(NewFluffyBlock),
    NewTransactions(NewTransactions),
    NA,
}

impl PeerResponse {
    pub fn id(&self) -> MessageID {
        match self {
            PeerResponse::Handshake(_) => MessageID::Handshake,
            PeerResponse::TimedSync(_) => MessageID::TimedSync,
            PeerResponse::Ping(_) => MessageID::Ping,
            PeerResponse::SupportFlags(_) => MessageID::SupportFlags,

            PeerResponse::GetObjects(_) => MessageID::GetObjects,
            PeerResponse::GetChain(_) => MessageID::GetChain,
            PeerResponse::NewFluffyBlock(_) => MessageID::NewBlock,
            PeerResponse::NewTransactions(_) => MessageID::NewFluffyBlock,

            PeerResponse::NA => panic!("Can't get message ID for a non existent response"),
        }
    }
}
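A one-line sketch (not in the diff) of the rule the module docs above describe for the ambiguous notifications: `NewFluffyBlock` only counts as a response when a `FluffyMissingTxs` request is outstanding. `fluffy_block_is_response` is a hypothetical helper.

use crate::MessageID;

// Hypothetical helper mirroring the doc comment above: treat an incoming
// `NewFluffyBlock` as a response only if we are waiting on `FluffyMissingTxs`.
fn fluffy_block_is_response(waiting_on: Option<MessageID>) -> bool {
    matches!(waiting_on, Some(MessageID::FluffyMissingTxs))
}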
179
p2p/monero-peer/src/protocol/try_from.rs
Normal file
179
p2p/monero-peer/src/protocol/try_from.rs
Normal file
|
@ -0,0 +1,179 @@
|
||||||
|
//! This module contains the implementations of [`TryFrom`] and [`From`] to convert between
|
||||||
|
//! [`Message`], [`PeerRequest`] and [`PeerResponse`].
|
||||||
|
|
||||||
|
use monero_wire::{Message, ProtocolMessage, RequestMessage, ResponseMessage};
|
||||||
|
|
||||||
|
use super::{PeerRequest, PeerResponse};
|
||||||
|
|
||||||
|
pub struct MessageConversionError;
|
||||||
|
|
||||||
|
macro_rules! match_body {
|
||||||
|
(match $value: ident {$($body:tt)*} ($left:pat => $right_ty:expr) $($todo:tt)*) => {
|
||||||
|
match_body!( match $value {
|
||||||
|
$left => $right_ty,
|
||||||
|
$($body)*
|
||||||
|
} $($todo)* )
|
||||||
|
};
|
||||||
|
(match $value: ident {$($body:tt)*}) => {
|
||||||
|
match $value {
|
||||||
|
$($body)*
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! from {
|
||||||
|
($left_ty:ident, $right_ty:ident, {$($left:ident $(($val: ident))? = $right:ident $(($vall: ident))?,)+}) => {
|
||||||
|
impl From<$left_ty> for $right_ty {
|
||||||
|
fn from(value: $left_ty) -> Self {
|
||||||
|
match_body!( match value {}
|
||||||
|
$(($left_ty::$left$(($val))? => $right_ty::$right$(($vall))?))+
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! try_from {
|
||||||
|
($left_ty:ident, $right_ty:ident, {$($left:ident $(($val: ident))? = $right:ident $(($vall: ident))?,)+}) => {
|
||||||
|
impl TryFrom<$left_ty> for $right_ty {
|
||||||
|
type Error = MessageConversionError;
|
||||||
|
|
||||||
|
fn try_from(value: $left_ty) -> Result<Self, Self::Error> {
|
||||||
|
Ok(match_body!( match value {
|
||||||
|
_ => return Err(MessageConversionError)
|
||||||
|
}
|
||||||
|
$(($left_ty::$left$(($val))? => $right_ty::$right$(($vall))?))+
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! from_try_from {
|
||||||
|
($left_ty:ident, $right_ty:ident, {$($left:ident $(($val: ident))? = $right:ident $(($vall: ident))?,)+}) => {
|
||||||
|
try_from!($left_ty, $right_ty, {$($left $(($val))? = $right $(($vall))?,)+});
|
||||||
|
from!($right_ty, $left_ty, {$($right $(($val))? = $left $(($vall))?,)+});
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! try_from_try_from {
|
||||||
|
($left_ty:ident, $right_ty:ident, {$($left:ident $(($val: ident))? = $right:ident $(($vall: ident))?,)+}) => {
|
||||||
|
try_from!($left_ty, $right_ty, {$($left $(($val))? = $right $(($vall))?,)+});
|
||||||
|
try_from!($right_ty, $left_ty, {$($right $(($val))? = $left $(($val))?,)+});
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
from_try_from!(PeerRequest, RequestMessage,{
|
||||||
|
Handshake(val) = Handshake(val),
|
||||||
|
Ping = Ping,
|
||||||
|
SupportFlags = SupportFlags,
|
||||||
|
TimedSync(val) = TimedSync(val),
|
||||||
|
});
|
||||||
|
|
||||||
|
try_from_try_from!(PeerRequest, ProtocolMessage,{
|
||||||
|
NewBlock(val) = NewBlock(val),
|
||||||
|
NewFluffyBlock(val) = NewFluffyBlock(val),
|
||||||
|
GetObjects(val) = GetObjectsRequest(val),
|
||||||
|
GetChain(val) = ChainRequest(val),
|
||||||
|
NewTransactions(val) = NewTransactions(val),
|
||||||
|
FluffyMissingTxs(val) = FluffyMissingTransactionsRequest(val),
|
||||||
|
GetTxPoolCompliment(val) = GetTxPoolCompliment(val),
|
||||||
|
});
|
||||||
|
|
||||||
|
impl TryFrom<Message> for PeerRequest {
|
||||||
|
type Error = MessageConversionError;
|
||||||
|
|
||||||
|
fn try_from(value: Message) -> Result<Self, Self::Error> {
|
||||||
|
match value {
|
||||||
|
Message::Request(req) => Ok(req.into()),
|
||||||
|
Message::Protocol(pro) => pro.try_into(),
|
||||||
|
_ => Err(MessageConversionError),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<PeerRequest> for Message {
|
||||||
|
fn from(value: PeerRequest) -> Self {
|
||||||
|
match value {
|
||||||
|
PeerRequest::Handshake(val) => Message::Request(RequestMessage::Handshake(val)),
|
||||||
|
PeerRequest::Ping => Message::Request(RequestMessage::Ping),
|
||||||
|
PeerRequest::SupportFlags => Message::Request(RequestMessage::SupportFlags),
|
||||||
|
PeerRequest::TimedSync(val) => Message::Request(RequestMessage::TimedSync(val)),
|
||||||
|
|
||||||
|
PeerRequest::NewBlock(val) => Message::Protocol(ProtocolMessage::NewBlock(val)),
|
||||||
|
PeerRequest::NewFluffyBlock(val) => {
|
||||||
|
Message::Protocol(ProtocolMessage::NewFluffyBlock(val))
|
||||||
|
}
|
||||||
|
PeerRequest::GetObjects(val) => {
|
||||||
|
Message::Protocol(ProtocolMessage::GetObjectsRequest(val))
|
||||||
|
}
|
||||||
|
PeerRequest::GetChain(val) => Message::Protocol(ProtocolMessage::ChainRequest(val)),
|
||||||
|
PeerRequest::NewTransactions(val) => {
|
||||||
|
Message::Protocol(ProtocolMessage::NewTransactions(val))
|
||||||
|
}
|
||||||
|
PeerRequest::FluffyMissingTxs(val) => {
|
||||||
|
Message::Protocol(ProtocolMessage::FluffyMissingTransactionsRequest(val))
|
||||||
|
}
|
||||||
|
PeerRequest::GetTxPoolCompliment(val) => {
|
||||||
|
Message::Protocol(ProtocolMessage::GetTxPoolCompliment(val))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
from_try_from!(PeerResponse, ResponseMessage,{
|
||||||
|
Handshake(val) = Handshake(val),
|
||||||
|
Ping(val) = Ping(val),
|
||||||
|
SupportFlags(val) = SupportFlags(val),
|
||||||
|
TimedSync(val) = TimedSync(val),
|
||||||
|
});
|
||||||
|
|
||||||
|
try_from_try_from!(PeerResponse, ProtocolMessage,{
|
||||||
|
NewFluffyBlock(val) = NewFluffyBlock(val),
|
||||||
|
GetObjects(val) = GetObjectsResponse(val),
|
||||||
|
GetChain(val) = ChainEntryResponse(val),
|
||||||
|
NewTransactions(val) = NewTransactions(val),
|
||||||
|
|
||||||
|
});
|
||||||
|
|
||||||
|
impl TryFrom<Message> for PeerResponse {
|
||||||
|
type Error = MessageConversionError;
|
||||||
|
|
||||||
|
fn try_from(value: Message) -> Result<Self, Self::Error> {
|
||||||
|
match value {
|
||||||
|
Message::Response(res) => Ok(res.into()),
|
||||||
|
Message::Protocol(pro) => pro.try_into(),
|
||||||
|
_ => Err(MessageConversionError),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TryFrom<PeerResponse> for Message {
|
||||||
|
type Error = MessageConversionError;
|
||||||
|
|
||||||
|
fn try_from(value: PeerResponse) -> Result<Self, Self::Error> {
|
||||||
|
Ok(match value {
|
||||||
|
PeerResponse::Handshake(val) => Message::Response(ResponseMessage::Handshake(val)),
|
||||||
|
PeerResponse::Ping(val) => Message::Response(ResponseMessage::Ping(val)),
|
||||||
|
PeerResponse::SupportFlags(val) => {
|
||||||
|
Message::Response(ResponseMessage::SupportFlags(val))
|
||||||
|
}
|
||||||
|
PeerResponse::TimedSync(val) => Message::Response(ResponseMessage::TimedSync(val)),
|
||||||
|
|
||||||
|
PeerResponse::NewFluffyBlock(val) => {
|
||||||
|
Message::Protocol(ProtocolMessage::NewFluffyBlock(val))
|
||||||
|
}
|
||||||
|
PeerResponse::GetObjects(val) => {
|
||||||
|
Message::Protocol(ProtocolMessage::GetObjectsResponse(val))
|
||||||
|
}
|
||||||
|
PeerResponse::GetChain(val) => {
|
||||||
|
Message::Protocol(ProtocolMessage::ChainEntryResponse(val))
|
||||||
|
}
|
||||||
|
PeerResponse::NewTransactions(val) => {
|
||||||
|
Message::Protocol(ProtocolMessage::NewTransactions(val))
|
||||||
|
}
|
||||||
|
|
||||||
|
PeerResponse::NA => return Err(MessageConversionError),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
p2p/monero-peer/src/services.rs (new file, 61 lines)
@@ -0,0 +1,61 @@
use monero_wire::PeerListEntryBase;

use crate::{NetworkAddressIncorrectZone, NetworkZone};

pub enum CoreSyncDataRequest {
    Ours,
    HandleIncoming(monero_wire::CoreSyncData),
}

pub enum CoreSyncDataResponse {
    Ours(monero_wire::CoreSyncData),
    Ok,
}

pub struct ZoneSpecificPeerListEntryBase<Z: NetworkZone> {
    pub adr: Z::Addr,
    pub id: u64,
    pub last_seen: i64,
    pub pruning_seed: u32,
    pub rpc_port: u16,
    pub rpc_credits_per_hash: u32,
}

impl<Z: NetworkZone> From<ZoneSpecificPeerListEntryBase<Z>> for monero_wire::PeerListEntryBase {
    fn from(value: ZoneSpecificPeerListEntryBase<Z>) -> Self {
        Self {
            adr: value.adr.into(),
            id: value.id,
            last_seen: value.last_seen,
            pruning_seed: value.pruning_seed,
            rpc_port: value.rpc_port,
            rpc_credits_per_hash: value.rpc_credits_per_hash,
        }
    }
}

impl<Z: NetworkZone> TryFrom<monero_wire::PeerListEntryBase> for ZoneSpecificPeerListEntryBase<Z> {
    type Error = NetworkAddressIncorrectZone;

    fn try_from(value: PeerListEntryBase) -> Result<Self, Self::Error> {
        Ok(Self {
            adr: value.adr.try_into()?,
            id: value.id,
            last_seen: value.last_seen,
            pruning_seed: value.pruning_seed,
            rpc_port: value.rpc_port,
            rpc_credits_per_hash: value.rpc_credits_per_hash,
        })
    }
}

pub enum AddressBookRequest<Z: NetworkZone> {
    NewConnection(Z::Addr, ZoneSpecificPeerListEntryBase<Z>),
    IncomingPeerList(Vec<ZoneSpecificPeerListEntryBase<Z>>),
    GetPeers(usize),
}

pub enum AddressBookResponse<Z: NetworkZone> {
    Ok,
    Peers(Vec<ZoneSpecificPeerListEntryBase<Z>>),
}
p2p/monero-peer/tests/handshake.rs (new file, 125 lines)

@@ -0,0 +1,125 @@
use std::{net::SocketAddr, str::FromStr};

use futures::{channel::mpsc, StreamExt};
use tower::{Service, ServiceExt};

use cuprate_common::Network;
use monero_wire::{common::PeerSupportFlags, BasicNodeData};

use monero_peer::{
    client::{ConnectRequest, Connector, DoHandshakeRequest, HandShaker},
    network_zones::ClearNet,
    ConnectionDirection,
};

use cuprate_test_utils::test_netzone::{TestNetZone, TestNetZoneAddr};

mod utils;
use utils::*;

#[tokio::test]
async fn handshake_cuprate_to_cuprate() {
    // Tests a Cuprate <-> Cuprate handshake by making 2 handshake services and making them talk to
    // each other.

    let our_basic_node_data_1 = BasicNodeData {
        my_port: 0,
        network_id: Network::Mainnet.network_id(),
        peer_id: 87980,
        // TODO: This fails if the support flags are empty (0)
        support_flags: PeerSupportFlags::from(1_u32),
        rpc_port: 0,
        rpc_credits_per_hash: 0,
    };
    // make sure both node IDs are different
    let mut our_basic_node_data_2 = our_basic_node_data_1.clone();
    our_basic_node_data_2.peer_id = 2344;

    let mut handshaker_1 = HandShaker::<TestNetZone<true, true, true>, _, _, _>::new(
        DummyAddressBook,
        DummyCoreSyncSvc,
        DummyPeerRequestHandlerSvc,
        our_basic_node_data_1,
    );

    let mut handshaker_2 = HandShaker::<TestNetZone<true, true, true>, _, _, _>::new(
        DummyAddressBook,
        DummyCoreSyncSvc,
        DummyPeerRequestHandlerSvc,
        our_basic_node_data_2,
    );

    let (p1_sender, p2_receiver) = mpsc::channel(5);
    let (p2_sender, p1_receiver) = mpsc::channel(5);

    let p1_handshake_req = DoHandshakeRequest {
        addr: TestNetZoneAddr(888),
        peer_stream: p2_receiver.map(Ok).boxed(),
        peer_sink: p2_sender.into(),
        direction: ConnectionDirection::OutBound,
    };

    let p2_handshake_req = DoHandshakeRequest {
        addr: TestNetZoneAddr(444),
        peer_stream: p1_receiver.boxed().map(Ok).boxed(),
        peer_sink: p1_sender.into(),
        direction: ConnectionDirection::InBound,
    };

    let p1 = tokio::spawn(async move {
        handshaker_1
            .ready()
            .await
            .unwrap()
            .call(p1_handshake_req)
            .await
            .unwrap()
    });

    let p2 = tokio::spawn(async move {
        handshaker_2
            .ready()
            .await
            .unwrap()
            .call(p2_handshake_req)
            .await
            .unwrap()
    });

    let (res1, res2) = futures::join!(p1, p2);
    res1.unwrap();
    res2.unwrap();
}

#[tokio::test]
async fn handshake() {
    let addr = "127.0.0.1:18080";

    let our_basic_node_data = BasicNodeData {
        my_port: 0,
        network_id: Network::Mainnet.network_id(),
        peer_id: 87980,
        support_flags: PeerSupportFlags::from(1_u32),
        rpc_port: 0,
        rpc_credits_per_hash: 0,
    };

    let handshaker = HandShaker::<ClearNet, _, _, _>::new(
        DummyAddressBook,
        DummyCoreSyncSvc,
        DummyPeerRequestHandlerSvc,
        our_basic_node_data,
    );

    let mut connector = Connector::new(handshaker);

    connector
        .ready()
        .await
        .unwrap()
        .call(ConnectRequest {
            addr: SocketAddr::from_str(addr).unwrap(),
        })
        .await
        .unwrap();
}
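Both tests lean on tower's readiness contract: a service must report ready via `poll_ready` before `call` is invoked, which is what the `.ready().await.unwrap().call(..)` chains above do. Here is a minimal, self-contained sketch of that pattern with a toy `service_fn` (not Cuprate code; it assumes the `tower` crate with the `util` feature and `tokio`):

use tower::{service_fn, Service, ServiceExt};

#[tokio::main]
async fn main() -> Result<(), tower::BoxError> {
    // a toy service that doubles a number
    let mut svc = service_fn(|x: u32| async move { Ok::<_, tower::BoxError>(x * 2) });

    // `ready()` drives `poll_ready`; only afterwards may `call` be used
    let doubled = svc.ready().await?.call(21).await?;
    assert_eq!(doubled, 42);
    Ok(())
}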
p2p/monero-peer/tests/utils.rs (new file, 95 lines)

@@ -0,0 +1,95 @@
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

use futures::FutureExt;
use tower::Service;

use monero_peer::{
    services::{
        AddressBookRequest, AddressBookResponse, CoreSyncDataRequest, CoreSyncDataResponse,
    },
    NetworkZone, PeerRequest, PeerResponse,
};

#[derive(Clone)]
pub struct DummyAddressBook;

impl<Z: NetworkZone> Service<AddressBookRequest<Z>> for DummyAddressBook {
    type Response = AddressBookResponse<Z>;
    type Error = tower::BoxError;
    type Future =
        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: AddressBookRequest<Z>) -> Self::Future {
        async move {
            Ok(match req {
                AddressBookRequest::GetPeers(_) => AddressBookResponse::Peers(vec![]),
                _ => AddressBookResponse::Ok,
            })
        }
        .boxed()
    }
}

#[derive(Clone)]
pub struct DummyCoreSyncSvc;

impl Service<CoreSyncDataRequest> for DummyCoreSyncSvc {
    type Response = CoreSyncDataResponse;
    type Error = tower::BoxError;
    type Future =
        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: CoreSyncDataRequest) -> Self::Future {
        async move {
            match req {
                CoreSyncDataRequest::Ours => {
                    Ok(CoreSyncDataResponse::Ours(monero_wire::CoreSyncData {
                        cumulative_difficulty: 1,
                        cumulative_difficulty_top64: 0,
                        current_height: 1,
                        pruning_seed: 0,
                        top_id: hex::decode(
                            "418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3",
                        )
                        .unwrap()
                        .try_into()
                        .unwrap(),
                        top_version: 1,
                    }))
                }
                CoreSyncDataRequest::HandleIncoming(_) => Ok(CoreSyncDataResponse::Ok),
            }
        }
        .boxed()
    }
}

#[derive(Clone)]
pub struct DummyPeerRequestHandlerSvc;

impl Service<PeerRequest> for DummyPeerRequestHandlerSvc {
    type Response = PeerResponse;
    type Error = tower::BoxError;
    type Future =
        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        todo!()
    }

    fn call(&mut self, req: PeerRequest) -> Self::Future {
        todo!()
    }
}
@@ -1,120 +1,157 @@
+//! Cuprate Address Book
+//!
+//! This module holds the logic for persistent peer storage.
+//! Cuprates address book is modeled as a [`tower::Service`]
+//! The request is [`AddressBookRequest`] and the response is
+//! [`AddressBookResponse`].
+//!
+//! Cuprate, like monerod, actually has 3 address books, one
+//! for each [`NetZone`]. This is to reduce the possibility of
+//! clear net peers getting linked to their dark counterparts
+//! and so peers will only get told about peers they can
+//! connect to.
+//!

 mod addr_book_client;
-pub(crate) mod address_book;
+mod address_book;
+pub mod connection_handle;

+use cuprate_common::PruningSeed;
+use monero_wire::{messages::PeerListEntryBase, network_address::NetZone, NetworkAddress, PeerID};

+use connection_handle::ConnectionAddressBookHandle;

 pub use addr_book_client::start_address_book;

-use monero_wire::{messages::PeerListEntryBase, network_address::NetZone, NetworkAddress};
-
-const MAX_WHITE_LIST_PEERS: usize = 1000;
-const MAX_GRAY_LIST_PEERS: usize = 5000;
-
+/// Possible errors when dealing with the address book.
+/// This is boxed when returning an error in the [`tower::Service`].
 #[derive(Debug, thiserror::Error)]
 pub enum AddressBookError {
+    /// The peer is not in the address book for this zone.
     #[error("Peer was not found in book")]
     PeerNotFound,
+    /// The peer list is empty.
     #[error("The peer list is empty")]
     PeerListEmpty,
+    /// The peers pruning seed has changed.
+    #[error("The peers pruning seed has changed")]
+    PeersPruningSeedChanged,
+    /// The peer is banned.
+    #[error("The peer is banned")]
+    PeerIsBanned,
+    /// When handling a received peer list, the list contains
+    /// a peer in a different [`NetZone`]
     #[error("Peer sent an address out of it's net-zone")]
     PeerSentAnAddressOutOfZone,
+    /// The channel to the address book has closed unexpectedly.
     #[error("The address books channel has closed.")]
     AddressBooksChannelClosed,
+    /// The address book task has exited.
+    #[error("The address book task has exited.")]
+    AddressBookTaskExited,
+    /// The peer file store has failed.
     #[error("Peer Store Error: {0}")]
     PeerStoreError(&'static str),
 }

+/// A message sent to tell the address book that a peer has disconnected.
+pub struct PeerConnectionClosed;

+/// A request to the address book.
 #[derive(Debug)]
 pub enum AddressBookRequest {
+    /// A request to handle an incoming peer list.
     HandleNewPeerList(Vec<PeerListEntryBase>, NetZone),
-    SetPeerSeen(NetworkAddress, i64),
-    BanPeer(NetworkAddress, chrono::NaiveDateTime),
-    AddPeerToAnchor(NetworkAddress),
-    RemovePeerFromAnchor(NetworkAddress),
-    UpdatePeerInfo(PeerListEntryBase),
-
-    GetRandomGrayPeer(NetZone),
-    GetRandomWhitePeer(NetZone),
+    /// Updates the `last_seen` timestamp of this peer.
+    SetPeerSeen(PeerID, chrono::NaiveDateTime, NetZone),
+    /// Bans a peer for the specified duration. This request
+    /// will send disconnect signals to all peers with the same
+    /// [`ban_identifier`](NetworkAddress::ban_identifier).
+    BanPeer(PeerID, std::time::Duration, NetZone),
+    /// Adds a peer to the connected list
+    ConnectedToPeer {
+        /// The net zone of this connection.
+        zone: NetZone,
+        /// A handle between the connection and address book.
+        connection_handle: ConnectionAddressBookHandle,
+        /// The connection addr, None if the peer is using a
+        /// hidden network.
+        addr: Option<NetworkAddress>,
+        /// The peers id.
+        id: PeerID,
+        /// If the peer is reachable by our node.
+        reachable: bool,
+        /// The last seen timestamp, note: Cuprate may skip updating this
+        /// field on some inbound messages
+        last_seen: chrono::NaiveDateTime,
+        /// The peers pruning seed
+        pruning_seed: PruningSeed,
+        /// The peers port.
+        rpc_port: u16,
+        /// The peers rpc credits per hash
+        rpc_credits_per_hash: u32,
+    },
+
+    /// A request to get and eempty the anchor list,
+    /// used when starting the node.
+    GetAndEmptyAnchorList(NetZone),
+    /// Get a random Gray peer from the peer list
+    /// If a pruning seed is given we will select from
+    /// peers with that seed and peers that dont prune.
+    GetRandomGrayPeer(NetZone, Option<PruningSeed>),
+    /// Get a random White peer from the peer list
+    /// If a pruning seed is given we will select from
+    /// peers with that seed and peers that dont prune.
+    GetRandomWhitePeer(NetZone, Option<PruningSeed>),
+    /// Get a list of random peers from the white list,
+    /// The list will be less than or equal to the provided
+    /// len.
+    GetRandomWhitePeers(NetZone, usize),
 }

 impl std::fmt::Display for AddressBookRequest {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
-            Self::HandleNewPeerList(_, _) => f.write_str("HandleNewPeerList"),
-            Self::SetPeerSeen(_, _) => f.write_str("SetPeerSeen"),
-            Self::BanPeer(_, _) => f.write_str("BanPeer"),
-            Self::AddPeerToAnchor(_) => f.write_str("AddPeerToAnchor"),
-            Self::RemovePeerFromAnchor(_) => f.write_str("RemovePeerFromAnchor"),
-            Self::UpdatePeerInfo(_) => f.write_str("UpdatePeerInfo"),
-            Self::GetRandomGrayPeer(_) => f.write_str("GetRandomGrayPeer"),
-            Self::GetRandomWhitePeer(_) => f.write_str("GetRandomWhitePeer"),
+            Self::HandleNewPeerList(..) => f.write_str("HandleNewPeerList"),
+            Self::SetPeerSeen(..) => f.write_str("SetPeerSeen"),
+            Self::BanPeer(..) => f.write_str("BanPeer"),
+            Self::ConnectedToPeer { .. } => f.write_str("ConnectedToPeer"),
+            Self::GetAndEmptyAnchorList(_) => f.write_str("GetAndEmptyAnchorList"),
+            Self::GetRandomGrayPeer(..) => f.write_str("GetRandomGrayPeer"),
+            Self::GetRandomWhitePeer(..) => f.write_str("GetRandomWhitePeer"),
+            Self::GetRandomWhitePeers(_, len) => {
+                f.write_str(&format!("GetRandomWhitePeers, len: {len}"))
+            }
         }
     }
 }

 impl AddressBookRequest {
+    /// Gets the [`NetZone`] for this request so we can
+    /// route it to the required address book.
     pub fn get_zone(&self) -> NetZone {
         match self {
             Self::HandleNewPeerList(_, zone) => *zone,
-            Self::SetPeerSeen(peer, _) => peer.get_zone(),
-            Self::BanPeer(peer, _) => peer.get_zone(),
-            Self::AddPeerToAnchor(peer) => peer.get_zone(),
-            Self::RemovePeerFromAnchor(peer) => peer.get_zone(),
-            Self::UpdatePeerInfo(peer) => peer.adr.get_zone(),
-
-            Self::GetRandomGrayPeer(zone) => *zone,
-            Self::GetRandomWhitePeer(zone) => *zone,
+            Self::SetPeerSeen(.., zone) => *zone,
+            Self::BanPeer(.., zone) => *zone,
+            Self::ConnectedToPeer { zone, .. } => *zone,
+            Self::GetAndEmptyAnchorList(zone) => *zone,
+            Self::GetRandomGrayPeer(zone, _) => *zone,
+            Self::GetRandomWhitePeer(zone, _) => *zone,
+            Self::GetRandomWhitePeers(zone, _) => *zone,
         }
     }
 }

+/// A response from the AddressBook.
 #[derive(Debug)]
 pub enum AddressBookResponse {
+    /// The request was handled ok.
     Ok,
+    /// A peer.
     Peer(PeerListEntryBase),
+    /// A list of peers.
+    Peers(Vec<PeerListEntryBase>),
-}
-
-#[derive(Debug, Clone)]
-pub struct AddressBookConfig {
-    max_white_peers: usize,
-    max_gray_peers: usize,
-}
-
-impl Default for AddressBookConfig {
-    fn default() -> Self {
-        AddressBookConfig {
-            max_white_peers: MAX_WHITE_LIST_PEERS,
-            max_gray_peers: MAX_GRAY_LIST_PEERS,
-        }
-    }
-}
-
-#[async_trait::async_trait]
-pub trait AddressBookStore: Clone {
-    type Error: Into<AddressBookError>;
-    /// Loads the peers from the peer store.
-    /// returns (in order):
-    /// the white list,
-    /// the gray list,
-    /// the anchor list,
-    /// the ban list
-    async fn load_peers(
-        &mut self,
-        zone: NetZone,
-    ) -> Result<
-        (
-            Vec<PeerListEntryBase>,                       // white list
-            Vec<PeerListEntryBase>,                       // gray list
-            Vec<NetworkAddress>,                          // anchor list
-            Vec<(NetworkAddress, chrono::NaiveDateTime)>, // ban list
-        ),
-        Self::Error,
-    >;
-
-    async fn save_peers(
-        &mut self,
-        zone: NetZone,
-        white: Vec<PeerListEntryBase>,
-        gray: Vec<PeerListEntryBase>,
-        anchor: Vec<NetworkAddress>,
-        bans: Vec<(NetworkAddress, chrono::NaiveDateTime)>, // ban lists
-    ) -> Result<(), Self::Error>;
 }
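As the new module docs say, the address book is driven as a [`tower::Service`]: each `AddressBookRequest` is wrapped together with a `oneshot` sender that carries the `AddressBookResponse` back to the caller while the book itself runs as a separate task. Below is a stripped-down, self-contained sketch of that request/response actor pattern (toy types, not the real `AddressBookClientRequest`; it assumes the `futures` and `tokio` crates):

use futures::channel::{mpsc, oneshot};
use futures::{SinkExt, StreamExt};

struct Request {
    msg: String,
    tx: oneshot::Sender<String>,
}

#[tokio::main]
async fn main() {
    let (mut client, mut book_rx) = mpsc::channel::<Request>(0);

    // the "address book" task: handle each request and reply through its oneshot
    tokio::spawn(async move {
        while let Some(req) = book_rx.next().await {
            let _ = req.tx.send(format!("handled: {}", req.msg));
        }
    });

    // the client side: send a request, then await the oneshot for the response
    let (tx, rx) = oneshot::channel();
    client
        .send(Request { msg: "GetPeers".into(), tx })
        .await
        .expect("book task is alive");
    let resp = rx.await.expect("book task answers every request");
    assert_eq!(resp, "handled: GetPeers");
}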
@@ -1,38 +1,44 @@
+//! This module holds the address books client and [`tower::Service`].
+//!
+//! To start the address book use [`start_address_book`].
+// TODO: Store banned peers persistently.
 use std::future::Future;
 use std::pin::Pin;
+use std::task::Poll;

 use futures::channel::{mpsc, oneshot};
 use futures::FutureExt;
-use tokio::task::spawn;
+use tokio::task::{spawn, JoinHandle};
 use tower::steer::Steer;
+use tower::BoxError;
+use tracing::Instrument;

 use monero_wire::network_address::NetZone;

-use super::address_book::{AddressBook, AddressBookClientRequest};
-use super::{
-    AddressBookConfig, AddressBookError, AddressBookRequest, AddressBookResponse, AddressBookStore,
-};
+use crate::{Config, P2PStore};
+
+use super::address_book::{AddressBook, AddressBookClientRequest};
+use super::{AddressBookError, AddressBookRequest, AddressBookResponse};

+/// Start the address book.
+/// Under the hood this function spawns 3 address books
+/// for the 3 [`NetZone`] and combines them into a [`tower::Steer`](Steer).
 pub async fn start_address_book<S>(
     peer_store: S,
-    config: AddressBookConfig,
+    config: Config,
 ) -> Result<
     impl tower::Service<
         AddressBookRequest,
         Response = AddressBookResponse,
-        Error = AddressBookError,
+        Error = BoxError,
         Future = Pin<
-            Box<
-                dyn Future<Output = Result<AddressBookResponse, AddressBookError>>
-                    + Send
-                    + 'static,
-            >,
+            Box<dyn Future<Output = Result<AddressBookResponse, BoxError>> + Send + 'static>,
         >,
-    > + Clone,
-    AddressBookError,
+    >,
+    BoxError,
 >
 where
-    S: AddressBookStore,
+    S: P2PStore,
 {
     let mut builder = AddressBookBuilder::new(peer_store, config);

@@ -40,11 +46,13 @@ where
     let tor = builder.build(NetZone::Tor).await?;
     let i2p = builder.build(NetZone::I2p).await?;

+    // This list MUST be in the same order as closuer in the `Steer` func
     let books = vec![public, tor, i2p];

     Ok(Steer::new(
         books,
         |req: &AddressBookRequest, _: &[_]| match req.get_zone() {
+            // This:
             NetZone::Public => 0,
             NetZone::Tor => 1,
             NetZone::I2p => 2,

@@ -52,68 +60,105 @@ where
     ))
 }

-pub struct AddressBookBuilder<S> {
+/// An address book builder.
+/// This:
+/// - starts the address book
+/// - creates and returns the `AddressBookClient`
+struct AddressBookBuilder<S> {
     peer_store: S,
-    config: AddressBookConfig,
+    config: Config,
 }

 impl<S> AddressBookBuilder<S>
 where
-    S: AddressBookStore,
+    S: P2PStore,
 {
-    fn new(peer_store: S, config: AddressBookConfig) -> Self {
+    fn new(peer_store: S, config: Config) -> Self {
         AddressBookBuilder { peer_store, config }
     }

+    /// Builds the address book for a specific [`NetZone`]
     async fn build(&mut self, zone: NetZone) -> Result<AddressBookClient, AddressBookError> {
-        let (white, gray, anchor, bans) =
-            self.peer_store.load_peers(zone).await.map_err(Into::into)?;
+        let (white, gray, anchor) = self
+            .peer_store
+            .load_peers(zone)
+            .await
+            .map_err(|e| AddressBookError::PeerStoreError(e))?;

-        let book = AddressBook::new(self.config.clone(), zone, white, gray, anchor, bans);
+        let book = AddressBook::new(
+            self.config.clone(),
+            zone,
+            white,
+            gray,
+            anchor,
+            vec![],
+            self.peer_store.clone(),
+        );

-        let (tx, rx) = mpsc::channel(5);
+        let (tx, rx) = mpsc::channel(0);

-        spawn(book.run(rx));
+        let book_span = tracing::info_span!("AddressBook", book = book.book_name());

-        Ok(AddressBookClient { book: tx })
+        let book_handle = spawn(book.run(rx).instrument(book_span));
+
+        Ok(AddressBookClient {
+            book: tx,
+            book_handle,
+        })
     }
 }

-#[derive(Debug, Clone)]
+/// The Client for an individual address book.
+#[derive(Debug)]
 struct AddressBookClient {
+    /// The channel to pass requests to the address book.
     book: mpsc::Sender<AddressBookClientRequest>,
+    /// The address book task handle.
+    book_handle: JoinHandle<()>,
 }

 impl tower::Service<AddressBookRequest> for AddressBookClient {
-    type Error = AddressBookError;
     type Response = AddressBookResponse;
+    type Error = BoxError;
     type Future =
         Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;

-    fn poll_ready(
-        &mut self,
-        cx: &mut std::task::Context<'_>,
-    ) -> std::task::Poll<Result<(), Self::Error>> {
-        self.book
-            .poll_ready(cx)
-            .map_err(|_| AddressBookError::AddressBooksChannelClosed)
+    fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<(), Self::Error>> {
+        // Check the channel
+        match self.book.poll_ready(cx) {
+            Poll::Pending => return Poll::Pending,
+            Poll::Ready(Ok(())) => (),
+            Poll::Ready(Err(_)) => {
+                return Poll::Ready(Err(AddressBookError::AddressBooksChannelClosed.into()))
+            }
+        }
+
+        // Check the address book task is still running
+        match self.book_handle.poll_unpin(cx) {
+            // The address book is still running
+            Poll::Pending => Poll::Ready(Ok(())),
+            // The address book task has exited
+            Poll::Ready(_) => Err(AddressBookError::AddressBookTaskExited)?,
+        }
     }

     fn call(&mut self, req: AddressBookRequest) -> Self::Future {
         let (tx, rx) = oneshot::channel();
         // get the callers span
-        let span = tracing::span::Span::current();
+        let span = tracing::debug_span!(parent: &tracing::span::Span::current(), "AddressBook");

         let req = AddressBookClientRequest { req, tx, span };

         match self.book.try_send(req) {
             Err(_e) => {
                 // I'm assuming all callers will call `poll_ready` first (which they are supposed to)
-                futures::future::ready(Err(AddressBookError::AddressBooksChannelClosed)).boxed()
+                futures::future::ready(Err(AddressBookError::AddressBooksChannelClosed.into()))
+                    .boxed()
             }
             Ok(()) => async move {
                 rx.await
                     .expect("Address Book will not drop requests until completed")
+                    .map_err(Into::into)
             }
             .boxed(),
         }
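`start_address_book` combines the three per-zone books with `tower::steer::Steer`, whose picker closure must return an index matching the order of the service list. The following is a small, self-contained illustration of that routing idea (toy `Zone`/`Book` types, not Cuprate's; it assumes the `tower` crate with the `steer` and `util` features plus `tokio`):

use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

use tower::{steer::Steer, Service, ServiceExt};

enum Zone {
    Public,
    Tor,
    I2p,
}

struct Book(&'static str);

impl Service<Zone> for Book {
    type Response = &'static str;
    type Error = tower::BoxError;
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, _req: Zone) -> Self::Future {
        let name = self.0;
        Box::pin(async move { Ok(name) })
    }
}

#[tokio::main]
async fn main() -> Result<(), tower::BoxError> {
    // The picker's index MUST line up with the order of the vec, exactly as the
    // comment in `start_address_book` warns.
    let mut books = Steer::new(
        vec![Book("public"), Book("tor"), Book("i2p")],
        |req: &Zone, _: &[Book]| match req {
            Zone::Public => 0,
            Zone::Tor => 1,
            Zone::I2p => 2,
        },
    );

    assert_eq!(books.ready().await?.call(Zone::Tor).await?, "tor");
    Ok(())
}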
@@ -1,70 +1,145 @@
+//! This module contains the actual address book logic.
+//!
+//! The address book is split into multiple [`PeerList`]:
+//!
+//! - A White list: For peers we have connected to ourselves.
+//!
+//! - A Gray list: For Peers we have been told about but
+//! haven't connected to ourselves.
+//!
+//! - An Anchor list: This holds peers we are currently
+//! connected to that are reachable if we were to
+//! connect to them again. For example an inbound proxy
+//! connection would not get added to this list as we cant
+//! connect to this peer ourselves. Behind the scenes we
+//! are just storing the key to a peer in the white list.
+//!
 use std::collections::{HashMap, HashSet};
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};

+use futures::stream::FuturesUnordered;
 use futures::{
     channel::{mpsc, oneshot},
-    StreamExt,
+    FutureExt, Stream, StreamExt,
 };
-use rand::{Rng, SeedableRng};
-use std::time::Duration;
+use pin_project::pin_project;
+use rand::prelude::SliceRandom;

+use cuprate_common::shutdown::is_shutting_down;
 use cuprate_common::PruningSeed;
-use monero_wire::{messages::PeerListEntryBase, network_address::NetZone, NetworkAddress};
+use monero_wire::{messages::PeerListEntryBase, network_address::NetZone, NetworkAddress, PeerID};

-use super::{AddressBookConfig, AddressBookError, AddressBookRequest, AddressBookResponse};
+use super::{AddressBookError, AddressBookRequest, AddressBookResponse};
+use crate::address_book::connection_handle::ConnectionAddressBookHandle;
+use crate::{constants::ADDRESS_BOOK_SAVE_INTERVAL, Config, P2PStore};

 mod peer_list;
 use peer_list::PeerList;

-pub(crate) struct AddressBookClientRequest {
-    pub req: AddressBookRequest,
-    pub tx: oneshot::Sender<Result<AddressBookResponse, AddressBookError>>,
+#[cfg(test)]
+mod tests;

+/// A request sent to the address book task.
+pub(crate) struct AddressBookClientRequest {
+    /// The request
+    pub req: AddressBookRequest,
+    /// A oneshot to send the result down
+    pub tx: oneshot::Sender<Result<AddressBookResponse, AddressBookError>>,
+    /// The tracing span to keep the context of the request
     pub span: tracing::Span,
 }

-pub struct AddressBook {
-    zone: NetZone,
-    config: AddressBookConfig,
-    white_list: PeerList,
-    gray_list: PeerList,
-    anchor_list: HashSet<NetworkAddress>,
-
-    baned_peers: HashMap<NetworkAddress, chrono::NaiveDateTime>,
-
-    rng: rand::rngs::StdRng,
-    //banned_subnets:,
+/// An entry in the connected list.
+pub struct ConnectionPeerEntry {
+    /// A oneshot sent from the Connection when it has finished.
+    connection_handle: ConnectionAddressBookHandle,
+    /// The connection addr, None if the peer is connected through
+    /// a hidden network.
+    addr: Option<NetworkAddress>,
+    /// If the peer is reachable by our node.
+    reachable: bool,
+    /// The last seen timestamp, note: Cuprate may skip updating this
+    /// field on some inbound messages
+    last_seen: chrono::NaiveDateTime,
+    /// The peers pruning seed
+    pruning_seed: PruningSeed,
+    /// The peers port.
+    rpc_port: u16,
+    /// The peers rpc credits per hash
+    rpc_credits_per_hash: u32,
 }

-impl AddressBook {
+/// A future that resolves when a peer is unbanned.
+#[pin_project(project = EnumProj)]
+pub struct BanedPeerFut(Vec<u8>, #[pin] tokio::time::Sleep);
+
+impl Future for BanedPeerFut {
+    type Output = Vec<u8>;
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let mut this = self.project();
+        match this.1.poll_unpin(cx) {
+            Poll::Pending => Poll::Pending,
+            Poll::Ready(_) => Poll::Ready(this.0.clone()),
+        }
+    }
+}
+
+/// The address book for a specific [`NetZone`]
+pub struct AddressBook<PeerStore> {
+    /// The [`NetZone`] of this address book.
+    zone: NetZone,
+    /// A copy of the nodes configuration.
+    config: Config,
+    /// The Address books white list.
+    white_list: PeerList,
+    /// The Address books gray list.
+    gray_list: PeerList,
+    /// The Address books anchor list.
+    anchor_list: HashSet<NetworkAddress>,
+    /// The Currently connected peers.
+    connected_peers: HashMap<PeerID, ConnectionPeerEntry>,
+    /// A tuple of:
+    /// - A hashset of [`ban_identifier`](NetworkAddress::ban_identifier)
+    /// - A [`FuturesUnordered`] which contains futures for every ban_id
+    /// that will resolve when the ban_id should be un banned.
+    baned_peers: (HashSet<Vec<u8>>, FuturesUnordered<BanedPeerFut>),
+    /// The peer store to save the peers to persistent storage
+    p2p_store: PeerStore,
+}
+
+impl<PeerStore: P2PStore> AddressBook<PeerStore> {
+    /// Creates a new address book for a given [`NetZone`]
     pub fn new(
-        config: AddressBookConfig,
+        config: Config,
         zone: NetZone,
         white_peers: Vec<PeerListEntryBase>,
         gray_peers: Vec<PeerListEntryBase>,
         anchor_peers: Vec<NetworkAddress>,
         baned_peers: Vec<(NetworkAddress, chrono::NaiveDateTime)>,
-    ) -> AddressBook {
-        let rng = rand::prelude::StdRng::from_entropy();
+        p2p_store: PeerStore,
+    ) -> Self {
         let white_list = PeerList::new(white_peers);
         let gray_list = PeerList::new(gray_peers);
         let anchor_list = HashSet::from_iter(anchor_peers);
-        let baned_peers = HashMap::from_iter(baned_peers);
+        let baned_peers = (HashSet::new(), FuturesUnordered::new());

-        let mut book = AddressBook {
+        let connected_peers = HashMap::new();
+
+        AddressBook {
             zone,
             config,
             white_list,
             gray_list,
             anchor_list,
+            connected_peers,
             baned_peers,
-            rng,
-        };
-
-        book.check_unban_peers();
-
-        book
+            p2p_store,
+        }
     }

+    /// Returns the books name (Based on the [`NetZone`])
     pub const fn book_name(&self) -> &'static str {
         match self.zone {
             NetZone::Public => "PublicAddressBook",
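`BanedPeerFut` above pairs a ban id with a `tokio::time::Sleep` and yields the id back once the timer fires; the book keeps all of these in a `FuturesUnordered` so expired bans can be drained cheaply. Here is a self-contained sketch of that idea (toy ban ids, not the real `BanedPeerFut`; it assumes the `futures` and `tokio` crates):

use std::time::Duration;

use futures::{stream::FuturesUnordered, StreamExt};

// one future per banned id; it resolves with the id once the ban has elapsed
async fn ban_timer(ban_id: u8, ms: u64) -> u8 {
    tokio::time::sleep(Duration::from_millis(ms)).await;
    ban_id
}

#[tokio::main]
async fn main() {
    let mut bans = FuturesUnordered::new();
    bans.push(ban_timer(1, 50));
    bans.push(ban_timer(2, 10));

    // the real address book polls this with `now_or_never()` inside its main
    // loop; here we simply drain it to completion
    while let Some(unbanned) = bans.next().await {
        println!("unbanning peer with ban id {unbanned}");
    }
}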
@ -73,80 +148,137 @@ impl AddressBook {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Returns the length of the white list
|
||||||
fn len_white_list(&self) -> usize {
|
fn len_white_list(&self) -> usize {
|
||||||
self.white_list.len()
|
self.white_list.len()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Returns the length of the gray list
|
||||||
fn len_gray_list(&self) -> usize {
|
fn len_gray_list(&self) -> usize {
|
||||||
self.gray_list.len()
|
self.gray_list.len()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Returns the length of the anchor list
|
||||||
|
fn len_anchor_list(&self) -> usize {
|
||||||
|
self.anchor_list.len()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the length of the banned list
|
||||||
|
fn len_banned_list(&self) -> usize {
|
||||||
|
self.baned_peers.0.len()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the maximum length of the white list
|
||||||
|
/// *note this list can grow bigger if we are connected to more
|
||||||
|
/// than this amount.
|
||||||
fn max_white_peers(&self) -> usize {
|
fn max_white_peers(&self) -> usize {
|
||||||
self.config.max_white_peers
|
self.config.max_white_peers()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Returns the maximum length of the gray list
|
||||||
fn max_gray_peers(&self) -> usize {
|
fn max_gray_peers(&self) -> usize {
|
||||||
self.config.max_gray_peers
|
self.config.max_gray_peers()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Checks if a peer is banned.
|
||||||
fn is_peer_banned(&self, peer: &NetworkAddress) -> bool {
|
fn is_peer_banned(&self, peer: &NetworkAddress) -> bool {
|
||||||
self.baned_peers.contains_key(peer)
|
self.baned_peers.0.contains(&peer.ban_identifier())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Checks if banned peers should be unbanned as the duration has elapsed
|
||||||
fn check_unban_peers(&mut self) {
|
fn check_unban_peers(&mut self) {
|
||||||
let mut now = chrono::Utc::now().naive_utc();
|
while let Some(Some(addr)) = Pin::new(&mut self.baned_peers.1).next().now_or_never() {
|
||||||
self.baned_peers.retain(|_, time| time > &mut now)
|
tracing::debug!("Unbanning peer: {addr:?}");
|
||||||
}
|
self.baned_peers.0.remove(&addr);
|
||||||
|
|
||||||
fn ban_peer(&mut self, peer: NetworkAddress, till: chrono::NaiveDateTime) {
|
|
||||||
let now = chrono::Utc::now().naive_utc();
|
|
||||||
if now > till {
|
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
tracing::debug!("Banning peer: {peer:?} until: {till}");
|
|
||||||
|
|
||||||
self.baned_peers.insert(peer, till);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn add_peer_to_anchor(&mut self, peer: NetworkAddress) -> Result<(), AddressBookError> {
|
/// Checks if peers have disconnected, if they have removing them from the
|
||||||
tracing::debug!("Adding peer: {peer:?} to anchor list");
|
/// connected and anchor list.
|
||||||
// is peer in gray list
|
fn check_connected_peers(&mut self) {
|
||||||
if let Some(peer_eb) = self.gray_list.remove_peer(&peer) {
|
let mut remove_from_anchor = vec![];
|
||||||
self.white_list.add_new_peer(peer_eb);
|
// We dont have to worry about updating our white list with the information
|
||||||
self.anchor_list.insert(peer);
|
// before we remove the peers as that happens on every save.
|
||||||
Ok(())
|
self.connected_peers.retain(|_, peer| {
|
||||||
} else {
|
if !peer.connection_handle.connection_closed() {
|
||||||
if !self.white_list.contains_peer(&peer) {
|
// add the peer to the list to get removed from the anchor
|
||||||
return Err(AddressBookError::PeerNotFound);
|
if let Some(addr) = peer.addr {
|
||||||
|
remove_from_anchor.push(addr)
|
||||||
|
}
|
||||||
|
false
|
||||||
|
} else {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
});
|
||||||
|
// If we are shutting down we want to keep our anchor peers for
|
||||||
|
// the next time we boot up so we dont remove disconnecting peers
|
||||||
|
// from the anchor list if we are shutting down.
|
||||||
|
if !is_shutting_down() {
|
||||||
|
for peer in remove_from_anchor {
|
||||||
|
self.anchor_list.remove(&peer);
|
||||||
}
|
}
|
||||||
self.anchor_list.insert(peer);
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn remove_peer_from_anchor(&mut self, peer: NetworkAddress) {
|
// Bans the peer and tells the connection tasks of peers with the same ban id to shutdown.
|
||||||
let _ = self.anchor_list.remove(&peer);
|
fn ban_peer(
|
||||||
}
|
|
||||||
|
|
||||||
fn set_peer_seen(
|
|
||||||
&mut self,
|
&mut self,
|
||||||
peer: NetworkAddress,
|
peer: PeerID,
|
||||||
last_seen: i64,
|
time: std::time::Duration,
|
||||||
) -> Result<(), AddressBookError> {
|
) -> Result<(), AddressBookError> {
|
||||||
if let Some(mut peer) = self.gray_list.remove_peer(&peer) {
|
tracing::debug!("Banning peer: {peer:?} for: {time:?}");
|
||||||
peer.last_seen = last_seen;
|
|
||||||
self.white_list.add_new_peer(peer);
|
let Some(conn_entry) = self.connected_peers.get(&peer) else {
|
||||||
} else {
|
tracing::debug!("Peer is not in connected list");
|
||||||
let peer = self
|
return Err(AddressBookError::PeerNotFound);
|
||||||
.white_list
|
};
|
||||||
.get_peer_mut(&peer)
|
// tell the connection task to finish.
|
||||||
.ok_or(AddressBookError::PeerNotFound)?;
|
conn_entry.connection_handle.kill_connection();
|
||||||
peer.last_seen = last_seen;
|
// try find the NetworkAddress of the peer
|
||||||
|
let Some(addr) = conn_entry.addr else {
|
||||||
|
tracing::debug!("Peer does not have an address we can ban");
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
|
||||||
|
let ban_id = addr.ban_identifier();
|
||||||
|
|
||||||
|
self.white_list.remove_peers_with_ban_id(&ban_id);
|
||||||
|
self.gray_list.remove_peers_with_ban_id(&ban_id);
|
||||||
|
// Dont remove from anchor list or connection list as this will happen when
|
||||||
|
// the connection is closed.
|
||||||
|
|
||||||
|
// tell the connection task of peers with the same ban id to shutdown.
|
||||||
|
for conn in self.connected_peers.values() {
|
||||||
|
if let Some(addr) = conn.addr {
|
||||||
|
if addr.ban_identifier() == ban_id {
|
||||||
|
conn.connection_handle.kill_connection()
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// add the ban identifier to the ban list
|
||||||
|
self.baned_peers.0.insert(ban_id.clone());
|
||||||
|
self.baned_peers
|
||||||
|
.1
|
||||||
|
.push(BanedPeerFut(ban_id, tokio::time::sleep(time)));
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Update the last seen timestamp of a connected peer.
|
||||||
|
fn update_last_seen(
|
||||||
|
&mut self,
|
||||||
|
peer: PeerID,
|
||||||
|
last_seen: chrono::NaiveDateTime,
|
||||||
|
) -> Result<(), AddressBookError> {
|
||||||
|
if let Some(mut peer) = self.connected_peers.get_mut(&peer) {
|
||||||
|
peer.last_seen = last_seen;
|
||||||
|
Ok(())
|
||||||
|
} else {
|
||||||
|
Err(AddressBookError::PeerNotFound)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// adds a peer to the gray list.
|
||||||
fn add_peer_to_gray_list(&mut self, mut peer: PeerListEntryBase) {
|
fn add_peer_to_gray_list(&mut self, mut peer: PeerListEntryBase) {
|
||||||
if self.white_list.contains_peer(&peer.adr) {
|
if self.white_list.contains_peer(&peer.adr) {
|
||||||
return;
|
return;
|
||||||
|
@@ -157,6 +289,9 @@ impl AddressBook {
         }
     }

+    /// handles an incoming peer list,
+    /// dose some basic validation on the addresses
+    /// appends the good peers to our book.
     fn handle_new_peerlist(
         &mut self,
         mut peers: Vec<PeerListEntryBase>,
@ -198,77 +333,262 @@ impl AddressBook {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_random_gray_peer(&mut self) -> Option<PeerListEntryBase> {
|
/// Gets a random peer from our gray list.
|
||||||
self.gray_list.get_random_peer(&mut self.rng).map(|p| *p)
|
/// If pruning seed is set we will get a peer with that pruning seed.
|
||||||
|
fn get_random_gray_peer(
|
||||||
|
&mut self,
|
||||||
|
pruning_seed: Option<PruningSeed>,
|
||||||
|
) -> Option<PeerListEntryBase> {
|
||||||
|
self.gray_list
|
||||||
|
.get_random_peer(&mut rand::thread_rng(), pruning_seed.map(Into::into))
|
||||||
|
.map(|p| *p)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_random_white_peer(&mut self) -> Option<PeerListEntryBase> {
|
/// Gets a random peer from our white list.
|
||||||
self.white_list.get_random_peer(&mut self.rng).map(|p| *p)
|
/// If pruning seed is set we will get a peer with that pruning seed.
|
||||||
|
fn get_random_white_peer(
|
||||||
|
&mut self,
|
||||||
|
pruning_seed: Option<PruningSeed>,
|
||||||
|
) -> Option<PeerListEntryBase> {
|
||||||
|
self.white_list
|
||||||
|
.get_random_peer(&mut rand::thread_rng(), pruning_seed.map(Into::into))
|
||||||
|
.map(|p| *p)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn update_peer_info(&mut self, peer: PeerListEntryBase) -> Result<(), AddressBookError> {
|
/// Gets random peers from our white list.
|
||||||
if let Some(peer_stored) = self.gray_list.get_peer_mut(&peer.adr) {
|
/// will be less than or equal to `len`.
|
||||||
*peer_stored = peer;
|
fn get_random_white_peers(&mut self, len: usize) -> Vec<PeerListEntryBase> {
|
||||||
Ok(())
|
let white_len = self.white_list.len();
|
||||||
} else if let Some(peer_stored) = self.white_list.get_peer_mut(&peer.adr) {
|
let len = if len < white_len { len } else { white_len };
|
||||||
*peer_stored = peer;
|
let mut white_peers: Vec<&PeerListEntryBase> = self.white_list.iter_all_peers().collect();
|
||||||
Ok(())
|
white_peers.shuffle(&mut rand::thread_rng());
|
||||||
} else {
|
white_peers[0..len].iter().map(|peb| **peb).collect()
|
||||||
return Err(AddressBookError::PeerNotFound);
|
}
|
||||||
|
|
||||||
|
/// Updates an entry in the white list, if the peer is not found and `reachable` is true then
|
||||||
|
/// the peer will be added to the white list.
|
||||||
|
fn update_white_list_peer_entry(
|
||||||
|
&mut self,
|
||||||
|
addr: &NetworkAddress,
|
||||||
|
id: PeerID,
|
||||||
|
conn_entry: &ConnectionPeerEntry,
|
||||||
|
) -> Result<(), AddressBookError> {
|
||||||
|
if let Some(peb) = self.white_list.get_peer_mut(addr) {
|
||||||
|
if peb.pruning_seed == conn_entry.pruning_seed.into() {
|
||||||
|
return Err(AddressBookError::PeersPruningSeedChanged);
|
||||||
|
}
|
||||||
|
peb.id = id;
|
||||||
|
peb.last_seen = conn_entry.last_seen.timestamp();
|
||||||
|
peb.rpc_port = conn_entry.rpc_port;
|
||||||
|
peb.rpc_credits_per_hash = conn_entry.rpc_credits_per_hash;
|
||||||
|
peb.pruning_seed = conn_entry.pruning_seed.into();
|
||||||
|
} else if conn_entry.reachable {
|
||||||
|
// if the peer is reachable add it to our white list
|
||||||
|
let peb = PeerListEntryBase {
|
||||||
|
id,
|
||||||
|
adr: *addr,
|
||||||
|
last_seen: conn_entry.last_seen.timestamp(),
|
||||||
|
rpc_port: conn_entry.rpc_port,
|
||||||
|
rpc_credits_per_hash: conn_entry.rpc_credits_per_hash,
|
||||||
|
pruning_seed: conn_entry.pruning_seed.into(),
|
||||||
|
};
|
||||||
|
self.white_list.add_new_peer(peb);
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Handles a new connection, adding it to the white list if the
|
||||||
|
/// peer is reachable by our node.
|
||||||
|
fn handle_new_connection(
|
||||||
|
&mut self,
|
||||||
|
connection_handle: ConnectionAddressBookHandle,
|
||||||
|
addr: Option<NetworkAddress>,
|
||||||
|
id: PeerID,
|
||||||
|
reachable: bool,
|
||||||
|
last_seen: chrono::NaiveDateTime,
|
||||||
|
pruning_seed: PruningSeed,
|
||||||
|
rpc_port: u16,
|
||||||
|
rpc_credits_per_hash: u32,
|
||||||
|
) -> Result<(), AddressBookError> {
|
||||||
|
let connection_entry = ConnectionPeerEntry {
|
||||||
|
connection_handle,
|
||||||
|
addr,
|
||||||
|
reachable,
|
||||||
|
last_seen,
|
||||||
|
pruning_seed,
|
||||||
|
rpc_port,
|
||||||
|
rpc_credits_per_hash,
|
||||||
|
};
|
||||||
|
if let Some(addr) = addr {
|
||||||
|
if self.baned_peers.0.contains(&addr.ban_identifier()) {
|
||||||
|
return Err(AddressBookError::PeerIsBanned);
|
||||||
|
}
|
||||||
|
// remove the peer from the gray list as we know it's active.
|
||||||
|
let _ = self.gray_list.remove_peer(&addr);
|
||||||
|
if !reachable {
|
||||||
|
// If we can't reach the peer remove it from the white list as well
|
||||||
|
let _ = self.white_list.remove_peer(&addr);
|
||||||
|
} else {
|
||||||
|
// The peer is reachable, update our white list and add it to the anchor connections.
|
||||||
|
self.update_white_list_peer_entry(&addr, id, &connection_entry)?;
|
||||||
|
self.anchor_list.insert(addr);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
self.connected_peers.insert(id, connection_entry);
|
||||||
|
self.white_list
|
||||||
|
.reduce_list(&self.anchor_list, self.max_white_peers());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get and empties the anchor list, used at startup to
|
||||||
|
/// connect to some peers we were previously connected to.
|
||||||
|
fn get_and_empty_anchor_list(&mut self) -> Vec<PeerListEntryBase> {
|
||||||
|
self.anchor_list
|
||||||
|
.drain()
|
||||||
|
.map(|addr| {
|
||||||
|
self.white_list
|
||||||
|
.get_peer(&addr)
|
||||||
|
.expect("If peer is in anchor it must be in white list")
|
||||||
|
.clone()
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Handles an [`AddressBookClientRequest`] to the address book.
|
||||||
|
async fn handle_request(&mut self, req: AddressBookClientRequest) {
|
||||||
|
let _guard = req.span.enter();
|
||||||
|
|
||||||
|
tracing::trace!("received request: {}", req.req);
|
||||||
|
|
||||||
|
let res = match req.req {
|
||||||
|
AddressBookRequest::HandleNewPeerList(new_peers, _) => self
|
||||||
|
.handle_new_peerlist(new_peers)
|
||||||
|
.map(|_| AddressBookResponse::Ok),
|
||||||
|
AddressBookRequest::SetPeerSeen(peer, last_seen, _) => self
|
||||||
|
.update_last_seen(peer, last_seen)
|
||||||
|
.map(|_| AddressBookResponse::Ok),
|
||||||
|
AddressBookRequest::BanPeer(peer, time, _) => {
|
||||||
|
self.ban_peer(peer, time).map(|_| AddressBookResponse::Ok)
|
||||||
|
}
|
||||||
|
AddressBookRequest::ConnectedToPeer {
|
||||||
|
zone: _,
|
||||||
|
connection_handle,
|
||||||
|
addr,
|
||||||
|
id,
|
||||||
|
reachable,
|
||||||
|
last_seen,
|
||||||
|
pruning_seed,
|
||||||
|
rpc_port,
|
||||||
|
rpc_credits_per_hash,
|
||||||
|
} => self
|
||||||
|
.handle_new_connection(
|
||||||
|
connection_handle,
|
||||||
|
addr,
|
||||||
|
id,
|
||||||
|
reachable,
|
||||||
|
last_seen,
|
||||||
|
pruning_seed,
|
||||||
|
rpc_port,
|
||||||
|
rpc_credits_per_hash,
|
||||||
|
)
|
||||||
|
.map(|_| AddressBookResponse::Ok),
|
||||||
|
|
||||||
|
AddressBookRequest::GetAndEmptyAnchorList(_) => {
|
||||||
|
Ok(AddressBookResponse::Peers(self.get_and_empty_anchor_list()))
|
||||||
|
}
|
||||||
|
|
||||||
|
AddressBookRequest::GetRandomGrayPeer(_, pruning_seed) => {
|
||||||
|
match self.get_random_gray_peer(pruning_seed) {
|
||||||
|
Some(peer) => Ok(AddressBookResponse::Peer(peer)),
|
||||||
|
None => Err(AddressBookError::PeerListEmpty),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
AddressBookRequest::GetRandomWhitePeer(_, pruning_seed) => {
|
||||||
|
match self.get_random_white_peer(pruning_seed) {
|
||||||
|
Some(peer) => Ok(AddressBookResponse::Peer(peer)),
|
||||||
|
None => Err(AddressBookError::PeerListEmpty),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
AddressBookRequest::GetRandomWhitePeers(_, len) => {
|
||||||
|
Ok(AddressBookResponse::Peers(self.get_random_white_peers(len)))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Err(e) = &res {
|
||||||
|
tracing::debug!("Error when handling request, err: {e}")
|
||||||
|
}
|
||||||
|
|
||||||
|
let _ = req.tx.send(res);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Updates the white list with the information in the `connected_peers` list.
|
||||||
|
/// This only updates the `last_seen` timestamp as that's the only thing that should
|
||||||
|
/// change during connections.
|
||||||
|
fn update_white_list_with_conn_list(&mut self) {
|
||||||
|
for (_, peer) in self.connected_peers.iter() {
|
||||||
|
if peer.reachable {
|
||||||
|
if let Some(peer_eb) = self.white_list.get_peer_mut(&peer.addr.unwrap()) {
|
||||||
|
peer_eb.last_seen = peer.last_seen.timestamp();
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Saves the address book to persistent storage.
|
||||||
|
/// TODO: save the banned peer list.
|
||||||
|
#[tracing::instrument(level="trace", skip(self), fields(name = self.book_name()) )]
|
||||||
|
async fn save(&mut self) {
|
||||||
|
self.update_white_list_with_conn_list();
|
||||||
|
tracing::trace!(
|
||||||
|
"white_len: {}, gray_len: {}, anchor_len: {}, banned_len: {}",
|
||||||
|
self.len_white_list(),
|
||||||
|
self.len_gray_list(),
|
||||||
|
self.len_anchor_list(),
|
||||||
|
self.len_banned_list()
|
||||||
|
);
|
||||||
|
let res = self
|
||||||
|
.p2p_store
|
||||||
|
.save_peers(
|
||||||
|
self.zone,
|
||||||
|
(&self.white_list).into(),
|
||||||
|
(&self.gray_list).into(),
|
||||||
|
self.anchor_list.iter().collect(),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
match res {
|
||||||
|
Ok(()) => tracing::trace!("Complete"),
|
||||||
|
Err(e) => tracing::error!("Error saving address book: {e}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Runs the address book task
|
||||||
|
/// Should be spawned in a task.
|
||||||
pub(crate) async fn run(mut self, mut rx: mpsc::Receiver<AddressBookClientRequest>) {
|
pub(crate) async fn run(mut self, mut rx: mpsc::Receiver<AddressBookClientRequest>) {
|
||||||
|
let mut save_interval = {
|
||||||
|
let mut interval = tokio::time::interval(ADDRESS_BOOK_SAVE_INTERVAL);
|
||||||
|
interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
|
||||||
|
// Interval ticks at 0, interval, 2 interval, ...
|
||||||
|
// this is just to ignore the first tick
|
||||||
|
interval.tick().await;
|
||||||
|
tokio_stream::wrappers::IntervalStream::new(interval).fuse()
|
||||||
|
};
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
let Some(req) = rx.next().await else {
|
|
||||||
// the client has been dropped the node has *possibly* shut down
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
|
|
||||||
self.check_unban_peers();
|
self.check_unban_peers();
|
||||||
|
self.check_connected_peers();
|
||||||
let span = tracing::debug_span!(parent: &req.span, "AddressBook");
|
futures::select! {
|
||||||
let _guard = span.enter();
|
req = rx.next() => {
|
||||||
|
if let Some(req) = req {
|
||||||
tracing::debug!("{} received request: {}", self.book_name(), req.req);
|
self.handle_request(req).await
|
||||||
|
} else {
|
||||||
let res = match req.req {
|
tracing::debug!("{} req channel closed, saving and shutting down book", self.book_name());
|
||||||
AddressBookRequest::HandleNewPeerList(new_peers, _) => self
|
self.save().await;
|
||||||
.handle_new_peerlist(new_peers)
|
return;
|
||||||
.map(|_| AddressBookResponse::Ok),
|
}
|
||||||
AddressBookRequest::SetPeerSeen(peer, last_seen) => self
|
|
||||||
.set_peer_seen(peer, last_seen)
|
|
||||||
.map(|_| AddressBookResponse::Ok),
|
|
||||||
AddressBookRequest::BanPeer(peer, till) => {
|
|
||||||
self.ban_peer(peer, till);
|
|
||||||
Ok(AddressBookResponse::Ok)
|
|
||||||
}
|
}
|
||||||
AddressBookRequest::AddPeerToAnchor(peer) => self
|
_ = save_interval.next() => self.save().await
|
||||||
.add_peer_to_anchor(peer)
|
|
||||||
.map(|_| AddressBookResponse::Ok),
|
|
||||||
AddressBookRequest::RemovePeerFromAnchor(peer) => {
|
|
||||||
self.remove_peer_from_anchor(peer);
|
|
||||||
Ok(AddressBookResponse::Ok)
|
|
||||||
}
|
|
||||||
AddressBookRequest::UpdatePeerInfo(peer) => {
|
|
||||||
self.update_peer_info(peer).map(|_| AddressBookResponse::Ok)
|
|
||||||
}
|
|
||||||
|
|
||||||
AddressBookRequest::GetRandomGrayPeer(_) => match self.get_random_gray_peer() {
|
|
||||||
Some(peer) => Ok(AddressBookResponse::Peer(peer)),
|
|
||||||
None => Err(AddressBookError::PeerListEmpty),
|
|
||||||
},
|
|
||||||
AddressBookRequest::GetRandomWhitePeer(_) => match self.get_random_white_peer() {
|
|
||||||
Some(peer) => Ok(AddressBookResponse::Peer(peer)),
|
|
||||||
None => Err(AddressBookError::PeerListEmpty),
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Err(e) = &res {
|
|
||||||
tracing::debug!("Error when handling request, err: {e}")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let _ = req.tx.send(res);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@@ -1,17 +1,42 @@
+//! This module contains the individual address books peer lists.
+//!
 use std::collections::{HashMap, HashSet};
+use std::hash::Hash;

+use cuprate_common::CRYPTONOTE_PRUNING_LOG_STRIPES;
 use monero_wire::{messages::PeerListEntryBase, NetworkAddress};
 use rand::Rng;

+#[cfg(test)]
+mod tests;

+/// A Peer list in the address book.
+///
+/// This could either be the white list or gray list.
 pub struct PeerList {
+    /// The peers with their peer data.
     peers: HashMap<NetworkAddress, PeerListEntryBase>,
+    /// An index of Pruning seed to address, so
+    /// can quickly grab peers with the pruning seed
+    /// we want.
     pruning_idxs: HashMap<u32, Vec<NetworkAddress>>,
+    /// An index of [`ban_identifier`](NetworkAddress::ban_identifier) to Address
+    /// to allow us to quickly remove baned peers.
+    ban_id_idxs: HashMap<Vec<u8>, Vec<NetworkAddress>>,
+}
+
+impl<'a> Into<Vec<&'a PeerListEntryBase>> for &'a PeerList {
+    fn into(self) -> Vec<&'a PeerListEntryBase> {
+        self.peers.iter().map(|(_, peb)| peb).collect()
+    }
 }

 impl PeerList {
+    /// Creates a new peer list.
     pub fn new(list: Vec<PeerListEntryBase>) -> PeerList {
         let mut peers = HashMap::with_capacity(list.len());
-        let mut pruning_idxs = HashMap::with_capacity(8);
+        let mut pruning_idxs = HashMap::with_capacity(2 << CRYPTONOTE_PRUNING_LOG_STRIPES);
+        let mut ban_id_idxs = HashMap::with_capacity(list.len()); // worse case, every peer has a different NetworkAddress and ban id

         for peer in list {
             peers.insert(peer.adr, peer);
@ -20,79 +45,157 @@ impl PeerList {
|
||||||
.entry(peer.pruning_seed)
|
.entry(peer.pruning_seed)
|
||||||
.or_insert_with(Vec::new)
|
.or_insert_with(Vec::new)
|
||||||
.push(peer.adr);
|
.push(peer.adr);
|
||||||
|
|
||||||
|
ban_id_idxs
|
||||||
|
.entry(peer.adr.ban_identifier())
|
||||||
|
.or_insert_with(Vec::new)
|
||||||
|
.push(peer.adr);
|
||||||
}
|
}
|
||||||
PeerList {
|
PeerList {
|
||||||
peers,
|
peers,
|
||||||
pruning_idxs,
|
pruning_idxs,
|
||||||
|
ban_id_idxs,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Gets the length of the peer list
|
||||||
pub fn len(&self) -> usize {
|
pub fn len(&self) -> usize {
|
||||||
self.peers.len()
|
self.peers.len()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Gets the amount of peers with a specific seed.
|
||||||
|
pub fn len_by_seed(&self, pruning_seed: &u32) -> usize {
|
||||||
|
self.pruning_idxs
|
||||||
|
.get(pruning_seed)
|
||||||
|
.map(|indexes| indexes.len())
|
||||||
|
.unwrap_or(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Adds a new peer to the peer list
|
||||||
pub fn add_new_peer(&mut self, peer: PeerListEntryBase) {
|
pub fn add_new_peer(&mut self, peer: PeerListEntryBase) {
|
||||||
if self.peers.insert(peer.adr, peer.clone()).is_none() {
|
if let None = self.peers.insert(peer.adr, peer) {
|
||||||
self.pruning_idxs
|
self.pruning_idxs
|
||||||
.entry(peer.pruning_seed)
|
.entry(peer.pruning_seed)
|
||||||
.or_insert_with(Vec::new)
|
.or_insert_with(Vec::new)
|
||||||
.push(peer.adr);
|
.push(peer.adr);
|
||||||
|
|
||||||
|
self.ban_id_idxs
|
||||||
|
.entry(peer.adr.ban_identifier())
|
||||||
|
.or_insert_with(Vec::new)
|
||||||
|
.push(peer.adr);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Gets a reference to a peer
|
||||||
pub fn get_peer(&self, peer: &NetworkAddress) -> Option<&PeerListEntryBase> {
|
pub fn get_peer(&self, peer: &NetworkAddress) -> Option<&PeerListEntryBase> {
|
||||||
self.peers.get(peer)
|
self.peers.get(peer)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_peer_by_idx(&self, n: usize) -> Option<&PeerListEntryBase> {
|
/// Returns an iterator over every peer in this peer list
|
||||||
self.peers.iter().nth(n).map(|(_, ret)| ret)
|
pub fn iter_all_peers(&self) -> impl Iterator<Item = &PeerListEntryBase> {
|
||||||
|
self.peers.values()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_random_peer<R: Rng>(&self, r: &mut R) -> Option<&PeerListEntryBase> {
|
/// Returns a random peer.
|
||||||
let len = self.len();
|
/// If the pruning seed is specified then we will get a random peer with
|
||||||
if len == 0 {
|
/// that pruning seed otherwise we will just get a random peer in the whole
|
||||||
None
|
/// list.
|
||||||
} else {
|
pub fn get_random_peer<R: Rng>(
|
||||||
let n = r.gen_range(0..len);
|
&self,
|
||||||
|
r: &mut R,
|
||||||
|
pruning_seed: Option<u32>,
|
||||||
|
) -> Option<&PeerListEntryBase> {
|
||||||
|
if let Some(seed) = pruning_seed {
|
||||||
|
let mut peers = self.get_peers_with_pruning(&seed)?;
|
||||||
|
let len = self.len_by_seed(&seed);
|
||||||
|
if len == 0 {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
let n = r.gen_range(0..len);
|
||||||
|
|
||||||
self.get_peer_by_idx(n)
|
peers.nth(n)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
let mut peers = self.iter_all_peers();
|
||||||
|
let len = self.len();
|
||||||
|
if len == 0 {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
let n = r.gen_range(0..len);
|
||||||
|
|
||||||
|
peers.nth(n)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Returns a mutable reference to a peer.
|
||||||
pub fn get_peer_mut(&mut self, peer: &NetworkAddress) -> Option<&mut PeerListEntryBase> {
|
pub fn get_peer_mut(&mut self, peer: &NetworkAddress) -> Option<&mut PeerListEntryBase> {
|
||||||
self.peers.get_mut(peer)
|
self.peers.get_mut(peer)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Returns true if the list contains this peer.
|
||||||
pub fn contains_peer(&self, peer: &NetworkAddress) -> bool {
|
pub fn contains_peer(&self, peer: &NetworkAddress) -> bool {
|
||||||
self.peers.contains_key(peer)
|
self.peers.contains_key(peer)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_peers_by_pruning_seed(
|
/// Returns an iterator of peer info of peers with a specific pruning seed.
|
||||||
|
fn get_peers_with_pruning(
|
||||||
&self,
|
&self,
|
||||||
seed: &u32,
|
seed: &u32,
|
||||||
) -> Option<impl Iterator<Item = &PeerListEntryBase>> {
|
) -> Option<impl Iterator<Item = &PeerListEntryBase>> {
|
||||||
let addrs = self.pruning_idxs.get(seed)?;
|
let addrs = self.pruning_idxs.get(seed)?;
|
||||||
Some(addrs.iter().filter_map(move |addr| self.peers.get(addr)))
|
|
||||||
|
Some(addrs.iter().map(move |addr| {
|
||||||
|
self.peers
|
||||||
|
.get(addr)
|
||||||
|
.expect("Address must be in peer list if we have an idx for it")
|
||||||
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Removes a peer from the pruning idx
|
||||||
|
///
|
||||||
|
/// MUST NOT BE USED ALONE
|
||||||
fn remove_peer_pruning_idx(&mut self, peer: &PeerListEntryBase) {
|
fn remove_peer_pruning_idx(&mut self, peer: &PeerListEntryBase) {
|
||||||
if let Some(peer_list) = self.pruning_idxs.get_mut(&peer.pruning_seed) {
|
remove_peer_idx(&mut self.pruning_idxs, &peer.pruning_seed, &peer.adr)
|
||||||
if let Some(idx) = peer_list.iter().position(|peer_adr| peer_adr == &peer.adr) {
|
|
||||||
peer_list.remove(idx);
|
|
||||||
} else {
|
|
||||||
unreachable!("This function will only be called when the peer exists.");
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
unreachable!("Pruning seed must exist if a peer has that seed.");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Removes a peer from the ban idx
|
||||||
|
///
|
||||||
|
/// MUST NOT BE USED ALONE
|
||||||
|
fn remove_peer_ban_idx(&mut self, peer: &PeerListEntryBase) {
|
||||||
|
remove_peer_idx(&mut self.ban_id_idxs, &peer.adr.ban_identifier(), &peer.adr)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes a peer from all the indexes
|
||||||
|
///
|
||||||
|
/// MUST NOT BE USED ALONE
|
||||||
|
fn remove_peer_from_all_idxs(&mut self, peer: &PeerListEntryBase) {
|
||||||
|
self.remove_peer_ban_idx(peer);
|
||||||
|
self.remove_peer_pruning_idx(peer);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes a peer from the peer list
|
||||||
pub fn remove_peer(&mut self, peer: &NetworkAddress) -> Option<PeerListEntryBase> {
|
pub fn remove_peer(&mut self, peer: &NetworkAddress) -> Option<PeerListEntryBase> {
|
||||||
let peer_eb = self.peers.remove(peer)?;
|
let peer_eb = self.peers.remove(peer)?;
|
||||||
self.remove_peer_pruning_idx(&peer_eb);
|
self.remove_peer_from_all_idxs(&peer_eb);
|
||||||
Some(peer_eb)
|
Some(peer_eb)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Removes all peers with a specific ban id.
|
||||||
|
pub fn remove_peers_with_ban_id(&mut self, ban_id: &Vec<u8>) {
|
||||||
|
let Some(addresses) = self.ban_id_idxs.get(ban_id) else {
|
||||||
|
// No peers to ban
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
for addr in addresses.clone() {
|
||||||
|
self.remove_peer(&addr);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Tries to reduce the peer list to `new_len`.
|
||||||
|
///
|
||||||
|
/// This function could keep the list bigger than `new_len` if `must_keep_peers`s length
|
||||||
|
/// is larger than new_len, in that case we will remove as much as we can.
|
||||||
pub fn reduce_list(&mut self, must_keep_peers: &HashSet<NetworkAddress>, new_len: usize) {
|
pub fn reduce_list(&mut self, must_keep_peers: &HashSet<NetworkAddress>, new_len: usize) {
|
||||||
if new_len >= self.len() {
|
if new_len >= self.len() {
|
||||||
return;
|
return;
|
||||||
|
@ -118,165 +221,19 @@ impl PeerList {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
/// Remove a peer from an index.
|
||||||
mod tests {
|
fn remove_peer_idx<T: Hash + Eq + PartialEq>(
|
||||||
use std::{collections::HashSet, vec};
|
idx_map: &mut HashMap<T, Vec<NetworkAddress>>,
|
||||||
|
idx: &T,
|
||||||
use monero_wire::{messages::PeerListEntryBase, NetworkAddress};
|
addr: &NetworkAddress,
|
||||||
use rand::Rng;
|
) {
|
||||||
|
if let Some(peer_list) = idx_map.get_mut(idx) {
|
||||||
use super::PeerList;
|
if let Some(idx) = peer_list.iter().position(|peer_adr| peer_adr == addr) {
|
||||||
|
peer_list.swap_remove(idx);
|
||||||
fn make_fake_peer_list(numb_o_peers: usize) -> PeerList {
|
|
||||||
let mut peer_list = vec![PeerListEntryBase::default(); numb_o_peers];
|
|
||||||
for (idx, peer) in peer_list.iter_mut().enumerate() {
|
|
||||||
let NetworkAddress::IPv4(ip) = &mut peer.adr else {panic!("this test requires default to be ipv4")};
|
|
||||||
ip.m_ip += idx as u32;
|
|
||||||
}
|
|
||||||
|
|
||||||
PeerList::new(peer_list)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn make_fake_peer_list_with_random_pruning_seeds(numb_o_peers: usize) -> PeerList {
|
|
||||||
let mut r = rand::thread_rng();
|
|
||||||
|
|
||||||
let mut peer_list = vec![PeerListEntryBase::default(); numb_o_peers];
|
|
||||||
for (idx, peer) in peer_list.iter_mut().enumerate() {
|
|
||||||
let NetworkAddress::IPv4(ip) = &mut peer.adr else {panic!("this test requires default to be ipv4")};
|
|
||||||
ip.m_ip += idx as u32;
|
|
||||||
|
|
||||||
peer.pruning_seed = if r.gen_bool(0.4) {
|
|
||||||
0
|
|
||||||
} else {
|
|
||||||
r.gen_range(384..=391)
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
PeerList::new(peer_list)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn peer_list_reduce_length() {
|
|
||||||
let mut peer_list = make_fake_peer_list(2090);
|
|
||||||
let must_keep_peers = HashSet::new();
|
|
||||||
|
|
||||||
let target_len = 2000;
|
|
||||||
|
|
||||||
peer_list.reduce_list(&must_keep_peers, target_len);
|
|
||||||
|
|
||||||
assert_eq!(peer_list.len(), target_len);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn peer_list_reduce_length_with_peers_we_need() {
|
|
||||||
let mut peer_list = make_fake_peer_list(500);
|
|
||||||
let must_keep_peers = HashSet::from_iter(peer_list.peers.iter().map(|(adr, _)| *adr));
|
|
||||||
|
|
||||||
let target_len = 49;
|
|
||||||
|
|
||||||
peer_list.reduce_list(&must_keep_peers, target_len);
|
|
||||||
|
|
||||||
// we can't remove any of the peers we said we need them all
|
|
||||||
assert_eq!(peer_list.len(), 500);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn peer_list_get_peers_by_pruning_seed() {
|
|
||||||
let mut r = rand::thread_rng();
|
|
||||||
|
|
||||||
let peer_list = make_fake_peer_list_with_random_pruning_seeds(1000);
|
|
||||||
let seed = if r.gen_bool(0.4) {
|
|
||||||
0
|
|
||||||
} else {
|
} else {
|
||||||
r.gen_range(384..=391)
|
unreachable!("This function will only be called when the peer exists.");
|
||||||
};
|
|
||||||
|
|
||||||
let peers_with_seed = peer_list
|
|
||||||
.get_peers_by_pruning_seed(&seed)
|
|
||||||
.expect("If you hit this buy a lottery ticket");
|
|
||||||
|
|
||||||
for peer in peers_with_seed {
|
|
||||||
assert_eq!(peer.pruning_seed, seed);
|
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
assert_eq!(peer_list.len(), 1000);
|
unreachable!("Index must exist if a peer has that index");
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn peer_list_remove_specific_peer() {
|
|
||||||
let mut peer_list = make_fake_peer_list_with_random_pruning_seeds(100);
|
|
||||||
|
|
||||||
// generate peer at a random point in the list
|
|
||||||
let mut peer = NetworkAddress::default();
|
|
||||||
let NetworkAddress::IPv4(ip) = &mut peer else {panic!("this test requires default to be ipv4")};
|
|
||||||
ip.m_ip += 50;
|
|
||||||
|
|
||||||
assert!(peer_list.remove_peer(&peer).is_some());
|
|
||||||
|
|
||||||
let pruning_idxs = peer_list.pruning_idxs;
|
|
||||||
let peers = peer_list.peers;
|
|
||||||
|
|
||||||
for (_, addrs) in pruning_idxs {
|
|
||||||
addrs.iter().for_each(|adr| assert!(adr != &peer))
|
|
||||||
}
|
|
||||||
|
|
||||||
assert!(!peers.contains_key(&peer));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn peer_list_pruning_idxs_are_correct() {
|
|
||||||
let peer_list = make_fake_peer_list_with_random_pruning_seeds(100);
|
|
||||||
let mut total_len = 0;
|
|
||||||
|
|
||||||
for (seed, list) in peer_list.pruning_idxs {
|
|
||||||
for peer in list.iter() {
|
|
||||||
assert_eq!(peer_list.peers.get(peer).unwrap().pruning_seed, seed);
|
|
||||||
total_len += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
assert_eq!(total_len, peer_list.peers.len())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn peer_list_add_new_peer() {
|
|
||||||
let mut peer_list = make_fake_peer_list(10);
|
|
||||||
let mut new_peer = PeerListEntryBase::default();
|
|
||||||
let NetworkAddress::IPv4(ip) = &mut new_peer.adr else {panic!("this test requires default to be ipv4")};
|
|
||||||
ip.m_ip += 50;
|
|
||||||
|
|
||||||
peer_list.add_new_peer(new_peer.clone());
|
|
||||||
|
|
||||||
assert_eq!(peer_list.len(), 11);
|
|
||||||
assert_eq!(peer_list.get_peer(&new_peer.adr), Some(&new_peer));
|
|
||||||
assert!(peer_list
|
|
||||||
.pruning_idxs
|
|
||||||
.get(&new_peer.pruning_seed)
|
|
||||||
.unwrap()
|
|
||||||
.contains(&new_peer.adr));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn peer_list_add_existing_peer() {
|
|
||||||
let mut peer_list = make_fake_peer_list(10);
|
|
||||||
let existing_peer = peer_list
|
|
||||||
.get_peer(&NetworkAddress::default())
|
|
||||||
.unwrap()
|
|
||||||
.clone();
|
|
||||||
|
|
||||||
peer_list.add_new_peer(existing_peer.clone());
|
|
||||||
|
|
||||||
assert_eq!(peer_list.len(), 10);
|
|
||||||
assert_eq!(peer_list.get_peer(&existing_peer.adr), Some(&existing_peer));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn peer_list_get_non_existent_peer() {
|
|
||||||
let peer_list = make_fake_peer_list(10);
|
|
||||||
let mut non_existent_peer = NetworkAddress::default();
|
|
||||||
let NetworkAddress::IPv4(ip) = &mut non_existent_peer else {panic!("this test requires default to be ipv4")};
|
|
||||||
ip.m_ip += 50;
|
|
||||||
|
|
||||||
assert_eq!(peer_list.get_peer(&non_existent_peer), None);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
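For orientation, a minimal usage sketch of the new pruning-seed-aware API above (not part of the commit; the setup style mirrors the tests file that follows):

    // Build a small list where every peer has the same pruning seed.
    let mut peers = vec![PeerListEntryBase::default(); 10];
    for (idx, peer) in peers.iter_mut().enumerate() {
        let NetworkAddress::IPv4(ip) = &mut peer.adr else { panic!("default should be ipv4") };
        ip.m_ip += idx as u32;   // unique address per entry
        peer.pruning_seed = 384; // assumed seed value, as in the tests
    }
    let list = PeerList::new(peers);

    let mut rng = rand::thread_rng();
    // Some(seed) draws only from peers indexed under that pruning seed,
    // None draws from the whole list.
    assert!(list.get_random_peer(&mut rng, Some(384)).is_some());
    assert_eq!(list.len_by_seed(&384), 10);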
p2p/src/address_book/address_book/peer_list/tests.rs (new file, 176 lines)

use std::{collections::HashSet, vec};

use monero_wire::{messages::PeerListEntryBase, NetworkAddress};
use rand::Rng;

use super::PeerList;

fn make_fake_peer_list(numb_o_peers: usize) -> PeerList {
    let mut peer_list = vec![PeerListEntryBase::default(); numb_o_peers];
    for (idx, peer) in peer_list.iter_mut().enumerate() {
        let NetworkAddress::IPv4(ip) = &mut peer.adr else {panic!("this test requires default to be ipv4")};
        ip.m_ip += idx as u32;
    }

    PeerList::new(peer_list)
}

fn make_fake_peer_list_with_random_pruning_seeds(numb_o_peers: usize) -> PeerList {
    let mut r = rand::thread_rng();

    let mut peer_list = vec![PeerListEntryBase::default(); numb_o_peers];
    for (idx, peer) in peer_list.iter_mut().enumerate() {
        let NetworkAddress::IPv4(ip) = &mut peer.adr else {panic!("this test requires default to be ipv4")};
        ip.m_ip += idx as u32;
        ip.m_port += r.gen_range(0..15);

        peer.pruning_seed = if r.gen_bool(0.4) {
            0
        } else {
            r.gen_range(384..=391)
        };
    }

    PeerList::new(peer_list)
}

#[test]
fn peer_list_reduce_length() {
    let mut peer_list = make_fake_peer_list(2090);
    let must_keep_peers = HashSet::new();

    let target_len = 2000;

    peer_list.reduce_list(&must_keep_peers, target_len);

    assert_eq!(peer_list.len(), target_len);
}

#[test]
fn peer_list_reduce_length_with_peers_we_need() {
    let mut peer_list = make_fake_peer_list(500);
    let must_keep_peers = HashSet::from_iter(peer_list.peers.iter().map(|(adr, _)| *adr));

    let target_len = 49;

    peer_list.reduce_list(&must_keep_peers, target_len);

    // we can't remove any of the peers we said we need them all
    assert_eq!(peer_list.len(), 500);
}

#[test]
fn peer_list_get_peers_by_pruning_seed() {
    let mut r = rand::thread_rng();

    let peer_list = make_fake_peer_list_with_random_pruning_seeds(1000);
    let seed = if r.gen_bool(0.4) {
        0
    } else {
        r.gen_range(384..=391)
    };

    let peers_with_seed = peer_list
        .get_peers_with_pruning(&seed)
        .expect("If you hit this buy a lottery ticket");

    for peer in peers_with_seed {
        assert_eq!(peer.pruning_seed, seed);
    }

    assert_eq!(peer_list.len(), 1000);
}

#[test]
fn peer_list_remove_specific_peer() {
    let mut peer_list = make_fake_peer_list_with_random_pruning_seeds(100);

    let peer = peer_list
        .get_random_peer(&mut rand::thread_rng(), None)
        .unwrap()
        .clone();

    assert!(peer_list.remove_peer(&peer.adr).is_some());

    let pruning_idxs = peer_list.pruning_idxs;
    let peers = peer_list.peers;

    for (_, addrs) in pruning_idxs {
        addrs.iter().for_each(|adr| assert_ne!(adr, &peer.adr))
    }

    assert!(!peers.contains_key(&peer.adr));
}

#[test]
fn peer_list_pruning_idxs_are_correct() {
    let peer_list = make_fake_peer_list_with_random_pruning_seeds(100);
    let mut total_len = 0;

    for (seed, list) in peer_list.pruning_idxs {
        for peer in list.iter() {
            assert_eq!(peer_list.peers.get(peer).unwrap().pruning_seed, seed);
            total_len += 1;
        }
    }

    assert_eq!(total_len, peer_list.peers.len())
}

#[test]
fn peer_list_add_new_peer() {
    let mut peer_list = make_fake_peer_list(10);
    let mut new_peer = PeerListEntryBase::default();
    let NetworkAddress::IPv4(ip) = &mut new_peer.adr else {panic!("this test requires default to be ipv4")};
    ip.m_ip += 50;

    peer_list.add_new_peer(new_peer.clone());

    assert_eq!(peer_list.len(), 11);
    assert_eq!(peer_list.get_peer(&new_peer.adr), Some(&new_peer));
    assert!(peer_list
        .pruning_idxs
        .get(&new_peer.pruning_seed)
        .unwrap()
        .contains(&new_peer.adr));
}

#[test]
fn peer_list_add_existing_peer() {
    let mut peer_list = make_fake_peer_list(10);
    let existing_peer = peer_list
        .get_peer(&NetworkAddress::default())
        .unwrap()
        .clone();

    peer_list.add_new_peer(existing_peer.clone());

    assert_eq!(peer_list.len(), 10);
    assert_eq!(peer_list.get_peer(&existing_peer.adr), Some(&existing_peer));
}

#[test]
fn peer_list_get_non_existent_peer() {
    let peer_list = make_fake_peer_list(10);
    let mut non_existent_peer = NetworkAddress::default();
    let NetworkAddress::IPv4(ip) = &mut non_existent_peer else {panic!("this test requires default to be ipv4")};
    ip.m_ip += 50;

    assert_eq!(peer_list.get_peer(&non_existent_peer), None);
}

#[test]
fn peer_list_ban_peers() {
    let mut peer_list = make_fake_peer_list_with_random_pruning_seeds(100);
    let peer = peer_list
        .get_random_peer(&mut rand::thread_rng(), None)
        .unwrap();
    let ban_id = peer.adr.ban_identifier();
    assert!(peer_list.contains_peer(&peer.adr));
    assert_ne!(peer_list.ban_id_idxs.get(&ban_id).unwrap().len(), 0);
    peer_list.remove_peers_with_ban_id(&ban_id);
    assert_eq!(peer_list.ban_id_idxs.get(&ban_id).unwrap().len(), 0);
    for (addr, _) in peer_list.peers {
        assert_ne!(addr.ban_identifier(), ban_id);
    }
}
p2p/src/address_book/address_book/tests.rs (new file, 81 lines)

use super::*;
use crate::NetZoneBasicNodeData;
use monero_wire::network_address::IPv4Address;
use rand::Rng;

fn create_random_net_address<R: Rng>(r: &mut R) -> NetworkAddress {
    NetworkAddress::IPv4(IPv4Address {
        m_ip: r.gen(),
        m_port: r.gen(),
    })
}

fn create_random_net_addr_vec<R: Rng>(r: &mut R, len: usize) -> Vec<NetworkAddress> {
    let mut ret = Vec::with_capacity(len);
    for i in 0..len {
        ret.push(create_random_net_address(r));
    }
    ret
}

fn create_random_peer<R: Rng>(r: &mut R) -> PeerListEntryBase {
    PeerListEntryBase {
        adr: create_random_net_address(r),
        pruning_seed: r.gen_range(384..=391),
        id: PeerID(r.gen()),
        last_seen: r.gen(),
        rpc_port: r.gen(),
        rpc_credits_per_hash: r.gen(),
    }
}

fn create_random_peer_vec<R: Rng>(r: &mut R, len: usize) -> Vec<PeerListEntryBase> {
    let mut ret = Vec::with_capacity(len);
    for i in 0..len {
        ret.push(create_random_peer(r));
    }
    ret
}

#[derive(Clone)]
pub struct MockPeerStore;

#[async_trait::async_trait]
impl P2PStore for MockPeerStore {
    async fn basic_node_data(&mut self) -> Result<Option<NetZoneBasicNodeData>, &'static str> {
        unimplemented!()
    }
    async fn save_basic_node_data(
        &mut self,
        node_id: &NetZoneBasicNodeData,
    ) -> Result<(), &'static str> {
        unimplemented!()
    }
    async fn load_peers(
        &mut self,
        zone: NetZone,
    ) -> Result<
        (
            Vec<PeerListEntryBase>,
            Vec<PeerListEntryBase>,
            Vec<NetworkAddress>,
        ),
        &'static str,
    > {
        let mut r = rand::thread_rng();
        Ok((
            create_random_peer_vec(&mut r, 300),
            create_random_peer_vec(&mut r, 1500),
            create_random_net_addr_vec(&mut r, 50),
        ))
    }
    async fn save_peers(
        &mut self,
        zone: NetZone,
        white: Vec<&PeerListEntryBase>,
        gray: Vec<&PeerListEntryBase>,
        anchor: Vec<&NetworkAddress>,
    ) -> Result<(), &'static str> {
        todo!()
    }
}
p2p/src/address_book/connection_handle.rs (new file, 110 lines)

//! This module contains the address book [`Connection`](crate::peer::connection::Connection) handle
//!
//! # Why do we need a handle between the address book and connection task
//!
//! When banning a peer we need to tell the connection task to close and
//! when we close a connection we need to remove it from our connection
//! and anchor list.
//!
//!
use futures::channel::oneshot;
use tokio_util::sync::CancellationToken;

/// A message sent to tell the address book that a peer has disconnected.
pub struct PeerConnectionClosed;

/// The connection side of the address book to connection
/// communication.
#[derive(Debug)]
pub struct AddressBookConnectionHandle {
    connection_closed: Option<oneshot::Sender<PeerConnectionClosed>>,
    close: CancellationToken,
}

impl AddressBookConnectionHandle {
    /// Returns true if the address book has told us to kill the
    /// connection.
    pub fn is_canceled(&self) -> bool {
        self.close.is_cancelled()
    }
}

impl Drop for AddressBookConnectionHandle {
    fn drop(&mut self) {
        let connection_closed = std::mem::replace(&mut self.connection_closed, None).unwrap();
        let _ = connection_closed.send(PeerConnectionClosed);
    }
}

/// The address book side of the address book to connection
/// communication.
#[derive(Debug)]
pub struct ConnectionAddressBookHandle {
    connection_closed: oneshot::Receiver<PeerConnectionClosed>,
    killer: CancellationToken,
}

impl ConnectionAddressBookHandle {
    /// Checks if the connection task has closed, returns
    /// true if the task has closed
    pub fn connection_closed(&mut self) -> bool {
        let Ok(mes) = self.connection_closed.try_recv() else {
            panic!("This must not be called again after returning true and the connection task must tell us if a connection is closed")
        };
        match mes {
            None => false,
            Some(_) => true,
        }
    }

    /// Ends the connection task, the caller of this function should
    /// wait to be told the connection has closed by [`check_if_connection_closed`](Self::check_if_connection_closed)
    /// before acting on the closed connection.
    pub fn kill_connection(&self) {
        self.killer.cancel()
    }
}

/// Creates a new handle pair that can be given to the connection task and
/// address book respectively.
pub fn new_address_book_connection_handle(
) -> (AddressBookConnectionHandle, ConnectionAddressBookHandle) {
    let (tx, rx) = oneshot::channel();
    let token = CancellationToken::new();

    let ab_c_h = AddressBookConnectionHandle {
        connection_closed: Some(tx),
        close: token.clone(),
    };
    let c_ab_h = ConnectionAddressBookHandle {
        connection_closed: rx,
        killer: token,
    };

    (ab_c_h, c_ab_h)
}

#[cfg(test)]
mod tests {
    use crate::address_book::connection_handle::new_address_book_connection_handle;

    #[test]
    fn close_connection_from_address_book() {
        let (conn_side, mut addr_side) = new_address_book_connection_handle();

        assert!(!conn_side.is_canceled());
        assert!(!addr_side.connection_closed());
        addr_side.kill_connection();
        assert!(conn_side.is_canceled());
    }

    #[test]
    fn close_connection_from_connection() {
        let (conn_side, mut addr_side) = new_address_book_connection_handle();

        assert!(!conn_side.is_canceled());
        assert!(!addr_side.connection_closed());
        drop(conn_side);
        assert!(addr_side.connection_closed());
    }
}
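A rough sketch of how the pair is meant to be split between the two tasks (not part of the commit; it just restates the tests above outside of a test harness):

    let (conn_side, mut book_side) = new_address_book_connection_handle();

    // Address book side: ban or forget the peer by cancelling the connection task.
    book_side.kill_connection();

    // Connection task side: poll its handle each loop iteration and exit when asked;
    // dropping the handle is what notifies the address book that the connection closed.
    if conn_side.is_canceled() {
        drop(conn_side);
    }

    // Address book side: observe the closure and clean up connection/anchor entries.
    assert!(book_side.connection_closed());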
p2p/src/config.rs (new file, 78 lines)

use cuprate_common::Network;
use monero_wire::messages::{common::PeerSupportFlags, BasicNodeData, PeerID};

use crate::{
    constants::{
        CUPRATE_SUPPORT_FLAGS, DEFAULT_IN_PEERS, DEFAULT_LOAD_OUT_PEERS_MULTIPLIER,
        DEFAULT_TARGET_OUT_PEERS, MAX_GRAY_LIST_PEERS, MAX_WHITE_LIST_PEERS,
    },
    NodeID,
};

#[derive(Debug, Clone, Copy)]
pub struct Config {
    /// Port
    my_port: u32,
    /// The Network
    network: Network,
    /// RPC Port
    rpc_port: u16,

    target_out_peers: usize,
    out_peers_load_multiplier: usize,
    max_in_peers: usize,
    max_white_peers: usize,
    max_gray_peers: usize,
}

impl Default for Config {
    fn default() -> Self {
        Config {
            my_port: 18080,
            network: Network::MainNet,
            rpc_port: 18081,
            target_out_peers: DEFAULT_TARGET_OUT_PEERS,
            out_peers_load_multiplier: DEFAULT_LOAD_OUT_PEERS_MULTIPLIER,
            max_in_peers: DEFAULT_IN_PEERS,
            max_white_peers: MAX_WHITE_LIST_PEERS,
            max_gray_peers: MAX_GRAY_LIST_PEERS,
        }
    }
}

impl Config {
    pub fn basic_node_data(&self, peer_id: PeerID) -> BasicNodeData {
        BasicNodeData {
            my_port: self.my_port,
            network_id: self.network.network_id(),
            peer_id,
            support_flags: CUPRATE_SUPPORT_FLAGS,
            rpc_port: self.rpc_port,
            rpc_credits_per_hash: 0,
        }
    }

    pub fn peerset_total_connection_limit(&self) -> usize {
        self.target_out_peers * self.out_peers_load_multiplier + self.max_in_peers
    }

    pub fn network(&self) -> Network {
        self.network
    }

    pub fn max_white_peers(&self) -> usize {
        self.max_white_peers
    }

    pub fn max_gray_peers(&self) -> usize {
        self.max_gray_peers
    }

    pub fn public_port(&self) -> u32 {
        self.my_port
    }

    pub fn public_rpc_port(&self) -> u16 {
        self.rpc_port
    }
}
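A quick worked example of the connection-limit arithmetic above, using the defaults pulled in from constants.rs further down (target_out_peers = 20, load multiplier = 3, max_in_peers = 20), so the limit works out to 20 * 3 + 20 = 80:

    let config = Config::default();
    assert_eq!(config.peerset_total_connection_limit(), 80);
    assert_eq!(config.public_port(), 18080);
    assert_eq!(config.public_rpc_port(), 18081);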
p2p/src/connection_counter.rs (new file, 130 lines)

//! Counting active connections used by Cuprate.
//!
//! These types can be used to count any kind of active resource.
//! But they are currently used to track the number of open connections.

use std::{fmt, sync::Arc};

use tokio::sync::{OwnedSemaphorePermit, Semaphore};

/// A counter for active connections.
///
/// Creates a [`ConnectionTracker`] to track each active connection.
/// When these trackers are dropped, the counter gets notified.
pub struct ActiveConnectionCounter {
    /// The limit for this type of connection, for diagnostics only.
    /// The caller must enforce the limit by ignoring, delaying, or dropping connections.
    limit: usize,

    /// The label for this connection counter, typically its type.
    label: Arc<str>,

    semaphore: Arc<Semaphore>,
}

impl fmt::Debug for ActiveConnectionCounter {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ActiveConnectionCounter")
            .field("label", &self.label)
            .field("count", &self.count())
            .field("limit", &self.limit)
            .finish()
    }
}

impl ActiveConnectionCounter {
    /// Create and return a new active connection counter.
    pub fn new_counter() -> Self {
        Self::new_counter_with(Semaphore::MAX_PERMITS, "Active Connections")
    }

    /// Create and return a new active connection counter with `limit` and `label`.
    /// The caller must check and enforce limits using [`update_count()`](Self::update_count).
    pub fn new_counter_with<S: ToString>(limit: usize, label: S) -> Self {
        let label = label.to_string();

        Self {
            limit,
            label: label.into(),
            semaphore: Arc::new(Semaphore::new(limit)),
        }
    }

    /// Create and return a new [`ConnectionTracker`], using a permit from the semaphore,
    /// SAFETY:
    /// This function will panic if the semaphore doesn't have anymore permits.
    pub fn track_connection(&mut self) -> ConnectionTracker {
        ConnectionTracker::new(self)
    }

    pub fn count(&self) -> usize {
        let count = self
            .limit
            .checked_sub(self.semaphore.available_permits())
            .expect("Limit is less than available connection permits");

        tracing::trace!(
            open_connections = ?count,
            limit = ?self.limit,
            label = ?self.label,
        );

        count
    }

    pub fn available_permits(&self) -> usize {
        self.semaphore.available_permits()
    }
}

/// A per-connection tracker.
///
/// [`ActiveConnectionCounter`] creates a tracker instance for each active connection.
pub struct ConnectionTracker {
    /// The permit for this connection, updates the semaphore when dropped.
    permit: OwnedSemaphorePermit,

    /// The label for this connection counter, typically its type.
    label: Arc<str>,
}

impl fmt::Debug for ConnectionTracker {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("ConnectionTracker")
            .field(&self.label)
            .finish()
    }
}

impl ConnectionTracker {
    /// Create and return a new active connection tracker, and add 1 to `counter`.
    /// All connection trackers share a label with their connection counter.
    ///
    /// When the returned tracker is dropped, `counter` will be notified.
    ///
    /// SAFETY:
    /// This function will panic if the [`ActiveConnectionCounter`] doesn't have anymore permits.
    fn new(counter: &mut ActiveConnectionCounter) -> Self {
        tracing::debug!(
            open_connections = ?counter.count(),
            limit = ?counter.limit,
            label = ?counter.label,
            "opening a new peer connection",
        );

        Self {
            permit: counter.semaphore.clone().try_acquire_owned().unwrap(),
            label: counter.label.clone(),
        }
    }
}

impl Drop for ConnectionTracker {
    fn drop(&mut self) {
        tracing::debug!(
            label = ?self.label,
            "A peer connection has closed",
        );
        // the permit is automatically dropped
    }
}
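A short sketch of the intended counting flow (not part of the commit): take one tracker per accepted connection and let Drop return the permit; enforcing the limit before calling track_connection is the caller's job.

    let mut counter = ActiveConnectionCounter::new_counter_with(2, "inbound");

    let t1 = counter.track_connection();
    let t2 = counter.track_connection();
    assert_eq!(counter.count(), 2);
    assert_eq!(counter.available_permits(), 0);
    // A third track_connection() here would panic: no permits are left.

    drop(t1); // closing a connection frees its permit
    assert_eq!(counter.count(), 1);
    drop(t2);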
p2p/src/connection_handle.rs (new file, 98 lines)

//!
//! # Why do we need a handle between the address book and connection task
//!
//! When banning a peer we need to tell the connection task to close and
//! when we close a connection we need to tell the address book.
//!
//!
use std::time::Duration;

use futures::channel::mpsc;
use futures::SinkExt;
use tokio_util::sync::CancellationToken;

use crate::connection_counter::ConnectionTracker;

#[derive(Default, Debug)]
pub struct HandleBuilder {
    tracker: Option<ConnectionTracker>,
}

impl HandleBuilder {
    pub fn set_tracker(&mut self, tracker: ConnectionTracker) {
        self.tracker = Some(tracker)
    }

    pub fn build(self) -> (DisconnectSignal, ConnectionHandle, PeerHandle) {
        let token = CancellationToken::new();
        let (tx, rx) = mpsc::channel(0);

        (
            DisconnectSignal {
                token: token.clone(),
                tracker: self.tracker.expect("Tracker was not set!"),
            },
            ConnectionHandle {
                token: token.clone(),
                ban: rx,
            },
            PeerHandle { ban: tx },
        )
    }
}

pub struct BanPeer(pub Duration);

/// A struct given to the connection task.
pub struct DisconnectSignal {
    token: CancellationToken,
    tracker: ConnectionTracker,
}

impl DisconnectSignal {
    pub fn should_shutdown(&self) -> bool {
        self.token.is_cancelled()
    }
    pub fn connection_closed(&self) {
        self.token.cancel()
    }
}

impl Drop for DisconnectSignal {
    fn drop(&mut self) {
        self.token.cancel()
    }
}

/// A handle given to a task that needs to cancel this connection.
pub struct ConnectionHandle {
    token: CancellationToken,
    ban: mpsc::Receiver<BanPeer>,
}

impl ConnectionHandle {
    pub fn is_closed(&self) -> bool {
        self.token.is_cancelled()
    }
    pub fn check_should_ban(&mut self) -> Option<BanPeer> {
        match self.ban.try_next() {
            Ok(res) => res,
            Err(_) => None,
        }
    }
    pub fn send_close_signal(&self) {
        self.token.cancel()
    }
}

/// A handle given to a task that needs to be able to ban a connection.
#[derive(Clone)]
pub struct PeerHandle {
    ban: mpsc::Sender<BanPeer>,
}

impl PeerHandle {
    pub fn ban_peer(&mut self, duration: Duration) {
        let _ = self.ban.send(BanPeer(duration));
    }
}
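A minimal sketch of how one handle triple per connection might be wired up (not part of the commit; the tracker comes from an ActiveConnectionCounter as in connection_counter.rs above, and imports are omitted):

    let mut counter = ActiveConnectionCounter::new_counter_with(64, "p2p");

    let mut builder = HandleBuilder::default();
    builder.set_tracker(counter.track_connection());
    let (disconnect_signal, connection_handle, mut peer_handle) = builder.build();

    // e.g. the address book asks for a ban, and whoever owns the
    // ConnectionHandle tells the connection task to shut down.
    peer_handle.ban_peer(Duration::from_secs(600));
    connection_handle.send_close_signal();
    assert!(disconnect_signal.should_shutdown());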
p2p/src/constants.rs (new file, 58 lines)

use core::time::Duration;

use monero_wire::messages::common::PeerSupportFlags;

pub const CUPRATE_SUPPORT_FLAGS: PeerSupportFlags =
    PeerSupportFlags::get_support_flag_fluffy_blocks();

pub const CUPRATE_MINIMUM_SUPPORT_FLAGS: PeerSupportFlags =
    PeerSupportFlags::get_support_flag_fluffy_blocks();

pub const DEFAULT_TARGET_OUT_PEERS: usize = 20;

pub const DEFAULT_LOAD_OUT_PEERS_MULTIPLIER: usize = 3;

pub const DEFAULT_IN_PEERS: usize = 20;

pub const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(5);

pub const ADDRESS_BOOK_SAVE_INTERVAL: Duration = Duration::from_secs(60);

pub const ADDRESS_BOOK_BUFFER_SIZE: usize = 3;

pub const PEERSET_BUFFER_SIZE: usize = 3;

/// The maximum size of the address books white list.
/// This number is copied from monerod.
pub const MAX_WHITE_LIST_PEERS: usize = 1000;

/// The maximum size of the address books gray list.
/// This number is copied from monerod.
pub const MAX_GRAY_LIST_PEERS: usize = 5000;

/// The max amount of peers that can be sent in one
/// message.
pub const P2P_MAX_PEERS_IN_HANDSHAKE: usize = 250;

/// The timeout for sending a message to a remote peer,
/// and receiving a response from a remote peer.
pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(20);

/// The default RTT estimate for peer responses.
///
/// We choose a high value for the default RTT, so that new peers must prove they
/// are fast, before we prefer them to other peers. This is particularly
/// important on testnet, which has a small number of peers, which are often
/// slow.
///
/// Make the default RTT slightly higher than the request timeout.
pub const EWMA_DEFAULT_RTT: Duration = Duration::from_secs(REQUEST_TIMEOUT.as_secs() + 1);

/// The decay time for the EWMA response time metric used for load balancing.
///
/// This should be much larger than the `SYNC_RESTART_TIMEOUT`, so we choose
/// better peers when we restart the sync.
pub const EWMA_DECAY_TIME_NANOS: f64 = 200.0 * NANOS_PER_SECOND;

/// The number of nanoseconds in one second.
const NANOS_PER_SECOND: f64 = 1_000_000_000.0;
@@ -1,3 +1,81 @@
 pub mod address_book;
+pub mod config;
+pub mod connection_counter;
+mod connection_handle;
+mod constants;
 pub mod peer;
 mod protocol;
+
+pub use config::Config;
+use rand::Rng;
+
+#[derive(Debug, Clone)]
+pub struct NetZoneBasicNodeData {
+    public: monero_wire::BasicNodeData,
+    tor: monero_wire::BasicNodeData,
+    i2p: monero_wire::BasicNodeData,
+}
+
+impl NetZoneBasicNodeData {
+    pub fn basic_node_data(&self, net_zone: &monero_wire::NetZone) -> monero_wire::BasicNodeData {
+        match net_zone {
+            monero_wire::NetZone::Public => self.public.clone(),
+            _ => todo!(),
+        }
+    }
+    pub fn new(config: &Config, node_id: &NodeID) -> Self {
+        let bnd = monero_wire::BasicNodeData {
+            my_port: config.public_port(),
+            network_id: config.network().network_id(),
+            peer_id: node_id.public,
+            support_flags: constants::CUPRATE_SUPPORT_FLAGS,
+            rpc_port: config.public_rpc_port(),
+            rpc_credits_per_hash: 0,
+        };
+
+        // obviously this is wrong, i will change when i add tor support
+        NetZoneBasicNodeData {
+            public: bnd.clone(),
+            tor: bnd.clone(),
+            i2p: bnd,
+        }
+    }
+}
+
+#[async_trait::async_trait]
+pub trait P2PStore: Clone + Send + 'static {
+    /// Loads the peers from the peer store.
+    /// returns (in order):
+    /// the white list,
+    /// the gray list,
+    /// the anchor list,
+    /// the ban list
+    async fn load_peers(
+        &mut self,
+        zone: monero_wire::NetZone,
+    ) -> Result<
+        (
+            Vec<monero_wire::PeerListEntryBase>, // white list
+            Vec<monero_wire::PeerListEntryBase>, // gray list
+            Vec<monero_wire::NetworkAddress>,    // anchor list
+            // Vec<(monero_wire::NetworkAddress, chrono::NaiveDateTime)>, // ban list
+        ),
+        &'static str,
+    >;
+
+    async fn save_peers(
+        &mut self,
+        zone: monero_wire::NetZone,
+        white: Vec<&monero_wire::PeerListEntryBase>,
+        gray: Vec<&monero_wire::PeerListEntryBase>,
+        anchor: Vec<&monero_wire::NetworkAddress>,
+        // bans: Vec<(&monero_wire::NetworkAddress, &chrono::NaiveDateTime)>, // ban lists
+    ) -> Result<(), &'static str>;
+
+    async fn basic_node_data(&mut self) -> Result<Option<NetZoneBasicNodeData>, &'static str>;
+
+    async fn save_basic_node_data(
+        &mut self,
+        node_id: &NetZoneBasicNodeData,
+    ) -> Result<(), &'static str>;
+}
@@ -1,42 +1,16 @@
 pub mod client;
 pub mod connection;
+pub mod connector;
 pub mod handshaker;
+pub mod load_tracked_client;
+
+mod error;
 #[cfg(test)]
 mod tests;
 
-use monero_wire::levin::BucketError;
-use thiserror::Error;
-
-#[derive(Debug, Error, Clone, Copy)]
-pub enum RequestServiceError {}
-
-#[derive(Debug, Error, Clone, Copy)]
-pub enum PeerError {
-    #[error("Peer is on a different network")]
-    PeerIsOnAnotherNetwork,
-    #[error("Peer sent an unexpected response")]
-    PeerSentUnSolicitedResponse,
-    #[error("Internal service did not respond when required")]
-    InternalServiceDidNotRespond,
-    #[error("Connection to peer has been terminated")]
-    PeerConnectionClosed,
-    #[error("The Client `internal` channel was closed")]
-    ClientChannelClosed,
-    #[error("The Peer sent an unexpected response")]
-    PeerSentUnexpectedResponse,
-    #[error("The peer sent a bad response: {0}")]
-    ResponseError(&'static str),
-    #[error("Internal service error: {0}")]
-    InternalService(#[from] RequestServiceError),
-    #[error("Internal peer sync channel closed")]
-    InternalPeerSyncChannelClosed,
-    #[error("Levin Error")]
-    LevinError, // remove me, this is just temporary
-}
-
-impl From<BucketError> for PeerError {
-    fn from(_: BucketError) -> Self {
-        PeerError::LevinError
-    }
-}
+pub use client::Client;
+pub use client::ConnectionInfo;
+pub use connection::Connection;
+pub use connector::{Connector, OutboundConnectorRequest};
+pub use handshaker::Handshaker;
+pub use load_tracked_client::LoadTrackedClient;
@@ -1,45 +1,150 @@
 use std::pin::Pin;
+use std::sync::atomic::AtomicU64;
+use std::task::{Context, Poll};
 use std::{future::Future, sync::Arc};
 
-use crate::protocol::{InternalMessageRequest, InternalMessageResponse};
 use futures::{
     channel::{mpsc, oneshot},
     FutureExt,
 };
-use monero_wire::messages::PeerID;
+use tokio::task::JoinHandle;
+use tower::BoxError;
 
+use cuprate_common::PruningSeed;
 use monero_wire::{messages::common::PeerSupportFlags, NetworkAddress};
 
-use super::{connection::ClientRequest, PeerError};
+use super::{
+    connection::ClientRequest,
+    error::{ErrorSlot, PeerError, SharedPeerError},
+    PeerError,
+};
+use crate::connection_handle::PeerHandle;
+use crate::protocol::{InternalMessageRequest, InternalMessageResponse};
 
 pub struct ConnectionInfo {
-    pub addr: NetworkAddress,
     pub support_flags: PeerSupportFlags,
-    /// Peer ID
-    pub peer_id: PeerID,
+    pub pruning_seed: PruningSeed,
+    pub handle: PeerHandle,
     pub rpc_port: u16,
     pub rpc_credits_per_hash: u32,
 }
 
 pub struct Client {
     pub connection_info: Arc<ConnectionInfo>,
+    /// Used to shut down the corresponding heartbeat.
+    /// This is always Some except when we take it on drop.
+    heartbeat_shutdown_tx: Option<oneshot::Sender<()>>,
     server_tx: mpsc::Sender<ClientRequest>,
+    connection_task: JoinHandle<()>,
+    heartbeat_task: JoinHandle<()>,
+
+    error_slot: ErrorSlot,
 }
 
 impl Client {
     pub fn new(
         connection_info: Arc<ConnectionInfo>,
+        heartbeat_shutdown_tx: oneshot::Sender<()>,
         server_tx: mpsc::Sender<ClientRequest>,
+        connection_task: JoinHandle<()>,
+        heartbeat_task: JoinHandle<()>,
+        error_slot: ErrorSlot,
     ) -> Self {
         Client {
             connection_info,
+            heartbeat_shutdown_tx: Some(heartbeat_shutdown_tx),
             server_tx,
+            connection_task,
+            heartbeat_task,
+            error_slot,
+        }
+    }
+
+    /// Check if this connection's heartbeat task has exited.
+    #[allow(clippy::unwrap_in_result)]
+    fn check_heartbeat(&mut self, cx: &mut Context<'_>) -> Result<(), SharedPeerError> {
+        let is_canceled = self
+            .heartbeat_shutdown_tx
+            .as_mut()
+            .expect("only taken on drop")
+            .poll_canceled(cx)
+            .is_ready();
+
+        if is_canceled {
+            return self.set_task_exited_error(
+                "heartbeat",
+                PeerError::HeartbeatTaskExited("Task was cancelled".to_string()),
+            );
+        }
+
+        match self.heartbeat_task.poll_unpin(cx) {
+            Poll::Pending => {
+                // Heartbeat task is still running.
+                Ok(())
+            }
+            Poll::Ready(Ok(Ok(_))) => {
+                // Heartbeat task stopped unexpectedly, without panic or error.
+                self.set_task_exited_error(
+                    "heartbeat",
+                    PeerError::HeartbeatTaskExited(
+                        "Heartbeat task stopped unexpectedly".to_string(),
+                    ),
+                )
+            }
+            Poll::Ready(Ok(Err(error))) => {
+                // Heartbeat task stopped unexpectedly, with error.
+                self.set_task_exited_error(
+                    "heartbeat",
+                    PeerError::HeartbeatTaskExited(error.to_string()),
+                )
+            }
+            Poll::Ready(Err(error)) => {
+                // Heartbeat task was cancelled.
+                if error.is_cancelled() {
+                    self.set_task_exited_error(
+                        "heartbeat",
+                        PeerError::HeartbeatTaskExited("Task was cancelled".to_string()),
+                    )
+                }
+                // Heartbeat task stopped with panic.
+                else if error.is_panic() {
+                    panic!("heartbeat task has panicked: {error}");
+                }
+                // Heartbeat task stopped with error.
+                else {
+                    self.set_task_exited_error(
+                        "heartbeat",
+                        PeerError::HeartbeatTaskExited(error.to_string()),
+                    )
+                }
+            }
+        }
+    }
+
+    /// Check if the connection's task has exited.
+    fn check_connection(&mut self, context: &mut Context<'_>) -> Result<(), PeerError> {
+        match self.connection_task.poll_unpin(context) {
+            Poll::Pending => {
+                // Connection task is still running.
+                Ok(())
+            }
+            Poll::Ready(Ok(())) => {
+                // Connection task stopped unexpectedly, without panicking.
+                return Err(PeerError::ConnectionTaskClosed);
+            }
+            Poll::Ready(Err(error)) => {
+                // Connection task stopped unexpectedly with a panic. shut the node down.
+                tracing::error!("Peer Connection task panicked: {error}, shutting the node down!");
+                set_shutting_down();
+                return Err(PeerError::ConnectionTaskClosed);
+            }
         }
     }
 }
 
 impl tower::Service<InternalMessageRequest> for Client {
-    type Error = PeerError;
     type Response = InternalMessageResponse;
+    type Error = SharedPeerError;
     type Future =
         Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
 
@@ -49,7 +154,7 @@ impl tower::Service<InternalMessageRequest> for Client {
     ) -> std::task::Poll<Result<(), Self::Error>> {
         self.server_tx
             .poll_ready(cx)
-            .map_err(|e| PeerError::ClientChannelClosed)
+            .map_err(|e| PeerError::ClientChannelClosed.into())
     }
     fn call(&mut self, req: InternalMessageRequest) -> Self::Future {
         let (tx, rx) = oneshot::channel();
@@ -59,11 +164,12 @@ impl tower::Service<InternalMessageRequest> for Client {
             .map(|recv_result| {
                 recv_result
                     .expect("ClientRequest oneshot sender must not be dropped before send")
+                    .map_err(|e| e.into())
             })
             .boxed(),
-            Err(_e) => {
+            Err(_) => {
                 // TODO: better error handling
-                futures::future::ready(Err(PeerError::ClientChannelClosed)).boxed()
+                futures::future::ready(Err(PeerError::ClientChannelClosed.into())).boxed()
             }
         }
     }
 }
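Since `Client` is a tower service, callers are expected to use the usual ready/call pattern. A hedged sketch (not from the diff, standard tower usage with the types shown above):

    use tower::ServiceExt; // for `ready`

    async fn send_request(
        client: &mut Client,
        req: InternalMessageRequest,
    ) -> Result<InternalMessageResponse, SharedPeerError> {
        // Wait until the internal channel has capacity, then send the request
        // and await the peer's response (or a shared error).
        client.ready().await?.call(req).await
    }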
@ -1,116 +1,78 @@
-use std::collections::HashSet;
-
 use futures::channel::{mpsc, oneshot};
-use futures::stream::Fuse;
-use futures::{AsyncRead, AsyncWrite, SinkExt, StreamExt};
+use futures::stream::FusedStream;
+use futures::{Sink, SinkExt, Stream, StreamExt};

-use levin::{MessageSink, MessageStream};
-use monero_wire::messages::CoreSyncData;
-use monero_wire::{levin, Message, NetworkAddress};
-use tower::{Service, ServiceExt};
+use monero_wire::{Message, BucketError};
+use tower::{BoxError, Service};

-use crate::protocol::{
-    InternalMessageRequest, InternalMessageResponse, BLOCKS_IDS_SYNCHRONIZING_MAX_COUNT,
-    P2P_MAX_PEERS_IN_HANDSHAKE,
-};
-
-use super::PeerError;
-
-pub enum PeerSyncChange {
-    CoreSyncData(NetworkAddress, CoreSyncData),
-    ObjectsResponse(NetworkAddress, Vec<[u8; 32]>, u64),
-    PeerDisconnected(NetworkAddress),
-}
+use crate::connection_handle::DisconnectSignal;
+use crate::peer::error::{ErrorSlot, PeerError, SharedPeerError};
+use crate::peer::handshaker::ConnectionAddr;
+use crate::protocol::internal_network::{MessageID, Request, Response};

 pub struct ClientRequest {
-    pub req: InternalMessageRequest,
-    pub tx: oneshot::Sender<Result<InternalMessageResponse, PeerError>>,
+    pub req: Request,
+    pub tx: oneshot::Sender<Result<Response, SharedPeerError>>,
 }

 pub enum State {
     WaitingForRequest,
     WaitingForResponse {
-        request: InternalMessageRequest,
-        tx: oneshot::Sender<Result<InternalMessageResponse, PeerError>>,
+        request_id: MessageID,
+        tx: oneshot::Sender<Result<Response, SharedPeerError>>,
     },
 }

-impl State {
-    pub fn expected_response_id(&self) -> Option<u32> {
-        match self {
-            Self::WaitingForRequest => None,
-            Self::WaitingForResponse { request, tx: _ } => request.expected_id(),
-        }
-    }
-}
-
-pub struct Connection<Svc, Aw, Ar> {
-    address: NetworkAddress,
+pub struct Connection<Svc, Snk> {
+    address: ConnectionAddr,
     state: State,
-    sink: MessageSink<Aw, Message>,
-    stream: Fuse<MessageStream<Ar, Message>>,
+    sink: Snk,
     client_rx: mpsc::Receiver<ClientRequest>,
-    sync_state_tx: mpsc::Sender<PeerSyncChange>,
+    error_slot: ErrorSlot,

+    /// # Security
+    ///
+    /// If this connection tracker or `Connection`s are leaked,
+    /// the number of active connections will appear higher than it actually is.
+    /// If enough connections leak, Cuprate will stop making new connections.
+    connection_tracker: DisconnectSignal,

     svc: Svc,
 }

-impl<Svc, Aw, Ar> Connection<Svc, Aw, Ar>
+impl<Svc, Snk> Connection<Svc, Snk>
 where
-    Svc: Service<InternalMessageRequest, Response = InternalMessageResponse, Error = PeerError>,
-    Aw: AsyncWrite + std::marker::Unpin,
-    Ar: AsyncRead + std::marker::Unpin,
+    Svc: Service<Request, Response = Response, Error = BoxError>,
+    Snk: Sink<Message, Error = BucketError> + Unpin,
 {
     pub fn new(
-        address: NetworkAddress,
-        sink: MessageSink<Aw, Message>,
-        stream: MessageStream<Ar, Message>,
+        address: ConnectionAddr,
+        sink: Snk,
         client_rx: mpsc::Receiver<ClientRequest>,
-        sync_state_tx: mpsc::Sender<PeerSyncChange>,
+        error_slot: ErrorSlot,
+        connection_tracker: DisconnectSignal,
         svc: Svc,
-    ) -> Connection<Svc, Aw, Ar> {
+    ) -> Connection<Svc, Snk> {
         Connection {
             address,
             state: State::WaitingForRequest,
             sink,
-            stream: stream.fuse(),
             client_rx,
-            sync_state_tx,
+            error_slot,
+            connection_tracker,
             svc,
         }
     }

-    async fn handle_response(&mut self, res: InternalMessageResponse) -> Result<(), PeerError> {
+    async fn handle_response(&mut self, res: Response) -> Result<(), PeerError> {
         let state = std::mem::replace(&mut self.state, State::WaitingForRequest);
-        if let State::WaitingForResponse { request, tx } = state {
-            match (request, &res) {
-                (InternalMessageRequest::Handshake(_), InternalMessageResponse::Handshake(_)) => {}
-                (
-                    InternalMessageRequest::SupportFlags(_),
-                    InternalMessageResponse::SupportFlags(_),
-                ) => {}
-                (InternalMessageRequest::TimedSync(_), InternalMessageResponse::TimedSync(res)) => {
-                }
-                (
-                    InternalMessageRequest::GetObjectsRequest(req),
-                    InternalMessageResponse::GetObjectsResponse(res),
-                ) => {}
-                (
-                    InternalMessageRequest::ChainRequest(_),
-                    InternalMessageResponse::ChainResponse(res),
-                ) => {}
-                (
-                    InternalMessageRequest::FluffyMissingTransactionsRequest(req),
-                    InternalMessageResponse::NewFluffyBlock(blk),
-                ) => {}
-                (
-                    InternalMessageRequest::GetTxPoolCompliment(_),
-                    InternalMessageResponse::NewTransactions(_),
-                ) => {
-                    // we could check we received no transactions that we said we knew about but thats going to happen later anyway when they get added to our
-                    // mempool
-                }
-                _ => return Err(PeerError::ResponseError("Peer sent incorrect response")),
+        if let State::WaitingForResponse { request_id, tx } = state {
+            if request_id != res.id() {
+                // TODO: Fail here
+                return Err(PeerError::PeerSentIncorrectResponse);
             }
-            // response passed our tests we can send it to the requestor
+            // response passed our tests we can send it to the requester
             let _ = tx.send(Ok(res));
             Ok(())
         } else {
@ -122,30 +84,36 @@ where
         Ok(self.sink.send(mes.into()).await?)
     }

-    async fn handle_peer_request(&mut self, req: InternalMessageRequest) -> Result<(), PeerError> {
+    async fn handle_peer_request(&mut self, req: Request) -> Result<(), PeerError> {
         // we should check contents of peer requests for obvious errors like we do with responses
+        todo!()
+        /*
         let ready_svc = self.svc.ready().await?;
         let res = ready_svc.call(req).await?;
         self.send_message_to_peer(res).await
+        */
     }

     async fn handle_client_request(&mut self, req: ClientRequest) -> Result<(), PeerError> {
-        // check we need a response
-        if let Some(_) = req.req.expected_id() {
+        if req.req.needs_response() {
             self.state = State::WaitingForResponse {
-                request: req.req.clone(),
+                request_id: req.req.id(),
                 tx: req.tx,
             };
         }
+        // TODO: send NA response to requester
         self.send_message_to_peer(req.req).await
     }

-    async fn state_waiting_for_request(&mut self) -> Result<(), PeerError> {
+    async fn state_waiting_for_request<Str>(&mut self, stream: &mut Str) -> Result<(), PeerError>
+    where
+        Str: FusedStream<Item = Result<Message, BucketError>> + Unpin,
+    {
         futures::select! {
-            peer_message = self.stream.next() => {
+            peer_message = stream.next() => {
                 match peer_message.expect("MessageStream will never return None") {
                     Ok(message) => {
-                        self.handle_peer_request(message.try_into().map_err(|_| PeerError::PeerSentUnexpectedResponse)?).await
+                        self.handle_peer_request(message.try_into().map_err(|_| PeerError::ResponseError(""))?).await
                     },
                     Err(e) => Err(e.into()),
                 }
@ -156,10 +124,12 @@ where
         }
     }

-    async fn state_waiting_for_response(&mut self) -> Result<(), PeerError> {
+    async fn state_waiting_for_response<Str>(&mut self, stream: &mut Str) -> Result<(), PeerError>
+    where
+        Str: FusedStream<Item = Result<Message, BucketError>> + Unpin,
+    {
         // put a timeout on this
-        let peer_message = self
-            .stream
+        let peer_message = stream
             .next()
             .await
             .expect("MessageStream will never return None")?;
@ -183,12 +153,15 @@ where
         }
     }

-    pub async fn run(mut self) {
+    pub async fn run<Str>(mut self, mut stream: Str)
+    where
+        Str: FusedStream<Item = Result<Message, BucketError>> + Unpin,
+    {
         loop {
             let _res = match self.state {
-                State::WaitingForRequest => self.state_waiting_for_request().await,
-                State::WaitingForResponse { request: _, tx: _ } => {
-                    self.state_waiting_for_response().await
+                State::WaitingForRequest => self.state_waiting_for_request(&mut stream).await,
+                State::WaitingForResponse { .. } => {
+                    self.state_waiting_for_response(&mut stream).await
                 }
             };
         }

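The ClientRequest/oneshot pairing above is the whole request/response contract between the Client handle and this connection task: the caller parks a oneshot sender in the channel, the task forwards the request to the peer, remembers the expected MessageID, and fulfils the oneshot when a matching response arrives. A simplified, hedged sketch of the caller's side (types reduced to strings; the real code uses the Request/Response enums and SharedPeerError):

    use futures::channel::{mpsc, oneshot};

    async fn round_trip(
        server_tx: &mut mpsc::Sender<(String, oneshot::Sender<String>)>,
    ) -> Option<String> {
        // Create the response slot and hand it, with the request, to the connection task.
        let (tx, rx) = oneshot::channel();
        server_tx.try_send(("ping".to_string(), tx)).ok()?;
        // Wait for the task to fulfil the oneshot with the peer's reply.
        rx.await.ok()
    }
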
159  p2p/src/peer/connector.rs  Normal file
@ -0,0 +1,159 @@
//! Wrapper around handshake logic that also opens a TCP connection.

use std::{
    future::Future,
    net::SocketAddr,
    pin::Pin,
    task::{Context, Poll},
};

use futures::{AsyncRead, AsyncWrite, FutureExt};
use monero_wire::{network_address::NetZone, NetworkAddress};
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
use tower::{BoxError, Service, ServiceExt};
use tracing::Instrument;

use crate::peer::handshaker::ConnectionAddr;
use crate::{
    address_book::{AddressBookRequest, AddressBookResponse},
    connection_counter::ConnectionTracker,
    protocol::{
        CoreSyncDataRequest, CoreSyncDataResponse, InternalMessageRequest, InternalMessageResponse,
    },
};

use super::{
    handshaker::{DoHandshakeRequest, Handshaker},
    Client,
};

async fn connect(addr: &NetworkAddress) -> Result<(impl AsyncRead, impl AsyncWrite), BoxError> {
    match addr.get_zone() {
        NetZone::Public => {
            let stream =
                tokio::net::TcpStream::connect(SocketAddr::try_from(*addr).unwrap()).await?;
            let (read, write) = stream.into_split();
            Ok((read.compat(), write.compat_write()))
        }
        _ => unimplemented!(),
    }
}

/// A wrapper around [`Handshake`] that opens a connection before
/// forwarding to the inner handshake service. Writing this as its own
/// [`tower::Service`] lets us apply unified timeout policies, etc.
#[derive(Debug, Clone)]
pub struct Connector<Svc, CoreSync, AdrBook>
where
    CoreSync: Service<CoreSyncDataRequest, Response = CoreSyncDataResponse, Error = BoxError>
        + Clone
        + Send
        + 'static,
    CoreSync::Future: Send,

    Svc: Service<InternalMessageRequest, Response = InternalMessageResponse, Error = BoxError>
        + Clone
        + Send
        + 'static,
    Svc::Future: Send,

    AdrBook: Service<AddressBookRequest, Response = AddressBookResponse, Error = BoxError>
        + Clone
        + Send
        + 'static,
    AdrBook::Future: Send,
{
    handshaker: Handshaker<Svc, CoreSync, AdrBook>,
}

impl<Svc, CoreSync, AdrBook> Connector<Svc, CoreSync, AdrBook>
where
    CoreSync: Service<CoreSyncDataRequest, Response = CoreSyncDataResponse, Error = BoxError>
        + Clone
        + Send
        + 'static,
    CoreSync::Future: Send,

    Svc: Service<InternalMessageRequest, Response = InternalMessageResponse, Error = BoxError>
        + Clone
        + Send
        + 'static,
    Svc::Future: Send,

    AdrBook: Service<AddressBookRequest, Response = AddressBookResponse, Error = BoxError>
        + Clone
        + Send
        + 'static,
    AdrBook::Future: Send,
{
    pub fn new(handshaker: Handshaker<Svc, CoreSync, AdrBook>) -> Self {
        Connector { handshaker }
    }
}

/// A connector request.
/// Contains the information needed to make an outbound connection to the peer.
pub struct OutboundConnectorRequest {
    /// The Monero listener address of the peer.
    pub addr: NetworkAddress,

    /// A connection tracker that reduces the open connection count when dropped.
    ///
    /// Used to limit the number of open connections in Cuprate.
    pub connection_tracker: ConnectionTracker,
}

impl<Svc, CoreSync, AdrBook> Service<OutboundConnectorRequest> for Connector<Svc, CoreSync, AdrBook>
where
    CoreSync: Service<CoreSyncDataRequest, Response = CoreSyncDataResponse, Error = BoxError>
        + Clone
        + Send
        + 'static,
    CoreSync::Future: Send,

    Svc: Service<InternalMessageRequest, Response = InternalMessageResponse, Error = BoxError>
        + Clone
        + Send
        + 'static,
    Svc::Future: Send,

    AdrBook: Service<AddressBookRequest, Response = AddressBookResponse, Error = BoxError>
        + Clone
        + Send
        + 'static,
    AdrBook::Future: Send,
{
    type Response = (NetworkAddress, Client);
    type Error = BoxError;
    type Future =
        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;

    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: OutboundConnectorRequest) -> Self::Future {
        let OutboundConnectorRequest {
            addr: address,
            connection_tracker,
        }: OutboundConnectorRequest = req;

        let hs = self.handshaker.clone();
        let connector_span = tracing::info_span!("connector", peer = ?address);

        async move {
            let (read, write) = connect(&address).await?;
            let client = hs
                .oneshot(DoHandshakeRequest {
                    read,
                    write,
                    addr: ConnectionAddr::OutBound { address },
                    connection_tracker,
                })
                .await?;
            Ok((address, client))
        }
        .instrument(connector_span)
        .boxed()
    }
}

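Because Connector is itself a tower::Service, dialing a peer is a single oneshot call; there is no separate connect-then-handshake API to drive by hand. A hedged sketch, generic over any connector-shaped service (the real Connector takes an OutboundConnectorRequest and yields a (NetworkAddress, Client) pair; `dial` is a hypothetical helper, not part of this commit):

    use tower::{BoxError, Service, ServiceExt};

    async fn dial<C, Req, Resp>(connector: C, req: Req) -> Result<Resp, BoxError>
    where
        C: Service<Req, Response = Resp, Error = BoxError>,
    {
        // `oneshot` waits for readiness, sends the single request, and awaits the result.
        connector.oneshot(req).await
    }
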
116  p2p/src/peer/error.rs  Normal file
@ -0,0 +1,116 @@
use std::sync::{Arc, Mutex};

use monero_wire::BucketError;
use thiserror::Error;
use tracing_error::TracedError;

/// A wrapper around `Arc<PeerError>` that implements `Error`.
#[derive(Error, Debug, Clone)]
#[error(transparent)]
pub struct SharedPeerError(Arc<TracedError<PeerError>>);

impl<E> From<E> for SharedPeerError
where
    PeerError: From<E>,
{
    fn from(source: E) -> Self {
        Self(Arc::new(TracedError::from(PeerError::from(source))))
    }
}

impl SharedPeerError {
    /// Returns a debug-formatted string describing the inner [`PeerError`].
    ///
    /// Unfortunately, [`TracedError`] makes it impossible to get a reference to the original error.
    pub fn inner_debug(&self) -> String {
        format!("{:?}", self.0.as_ref())
    }
}

#[derive(Debug, Error)]
pub enum PeerError {
    #[error("The connection task has closed.")]
    ConnectionTaskClosed,
    #[error("Error with peers response: {0}.")]
    ResponseError(&'static str),
    #[error("The connected peer sent an an unexpected response message.")]
    PeerSentUnexpectedResponse,
    #[error("The connected peer sent an incorrect response.")]
    BucketError(#[from] BucketError),
    #[error("The channel was closed.")]
    ClientChannelClosed,
}

/// A shared error slot for peer errors.
///
/// # Correctness
///
/// Error slots are shared between sync and async code. In async code, the error
/// mutex should be held for as short a time as possible. This avoids blocking
/// the async task thread on acquiring the mutex.
///
/// > If the value behind the mutex is just data, it’s usually appropriate to use a blocking mutex
/// > ...
/// > wrap the `Arc<Mutex<...>>` in a struct
/// > that provides non-async methods for performing operations on the data within,
/// > and only lock the mutex inside these methods
///
/// <https://docs.rs/tokio/1.15.0/tokio/sync/struct.Mutex.html#which-kind-of-mutex-should-you-use>
#[derive(Default, Clone)]
pub struct ErrorSlot(Arc<std::sync::Mutex<Option<SharedPeerError>>>);

impl std::fmt::Debug for ErrorSlot {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // don't hang if the mutex is locked
        // show the panic if the mutex was poisoned
        f.debug_struct("ErrorSlot")
            .field("error", &self.0.try_lock())
            .finish()
    }
}

impl ErrorSlot {
    /// Read the current error in the slot.
    ///
    /// Returns `None` if there is no error in the slot.
    ///
    /// # Correctness
    ///
    /// Briefly locks the error slot's threaded `std::sync::Mutex`, to get a
    /// reference to the error in the slot.
    #[allow(clippy::unwrap_in_result)]
    pub fn try_get_error(&self) -> Option<SharedPeerError> {
        self.0
            .lock()
            .expect("error mutex should be unpoisoned")
            .as_ref()
            .cloned()
    }

    /// Update the current error in the slot.
    ///
    /// Returns `Err(AlreadyErrored)` if there was already an error in the slot.
    ///
    /// # Correctness
    ///
    /// Briefly locks the error slot's threaded `std::sync::Mutex`, to check for
    /// a previous error, then update the error in the slot.
    #[allow(clippy::unwrap_in_result)]
    pub fn try_update_error(&self, e: SharedPeerError) -> Result<(), AlreadyErrored> {
        let mut guard = self.0.lock().expect("error mutex should be unpoisoned");

        if let Some(original_error) = guard.clone() {
            Err(AlreadyErrored { original_error })
        } else {
            *guard = Some(e);
            Ok(())
        }
    }
}

/// Error returned when the [`ErrorSlot`] already contains an error.
#[derive(Clone, Debug)]
pub struct AlreadyErrored {
    /// The original error in the error slot.
    pub original_error: SharedPeerError,
}

@ -1,274 +1,360 @@
+/// This module contains the logic for turning [`AsyncRead`] and [`AsyncWrite`]
+/// into [`Client`] and [`Connection`].
+///
+/// The main entry point is modeled as a [`tower::Service`] the struct being
+/// [`Handshaker`]. The [`Handshaker`] accepts handshake requests: [`DoHandshakeRequest`]
+/// and creates a state machine that's drives the handshake forward: [`HandshakeSM`] and
+/// eventually outputs a [`Client`] and [`Connection`].
+///
 use std::future::Future;
+use std::net::SocketAddr;
 use std::pin::Pin;
-use std::sync::Arc;

-use futures::FutureExt;
-use futures::{channel::mpsc, AsyncRead, AsyncWrite, SinkExt, StreamExt};
-use monero_wire::messages::admin::{SupportFlagsRequest, SupportFlagsResponse};
-use monero_wire::messages::MessageRequest;
+use futures::{channel::mpsc, sink::Sink, SinkExt, Stream};
+use futures::{FutureExt, StreamExt};
 use thiserror::Error;
-use tokio::time;
-use tower::{Service, ServiceExt};
-
-use crate::address_book::{AddressBookError, AddressBookRequest, AddressBookResponse};
-use crate::protocol::temp_database::{DataBaseRequest, DataBaseResponse, DatabaseError};
-use crate::protocol::{
-    Direction, InternalMessageRequest, InternalMessageResponse, P2P_MAX_PEERS_IN_HANDSHAKE,
-};
-use cuprate_common::{HardForks, Network, PruningSeed};
+use tokio::{
+    io::{AsyncRead, AsyncWrite},
+    time,
+};
+use tokio_util::codec::{FramedRead, FramedWrite};
+use tower::{BoxError, Service, ServiceExt};
+use tracing::Instrument;
+
+use cuprate_common::{Network, PruningSeed};
+use monero_wire::messages::admin::SupportFlagsResponse;
 use monero_wire::{
-    levin::{BucketError, MessageSink, MessageStream},
     messages::{
         admin::{HandshakeRequest, HandshakeResponse},
         common::PeerSupportFlags,
-        BasicNodeData, CoreSyncData, MessageResponse, PeerID, PeerListEntryBase,
+        BasicNodeData, CoreSyncData, PeerID, PeerListEntryBase,
     },
-    Message, NetworkAddress,
+    BucketError, Message, MoneroWireCodec, NetZone, NetworkAddress, RequestMessage,
+    ResponseMessage,
 };
-use tracing::Instrument;

-use super::client::Client;
 use super::{
-    client::ConnectionInfo,
-    connection::{ClientRequest, Connection, PeerSyncChange},
+    client::{Client, ConnectionInfo},
+    connection::Connection,
     PeerError,
 };
+use crate::address_book::connection_handle::new_address_book_connection_handle;
+use crate::address_book::{AddressBookRequest, AddressBookResponse};
+use crate::connection_counter::ConnectionTracker;
+use crate::constants::{
+    CUPRATE_MINIMUM_SUPPORT_FLAGS, HANDSHAKE_TIMEOUT, P2P_MAX_PEERS_IN_HANDSHAKE,
+};
+use crate::protocol::{
+    CoreSyncDataRequest, CoreSyncDataResponse, Direction, InternalMessageRequest,
+    InternalMessageResponse,
+};
+use crate::NetZoneBasicNodeData;

+/// Possible handshake errors
 #[derive(Debug, Error)]
 pub enum HandShakeError {
+    /// The peer did not complete the handshake fast enough.
     #[error("The peer did not complete the handshake fast enough")]
     PeerTimedOut,
+    /// The Peer has non-standard pruning.
     #[error("The peer has a weird pruning scheme")]
     PeerClaimedWeirdPruning,
-    #[error("The peer has an unexpected top version")]
-    PeerHasUnexpectedTopVersion,
+    /// The peer does not have the minimum support flags
     #[error("The peer does not have the minimum support flags")]
     PeerDoesNotHaveTheMinimumSupportFlags,
+    /// The peer is not on the network we are on (MAINNET|TESTNET|STAGENET)
     #[error("The peer is on a different network")]
     PeerIsOnADifferentNetwork,
-    #[error("Address book err: {0}")]
-    AddressBookError(#[from] AddressBookError),
+    /// The peer sent us too many peers, more than [`P2P_MAX_PEERS_IN_HANDSHAKE`]
     #[error("The peer sent too many peers, considered spamming")]
     PeerSentTooManyPeers,
+    /// The peer sent an incorrect response
     #[error("The peer sent a wrong response to our handshake")]
     PeerSentWrongResponse,
-    #[error("The syncer returned an error")]
-    DataBaseError(#[from] DatabaseError),
+    /// Error communicating with peer
     #[error("Bucket error while communicating with peer: {0}")]
     BucketError(#[from] BucketError),
 }

-pub struct NetworkConfig {
-    /// Port
-    my_port: u32,
-    /// The Network
+/// An address used to connect to a peer.
+#[derive(Debug, Copy, Clone)]
+pub enum ConnectionAddr {
+    /// Outbound connection to another peer.
+    OutBound { address: NetworkAddress },
+    /// An inbound direct connection to our node.
+    InBoundDirect { transient_address: SocketAddr },
+    /// An inbound connection through a hidden network
+    /// like Tor/ I2p
+    InBoundProxy { net_zone: NetZone },
+}
+
+impl ConnectionAddr {
+    /// Gets the [`NetworkAddress`] of this connection.
+    pub fn get_network_address(&self, port: u16) -> Option<NetworkAddress> {
+        match self {
+            ConnectionAddr::OutBound { address } => Some(*address),
+            _ => None,
+        }
+    }
+    /// Gets the [`NetZone`] of this connection.
+    pub fn get_zone(&self) -> NetZone {
+        match self {
+            ConnectionAddr::OutBound { address } => address.get_zone(),
+            ConnectionAddr::InBoundDirect { .. } => NetZone::Public,
+            ConnectionAddr::InBoundProxy { net_zone } => *net_zone,
+        }
+    }
+
+    /// Gets the [`Direction`] of this connection.
+    pub fn direction(&self) -> Direction {
+        match self {
+            ConnectionAddr::OutBound { .. } => Direction::Outbound,
+            ConnectionAddr::InBoundDirect { .. } | ConnectionAddr::InBoundProxy { .. } => {
+                Direction::Inbound
+            }
+        }
+    }
+}
+
+/// A request to handshake with a peer.
+pub struct DoHandshakeRequest<W, R> {
+    /// The read-half of the connection.
+    pub read: R,
+    /// The write-half of the connection.
+    pub write: W,
+    /// The [`ConnectionAddr`] of this connection.
+    pub addr: ConnectionAddr,
+    /// The [`ConnectionTracker`] of this connection.
+    pub connection_tracker: ConnectionTracker,
+}
+
+/// A [`Service`] that accepts [`DoHandshakeRequest`] and
+/// produces a [`Client`] and [`Connection`].
+#[derive(Debug, Clone)]
+pub struct Handshaker<Svc, CoreSync, AdrBook> {
+    /// A collection of our [`BasicNodeData`] for each [`NetZone`]
+    /// for more info see: [`NetZoneBasicNodeData`]
+    basic_node_data: NetZoneBasicNodeData,
+    /// The [`Network`] our node is using
     network: Network,
-    /// Peer ID
-    peer_id: PeerID,
-    /// RPC Port
-    rpc_port: u16,
-    /// RPC Credits Per Hash
-    rpc_credits_per_hash: u32,
-    our_support_flags: PeerSupportFlags,
-    minimum_peer_support_flags: PeerSupportFlags,
-    handshake_timeout: time::Duration,
-    max_in_peers: u32,
-    target_out_peers: u32,
-}
-
-impl Default for NetworkConfig {
-    fn default() -> Self {
-        NetworkConfig {
-            my_port: 18080,
-            network: Network::MainNet,
-            peer_id: PeerID(21),
-            rpc_port: 0,
-            rpc_credits_per_hash: 0,
-            our_support_flags: PeerSupportFlags::get_support_flag_fluffy_blocks(),
-            minimum_peer_support_flags: PeerSupportFlags::from(0_u32),
-            handshake_timeout: time::Duration::from_secs(5),
-            max_in_peers: 13,
-            target_out_peers: 21,
-        }
-    }
-}
-
-impl NetworkConfig {
-    pub fn basic_node_data(&self) -> BasicNodeData {
-        BasicNodeData {
-            my_port: self.my_port,
-            network_id: self.network.network_id(),
-            peer_id: self.peer_id,
-            support_flags: self.our_support_flags,
-            rpc_port: self.rpc_port,
-            rpc_credits_per_hash: self.rpc_credits_per_hash,
-        }
-    }
-}
-
-pub struct Handshake<W, R> {
-    sink: MessageSink<W, Message>,
-    stream: MessageStream<R, Message>,
-    direction: Direction,
-    addr: NetworkAddress,
-}
-
-pub struct Handshaker<Bc, Svc, AdrBook> {
-    config: NetworkConfig,
+    /// The span [`Connection`] tasks will be [`tracing::instrument`]ed with
     parent_span: tracing::Span,
+    /// The address book [`Service`]
     address_book: AdrBook,
-    blockchain: Bc,
-    peer_sync_states: mpsc::Sender<PeerSyncChange>,
+    /// A [`Service`] to handle incoming [`CoreSyncData`] and to get
+    /// our [`CoreSyncData`].
+    core_sync_svc: CoreSync,
+    /// A service given to the [`Connection`] task to answer incoming
+    /// requests to our node.
     peer_request_service: Svc,
 }

-impl<Bc, Svc, AdrBook, W, R> tower::Service<Handshake<W, R>> for Handshaker<Bc, Svc, AdrBook>
+impl<Svc, CoreSync, AdrBook> Handshaker<Svc, CoreSync, AdrBook> {
+    pub fn new(
+        basic_node_data: NetZoneBasicNodeData,
+        network: Network,
+        address_book: AdrBook,
+        core_sync_svc: CoreSync,
+        peer_request_service: Svc,
+    ) -> Self {
+        Handshaker {
+            basic_node_data,
+            network,
+            parent_span: tracing::Span::current(),
+            address_book,
+            core_sync_svc,
+            peer_request_service,
+        }
+    }
+}
+
+impl<Svc, CoreSync, AdrBook, W, R> Service<DoHandshakeRequest<W, R>>
+    for Handshaker<Svc, CoreSync, AdrBook>
 where
-    Bc: Service<DataBaseRequest, Response = DataBaseResponse, Error = DatabaseError>
+    CoreSync: Service<CoreSyncDataRequest, Response = CoreSyncDataResponse, Error = BoxError>
         + Clone
         + Send
         + 'static,
-    Bc::Future: Send,
+    CoreSync::Future: Send,

-    Svc: Service<InternalMessageRequest, Response = InternalMessageResponse, Error = PeerError>
+    Svc: Service<InternalMessageRequest, Response = InternalMessageResponse, Error = BoxError>
         + Clone
         + Send
         + 'static,
     Svc::Future: Send,

-    AdrBook: Service<AddressBookRequest, Response = AddressBookResponse, Error = AddressBookError>
+    AdrBook: Service<AddressBookRequest, Response = AddressBookResponse, Error = BoxError>
         + Clone
         + Send
         + 'static,
     AdrBook::Future: Send,

-    W: AsyncWrite + std::marker::Unpin + Send + 'static,
-    R: AsyncRead + std::marker::Unpin + Send + 'static,
+    W: AsyncWrite + Unpin + Send + 'static,
+    R: AsyncRead + Unpin + Send + 'static,
 {
-    type Error = HandShakeError;
     type Response = Client;
+    type Error = BoxError;
     type Future =
         Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;

     fn poll_ready(
         &mut self,
-        cx: &mut std::task::Context<'_>,
+        _cx: &mut std::task::Context<'_>,
     ) -> std::task::Poll<Result<(), Self::Error>> {
+        // We are always ready.
         std::task::Poll::Ready(Ok(()))
     }

-    fn call(&mut self, req: Handshake<W, R>) -> Self::Future {
-        let Handshake {
-            sink: mut peer_sink,
-            stream: mut peer_stream,
-            direction,
+    fn call(&mut self, req: DoHandshakeRequest<W, R>) -> Self::Future {
+        let DoHandshakeRequest {
+            read,
+            write,
             addr,
+            connection_tracker,
         } = req;

+        // create the levin message stream/ sink.
+        let peer_stream = FramedRead::new(read, MoneroWireCodec::default());
+        let peer_sink = FramedWrite::new(write, MoneroWireCodec::default());
+
+        // The span the handshake state machine will use
         let span = tracing::debug_span!("Handshaker");
+
+        // The span the connection task will use.
         let connection_span = tracing::debug_span!(parent: &self.parent_span, "Connection");

-        let blockchain = self.blockchain.clone();
+        // clone the services that the handshake state machine will need.
+        let core_sync_svc = self.core_sync_svc.clone();
         let address_book = self.address_book.clone();
-        let syncer_tx = self.peer_sync_states.clone();
         let peer_request_service = self.peer_request_service.clone();

         let state_machine = HandshakeSM {
             peer_sink,
             peer_stream,
-            direction,
             addr,
-            network: self.config.network,
-            basic_node_data: self.config.basic_node_data(),
-            minimum_support_flags: self.config.minimum_peer_support_flags,
+            network: self.network,
+            basic_node_data: self.basic_node_data.basic_node_data(&addr.get_zone()),
             address_book,
-            blockchain,
+            core_sync_svc,
             peer_request_service,
             connection_span,
+            connection_tracker,
             state: HandshakeState::Start,
         };
+        // although callers should use a timeout do one here as well just to be safe.
-        let ret = time::timeout(self.config.handshake_timeout, state_machine.do_handshake());
+        let ret = time::timeout(HANDSHAKE_TIMEOUT, state_machine.do_handshake());

         async move {
             match ret.await {
                 Ok(handshake) => handshake,
-                Err(_) => Err(HandShakeError::PeerTimedOut),
+                Err(_) => Err(HandShakeError::PeerTimedOut.into()),
             }
         }
+        .instrument(span)
         .boxed()
     }
 }

+/// The states a handshake can be in.
 enum HandshakeState {
+    /// The initial state.
+    /// if this is an inbound handshake then this state means we
+    /// are waiting for a [`HandshakeRequest`].
     Start,
+    /// Waiting for a [`HandshakeResponse`].
     WaitingForHandshakeResponse,
-    WaitingForSupportFlagResponse(BasicNodeData),
-    Complete(BasicNodeData),
+    /// Waiting for a [`SupportFlagsResponse`]
+    /// This contains the peers node data.
+    WaitingForSupportFlagResponse(BasicNodeData, CoreSyncData),
+    /// The handshake is complete.
+    /// This contains the peers node data.
+    Complete(BasicNodeData, CoreSyncData),
 }

 impl HandshakeState {
+    /// Returns true if the handshake is completed.
     pub fn is_complete(&self) -> bool {
-        matches!(self, HandshakeState::Complete(_))
+        matches!(self, Self::Complete(..))
     }

-    pub fn peer_basic_node_data(self) -> Option<BasicNodeData> {
+    /// returns the peers [`BasicNodeData`] and [`CoreSyncData`] if the peer
+    /// is in state [`HandshakeState::Complete`].
+    pub fn peer_data(self) -> Option<(BasicNodeData, CoreSyncData)> {
         match self {
-            HandshakeState::Complete(sup) => Some(sup),
+            HandshakeState::Complete(bnd, coresync) => Some((bnd, coresync)),
             _ => None,
         }
     }
 }

-struct HandshakeSM<Bc, Svc, AdrBook, W, R> {
-    peer_sink: MessageSink<W, Message>,
-    peer_stream: MessageStream<R, Message>,
-    direction: Direction,
-    addr: NetworkAddress,
+/// The state machine that drives a handshake forward and
+/// accepts requests (that can happen during a handshake)
+/// from a peer.
+struct HandshakeSM<Svc, CoreSync, AdrBook, W, R> {
+    /// The levin [`FramedWrite`] for the peer.
+    peer_sink: W,
+    /// The levin [`FramedRead`] for the peer.
+    peer_stream: R,
+    /// The [`ConnectionAddr`] for the peer.
+    addr: ConnectionAddr,
+    /// The [`Network`] we are on.
     network: Network,
+
+    /// Our [`BasicNodeData`].
     basic_node_data: BasicNodeData,
-    minimum_support_flags: PeerSupportFlags,
+    /// The address book [`Service`]
     address_book: AdrBook,
-    blockchain: Bc,
+    /// The core sync [`Service`] to handle incoming
+    /// [`CoreSyncData`] and to retrieve ours.
+    core_sync_svc: CoreSync,
+    /// The [`Service`] passed to the [`Connection`]
+    /// task to handle incoming peer requests.
     peer_request_service: Svc,
+
+    /// The [`tracing::Span`] the [`Connection`] task
+    /// will be [`tracing::instrument`]ed with.
     connection_span: tracing::Span,
+    /// A connection tracker to keep track of the
+    /// number of connections Cuprate is making.
+    connection_tracker: ConnectionTracker,
+
     state: HandshakeState,
 }

-impl<Bc, Svc, AdrBook, W, R> HandshakeSM<Bc, Svc, AdrBook, W, R>
+impl<Svc, CoreSync, AdrBook, W, R> HandshakeSM<Svc, CoreSync, AdrBook, W, R>
 where
-    Bc: Service<DataBaseRequest, Response = DataBaseResponse, Error = DatabaseError>
+    CoreSync: Service<CoreSyncDataRequest, Response = CoreSyncDataResponse, Error = BoxError>
         + Clone
         + Send
         + 'static,
-    Bc::Future: Send,
+    CoreSync::Future: Send,

-    Svc: Service<InternalMessageRequest, Response = InternalMessageResponse, Error = PeerError>
+    Svc: Service<InternalMessageRequest, Response = InternalMessageResponse, Error = BoxError>
         + Clone
         + Send
         + 'static,
     Svc::Future: Send,

-    AdrBook: Service<AddressBookRequest, Response = AddressBookResponse, Error = AddressBookError>
+    AdrBook: Service<AddressBookRequest, Response = AddressBookResponse, Error = BoxError>
         + Clone
         + Send
         + 'static,
     AdrBook::Future: Send,

-    W: AsyncWrite + std::marker::Unpin + Send + 'static,
-    R: AsyncRead + std::marker::Unpin + Send + 'static,
+    W: Sink<Message, Error = BucketError> + Unpin,
+    R: Stream<Item = Result<Message, BucketError>> + Unpin,
 {
-    async fn get_our_core_sync(&mut self) -> Result<CoreSyncData, DatabaseError> {
-        let blockchain = self.blockchain.ready().await?;
-        let DataBaseResponse::CoreSyncData(core_sync) = blockchain.call(DataBaseRequest::CoreSyncData).await? else {
-            unreachable!("Database will always return the requested item")
+    /// Gets our [`CoreSyncData`] from the `core_sync_svc`.
+    async fn get_our_core_sync(&mut self) -> Result<CoreSyncData, BoxError> {
+        let core_sync_svc = self.core_sync_svc.ready().await?;
+        let CoreSyncDataResponse::Ours(core_sync) = core_sync_svc.call(CoreSyncDataRequest::GetOurs).await? else {
+            unreachable!("The Service must give correct responses");
         };
+        tracing::trace!("Got core sync data: {core_sync:?}");
         Ok(core_sync)
     }

+    /// Sends a [`HandshakeRequest`] to the peer.
     async fn send_handshake_req(
         &mut self,
         node_data: BasicNodeData,
@ -281,59 +367,62 @@ where

         tracing::trace!("Sending handshake request: {handshake_req:?}");

-        let message: Message = Message::Request(handshake_req.into());
+        let message: Message = Message::Request(RequestMessage::Handshake(handshake_req));
         self.peer_sink.send(message).await?;
         Ok(())
     }

-    async fn get_handshake_res(&mut self) -> Result<HandshakeResponse, HandShakeError> {
-        // put a timeout on this
-        let Message::Response(MessageResponse::Handshake(handshake_res)) = self.peer_stream.next().await.expect("MessageSink will not return None")? else {
-            return Err(HandShakeError::PeerSentWrongResponse);
-        };
-
-        tracing::trace!("Received handshake response: {handshake_res:?}");
-
-        Ok(handshake_res)
-    }
-
+    /// Sends a [`SupportFlagsRequest`] to the peer.
+    /// This is done when a peer sends no support flags in their
+    /// [`HandshakeRequest`] or [`HandshakeResponse`].
+    ///
+    /// *note because Cuprate has minimum required support flags this won't
+    /// happeen but is included here just in case this changes.
     async fn send_support_flag_req(&mut self) -> Result<(), HandShakeError> {
         tracing::trace!("Peer sent no support flags, sending request");

-        let message: Message = Message::Request(SupportFlagsRequest.into());
+        let message: Message = Message::Request(RequestMessage::SupportFlags);
         self.peer_sink.send(message).await?;

         Ok(())
     }

-    async fn handle_handshake_response(
-        &mut self,
-        res: HandshakeResponse,
-    ) -> Result<(), HandShakeError> {
+    /// Handles an incoming [`HandshakeResponse`].
+    async fn handle_handshake_response(&mut self, res: HandshakeResponse) -> Result<(), BoxError> {
         let HandshakeResponse {
             node_data: peer_node_data,
             payload_data: peer_core_sync,
             local_peerlist_new,
         } = res;

-        if !peer_node_data
-            .support_flags
-            .contains(&self.minimum_support_flags)
-        {
-            tracing::debug!("Handshake failed: peer does not have minimum support flags");
-            return Err(HandShakeError::PeerDoesNotHaveTheMinimumSupportFlags);
-        }
-
+        // Check the peer is on the correct network.
         if peer_node_data.network_id != self.network.network_id() {
             tracing::debug!("Handshake failed: peer is on a different network");
-            return Err(HandShakeError::PeerIsOnADifferentNetwork);
+            return Err(HandShakeError::PeerIsOnADifferentNetwork.into());
         }

+        // Check the peer meets the minimum support flags.
+        if !peer_node_data
+            .support_flags
+            .contains(&CUPRATE_MINIMUM_SUPPORT_FLAGS)
+        {
+            tracing::debug!("Handshake failed: peer does not have minimum required support flags");
+            return Err(HandShakeError::PeerDoesNotHaveTheMinimumSupportFlags.into());
+        }
+
+        // Check the peer didn't send too many peers.
         if local_peerlist_new.len() > P2P_MAX_PEERS_IN_HANDSHAKE {
             tracing::debug!("Handshake failed: peer sent too many peers in response");
-            return Err(HandShakeError::PeerSentTooManyPeers);
+            return Err(HandShakeError::PeerSentTooManyPeers.into());
         }

+        // Tell the sync mgr about the new incoming core sync data.
+        self.core_sync_svc
+            .ready()
+            .await?
+            .call(CoreSyncDataRequest::NewIncoming(peer_core_sync.clone()))
+            .await?;
+
         // Tell the address book about the new peers
         self.address_book
             .ready()
@ -344,52 +433,65 @@ where
         ))
         .await?;

-        // coresync, pruning seed
+        // This won't actually happen (as long as we have a none 0 minimum support flags)
+        // it's just included here for completeness.
         if peer_node_data.support_flags.is_empty() {
             self.send_support_flag_req().await?;
-            self.state = HandshakeState::WaitingForSupportFlagResponse(peer_node_data);
+            self.state =
+                HandshakeState::WaitingForSupportFlagResponse(peer_node_data, peer_core_sync);
         } else {
-            self.state = HandshakeState::Complete(peer_node_data);
+            // this will always happen.
+            self.state = HandshakeState::Complete(peer_node_data, peer_core_sync);
         }

         Ok(())
     }

-    async fn handle_message_response(
-        &mut self,
-        response: MessageResponse,
-    ) -> Result<(), HandShakeError> {
-        match (&mut self.state, response) {
+    /// Handles a [`MessageResponse`].
+    async fn handle_message_response(&mut self, response: ResponseMessage) -> Result<(), BoxError> {
+        // The functions called here will change the state of the HandshakeSM so `HandshakeState::Start`
+        // is just used as a place holder.
+        //
+        // doing this allows us to not clone the BasicNodeData and CoreSyncData for WaitingForSupportFlagResponse.
+        let prv_state = std::mem::replace(&mut self.state, HandshakeState::Start);
+
+        match (prv_state, response) {
             (
                 HandshakeState::WaitingForHandshakeResponse,
-                MessageResponse::Handshake(handshake),
+                ResponseMessage::Handshake(handshake),
             ) => self.handle_handshake_response(handshake).await,
             (
-                HandshakeState::WaitingForSupportFlagResponse(bnd),
-                MessageResponse::SupportFlags(support_flags),
+                HandshakeState::WaitingForSupportFlagResponse(mut bnd, coresync),
+                ResponseMessage::SupportFlags(support_flags),
             ) => {
                 bnd.support_flags = support_flags.support_flags;
-                self.state = HandshakeState::Complete(bnd.clone());
+                self.state = HandshakeState::Complete(bnd, coresync);
                 Ok(())
             }
-            _ => Err(HandShakeError::PeerSentWrongResponse),
+            _ => Err(HandShakeError::PeerSentWrongResponse.into()),
         }
     }

+    /// Sends our [`PeerSupportFlags`] to the peer.
     async fn send_support_flags(
         &mut self,
         support_flags: PeerSupportFlags,
     ) -> Result<(), HandShakeError> {
-        let message = Message::Response(SupportFlagsResponse { support_flags }.into());
+        let message = Message::Response(ResponseMessage::SupportFlags(SupportFlagsResponse {
+            support_flags,
+        }));
         self.peer_sink.send(message).await?;
         Ok(())
     }

-    async fn do_outbound_handshake(&mut self) -> Result<(), HandShakeError> {
+    /// Attempts an outbound handshake with the peer.
+    async fn do_outbound_handshake(&mut self) -> Result<(), BoxError> {
+        // Get the data needed for the handshake request.
         let core_sync = self.get_our_core_sync().await?;
+        // send the handshake request.
         self.send_handshake_req(self.basic_node_data.clone(), core_sync)
             .await?;
+        // set the state to waiting for a response.
         self.state = HandshakeState::WaitingForHandshakeResponse;

         while !self.state.is_complete() {
@ -397,14 +499,17 @@ where
                 Some(mes) => {
                     let mes = mes?;
                     match mes {
-                        Message::Request(MessageRequest::SupportFlags(_)) => {
+                        Message::Request(RequestMessage::SupportFlags) => {
+                            // The only request we should be getting during an outbound handshake
+                            // is a support flag request.
                             self.send_support_flags(self.basic_node_data.support_flags)
                                 .await?
                         }
                         Message::Response(response) => {
+                            // This could be a handshake response or a support flags response.
                             self.handle_message_response(response).await?
                         }
-                        _ => return Err(HandShakeError::PeerSentWrongResponse),
+                        _ => return Err(HandShakeError::PeerSentWrongResponse.into()),
                     }
                 }
                 None => unreachable!("peer_stream wont return None"),
@ -414,40 +519,108 @@ where
         Ok(())
     }

-    async fn do_handshake(mut self) -> Result<Client, HandShakeError> {
-        match self.direction {
-            Direction::Outbound => self.do_outbound_handshake().await?,
+    /// Completes a handshake with a peer.
+    async fn do_handshake(mut self) -> Result<Client, BoxError> {
+        let mut peer_reachable = false;
+        match self.addr.direction() {
+            Direction::Outbound => {
+                self.do_outbound_handshake().await?;
+                // If this is an outbound handshake then obviously the peer
+                // is reachable.
+                peer_reachable = true
+            }
             Direction::Inbound => todo!(),
         }

-        let (server_tx, server_rx) = mpsc::channel(3);
+        let (server_tx, server_rx) = mpsc::channel(0);

-        let (replace_me, replace_me_rx) = mpsc::channel(3);
-
-        let peer_node_data = self
+        let (peer_node_data, coresync) = self
             .state
-            .peer_basic_node_data()
+            .peer_data()
             .expect("We must be in state complete to be here");

+        let pruning_seed = PruningSeed::try_from(coresync.pruning_seed).map_err(|e| Box::new(e))?;
+
+        // create the handle between the Address book and the connection task to
+        // allow the address book to shutdown the connection task and to update
+        // the address book when the connection is closed.
+        let (book_connection_side_handle, connection_book_side_handle) =
+            new_address_book_connection_handle();
+
+        // tell the address book about the new connection.
+        self.address_book
+            .ready()
+            .await?
+            .call(AddressBookRequest::ConnectedToPeer {
+                zone: self.addr.get_zone(),
+                connection_handle: connection_book_side_handle,
+                addr: self.addr.get_network_address(
+                    peer_node_data
+                        .my_port
+                        .try_into()
+                        .map_err(|_| "Peer sent a port that does not fit into a u16")?,
+                ),
+                id: peer_node_data.peer_id,
+                reachable: peer_reachable,
+                last_seen: chrono::Utc::now().naive_utc(),
+                pruning_seed: pruning_seed.clone(),
+                rpc_port: peer_node_data.rpc_port,
+                rpc_credits_per_hash: peer_node_data.rpc_credits_per_hash,
+            })
+            .await?;
+
+        // This block below is for keeping the last seen times in the address book
+        // upto date. We only update the last seen times on timed syncs to reduce
+        // the load on the address book.
+        //
+        // first clone the items needed
+        let mut address_book = self.address_book.clone();
+        let peer_id = peer_node_data.peer_id;
+        let net_zone = self.addr.get_zone();
+
+        /*
+        let peer_stream = self.peer_stream.then(|mes| async move {
+            if let Ok(mes) = &mes {
+                if mes.id() == TimedSync::ID {
+                    if let Ok(ready_book) = address_book.ready().await {
+                        // we dont care about address book errors here, If there is a problem
+                        // with the address book the node will get shutdown.
+                        let _ = ready_book
+                            .call(AddressBookRequest::SetPeerSeen(
+                                peer_id,
+                                chrono::Utc::now().naive_utc(),
+                                net_zone,
+                            ))
+                            .await;
+                    }
+                }
+            }
+            // return the message
+            mes
+        });
+        */
+        let connection = Connection::new(
+            self.addr,
+            self.peer_sink,
+            server_rx,
+            self.connection_tracker,
+            book_connection_side_handle,
+            self.peer_request_service,
+        );
+
+        let connection_task = tokio::task::spawn(connection.run().instrument(self.connection_span));
+
         let connection_info = ConnectionInfo {
             addr: self.addr,
             support_flags: peer_node_data.support_flags,
+            pruning_seed,
             peer_id: peer_node_data.peer_id,
             rpc_port: peer_node_data.rpc_port,
             rpc_credits_per_hash: peer_node_data.rpc_credits_per_hash,
         };

-        let connection = Connection::new(
-            self.addr,
-            self.peer_sink,
-            self.peer_stream,
-            server_rx,
-            replace_me,
-            self.peer_request_service,
-        );
-
-        let client = Client::new(connection_info.into(), server_tx);
-
-        tokio::task::spawn(connection.run().instrument(self.connection_span));
+        let client = Client::new(connection_info.into(), /* futures::futures_channel::oneshot::Sender<()> */, server_tx, connection_task, /* tokio::task::JoinHandle<()> */);

         Ok(client)
     }

74
p2p/src/peer/load_tracked_client.rs
Normal file
@ -0,0 +1,74 @@
//! A peer connection service wrapper type to handle load tracking and provide access to the
//! reported protocol version.

use std::sync::atomic::Ordering;
use std::{
    sync::Arc,
    task::{Context, Poll},
};

use cuprate_common::PruningSeed;
use tower::{
    load::{Load, PeakEwma},
    Service,
};

use crate::{
    constants::{EWMA_DECAY_TIME_NANOS, EWMA_DEFAULT_RTT},
    peer::{Client, ConnectionInfo},
};

/// A client service wrapper that keeps track of its load.
///
/// It also keeps track of the peer's reported protocol version.
pub struct LoadTrackedClient {
    /// A service representing a connected peer, wrapped in a load tracker.
    service: PeakEwma<Client>,

    /// The metadata for the connected peer `service`.
    connection_info: Arc<ConnectionInfo>,
}

/// Create a new [`LoadTrackedClient`] wrapping the provided `client` service.
impl From<Client> for LoadTrackedClient {
    fn from(client: Client) -> Self {
        let connection_info = client.connection_info.clone();

        let service = PeakEwma::new(
            client,
            EWMA_DEFAULT_RTT,
            EWMA_DECAY_TIME_NANOS,
            tower::load::CompleteOnResponse::default(),
        );

        LoadTrackedClient {
            service,
            connection_info,
        }
    }
}

impl<Request> Service<Request> for LoadTrackedClient
where
    Client: Service<Request>,
{
    type Response = <Client as Service<Request>>::Response;
    type Error = <Client as Service<Request>>::Error;
    type Future = <PeakEwma<Client> as Service<Request>>::Future;

    fn poll_ready(&mut self, context: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.service.poll_ready(context)
    }

    fn call(&mut self, request: Request) -> Self::Future {
        self.service.call(request)
    }
}

impl Load for LoadTrackedClient {
    type Metric = <PeakEwma<Client> as Load>::Metric;

    fn load(&self) -> Self::Metric {
        self.service.load()
    }
}
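Since `From<Client>` is the only constructor shown above, a `LoadTrackedClient` is normally produced with `.into()` and then compared against other peers through tower's `Load` trait. The sketch below is illustrative only and not code from this commit; it assumes nothing beyond the `Load` trait from tower 0.4, and the `pick_less_loaded` helper name is hypothetical.

use tower::load::Load;

/// Return whichever of the two services currently reports the lower load.
/// This mirrors what a peer set would do before dispatching a request:
/// `Load::load()` on a `PeakEwma`-wrapped service yields its smoothed,
/// RTT-based cost, so the smaller metric is the less busy peer.
fn pick_less_loaded<S>(a: S, b: S) -> S
where
    S: Load,
{
    if a.load() <= b.load() {
        a
    } else {
        b
    }
}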
@ -1 +1 @@
pub use crate::peer::handshaker::{Handshake, Handshaker};
pub use crate::peer::handshaker::Handshaker;
@ -1,13 +1,29 @@
pub mod internal_network;

pub use internal_network::{InternalMessageRequest, InternalMessageResponse};

use monero_wire::messages::CoreSyncData;

/// A request to a [`tower::Service`] that handles sync states.
pub enum CoreSyncDataRequest {
    /// Get our [`CoreSyncData`].
    GetOurs,
    /// Handle an incoming [`CoreSyncData`].
    NewIncoming(CoreSyncData),
}

/// A response from a [`tower::Service`] that handles sync states.
pub enum CoreSyncDataResponse {
    /// Our [`CoreSyncData`]
    Ours(CoreSyncData),
    /// The incoming [`CoreSyncData`] is ok.
    Ok,
}

/// The direction of a connection.
pub enum Direction {
    /// An inbound connection.
    Inbound,
    /// An outbound connection.
    Outbound,
}
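The two enums above only define the interface of the sync-state service; this hunk does not ship an implementation next to them. A minimal sketch of a `tower::Service` answering these requests could look like the following. It is illustrative only: the `OurSyncData` struct is hypothetical, the request and response enums above are assumed to be in scope, and `CoreSyncData` is assumed to implement `Clone`.

use std::task::{Context, Poll};

use futures::future::{ready, Ready};
use monero_wire::messages::CoreSyncData;
use tower::Service;

/// A toy sync-state service that always reports the same `CoreSyncData`
/// and accepts every incoming one.
struct OurSyncData {
    ours: CoreSyncData,
}

impl Service<CoreSyncDataRequest> for OurSyncData {
    type Response = CoreSyncDataResponse;
    type Error = std::convert::Infallible;
    type Future = Ready<Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: CoreSyncDataRequest) -> Self::Future {
        ready(Ok(match req {
            // Hand back a copy of our own sync data.
            CoreSyncDataRequest::GetOurs => CoreSyncDataResponse::Ours(self.ours.clone()),
            // A real implementation would validate the peer's data here.
            CoreSyncDataRequest::NewIncoming(_) => CoreSyncDataResponse::Ok,
        }))
    }
}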
@ -22,162 +22,104 @@
/// Request: NewFluffyBlock, Response: None,
/// Request: NewTransactions, Response: None
///
use monero_wire::{
    ChainRequest, ChainResponse, FluffyMissingTransactionsRequest, GetObjectsRequest,
    GetObjectsResponse, GetTxPoolCompliment, HandshakeRequest, HandshakeResponse, Message,
    NewBlock, NewFluffyBlock, NewTransactions, PingResponse, RequestMessage, SupportFlagsResponse,
    TimedSyncRequest, TimedSyncResponse,
};

mod try_from;

/// An enum representing a request/response combination, so a handshake request
/// and response would have the same [`MessageID`]. This allows associating the
/// correct response to a request.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum MessageID {
    Handshake,
    TimedSync,
    Ping,
    SupportFlags,

    GetObjects,
    GetChain,
    FluffyMissingTxs,
    GetTxPoolCompliment,
    NewBlock,
    NewFluffyBlock,
    NewTransactions,
}

pub enum Request {
    Handshake(HandshakeRequest),
    TimedSync(TimedSyncRequest),
    Ping,
    SupportFlags,

    GetObjects(GetObjectsRequest),
    GetChain(ChainRequest),
    FluffyMissingTxs(FluffyMissingTransactionsRequest),
    GetTxPoolCompliment(GetTxPoolCompliment),
    NewBlock(NewBlock),
    NewFluffyBlock(NewFluffyBlock),
    NewTransactions(NewTransactions),
}

impl Request {
    pub fn id(&self) -> MessageID {
        match self {
            Request::Handshake(_) => MessageID::Handshake,
            Request::TimedSync(_) => MessageID::TimedSync,
            Request::Ping => MessageID::Ping,
            Request::SupportFlags => MessageID::SupportFlags,

            Request::GetObjects(_) => MessageID::GetObjects,
            Request::GetChain(_) => MessageID::GetChain,
            Request::FluffyMissingTxs(_) => MessageID::FluffyMissingTxs,
            Request::GetTxPoolCompliment(_) => MessageID::GetTxPoolCompliment,
            Request::NewBlock(_) => MessageID::NewBlock,
            Request::NewFluffyBlock(_) => MessageID::NewFluffyBlock,
            Request::NewTransactions(_) => MessageID::NewTransactions,
        }
    }

    pub fn needs_response(&self) -> bool {
        match self {
            Request::NewBlock(_) | Request::NewFluffyBlock(_) | Request::NewTransactions(_) => {
                false
            }
            _ => true,
        }
    }
}

pub enum Response {
    Handshake(HandshakeResponse),
    TimedSync(TimedSyncResponse),
    Ping(PingResponse),
    SupportFlags(SupportFlagsResponse),

    GetObjects(GetObjectsResponse),
    GetChain(ChainResponse),
    NewFluffyBlock(NewFluffyBlock),
    NewTransactions(NewTransactions),
    NA,
}

impl Response {
    pub fn id(&self) -> MessageID {
        match self {
            Response::Handshake(_) => MessageID::Handshake,
            Response::TimedSync(_) => MessageID::TimedSync,
            Response::Ping(_) => MessageID::Ping,
            Response::SupportFlags(_) => MessageID::SupportFlags,

            Response::GetObjects(_) => MessageID::GetObjects,
            Response::GetChain(_) => MessageID::GetChain,
            Response::NewFluffyBlock(_) => MessageID::NewBlock,
            Response::NewTransactions(_) => MessageID::NewFluffyBlock,

            Response::NA => panic!("Can't get message ID for a non existent response"),
        }
    }
}
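As the doc comment says, `MessageID` lets a connection pair an incoming response with the request that produced it, while `needs_response` flags the notification-style requests that never get one. Below is a hedged sketch of that pairing for the admin messages, where the ids line up one-to-one. The `admin_response_matches` helper is illustrative and not part of this commit, it assumes the `Request`, `Response` and `MessageID` types above are in scope, and the protocol requests answered by `NewFluffyBlock` or `NewTransactions` would need their own lookup table.

/// Decide whether an incoming response answers the outstanding admin request.
///
/// Admin requests (handshake, timed sync, ping, support flags) map one-to-one
/// onto a `MessageID`, so comparing ids is enough. Notification-style requests
/// (`needs_response() == false`) are fire-and-forget and never match.
fn admin_response_matches(req: &Request, res: &Response) -> bool {
    req.needs_response()
        && matches!(
            req.id(),
            MessageID::Handshake | MessageID::TimedSync | MessageID::Ping | MessageID::SupportFlags
        )
        && res.id() == req.id()
}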
163
p2p/src/protocol/internal_network/try_from.rs
Normal file
@ -0,0 +1,163 @@
//! This module contains the implementations of [`TryFrom`] and [`From`] to convert between
//! [`Message`], [`Request`] and [`Response`].

use monero_wire::messages::{Message, ProtocolMessage, RequestMessage, ResponseMessage};

use super::{Request, Response};

pub struct MessageConversionError;

macro_rules! match_body {
    (match $value: ident {$($body:tt)*} ($left:pat => $right_ty:expr) $($todo:tt)*) => {
        match_body!( match $value {
            $left => $right_ty,
            $($body)*
        } $($todo)* )
    };
    (match $value: ident {$($body:tt)*}) => {
        match $value {
            $($body)*
        }
    };
}

macro_rules! from {
    ($left_ty:ident, $right_ty:ident, {$($left:ident $(($val: ident))? = $right:ident $(($vall: ident))?,)+}) => {
        impl From<$left_ty> for $right_ty {
            fn from(value: $left_ty) -> Self {
                match_body!( match value {}
                    $(($left_ty::$left$(($val))? => $right_ty::$right$(($vall))?))+
                )
            }
        }
    };
}

macro_rules! try_from {
    ($left_ty:ident, $right_ty:ident, {$($left:ident $(($val: ident))? = $right:ident $(($vall: ident))?,)+}) => {
        impl TryFrom<$left_ty> for $right_ty {
            type Error = MessageConversionError;

            fn try_from(value: $left_ty) -> Result<Self, Self::Error> {
                Ok(match_body!( match value {
                    _ => return Err(MessageConversionError)
                }
                    $(($left_ty::$left$(($val))? => $right_ty::$right$(($vall))?))+
                ))
            }
        }
    };
}

macro_rules! from_try_from {
    ($left_ty:ident, $right_ty:ident, {$($left:ident $(($val: ident))? = $right:ident $(($vall: ident))?,)+}) => {
        try_from!($left_ty, $right_ty, {$($left $(($val))? = $right $(($vall))?,)+});
        from!($right_ty, $left_ty, {$($right $(($val))? = $left $(($vall))?,)+});
    };
}

macro_rules! try_from_try_from {
    ($left_ty:ident, $right_ty:ident, {$($left:ident $(($val: ident))? = $right:ident $(($vall: ident))?,)+}) => {
        try_from!($left_ty, $right_ty, {$($left $(($val))? = $right $(($vall))?,)+});
        try_from!($right_ty, $left_ty, {$($right $(($val))? = $left $(($val))?,)+});
    };
}

from_try_from!(Request, RequestMessage, {
    Handshake(val) = Handshake(val),
    Ping = Ping,
    SupportFlags = SupportFlags,
    TimedSync(val) = TimedSync(val),
});

try_from_try_from!(Request, ProtocolMessage, {
    NewBlock(val) = NewBlock(val),
    NewFluffyBlock(val) = NewFluffyBlock(val),
    GetObjects(val) = GetObjectsRequest(val),
    GetChain(val) = ChainRequest(val),
    NewTransactions(val) = NewTransactions(val),
    FluffyMissingTxs(val) = FluffyMissingTransactionsRequest(val),
    GetTxPoolCompliment(val) = GetTxPoolCompliment(val),
});

impl TryFrom<Message> for Request {
    type Error = MessageConversionError;

    fn try_from(value: Message) -> Result<Self, Self::Error> {
        match value {
            Message::Request(req) => Ok(req.into()),
            Message::Protocol(pro) => pro.try_into(),
            _ => Err(MessageConversionError),
        }
    }
}

impl From<Request> for Message {
    fn from(value: Request) -> Self {
        match value {
            Request::Handshake(val) => Message::Request(RequestMessage::Handshake(val)),
            Request::Ping => Message::Request(RequestMessage::Ping),
            Request::SupportFlags => Message::Request(RequestMessage::SupportFlags),
            Request::TimedSync(val) => Message::Request(RequestMessage::TimedSync(val)),

            Request::NewBlock(val) => Message::Protocol(ProtocolMessage::NewBlock(val)),
            Request::NewFluffyBlock(val) => Message::Protocol(ProtocolMessage::NewFluffyBlock(val)),
            Request::GetObjects(val) => Message::Protocol(ProtocolMessage::GetObjectsRequest(val)),
            Request::GetChain(val) => Message::Protocol(ProtocolMessage::ChainRequest(val)),
            Request::NewTransactions(val) => Message::Protocol(ProtocolMessage::NewTransactions(val)),
            Request::FluffyMissingTxs(val) => Message::Protocol(ProtocolMessage::FluffyMissingTransactionsRequest(val)),
            Request::GetTxPoolCompliment(val) => Message::Protocol(ProtocolMessage::GetTxPoolCompliment(val)),
        }
    }
}

from_try_from!(Response, ResponseMessage, {
    Handshake(val) = Handshake(val),
    Ping(val) = Ping(val),
    SupportFlags(val) = SupportFlags(val),
    TimedSync(val) = TimedSync(val),
});

try_from_try_from!(Response, ProtocolMessage, {
    NewFluffyBlock(val) = NewFluffyBlock(val),
    GetObjects(val) = GetObjectsResponse(val),
    GetChain(val) = ChainEntryResponse(val),
    NewTransactions(val) = NewTransactions(val),
});

impl TryFrom<Message> for Response {
    type Error = MessageConversionError;

    fn try_from(value: Message) -> Result<Self, Self::Error> {
        match value {
            Message::Response(res) => Ok(res.into()),
            Message::Protocol(pro) => pro.try_into(),
            _ => Err(MessageConversionError),
        }
    }
}

impl TryFrom<Response> for Message {
    type Error = MessageConversionError;

    fn try_from(value: Response) -> Result<Self, Self::Error> {
        Ok(match value {
            Response::Handshake(val) => Message::Response(ResponseMessage::Handshake(val)),
            Response::Ping(val) => Message::Response(ResponseMessage::Ping(val)),
            Response::SupportFlags(val) => Message::Response(ResponseMessage::SupportFlags(val)),
            Response::TimedSync(val) => Message::Response(ResponseMessage::TimedSync(val)),

            Response::NewFluffyBlock(val) => Message::Protocol(ProtocolMessage::NewFluffyBlock(val)),
            Response::GetObjects(val) => Message::Protocol(ProtocolMessage::GetObjectsResponse(val)),
            Response::GetChain(val) => Message::Protocol(ProtocolMessage::ChainEntryResponse(val)),
            Response::NewTransactions(val) => Message::Protocol(ProtocolMessage::NewTransactions(val)),

            Response::NA => return Err(MessageConversionError),
        })
    }
}
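With these conversions in place, the connection boundary can move between wire messages and internal types using plain `From`/`TryFrom`: a request always converts into a sendable `Message`, while the reverse direction and `Response` to `Message` can fail (for example on `Response::NA`). The small sketch below is illustrative only, with hypothetical helper names, and assumes the `Request`, `Response` and `MessageConversionError` types above are in scope.

use monero_wire::messages::Message;

/// Turn a decoded wire message into an internal request, rejecting responses
/// and anything else that is not request-shaped.
fn route_incoming(msg: Message) -> Result<Request, MessageConversionError> {
    Request::try_from(msg)
}

/// Requests always map onto a wire message, so this cannot fail...
fn request_to_wire(req: Request) -> Message {
    req.into()
}

/// ...but responses can, because `Response::NA` has no wire representation.
fn response_to_wire(res: Response) -> Result<Message, MessageConversionError> {
    Message::try_from(res)
}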
@ -1,13 +0,0 @@
pub mod internal_network;
pub mod temp_database;

pub use internal_network::{InternalMessageRequest, InternalMessageResponse};

pub const BLOCKS_IDS_SYNCHRONIZING_DEFAULT_COUNT: usize = 10000;
pub const BLOCKS_IDS_SYNCHRONIZING_MAX_COUNT: usize = 25000;
pub const P2P_MAX_PEERS_IN_HANDSHAKE: usize = 250;

pub enum Direction {
    Inbound,
    Outbound,
}
@ -1,36 +0,0 @@
use monero_wire::messages::CoreSyncData;
use thiserror::Error;

pub enum BlockKnown {
    No,
    OnMainChain,
    OnSideChain,
    KnownBad,
}

impl BlockKnown {
    pub fn is_known(&self) -> bool {
        !matches!(self, BlockKnown::No)
    }
}

pub enum DataBaseRequest {
    CurrentHeight,
    CumulativeDifficulty,
    CoreSyncData,
    Chain,
    BlockHeight([u8; 32]),
    BlockKnown([u8; 32]),
}

pub enum DataBaseResponse {
    CurrentHeight(u64),
    CumulativeDifficulty(u128),
    CoreSyncData(CoreSyncData),
    Chain(Vec<[u8; 32]>),
    BlockHeight(Option<u64>),
    BlockKnown(BlockKnown),
}

#[derive(Debug, Error, PartialEq, Eq)]
pub enum DatabaseError {}
@ -1,21 +0,0 @@
[package]
name = "cuprate-sync-states"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
cuprate-common = {path = "../../common"}
cuprate-peer = {path = "../peer"}
cuprate-protocol = {path = "../protocol"}
monero = {git="https://github.com/Boog900/monero-rs.git", branch="db", features=["database"]}
monero-wire = {path= "../../net/monero-wire"}
futures = "0.3.26"
tower = {version = "0.4.13", features = ["util"]}
thiserror = "1.0.39"

tokio = {version="1.1", features=["full"]}
tokio-util = {version ="0.7", features=["compat"]}
@ -1,538 +0,0 @@
|
||||||
use std::collections::{HashMap, HashSet};
|
|
||||||
use std::sync::{Arc, Mutex};
|
|
||||||
|
|
||||||
use futures::channel::mpsc;
|
|
||||||
use futures::StreamExt;
|
|
||||||
use monero::Hash;
|
|
||||||
use thiserror::Error;
|
|
||||||
use tower::{Service, ServiceExt};
|
|
||||||
|
|
||||||
use cuprate_common::{hardforks, HardForks};
|
|
||||||
use cuprate_peer::connection::PeerSyncChange;
|
|
||||||
use cuprate_protocol::temp_database::{
|
|
||||||
BlockKnown, DataBaseRequest, DataBaseResponse, DatabaseError,
|
|
||||||
};
|
|
||||||
use cuprate_protocol::{InternalMessageRequest, InternalMessageResponse};
|
|
||||||
use monero_wire::messages::protocol::ChainResponse;
|
|
||||||
use monero_wire::messages::{ChainRequest, CoreSyncData};
|
|
||||||
use monero_wire::{Message, NetworkAddress};
|
|
||||||
|
|
||||||
// TODO: Move this!!!!!!!
|
|
||||||
// ********************************
|
|
||||||
|
|
||||||
pub enum PeerSetRequest {
|
|
||||||
DisconnectPeer(NetworkAddress),
|
|
||||||
BanPeer(NetworkAddress),
|
|
||||||
SendRequest(InternalMessageRequest, Option<NetworkAddress>),
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct PeerSetResponse {
|
|
||||||
peer: NetworkAddress,
|
|
||||||
response: Option<InternalMessageResponse>,
|
|
||||||
}
|
|
||||||
|
|
||||||
// *******************************
|
|
||||||
#[derive(Debug, Default)]
|
|
||||||
pub struct IndividualPeerSync {
|
|
||||||
height: u64,
|
|
||||||
// no grantee this is the same block as height
|
|
||||||
top_id: Hash,
|
|
||||||
top_version: u8,
|
|
||||||
cumulative_difficulty: u128,
|
|
||||||
/// the height the list of needed blocks starts at
|
|
||||||
start_height: u64,
|
|
||||||
/// list of block hashes our node does not have.
|
|
||||||
needed_blocks: Vec<(Hash, Option<u64>)>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Default)]
|
|
||||||
pub struct PeersSyncData {
|
|
||||||
peers: HashMap<NetworkAddress, IndividualPeerSync>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl PeersSyncData {
|
|
||||||
pub fn new_core_sync_data(
|
|
||||||
&mut self,
|
|
||||||
id: &NetworkAddress,
|
|
||||||
core_sync: CoreSyncData,
|
|
||||||
) -> Result<(), SyncStatesError> {
|
|
||||||
let peer_data = self.peers.get_mut(&id);
|
|
||||||
if peer_data.is_none() {
|
|
||||||
let ips = IndividualPeerSync {
|
|
||||||
height: core_sync.current_height,
|
|
||||||
top_id: core_sync.top_id,
|
|
||||||
top_version: core_sync.top_version,
|
|
||||||
cumulative_difficulty: core_sync.cumulative_difficulty(),
|
|
||||||
start_height: 0,
|
|
||||||
needed_blocks: vec![],
|
|
||||||
};
|
|
||||||
self.peers.insert(*id, ips);
|
|
||||||
} else {
|
|
||||||
let peer_data = peer_data.unwrap();
|
|
||||||
if peer_data.height > core_sync.current_height {
|
|
||||||
return Err(SyncStatesError::PeersHeightHasDropped);
|
|
||||||
}
|
|
||||||
if peer_data.cumulative_difficulty > core_sync.cumulative_difficulty() {
|
|
||||||
return Err(SyncStatesError::PeersCumulativeDifficultyDropped);
|
|
||||||
}
|
|
||||||
peer_data.height = core_sync.current_height;
|
|
||||||
peer_data.cumulative_difficulty = core_sync.cumulative_difficulty();
|
|
||||||
peer_data.top_id = core_sync.top_id;
|
|
||||||
peer_data.top_version = core_sync.top_version;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn new_chain_response(
|
|
||||||
&mut self,
|
|
||||||
id: &NetworkAddress,
|
|
||||||
chain_response: ChainResponse,
|
|
||||||
needed_blocks: Vec<(Hash, Option<u64>)>,
|
|
||||||
) -> Result<(), SyncStatesError> {
|
|
||||||
let peer_data = self
|
|
||||||
.peers
|
|
||||||
.get_mut(&id)
|
|
||||||
.expect("Peers must give use their core sync before chain response");
|
|
||||||
|
|
||||||
// it's sad we have to do this so late in the response validation process
|
|
||||||
if peer_data.height > chain_response.total_height {
|
|
||||||
return Err(SyncStatesError::PeersHeightHasDropped);
|
|
||||||
}
|
|
||||||
if peer_data.cumulative_difficulty > chain_response.cumulative_difficulty() {
|
|
||||||
return Err(SyncStatesError::PeersCumulativeDifficultyDropped);
|
|
||||||
}
|
|
||||||
|
|
||||||
peer_data.cumulative_difficulty = chain_response.cumulative_difficulty();
|
|
||||||
peer_data.height = chain_response.total_height;
|
|
||||||
peer_data.start_height = chain_response.start_height
|
|
||||||
+ chain_response.m_block_ids.len() as u64
|
|
||||||
- needed_blocks.len() as u64;
|
|
||||||
peer_data.needed_blocks = needed_blocks;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
// returns true if we have ran out of known blocks for that peer
|
|
||||||
pub fn new_objects_response(
|
|
||||||
&mut self,
|
|
||||||
id: &NetworkAddress,
|
|
||||||
mut block_ids: HashSet<Hash>,
|
|
||||||
) -> Result<bool, SyncStatesError> {
|
|
||||||
let peer_data = self
|
|
||||||
.peers
|
|
||||||
.get_mut(id)
|
|
||||||
.expect("Peers must give use their core sync before objects response");
|
|
||||||
let mut i = 0;
|
|
||||||
if peer_data.needed_blocks.is_empty() {
|
|
||||||
return Ok(true);
|
|
||||||
}
|
|
||||||
while !block_ids.contains(&peer_data.needed_blocks[i].0) {
|
|
||||||
i += 1;
|
|
||||||
if i == peer_data.needed_blocks.len() {
|
|
||||||
peer_data.needed_blocks = vec![];
|
|
||||||
peer_data.start_height = 0;
|
|
||||||
return Ok(true);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _ in 0..block_ids.len() {
|
|
||||||
if !block_ids.remove(&peer_data.needed_blocks[i].0) {
|
|
||||||
return Err(SyncStatesError::PeerSentAnUnexpectedBlockId);
|
|
||||||
}
|
|
||||||
i += 1;
|
|
||||||
if i == peer_data.needed_blocks.len() {
|
|
||||||
peer_data.needed_blocks = vec![];
|
|
||||||
peer_data.start_height = 0;
|
|
||||||
return Ok(true);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
peer_data.needed_blocks = peer_data.needed_blocks[i..].to_vec();
|
|
||||||
peer_data.start_height = peer_data.start_height + i as u64;
|
|
||||||
return Ok(false);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn peer_disconnected(&mut self, id: &NetworkAddress) {
|
|
||||||
let _ = self.peers.remove(id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Error, PartialEq, Eq)]
|
|
||||||
pub enum SyncStatesError {
|
|
||||||
#[error("Peer sent a block id we know is bad")]
|
|
||||||
PeerSentKnownBadBlock,
|
|
||||||
#[error("Peer sent a block id we weren't expecting")]
|
|
||||||
PeerSentAnUnexpectedBlockId,
|
|
||||||
#[error("Peer sent a chain entry where we don't know the start")]
|
|
||||||
PeerSentNoneOverlappingFirstBlock,
|
|
||||||
#[error("We have the peers block just at a different height")]
|
|
||||||
WeHaveBlockAtDifferentHeight,
|
|
||||||
#[error("The peer sent a top version we weren't expecting")]
|
|
||||||
PeerSentBadTopVersion,
|
|
||||||
#[error("The peer sent a weird pruning seed")]
|
|
||||||
PeerSentBadPruningSeed,
|
|
||||||
#[error("The peer height has dropped")]
|
|
||||||
PeersHeightHasDropped,
|
|
||||||
#[error("The peers cumulative difficulty has dropped")]
|
|
||||||
PeersCumulativeDifficultyDropped,
|
|
||||||
#[error("Our database returned an error: {0}")]
|
|
||||||
DataBaseError(#[from] DatabaseError),
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct SyncStates<Db> {
|
|
||||||
peer_sync_rx: mpsc::Receiver<PeerSyncChange>,
|
|
||||||
hardforks: HardForks,
|
|
||||||
peer_sync_states: Arc<Mutex<PeersSyncData>>,
|
|
||||||
blockchain: Db,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Db> SyncStates<Db>
|
|
||||||
where
|
|
||||||
Db: Service<DataBaseRequest, Response = DataBaseResponse, Error = DatabaseError>,
|
|
||||||
{
|
|
||||||
pub fn new(
|
|
||||||
peer_sync_rx: mpsc::Receiver<PeerSyncChange>,
|
|
||||||
hardforks: HardForks,
|
|
||||||
peer_sync_states: Arc<Mutex<PeersSyncData>>,
|
|
||||||
blockchain: Db,
|
|
||||||
) -> Self {
|
|
||||||
SyncStates {
|
|
||||||
peer_sync_rx,
|
|
||||||
hardforks,
|
|
||||||
peer_sync_states,
|
|
||||||
blockchain,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
async fn send_database_request(
|
|
||||||
&mut self,
|
|
||||||
req: DataBaseRequest,
|
|
||||||
) -> Result<DataBaseResponse, DatabaseError> {
|
|
||||||
let ready_blockchain = self.blockchain.ready().await?;
|
|
||||||
ready_blockchain.call(req).await
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_core_sync_change(
|
|
||||||
&mut self,
|
|
||||||
id: &NetworkAddress,
|
|
||||||
core_sync: CoreSyncData,
|
|
||||||
) -> Result<bool, SyncStatesError> {
|
|
||||||
if core_sync.current_height > 0 {
|
|
||||||
let version = self
|
|
||||||
.hardforks
|
|
||||||
.get_ideal_version_from_height(core_sync.current_height - 1);
|
|
||||||
if version >= 6 && version != core_sync.top_version {
|
|
||||||
return Err(SyncStatesError::PeerSentBadTopVersion);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if core_sync.pruning_seed != 0 {
|
|
||||||
let log_stripes =
|
|
||||||
monero::database::pruning::get_pruning_log_stripes(core_sync.pruning_seed);
|
|
||||||
let stripe =
|
|
||||||
monero::database::pruning::get_pruning_stripe_for_seed(core_sync.pruning_seed);
|
|
||||||
if stripe != monero::database::pruning::CRYPTONOTE_PRUNING_LOG_STRIPES
|
|
||||||
|| stripe > (1 << log_stripes)
|
|
||||||
{
|
|
||||||
return Err(SyncStatesError::PeerSentBadPruningSeed);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
//if core_sync.current_height > max block numb
|
|
||||||
let DataBaseResponse::BlockHeight(height) = self.send_database_request(DataBaseRequest::BlockHeight(core_sync.top_id)).await? else {
|
|
||||||
unreachable!("the blockchain won't send the wrong response");
|
|
||||||
};
|
|
||||||
|
|
||||||
let behind: bool;
|
|
||||||
|
|
||||||
if let Some(height) = height {
|
|
||||||
if height != core_sync.current_height {
|
|
||||||
return Err(SyncStatesError::WeHaveBlockAtDifferentHeight);
|
|
||||||
}
|
|
||||||
behind = false;
|
|
||||||
} else {
|
|
||||||
let DataBaseResponse::CumulativeDifficulty(cumulative_diff) = self.send_database_request(DataBaseRequest::CumulativeDifficulty).await? else {
|
|
||||||
unreachable!("the blockchain won't send the wrong response");
|
|
||||||
};
|
|
||||||
// if their chain has more POW we want it
|
|
||||||
if cumulative_diff < core_sync.cumulative_difficulty() {
|
|
||||||
behind = true;
|
|
||||||
} else {
|
|
||||||
behind = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut sync_states = self.peer_sync_states.lock().unwrap();
|
|
||||||
sync_states.new_core_sync_data(id, core_sync)?;
|
|
||||||
|
|
||||||
Ok(behind)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_chain_entry_response(
|
|
||||||
&mut self,
|
|
||||||
id: &NetworkAddress,
|
|
||||||
chain_response: ChainResponse,
|
|
||||||
) -> Result<(), SyncStatesError> {
|
|
||||||
let mut expect_unknown = false;
|
|
||||||
let mut needed_blocks = Vec::with_capacity(chain_response.m_block_ids.len());
|
|
||||||
|
|
||||||
for (index, block_id) in chain_response.m_block_ids.iter().enumerate() {
|
|
||||||
let DataBaseResponse::BlockKnown(known) = self.send_database_request(DataBaseRequest::BlockKnown(*block_id)).await? else {
|
|
||||||
unreachable!("the blockchain won't send the wrong response");
|
|
||||||
};
|
|
||||||
if index == 0 {
|
|
||||||
if !known.is_known() {
|
|
||||||
return Err(SyncStatesError::PeerSentNoneOverlappingFirstBlock);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
match known {
|
|
||||||
BlockKnown::No => expect_unknown = true,
|
|
||||||
BlockKnown::OnMainChain => {
|
|
||||||
if expect_unknown {
|
|
||||||
return Err(SyncStatesError::PeerSentAnUnexpectedBlockId);
|
|
||||||
} else {
|
|
||||||
let DataBaseResponse::BlockHeight(height) = self.send_database_request(DataBaseRequest::BlockHeight(*block_id)).await? else {
|
|
||||||
unreachable!("the blockchain won't send the wrong response");
|
|
||||||
};
|
|
||||||
if chain_response.start_height + index as u64
|
|
||||||
!= height.expect("We already know this block is in our main chain.")
|
|
||||||
{
|
|
||||||
return Err(SyncStatesError::WeHaveBlockAtDifferentHeight);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
BlockKnown::OnSideChain => {
|
|
||||||
if expect_unknown {
|
|
||||||
return Err(SyncStatesError::PeerSentAnUnexpectedBlockId);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
BlockKnown::KnownBad => return Err(SyncStatesError::PeerSentKnownBadBlock),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
let block_weight = chain_response.m_block_weights.get(index).map(|f| f.clone());
|
|
||||||
needed_blocks.push((*block_id, block_weight));
|
|
||||||
}
|
|
||||||
let mut sync_states = self.peer_sync_states.lock().unwrap();
|
|
||||||
sync_states.new_chain_response(id, chain_response, needed_blocks)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn build_chain_request(&mut self) -> Result<ChainRequest, DatabaseError> {
|
|
||||||
let DataBaseResponse::Chain(ids) = self.send_database_request(DataBaseRequest::Chain).await? else {
|
|
||||||
unreachable!("the blockchain won't send the wrong response");
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(ChainRequest {
|
|
||||||
block_ids: ids,
|
|
||||||
prune: false,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_peers_chain_entry<Svc>(
|
|
||||||
&mut self,
|
|
||||||
peer_set: &mut Svc,
|
|
||||||
id: &NetworkAddress,
|
|
||||||
) -> Result<ChainResponse, DatabaseError>
|
|
||||||
where
|
|
||||||
Svc: Service<PeerSetRequest, Response = PeerSetResponse, Error = DatabaseError>,
|
|
||||||
{
|
|
||||||
let chain_req = self.build_chain_request().await?;
|
|
||||||
let ready_set = peer_set.ready().await.unwrap();
|
|
||||||
let response: PeerSetResponse = ready_set
|
|
||||||
.call(PeerSetRequest::SendRequest(
|
|
||||||
Message::Notification(chain_req.into())
|
|
||||||
.try_into()
|
|
||||||
.expect("Chain request can always be converted to IMR"),
|
|
||||||
Some(*id),
|
|
||||||
))
|
|
||||||
.await?;
|
|
||||||
let InternalMessageResponse::ChainResponse(response) = response.response.expect("peer set will return a result for a chain request") else {
|
|
||||||
unreachable!("peer set will return correct response");
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(response)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_and_handle_chain_entry<Svc>(
|
|
||||||
&mut self,
|
|
||||||
peer_set: &mut Svc,
|
|
||||||
id: NetworkAddress,
|
|
||||||
) -> Result<(), SyncStatesError>
|
|
||||||
where
|
|
||||||
Svc: Service<PeerSetRequest, Response = PeerSetResponse, Error = DatabaseError>,
|
|
||||||
{
|
|
||||||
let chain_response = self.get_peers_chain_entry(peer_set, &id).await?;
|
|
||||||
self.handle_chain_entry_response(&id, chain_response).await
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_objects_response(
|
|
||||||
&mut self,
|
|
||||||
id: NetworkAddress,
|
|
||||||
block_ids: Vec<Hash>,
|
|
||||||
peers_height: u64,
|
|
||||||
) -> Result<bool, SyncStatesError> {
|
|
||||||
let mut sync_states = self.peer_sync_states.lock().unwrap();
|
|
||||||
let ran_out_of_blocks =
|
|
||||||
sync_states.new_objects_response(&id, HashSet::from_iter(block_ids))?;
|
|
||||||
drop(sync_states);
|
|
||||||
if ran_out_of_blocks {
|
|
||||||
let DataBaseResponse::CurrentHeight(our_height) = self.send_database_request(DataBaseRequest::CurrentHeight).await? else {
|
|
||||||
unreachable!("the blockchain won't send the wrong response");
|
|
||||||
};
|
|
||||||
if our_height < peers_height {
|
|
||||||
return Ok(true);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(false)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn handle_peer_disconnect(&mut self, id: NetworkAddress) {
|
|
||||||
let mut sync_states = self.peer_sync_states.lock().unwrap();
|
|
||||||
sync_states.peer_disconnected(&id);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn run<Svc>(mut self, mut peer_set: Svc)
|
|
||||||
where
|
|
||||||
Svc: Service<PeerSetRequest, Response = PeerSetResponse, Error = DatabaseError>,
|
|
||||||
{
|
|
||||||
loop {
|
|
||||||
let Some(change) = self.peer_sync_rx.next().await else {
|
|
||||||
// is this best?
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
|
|
||||||
match change {
|
|
||||||
PeerSyncChange::CoreSyncData(id, csd) => {
|
|
||||||
match self.handle_core_sync_change(&id, csd).await {
|
|
||||||
Err(_) => {
|
|
||||||
// TODO: check if error needs ban or forget
|
|
||||||
let ready_set = peer_set.ready().await.unwrap();
|
|
||||||
let res = ready_set.call(PeerSetRequest::BanPeer(id)).await;
|
|
||||||
}
|
|
||||||
Ok(request_chain) => {
|
|
||||||
if request_chain {
|
|
||||||
self.get_and_handle_chain_entry(&mut peer_set, id).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
PeerSyncChange::ObjectsResponse(id, block_ids, height) => {
|
|
||||||
match self.handle_objects_response(id, block_ids, height).await {
|
|
||||||
Err(_) => {
|
|
||||||
// TODO: check if error needs ban or forget
|
|
||||||
let ready_set = peer_set.ready().await.unwrap();
|
|
||||||
let res = ready_set.call(PeerSetRequest::BanPeer(id)).await;
|
|
||||||
}
|
|
||||||
Ok(res) => {
|
|
||||||
if res {
|
|
||||||
self.get_and_handle_chain_entry(&mut peer_set, id).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
PeerSyncChange::PeerDisconnected(id) => {
|
|
||||||
self.handle_peer_disconnect(id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use monero::Hash;
|
|
||||||
use monero_wire::messages::{ChainResponse, CoreSyncData};
|
|
||||||
|
|
||||||
use crate::{PeersSyncData, SyncStatesError};
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn peer_sync_data_good_core_sync() {
|
|
||||||
let mut peer_sync_states = PeersSyncData::default();
|
|
||||||
let core_sync = CoreSyncData::new(65346753, 1232, 389, Hash::null(), 1);
|
|
||||||
|
|
||||||
peer_sync_states
|
|
||||||
.new_core_sync_data(&monero_wire::NetworkAddress::default(), core_sync)
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let new_core_sync = CoreSyncData::new(65346754, 1233, 389, Hash::null(), 1);
|
|
||||||
|
|
||||||
peer_sync_states
|
|
||||||
.new_core_sync_data(&monero_wire::NetworkAddress::default(), new_core_sync)
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let peer = peer_sync_states
|
|
||||||
.peers
|
|
||||||
.get(&monero_wire::NetworkAddress::default())
|
|
||||||
.unwrap();
|
|
||||||
assert_eq!(peer.height, 1233);
|
|
||||||
assert_eq!(peer.cumulative_difficulty, 65346754);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn peer_sync_data_peer_height_dropped() {
|
|
||||||
let mut peer_sync_states = PeersSyncData::default();
|
|
||||||
let core_sync = CoreSyncData::new(65346753, 1232, 389, Hash::null(), 1);
|
|
||||||
|
|
||||||
peer_sync_states
|
|
||||||
.new_core_sync_data(&monero_wire::NetworkAddress::default(), core_sync)
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let new_core_sync = CoreSyncData::new(65346754, 1231, 389, Hash::null(), 1);
|
|
||||||
|
|
||||||
let res = peer_sync_states
|
|
||||||
.new_core_sync_data(&monero_wire::NetworkAddress::default(), new_core_sync)
|
|
||||||
.unwrap_err();
|
|
||||||
|
|
||||||
assert_eq!(res, SyncStatesError::PeersHeightHasDropped);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn peer_sync_data_peer_cumulative_difficulty_dropped() {
|
|
||||||
let mut peer_sync_states = PeersSyncData::default();
|
|
||||||
let core_sync = CoreSyncData::new(65346753, 1232, 389, Hash::null(), 1);
|
|
||||||
|
|
||||||
peer_sync_states
|
|
||||||
.new_core_sync_data(&monero_wire::NetworkAddress::default(), core_sync)
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let new_core_sync = CoreSyncData::new(65346752, 1233, 389, Hash::null(), 1);
|
|
||||||
|
|
||||||
let res = peer_sync_states
|
|
||||||
.new_core_sync_data(&monero_wire::NetworkAddress::default(), new_core_sync)
|
|
||||||
.unwrap_err();
|
|
||||||
|
|
||||||
assert_eq!(res, SyncStatesError::PeersCumulativeDifficultyDropped);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn peer_sync_new_chain_response() {
|
|
||||||
let mut peer_sync_states = PeersSyncData::default();
|
|
||||||
let core_sync = CoreSyncData::new(65346753, 1232, 389, Hash::null(), 1);
|
|
||||||
|
|
||||||
peer_sync_states
|
|
||||||
.new_core_sync_data(&monero_wire::NetworkAddress::default(), core_sync)
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let chain_response = ChainResponse::new(
|
|
||||||
10,
|
|
||||||
1233,
|
|
||||||
65346754,
|
|
||||||
vec![Hash::new(&[1]), Hash::new(&[2])],
|
|
||||||
vec![],
|
|
||||||
vec![],
|
|
||||||
);
|
|
||||||
|
|
||||||
let needed_blocks = vec![(Hash::new(&[2]), None)];
|
|
||||||
|
|
||||||
peer_sync_states
|
|
||||||
.new_chain_response(
|
|
||||||
&monero_wire::NetworkAddress::default(),
|
|
||||||
chain_response,
|
|
||||||
needed_blocks,
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let peer = peer_sync_states
|
|
||||||
.peers
|
|
||||||
.get(&monero_wire::NetworkAddress::default())
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
assert_eq!(peer.start_height, 11);
|
|
||||||
assert_eq!(peer.height, 1233);
|
|
||||||
assert_eq!(peer.cumulative_difficulty, 65346754);
|
|
||||||
assert_eq!(peer.needed_blocks, vec![(Hash::new(&[2]), None)]);
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,109 +0,0 @@
|
||||||
use std::{
|
|
||||||
pin::Pin,
|
|
||||||
str::FromStr,
|
|
||||||
sync::{Arc, Mutex},
|
|
||||||
};
|
|
||||||
|
|
||||||
use cuprate_common::{HardForks, Network};
|
|
||||||
use cuprate_peer::PeerError;
|
|
||||||
use cuprate_protocol::{
|
|
||||||
temp_database::{BlockKnown, DataBaseRequest, DataBaseResponse, DatabaseError},
|
|
||||||
Direction, InternalMessageRequest, InternalMessageResponse,
|
|
||||||
};
|
|
||||||
use cuprate_sync_states::SyncStates;
|
|
||||||
use futures::{channel::mpsc, Future, FutureExt};
|
|
||||||
use monero::Hash;
|
|
||||||
use monero_wire::messages::{admin::HandshakeResponse, CoreSyncData};
|
|
||||||
use tower::ServiceExt;
|
|
||||||
|
|
||||||
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
|
||||||
|
|
||||||
struct TestBlockchain;
|
|
||||||
|
|
||||||
impl tower::Service<DataBaseRequest> for TestBlockchain {
|
|
||||||
type Error = DatabaseError;
|
|
||||||
type Response = DataBaseResponse;
|
|
||||||
type Future =
|
|
||||||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
|
||||||
fn poll_ready(
|
|
||||||
&mut self,
|
|
||||||
cx: &mut std::task::Context<'_>,
|
|
||||||
) -> std::task::Poll<Result<(), Self::Error>> {
|
|
||||||
std::task::Poll::Ready(Ok(()))
|
|
||||||
}
|
|
||||||
fn call(&mut self, req: DataBaseRequest) -> Self::Future {
|
|
||||||
let res = match req {
|
|
||||||
DataBaseRequest::BlockHeight(h) => DataBaseResponse::BlockHeight(Some(221)),
|
|
||||||
DataBaseRequest::BlockKnown(_) => DataBaseResponse::BlockKnown(BlockKnown::OnMainChain),
|
|
||||||
DataBaseRequest::Chain => todo!(),
|
|
||||||
DataBaseRequest::CoreSyncData => {
|
|
||||||
DataBaseResponse::CoreSyncData(CoreSyncData::new(0, 0, 0, Hash::null(), 0))
|
|
||||||
}
|
|
||||||
DataBaseRequest::CumulativeDifficulty => DataBaseResponse::CumulativeDifficulty(0),
|
|
||||||
DataBaseRequest::CurrentHeight => DataBaseResponse::CurrentHeight(0),
|
|
||||||
};
|
|
||||||
|
|
||||||
async { Ok(res) }.boxed()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
struct TestPeerRequest;
|
|
||||||
|
|
||||||
impl tower::Service<InternalMessageRequest> for TestPeerRequest {
|
|
||||||
type Error = PeerError;
|
|
||||||
type Response = InternalMessageResponse;
|
|
||||||
type Future =
|
|
||||||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
|
||||||
fn poll_ready(
|
|
||||||
&mut self,
|
|
||||||
cx: &mut std::task::Context<'_>,
|
|
||||||
) -> std::task::Poll<Result<(), Self::Error>> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
fn call(&mut self, req: InternalMessageRequest) -> Self::Future {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
async fn test_p2p_conn() {
|
|
||||||
let conf = cuprate_peer::handshaker::NetworkConfig::default();
|
|
||||||
let (addr_tx, addr_rx) = mpsc::channel(21);
|
|
||||||
let (sync_tx, sync_rx) = mpsc::channel(21);
|
|
||||||
let peer_sync_states = Arc::new(Mutex::default());
|
|
||||||
|
|
||||||
let peer_sync_states = SyncStates::new(
|
|
||||||
sync_rx,
|
|
||||||
HardForks::new(Network::MainNet),
|
|
||||||
peer_sync_states,
|
|
||||||
TestBlockchain,
|
|
||||||
);
|
|
||||||
|
|
||||||
let mut handshaker = cuprate_peer::handshaker::Handshaker::new(
|
|
||||||
conf,
|
|
||||||
addr_tx,
|
|
||||||
TestBlockchain,
|
|
||||||
sync_tx,
|
|
||||||
TestPeerRequest.boxed_clone(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let soc = tokio::net::TcpSocket::new_v4().unwrap();
|
|
||||||
let addr = std::net::SocketAddr::from_str("127.0.0.1:18080").unwrap();
|
|
||||||
|
|
||||||
let mut con = soc.connect(addr).await.unwrap();
|
|
||||||
|
|
||||||
let (r_h, w_h) = con.split();
|
|
||||||
|
|
||||||
let (client, conn) = handshaker
|
|
||||||
.complete_handshake(
|
|
||||||
r_h.compat(),
|
|
||||||
w_h.compat_write(),
|
|
||||||
Direction::Outbound,
|
|
||||||
monero_wire::NetworkAddress::default(),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
//conn.run().await;
|
|
||||||
}
|
|
11
test-utils/Cargo.toml
Normal file
@ -0,0 +1,11 @@
[package]
name = "cuprate-test-utils"
version = "0.1.0"
edition = "2021"

[dependencies]
monero-wire = {path = "../net/monero-wire"}
monero-peer = {path = "../p2p/monero-peer"}

futures = "0.3.29"
async-trait = "0.1.74"
|
1
test-utils/src/lib.rs
Normal file
1
test-utils/src/lib.rs
Normal file
|
@ -0,0 +1 @@
|
||||||
|
pub mod test_netzone;
|
109
test-utils/src/test_netzone.rs
Normal file
109
test-utils/src/test_netzone.rs
Normal file
|
@ -0,0 +1,109 @@
|
||||||
|
use std::{
|
||||||
|
fmt::Formatter,
|
||||||
|
io::Error,
|
||||||
|
net::{Ipv4Addr, SocketAddr},
|
||||||
|
pin::Pin,
|
||||||
|
task::{Context, Poll},
|
||||||
|
};
|
||||||
|
|
||||||
|
use futures::{channel::mpsc::Sender as InnerSender, stream::BoxStream, Sink};
|
||||||
|
|
||||||
|
use monero_wire::{
|
||||||
|
network_address::{NetworkAddress, NetworkAddressIncorrectZone},
|
||||||
|
BucketError, Message,
|
||||||
|
};
|
||||||
|
|
||||||
|
use monero_peer::NetworkZone;
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct TestNetZoneAddr(pub u32);
|
||||||
|
|
||||||
|
impl std::fmt::Display for TestNetZoneAddr {
|
||||||
|
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||||
|
f.write_str(format!("test client, id: {}", self.0).as_str())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<TestNetZoneAddr> for NetworkAddress {
|
||||||
|
fn from(value: TestNetZoneAddr) -> Self {
|
||||||
|
NetworkAddress::Clear(SocketAddr::new(Ipv4Addr::from(value.0).into(), 18080))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TryFrom<NetworkAddress> for TestNetZoneAddr {
|
||||||
|
type Error = NetworkAddressIncorrectZone;
|
||||||
|
|
||||||
|
fn try_from(value: NetworkAddress) -> Result<Self, Self::Error> {
|
||||||
|
match value {
|
||||||
|
NetworkAddress::Clear(soc) => match soc {
|
||||||
|
SocketAddr::V4(v4) => Ok(TestNetZoneAddr(u32::from_be_bytes(v4.ip().octets()))),
|
||||||
|
_ => panic!("None v4 address in test code"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct Sender {
|
||||||
|
inner: InnerSender<Message>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<InnerSender<Message>> for Sender {
|
||||||
|
fn from(inner: InnerSender<Message>) -> Self {
|
||||||
|
Sender { inner }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Sink<Message> for Sender {
|
||||||
|
type Error = BucketError;
|
||||||
|
|
||||||
|
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||||
|
self.get_mut()
|
||||||
|
.inner
|
||||||
|
.poll_ready(cx)
|
||||||
|
.map_err(|_| BucketError::IO(std::io::Error::other("mock connection channel closed")))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn start_send(self: Pin<&mut Self>, item: Message) -> Result<(), Self::Error> {
|
||||||
|
self.get_mut()
|
||||||
|
.inner
|
||||||
|
.start_send(item)
|
||||||
|
.map_err(|_| BucketError::IO(std::io::Error::other("mock connection channel closed")))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||||
|
Pin::new(&mut self.get_mut().inner)
|
||||||
|
.poll_flush(cx)
|
||||||
|
.map_err(|_| BucketError::IO(std::io::Error::other("mock connection channel closed")))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||||
|
Pin::new(&mut self.get_mut().inner)
|
||||||
|
.poll_close(cx)
|
||||||
|
.map_err(|_| BucketError::IO(std::io::Error::other("mock connection channel closed")))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct TestNetZone<const ALLOW_SYNC: bool, const DANDELION_PP: bool, const CHECK_NODE_ID: bool>;
|
||||||
|
|
||||||
|
#[async_trait::async_trait]
|
||||||
|
impl<const ALLOW_SYNC: bool, const DANDELION_PP: bool, const CHECK_NODE_ID: bool> NetworkZone
|
||||||
|
for TestNetZone<ALLOW_SYNC, DANDELION_PP, CHECK_NODE_ID>
|
||||||
|
{
|
||||||
|
const ALLOW_SYNC: bool = ALLOW_SYNC;
|
||||||
|
const DANDELION_PP: bool = DANDELION_PP;
|
||||||
|
const CHECK_NODE_ID: bool = CHECK_NODE_ID;
|
||||||
|
|
||||||
|
type Addr = TestNetZoneAddr;
|
||||||
|
type Stream = BoxStream<'static, Result<Message, BucketError>>;
|
||||||
|
type Sink = Sender;
|
||||||
|
type ServerCfg = ();
|
||||||
|
|
||||||
|
async fn connect_to_peer(_: Self::Addr) -> Result<(Self::Stream, Self::Sink), Error> {
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn incoming_connection_listener(_: Self::ServerCfg) -> () {
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
}
|