mirror of
https://github.com/Cuprate/cuprate.git
synced 2024-12-22 19:49:28 +00:00
initial p2p code (#8)
* init * save * use macro to create the levin body enum * add protocol docs and cargo fmt * add response validation * add client functionality to connection + fmt * Add new cuprate-common crate this crate will hold stuff needed across cuprate crates + init handshaker * add stagenet & testnet hardforks + tests + cargo fmt * split peer and protocol into separate crates + add sync state watcher * finish initial sync states and add some tests * save * add initial address book * cargo fmt * save * add pruning module to cuprate-common * more address book updates - added an address book client - add some more address book requests - add "NetZone" * lots of changes * cargo fmt * combine p2p into one crate they were all linked anyway * cargo fmt * turn the handshaker into a statemachine * cargo fmt * reduce the amt of copies when decoding + remove reliance on monero-rs * update time_from_timestamp func * cargo fmt + change qr code link + remove clippy.toml
This commit is contained in:
parent
be43216b3f
commit
a187d9a357
43 changed files with 4320 additions and 657 deletions
|
@ -18,10 +18,13 @@ authors=[
|
|||
[workspace]

members = [
    "common",
    "cuprate",
    "database",
    "net/levin",
    "net/monero-wire",
    "p2p",
    # "p2p/sync-states"
]

[workspace.dependencies]
|
||||
|
|
|
@ -1,2 +0,0 @@
|
|||
avoid-breaking-exported-api = false
|
||||
msrv = "1.67.0"
|
11
common/Cargo.toml
Normal file
11
common/Cargo.toml
Normal file
|
@ -0,0 +1,11 @@
|
|||
# Crate holding types and utilities shared across the Cuprate crates
# (hard forks, network IDs, pruning seeds).
[package]
name = "cuprate-common"
version = "0.1.0"
edition = "2021"
license = "AGPL-3.0-only"
authors = ["Boog900"]

[dependencies]
chrono = "0.4.24"
thiserror = "1"
|
14
common/LICENSE
Normal file
14
common/LICENSE
Normal file
|
@ -0,0 +1,14 @@
|
|||
Copyright (C) 2023 Cuprate Contributors
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
250
common/src/hardforks.rs
Normal file
250
common/src/hardforks.rs
Normal file
|
@ -0,0 +1,250 @@
|
|||
use chrono::NaiveDateTime;
|
||||
|
||||
use crate::network::Network;
|
||||
|
||||
// Converts a Unix timestamp (seconds) into a `NaiveDateTime`.
//
// this function blindly unwraps
// SAFETY: only call when you know the timestamp is good — every call site in
// this file passes a hard-coded, known-valid fork timestamp.
fn time_from_timestamp(stamp: i64) -> NaiveDateTime {
    NaiveDateTime::from_timestamp_opt(stamp, 0).unwrap()
}
|
||||
|
||||
/// Returns the full hard-fork table for `network` as
/// `(version, activation height, timestamp)` triples, sorted ascending by
/// version/height. The timestamp is when the fork height was decided, not
/// when the fork activated.
fn get_hard_forks(network: Network) -> [(u8, u64, NaiveDateTime); 16] {
    match network {
        Network::MainNet => {
            [
                // | version | Height | TimeStamp | *timestamp is when fork height was decided
                (1, 1, time_from_timestamp(1341378000)),
                (2, 1009827, time_from_timestamp(1442763710)),
                (3, 1141317, time_from_timestamp(1458558528)),
                (4, 1220516, time_from_timestamp(1483574400)),
                (5, 1288616, time_from_timestamp(1489520158)),
                (6, 1400000, time_from_timestamp(1503046577)),
                (7, 1546000, time_from_timestamp(1521303150)),
                (8, 1685555, time_from_timestamp(1535889547)),
                (9, 1686275, time_from_timestamp(1535889548)),
                (10, 1788000, time_from_timestamp(1549792439)),
                (11, 1788720, time_from_timestamp(1550225678)),
                (12, 1978433, time_from_timestamp(1571419280)),
                (13, 2210000, time_from_timestamp(1598180817)),
                (14, 2210720, time_from_timestamp(1598180818)),
                (15, 2688888, time_from_timestamp(1656629117)),
                (16, 2689608, time_from_timestamp(1656629118)),
            ]
        }
        Network::TestNet => [
            (1, 1, time_from_timestamp(1341378000)),
            (2, 624634, time_from_timestamp(1445355000)),
            (3, 800500, time_from_timestamp(1472415034)),
            (4, 801219, time_from_timestamp(1472415035)),
            (5, 802660, time_from_timestamp(1472415036 + 86400 * 180)),
            (6, 971400, time_from_timestamp(1501709789)),
            (7, 1057027, time_from_timestamp(1512211236)),
            (8, 1057058, time_from_timestamp(1533211200)),
            (9, 1057778, time_from_timestamp(1533297600)),
            (10, 1154318, time_from_timestamp(1550153694)),
            (11, 1155038, time_from_timestamp(1550225678)),
            (12, 1308737, time_from_timestamp(1569582000)),
            (13, 1543939, time_from_timestamp(1599069376)),
            (14, 1544659, time_from_timestamp(1599069377)),
            (15, 1982800, time_from_timestamp(1652727000)),
            (16, 1983520, time_from_timestamp(1652813400)),
        ],
        Network::StageNet => [
            (1, 1, time_from_timestamp(1341378000)),
            (2, 32000, time_from_timestamp(1521000000)),
            (3, 33000, time_from_timestamp(1521120000)),
            (4, 34000, time_from_timestamp(1521240000)),
            (5, 35000, time_from_timestamp(1521360000)),
            (6, 36000, time_from_timestamp(1521480000)),
            (7, 37000, time_from_timestamp(1521600000)),
            (8, 176456, time_from_timestamp(1537821770)),
            (9, 177176, time_from_timestamp(1537821771)),
            (10, 269000, time_from_timestamp(1550153694)),
            (11, 269720, time_from_timestamp(1550225678)),
            (12, 454721, time_from_timestamp(1571419280)),
            (13, 675405, time_from_timestamp(1598180817)),
            (14, 676125, time_from_timestamp(1598180818)),
            (15, 1151000, time_from_timestamp(1656629117)),
            (16, 1151720, time_from_timestamp(1656629118)),
        ],
    }
}
|
||||
|
||||
/// The hard-fork table for one network, queryable by height or version.
pub struct HardForks {
    // `(version, activation height, decision timestamp)`, ascending.
    hard_forks: [(u8, u64, NaiveDateTime); 16],
}
|
||||
|
||||
impl HardForks {
|
||||
pub fn new(network: Network) -> Self {
|
||||
HardForks {
|
||||
hard_forks: get_hard_forks(network),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_ideal_version_from_height(&self, height: u64) -> u8 {
|
||||
for hf in self.hard_forks.iter().rev() {
|
||||
if height >= hf.1 {
|
||||
return hf.0;
|
||||
}
|
||||
}
|
||||
0
|
||||
}
|
||||
|
||||
pub fn get_earliest_ideal_height_for_version(&self, version: u8) -> Option<u64> {
|
||||
if self.hard_forks.len() < version as usize {
|
||||
None
|
||||
} else if version == 0 {
|
||||
Some(0)
|
||||
} else {
|
||||
Some(self.hard_forks[(version - 1) as usize].1)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_ideal_version(&self) -> u8 {
|
||||
self.hard_forks.last().expect("This is not empty").0
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use crate::network::Network;

    use super::HardForks;

    const MAIN_NET_FORKS: [u64; 16] = [
        1, 1009827, 1141317, 1220516, 1288616, 1400000, 1546000, 1685555, 1686275, 1788000,
        1788720, 1978433, 2210000, 2210720, 2688888, 2689608,
    ];
    const TEST_NET_FORKS: [u64; 16] = [
        1, 624634, 800500, 801219, 802660, 971400, 1057027, 1057058, 1057778, 1154318, 1155038,
        1308737, 1543939, 1544659, 1982800, 1983520,
    ];
    const STAGE_NET_FORKS: [u64; 16] = [
        1, 32000, 33000, 34000, 35000, 36000, 37000, 176456, 177176, 269000, 269720, 454721,
        675405, 676125, 1151000, 1151720,
    ];

    /// Asserts every `(version, height)` pair round-trips through
    /// `get_earliest_ideal_height_for_version`, and that one version past the
    /// end of the table is `None`.
    ///
    /// NOTE: the range is inclusive (`1..=len`) so the final fork (version 16)
    /// is covered; the previous exclusive range silently skipped it.
    fn check_earliest_ideal_heights(hardforks: &HardForks, forks: &[u64]) {
        for (height, version) in forks.iter().zip(1..=forks.len() as u8) {
            assert_eq!(
                hardforks
                    .get_earliest_ideal_height_for_version(version)
                    .unwrap(),
                *height
            );
        }
        assert!(hardforks
            .get_earliest_ideal_height_for_version(forks.len() as u8 + 1)
            .is_none())
    }

    /// Asserts each fork height maps to its version, and the height just below
    /// each fork maps to the previous version.
    fn check_ideal_version_from_height(hardforks: &HardForks, forks: &[u64]) {
        for (height, version) in forks.iter().zip(1..=forks.len() as u8) {
            assert_eq!(hardforks.get_ideal_version_from_height(*height), version);
            assert_eq!(
                hardforks.get_ideal_version_from_height(*height - 1),
                version - 1
            );
        }
    }

    #[test]
    fn get_ideal_version() {
        let hardforks = HardForks::new(Network::MainNet);

        let version = hardforks.get_ideal_version();
        assert_eq!(version as usize, MAIN_NET_FORKS.len());
        assert_eq!(version as usize, TEST_NET_FORKS.len());
        assert_eq!(version as usize, STAGE_NET_FORKS.len());

        let height = hardforks
            .get_earliest_ideal_height_for_version(version)
            .unwrap();
        let got_version = hardforks.get_ideal_version_from_height(height);
        assert_eq!(version, got_version);
    }

    #[test]
    fn get_earliest_ideal_height_for_version_mainnet() {
        check_earliest_ideal_heights(&HardForks::new(Network::MainNet), &MAIN_NET_FORKS);
    }

    #[test]
    fn get_earliest_ideal_height_for_version_testnet() {
        check_earliest_ideal_heights(&HardForks::new(Network::TestNet), &TEST_NET_FORKS);
    }

    #[test]
    fn get_earliest_ideal_height_for_version_stagenet() {
        check_earliest_ideal_heights(&HardForks::new(Network::StageNet), &STAGE_NET_FORKS);
    }

    #[test]
    fn get_ideal_version_from_height_mainnet() {
        check_ideal_version_from_height(&HardForks::new(Network::MainNet), &MAIN_NET_FORKS);
    }

    #[test]
    fn get_ideal_version_from_height_testnet() {
        check_ideal_version_from_height(&HardForks::new(Network::TestNet), &TEST_NET_FORKS);
    }

    #[test]
    fn get_ideal_version_from_height_stagenet() {
        check_ideal_version_from_height(&HardForks::new(Network::StageNet), &STAGE_NET_FORKS);
    }
}
|
14
common/src/lib.rs
Normal file
14
common/src/lib.rs
Normal file
|
@ -0,0 +1,14 @@
|
|||
pub mod hardforks;
pub mod network;
pub mod pruning;

pub use hardforks::HardForks;
pub use network::Network;
pub use pruning::{PruningError, PruningSeed};

/// Upper bound on block heights accepted by the pruning calculations.
pub const CRYPTONOTE_MAX_BLOCK_NUMBER: u64 = 500000000;

// pruning
/// log2 of the number of pruning stripes (2^3 = 8 stripes).
pub const CRYPTONOTE_PRUNING_LOG_STRIPES: u32 = 3;
/// Number of consecutive blocks assigned to one stripe before the next stripe takes over.
pub const CRYPTONOTE_PRUNING_STRIPE_SIZE: u64 = 4096;
/// Blocks at the tip of the chain that are never pruned.
pub const CRYPTONOTE_PRUNING_TIP_BLOCKS: u64 = 5500;
|
26
common/src/network.rs
Normal file
26
common/src/network.rs
Normal file
|
@ -0,0 +1,26 @@
|
|||
// The three network magics differ only in the final byte (0x10 / 0x11 / 0x12).
const MAINNET_NETWORK_ID: [u8; 16] = [
    0x12, 0x30, 0xF1, 0x71, 0x61, 0x04, 0x41, 0x61, 0x17, 0x31, 0x00, 0x82, 0x16, 0xA1, 0xA1, 0x10,
];
const TESTNET_NETWORK_ID: [u8; 16] = [
    0x12, 0x30, 0xF1, 0x71, 0x61, 0x04, 0x41, 0x61, 0x17, 0x31, 0x00, 0x82, 0x16, 0xA1, 0xA1, 0x11,
];
const STAGENET_NETWORK_ID: [u8; 16] = [
    0x12, 0x30, 0xF1, 0x71, 0x61, 0x04, 0x41, 0x61, 0x17, 0x31, 0x00, 0x82, 0x16, 0xA1, 0xA1, 0x12,
];

/// The Monero network a node participates in.
#[derive(Debug, Clone, Copy)]
pub enum Network {
    MainNet,
    TestNet,
    StageNet,
}

impl Network {
    /// Returns this network's 16-byte magic identifier.
    pub fn network_id(&self) -> [u8; 16] {
        match self {
            Self::MainNet => MAINNET_NETWORK_ID,
            Self::TestNet => TESTNET_NETWORK_ID,
            Self::StageNet => STAGENET_NETWORK_ID,
        }
    }
}
|
406
common/src/pruning.rs
Normal file
406
common/src/pruning.rs
Normal file
|
@ -0,0 +1,406 @@
|
|||
//! # Pruning Mechanism for Monero
//!
//! This module provides an implementation of the pruning mechanism used in Monero.
//! The main data structure, `PruningSeed`, encapsulates the logic for creating and manipulating pruning seeds,
//! which determine the set of blocks to be pruned from the blockchain.
//!
//! `PruningSeed` also contains a method for checking if a pruning seed is valid for Monero rules (must only be
//! split into 8 parts):
//!
//! ```rust
//! use cuprate_common::pruning::PruningSeed;
//!
//! let seed: u32 = 386; // the seed you want to check is valid
//! match PruningSeed::try_from(seed) {
//!     Ok(seed) => seed, // seed is valid
//!     Err(e) => panic!("seed is invalid")
//! };
//! ```
//!
|
||||
use thiserror::Error;
|
||||
|
||||
use super::{
|
||||
CRYPTONOTE_MAX_BLOCK_NUMBER, CRYPTONOTE_PRUNING_LOG_STRIPES, CRYPTONOTE_PRUNING_STRIPE_SIZE,
|
||||
CRYPTONOTE_PRUNING_TIP_BLOCKS,
|
||||
};
|
||||
|
||||
// Bit layout of a pruning seed (`u32`), as assembled in `PruningSeed::new`:
// bits 0..=6 hold `stripe - 1`, bits 7..=9 hold `log_stripes`.
const PRUNING_SEED_LOG_STRIPES_SHIFT: u32 = 7;
const PRUNING_SEED_STRIPE_SHIFT: u32 = 0;
const PRUNING_SEED_LOG_STRIPES_MASK: u32 = 0x7;
const PRUNING_SEED_STRIPE_MASK: u32 = 127;
|
||||
|
||||
/// Errors that can occur when building or using a pruning seed.
#[derive(Debug, Error)]
pub enum PruningError {
    #[error("log_stripes is out of range")]
    LogStripesOutOfRange,
    #[error("Stripe is out of range")]
    StripeOutOfRange,
    #[error("The block height is greater than `CRYPTONOTE_MAX_BLOCK_NUMBER`")]
    BlockHeightTooLarge,
    #[error("The blockchain height is greater than `CRYPTONOTE_MAX_BLOCK_NUMBER`")]
    BlockChainHeightTooLarge,
    #[error("The calculated height is smaller than the block height entered")]
    CalculatedHeightSmallerThanEnteredBlock,
    #[error("The entered seed has incorrect log stripes")]
    SeedDoesNotHaveCorrectLogStripes,
}
|
||||
|
||||
/// A Monero pruning seed which has methods to get the next pruned/ unpruned block.
///
// Internally we use an Option<u32> to represent if a pruning seed is 0 (None), which means
// no pruning will take place.
pub struct PruningSeed(Option<u32>);
|
||||
|
||||
impl PruningSeed {
|
||||
/// Creates a new pruning seed from a `stripe` and `log_stripes`
|
||||
///
|
||||
/// ### What is a `stripe`
|
||||
///
|
||||
/// A stripe is the part of the blockchain this peer will keep.
|
||||
///
|
||||
/// Monero, when pruning, will split the blockchain into multiple
|
||||
/// "stripes", that amount is currently 8 and each pruned peer
|
||||
/// will keep one of those 8 stripes.
|
||||
///
|
||||
/// ### What is `log_stripes`
|
||||
///
|
||||
/// `log_stripes` is log2 of the amount of stripes used.
|
||||
///
|
||||
/// For Monero, currently, that amount is 8 so `log_stripes` will
|
||||
/// be 3.
|
||||
///
|
||||
/// ---------------------------------------------------------------
|
||||
///
|
||||
/// *note this function allows you to make invalid seeds, this is done
|
||||
/// to allow the specifics of pruning to change in the future. To make
|
||||
/// a valid seed you currently MUST pass in a number 1 to 8 for `stripe`
|
||||
/// and 3 for `log_stripes`.*
|
||||
///
|
||||
pub fn new(stripe: u32, log_stripes: u32) -> Result<PruningSeed, PruningError> {
|
||||
if !(log_stripes <= PRUNING_SEED_LOG_STRIPES_MASK) {
|
||||
Err(PruningError::LogStripesOutOfRange)
|
||||
} else if !(stripe > 0 && stripe <= (1 << log_stripes)) {
|
||||
Err(PruningError::StripeOutOfRange)
|
||||
} else {
|
||||
Ok(PruningSeed(Some(
|
||||
(log_stripes << PRUNING_SEED_LOG_STRIPES_SHIFT)
|
||||
| ((stripe - 1) << PRUNING_SEED_STRIPE_SHIFT),
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
// Gets log2 of the total amount of stripes this seed is using.
|
||||
fn get_log_stripes(&self) -> Option<u32> {
|
||||
let seed: u32 = self.0?;
|
||||
Some((seed >> PRUNING_SEED_LOG_STRIPES_SHIFT) & PRUNING_SEED_LOG_STRIPES_MASK)
|
||||
}
|
||||
|
||||
// Gets the specific stripe of this seed.
|
||||
fn get_stripe(&self) -> Option<u32> {
|
||||
let seed: u32 = self.0?;
|
||||
Some(1 + ((seed >> PRUNING_SEED_STRIPE_SHIFT) & PRUNING_SEED_STRIPE_MASK))
|
||||
}
|
||||
|
||||
/// Gets the next unpruned block for a given `block_height` and `blockchain_height`
|
||||
///
|
||||
/// Each seed will store, in a cyclic manner, a portion of blocks while discarding
|
||||
/// the ones that are out of your stripe. This function is finding the next height
|
||||
/// for which a specific seed will start storing blocks.
|
||||
///
|
||||
/// ### Errors
|
||||
///
|
||||
/// This function will return an Error if the inputted `block_height` or
|
||||
/// `blockchain_height` is greater than [`CRYPTONOTE_MAX_BLOCK_NUMBER`].
|
||||
///
|
||||
/// This function will also error if `block_height` > `blockchain_height`
|
||||
///
|
||||
pub fn get_next_unpruned_block(
|
||||
&self,
|
||||
block_height: u64,
|
||||
blockchain_height: u64,
|
||||
) -> Result<u64, PruningError> {
|
||||
if block_height > CRYPTONOTE_MAX_BLOCK_NUMBER || block_height > blockchain_height {
|
||||
Err(PruningError::BlockHeightTooLarge)
|
||||
} else if blockchain_height > CRYPTONOTE_MAX_BLOCK_NUMBER {
|
||||
Err(PruningError::BlockChainHeightTooLarge)
|
||||
} else {
|
||||
let Some(seed_stripe) = self.get_stripe() else {
|
||||
// If the `get_stripe` returns None that means no pruning so the next
|
||||
// unpruned block is the one inputted.
|
||||
return Ok(block_height);
|
||||
};
|
||||
if block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height {
|
||||
// If we are within `CRYPTONOTE_PRUNING_TIP_BLOCKS` of the chain we should
|
||||
// not prune blocks.
|
||||
return Ok(block_height);
|
||||
}
|
||||
let seed_log_stripes = self
|
||||
.get_log_stripes()
|
||||
.unwrap_or(CRYPTONOTE_PRUNING_LOG_STRIPES);
|
||||
let block_pruning_stripe = get_block_pruning_stripe(block_height, blockchain_height, seed_log_stripes)
|
||||
.expect("We just checked if `block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height`");
|
||||
if seed_stripe == block_pruning_stripe {
|
||||
// if we have the same stripe as a block that means we keep the block so
|
||||
// the entered block is the next un-pruned one.
|
||||
return Ok(block_height);
|
||||
}
|
||||
|
||||
// cycles: how many times each seed has stored blocks so when all seeds have
|
||||
// stored blocks thats 1 cycle
|
||||
let cycles = (block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) >> seed_log_stripes;
|
||||
// if our seed is before the blocks seed in a cycle that means we have already past our
|
||||
// seed this cycle and need to start the next
|
||||
let cycles_start = cycles
|
||||
+ if seed_stripe > block_pruning_stripe {
|
||||
0
|
||||
} else {
|
||||
1
|
||||
};
|
||||
|
||||
// amt_of_cycles * blocks in a cycle + how many blocks through a cycles until the seed starts storing blocks
|
||||
let calculated_height = cycles_start
|
||||
* (CRYPTONOTE_PRUNING_STRIPE_SIZE << seed_log_stripes)
|
||||
+ (seed_stripe as u64 - 1) * CRYPTONOTE_PRUNING_STRIPE_SIZE;
|
||||
if calculated_height + CRYPTONOTE_PRUNING_TIP_BLOCKS > blockchain_height {
|
||||
// if our calculated height is greater than the amount of tip blocks the the start of the tip blocks will be the next un-pruned
|
||||
return Ok(blockchain_height.saturating_sub(CRYPTONOTE_PRUNING_TIP_BLOCKS));
|
||||
}
|
||||
if calculated_height < block_height {
|
||||
return Err(PruningError::CalculatedHeightSmallerThanEnteredBlock);
|
||||
}
|
||||
Ok(calculated_height)
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the next pruned block for a given `block_height` and `blockchain_height`
|
||||
///
|
||||
/// Each seed will store, in a cyclic manner, a portion of blocks while discarding
|
||||
/// the ones that are out of your stripe. This function is finding the next height
|
||||
/// for which a specific seed will start pruning blocks.
|
||||
///
|
||||
/// ### Errors
|
||||
///
|
||||
/// This function will return an Error if the inputted `block_height` or
|
||||
/// `blockchain_height` is greater than [`CRYPTONOTE_MAX_BLOCK_NUMBER`].
|
||||
///
|
||||
/// This function will also error if `block_height` > `blockchain_height`
|
||||
///
|
||||
pub fn get_next_pruned_block(
|
||||
&self,
|
||||
block_height: u64,
|
||||
blockchain_height: u64,
|
||||
) -> Result<u64, PruningError> {
|
||||
let Some(seed_stripe) = self.get_stripe() else {
|
||||
// If the `get_stripe` returns None that means no pruning so the next
|
||||
// pruned block is nonexistent so we return the blockchain_height.
|
||||
return Ok(blockchain_height);
|
||||
};
|
||||
if block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height {
|
||||
// If we are within `CRYPTONOTE_PRUNING_TIP_BLOCKS` of the chain we should
|
||||
// not prune blocks.
|
||||
return Ok(blockchain_height);
|
||||
}
|
||||
let seed_log_stripes = self
|
||||
.get_log_stripes()
|
||||
.unwrap_or(CRYPTONOTE_PRUNING_LOG_STRIPES);
|
||||
let block_pruning_stripe = get_block_pruning_stripe(block_height, blockchain_height, seed_log_stripes)
|
||||
.expect("We just checked if `block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height`");
|
||||
if seed_stripe != block_pruning_stripe {
|
||||
// if our stripe != the blocks stripe that means we prune that block
|
||||
return Ok(block_height);
|
||||
}
|
||||
|
||||
// We can get the end of our "non-pruning" cycle by getting the next stripe's after us first un-pruned block height
|
||||
// so we calculate the next un-pruned block for the next stripe and return it as our next pruned block
|
||||
let next_stripe = (1 + seed_log_stripes) & ((1 << seed_log_stripes) - 1);
|
||||
let seed = PruningSeed::new(next_stripe, seed_log_stripes)?;
|
||||
seed.get_next_unpruned_block(block_height, blockchain_height)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<u32> for PruningSeed {
|
||||
type Error = PruningError;
|
||||
|
||||
fn try_from(value: u32) -> Result<Self, Self::Error> {
|
||||
if value == 0 {
|
||||
Ok(PruningSeed(None))
|
||||
} else {
|
||||
let seed = Self(Some(value));
|
||||
let log_stripes = seed.get_log_stripes().expect("This will only return None if the inner value is None which will only happen if the seed is 0 but we checked for that");
|
||||
if log_stripes != CRYPTONOTE_PRUNING_LOG_STRIPES {
|
||||
return Err(PruningError::SeedDoesNotHaveCorrectLogStripes);
|
||||
}
|
||||
if seed.get_stripe().expect("same as above") > (1 << log_stripes) {
|
||||
return Err(PruningError::StripeOutOfRange);
|
||||
}
|
||||
Ok(seed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_block_pruning_stripe(
|
||||
block_height: u64,
|
||||
blockchain_height: u64,
|
||||
log_stripe: u32,
|
||||
) -> Option<u32> {
|
||||
if block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
(((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) & ((1 << log_stripe) as u64 - 1)) + 1)
|
||||
as u32, // it's trivial to prove it's ok to us `as` here
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use crate::pruning::{get_block_pruning_stripe, PruningSeed};

    use super::CRYPTONOTE_PRUNING_LOG_STRIPES;

    /// Builds one seed per valid stripe: 1 up to and including
    /// `2^CRYPTONOTE_PRUNING_LOG_STRIPES`.
    ///
    /// NOTE: the range is inclusive — the previous exclusive `1..(1 << ..)`
    /// range never constructed (or tested) the last stripe (8).
    fn make_all_pruning_seeds() -> Vec<PruningSeed> {
        let possible_stripes = 1..=(1 << CRYPTONOTE_PRUNING_LOG_STRIPES);
        possible_stripes
            .map(|stripe| PruningSeed::new(stripe, CRYPTONOTE_PRUNING_LOG_STRIPES).unwrap())
            .collect()
    }

    #[test]
    fn from_u32_for_pruning_seed() {
        // 384..=391 encode log_stripes = 3 with stripes 1..=8.
        let good_seeds = 384..=391;
        for seed in good_seeds {
            assert!(PruningSeed::try_from(seed).is_ok());
        }
        // 383 has the wrong log_stripes; 392 encodes stripe 9 (out of range).
        let bad_seeds = [383, 392];
        for seed in bad_seeds {
            assert!(PruningSeed::try_from(seed).is_err());
        }
    }

    #[test]
    fn make_invalid_pruning_seeds() {
        // Stripe 0 and stripe 2^log + 1 are both out of range.
        let invalid_stripes = [0, (1 << CRYPTONOTE_PRUNING_LOG_STRIPES) + 1];

        for stripe in invalid_stripes {
            assert!(PruningSeed::new(stripe, CRYPTONOTE_PRUNING_LOG_STRIPES).is_err());
        }
    }

    #[test]
    fn get_pruning_log_stripe() {
        let all_valid_seeds = make_all_pruning_seeds();
        for seed in all_valid_seeds.iter() {
            assert_eq!(seed.get_log_stripes().unwrap(), 3)
        }
    }

    #[test]
    fn get_pruning_stripe() {
        let all_valid_seeds = make_all_pruning_seeds();
        for (i, seed) in all_valid_seeds.iter().enumerate() {
            assert_eq!(seed.get_stripe().unwrap(), i as u32 + 1)
        }
    }

    #[test]
    fn blocks_pruning_stripe() {
        let blockchain_height = 76437863;

        // Stripes cycle 1..=8, one stripe per 4096-block span.
        for i in 0_u32..8 {
            assert_eq!(
                get_block_pruning_stripe(
                    (i * 4096) as u64,
                    blockchain_height,
                    CRYPTONOTE_PRUNING_LOG_STRIPES
                )
                .unwrap(),
                i + 1
            );
        }

        // After a full cycle (8 * 4096 = 32768 blocks) the pattern repeats.
        for i in 0_u32..8 {
            assert_eq!(
                get_block_pruning_stripe(
                    32768 + (i * 4096) as u64,
                    blockchain_height,
                    CRYPTONOTE_PRUNING_LOG_STRIPES
                )
                .unwrap(),
                i + 1
            );
        }

        // One block before each stripe boundary still belongs to the previous stripe.
        for i in 1_u32..8 {
            assert_eq!(
                get_block_pruning_stripe(
                    32767 + (i * 4096) as u64,
                    blockchain_height,
                    CRYPTONOTE_PRUNING_LOG_STRIPES
                )
                .unwrap(),
                i
            );
        }

        // Blocks in the tip zone are never pruned, so they have no stripe.
        assert!(get_block_pruning_stripe(
            blockchain_height - 5500,
            blockchain_height,
            CRYPTONOTE_PRUNING_LOG_STRIPES
        )
        .is_none());
    }

    #[test]
    fn next_unpruned_block() {
        let all_valid_seeds = make_all_pruning_seeds();
        let blockchain_height = 76437863;

        // From height 0, stripe `i + 1` first stores blocks at `i * 4096`.
        for (i, seed) in all_valid_seeds.iter().enumerate() {
            assert_eq!(
                seed.get_next_unpruned_block(0, blockchain_height).unwrap(),
                i as u64 * 4096
            )
        }

        // Just past its own span, a seed's next blocks come one cycle later.
        for (i, seed) in all_valid_seeds.iter().enumerate() {
            assert_eq!(
                seed.get_next_unpruned_block((i as u64 + 1) * 4096, blockchain_height)
                    .unwrap(),
                i as u64 * 4096 + 32768
            )
        }

        for (i, seed) in all_valid_seeds.iter().enumerate() {
            assert_eq!(
                seed.get_next_unpruned_block((i as u64 + 8) * 4096, blockchain_height)
                    .unwrap(),
                i as u64 * 4096 + 32768
            )
        }

        // Within `CRYPTONOTE_PRUNING_TIP_BLOCKS` of the tip nothing is pruned,
        // so every seed returns the entered height unchanged.
        for seed in all_valid_seeds.iter() {
            assert_eq!(
                seed.get_next_unpruned_block(76437863 - 1, blockchain_height)
                    .unwrap(),
                76437863 - 1
            )
        }

        // The zero seed never prunes.
        let zero_seed = PruningSeed(None);

        assert_eq!(
            zero_seed.get_next_unpruned_block(33443, 5565445).unwrap(),
            33443
        );

        let seed = PruningSeed(Some(384));

        // the next unpruned block is the first tip block
        assert_eq!(seed.get_next_unpruned_block(5000, 11000).unwrap(), 5500)
    }

    // TODO: next_pruned_block
}
|
|
@ -1,33 +1,37 @@
|
|||
//! ### Table module
|
||||
//! This module contains the definition of the [`Table`] and [`DupTable`] trait, and the actual tables used in the database.
|
||||
//! This module contains the definition of the [`Table`] and [`DupTable`] trait, and the actual tables used in the database.
|
||||
//! [`DupTable`] are just a trait used to define that they support DUPSORT|DUPFIXED operation (as of now we don't know the equivalent for HSE).
|
||||
//! All tables are defined with docs explaining its purpose, what types are the key and data.
|
||||
//! For more details please look at Cuprate's book : <link to cuprate book>
|
||||
//! For more details please look at Cuprate's book : <link to cuprate book>
|
||||
|
||||
use monero::{Hash, Block, blockdata::transaction::KeyImage};
|
||||
use bincode::{enc::Encode,de::Decode};
|
||||
use crate::{types::{BlockMetadata, /*OutAmountIdx,*/ /*KeyImage,*/ TxOutputIdx, /*OutTx,*/ AltBlock, TxIndex, TransactionPruned, /*RctOutkey,*/ OutputMetadata}, encoding::Compat};
|
||||
use crate::{
|
||||
encoding::Compat,
|
||||
types::{
|
||||
/*OutTx,*/ AltBlock, BlockMetadata, /*RctOutkey,*/ OutputMetadata,
|
||||
TransactionPruned, TxIndex, /*OutAmountIdx,*/ /*KeyImage,*/ TxOutputIdx,
|
||||
},
|
||||
};
|
||||
use bincode::{de::Decode, enc::Encode};
|
||||
use monero::{blockdata::transaction::KeyImage, Block, Hash};
|
||||
|
||||
/// A trait implementing a table interaction for the database. It is implemented to an empty struct to specify the name and table's associated types. These associated
/// types are used to simplify deserialization process.
pub trait Table: Send + Sync + 'static + Clone {
    // name of the table
    const TABLE_NAME: &'static str;

    // Definition of a key & value types of the database
    type Key: Encode + Decode;
    type Value: Encode + Decode;
}
|
||||
|
||||
/// A trait implementing a table with duplicated data support.
pub trait DupTable: Table {
    // Subkey of the table (prefix of the data)
    type SubKey: Encode + Decode;
}
|
||||
|
||||
/// This declarative macro declare a new empty struct and impl the specified name, and corresponding types.
|
||||
/// This declarative macro declare a new empty struct and impl the specified name, and corresponding types.
|
||||
macro_rules! impl_table {
|
||||
( $(#[$docs:meta])* $table:ident , $key:ty , $value:ty ) => {
|
||||
#[derive(Clone)]
|
||||
|
@ -58,70 +62,120 @@ macro_rules! impl_duptable {
|
|||
// ----- BLOCKS -----
|
||||
|
||||
impl_duptable!(
|
||||
/// `blockhash` is table defining a relation between the hash of a block and its height. Its primary use is to quickly find block's hash by its height.
|
||||
blockhash, (), Compat<Hash>, u64);
|
||||
/// `blockhash` is table defining a relation between the hash of a block and its height. Its primary use is to quickly find block's hash by its height.
|
||||
blockhash,
|
||||
(),
|
||||
Compat<Hash>,
|
||||
u64
|
||||
);
|
||||
|
||||
impl_duptable!(
|
||||
/// `blockmetadata` store block metadata alongside their corresponding Hash. The blocks metadata can contains the total_coins_generated, weight, long_term_block_weight & cumulative RingCT
|
||||
blockmetadata, (), u64, BlockMetadata);
|
||||
|
||||
/// `blockmetadata` store block metadata alongside their corresponding Hash. The blocks metadata can contains the total_coins_generated, weight, long_term_block_weight & cumulative RingCT
|
||||
blockmetadata,
|
||||
(),
|
||||
u64,
|
||||
BlockMetadata
|
||||
);
|
||||
|
||||
impl_table!(
|
||||
/// `blockbody` store blocks' bodies along their Hash. The blocks body contains the coinbase transaction and its corresponding mined transactions' hashes.
|
||||
blocks, u64, Compat<Block>);
|
||||
/// `blockbody` store blocks' bodies along their Hash. The blocks body contains the coinbase transaction and its corresponding mined transactions' hashes.
|
||||
blocks,
|
||||
u64,
|
||||
Compat<Block>
|
||||
);
|
||||
|
||||
/*
|
||||
impl_table!(
|
||||
/// `blockhfversion` keep track of block's hard fork version. If an outdated node continue to run after a hard fork, it needs to know, after updating, what blocks needs to be update.
|
||||
blockhfversion, u64, u8);
|
||||
/// `blockhfversion` keep track of block's hard fork version. If an outdated node continue to run after a hard fork, it needs to know, after updating, what blocks needs to be update.
|
||||
blockhfversion, u64, u8);
|
||||
*/
|
||||
|
||||
impl_table!(
|
||||
/// `altblock` is a table that permits the storage of blocks from an alternative chain, which may cause a re-org. These blocks can be fetch by their corresponding hash.
|
||||
altblock, Compat<Hash>, AltBlock);
|
||||
|
||||
impl_table!(
|
||||
/// `altblock` is a table that permits the storage of blocks from an alternative chain, which may cause a re-org. These blocks can be fetch by their corresponding hash.
|
||||
altblock,
|
||||
Compat<Hash>,
|
||||
AltBlock
|
||||
);
|
||||
|
||||
// ------- TXNs -------
|
||||
|
||||
impl_table!(
|
||||
/// `txspruned` is table storing TransactionPruned (or Pruned Tx). These can be fetch by the corresponding Transaction ID.
|
||||
txspruned, u64, TransactionPruned);
|
||||
|
||||
impl_table!(
|
||||
/// `txsprunable` is a table storing the Prunable part of transactions (Signatures and RctSig), stored as raw bytes. These can be fetch by the corresponding Transaction ID.
|
||||
txsprunable, u64, Vec<u8>);
|
||||
|
||||
impl_duptable!(
|
||||
/// `txsprunablehash` is a table storing hashes of prunable part of transactions. These hash can be fetch by the corresponding Transaction ID.
|
||||
txsprunablehash, u64, (), Compat<Hash>);
|
||||
/// `txspruned` is table storing TransactionPruned (or Pruned Tx). These can be fetch by the corresponding Transaction ID.
|
||||
txspruned,
|
||||
u64,
|
||||
TransactionPruned
|
||||
);
|
||||
|
||||
impl_table!(
|
||||
/// `txsprunabletip` is a table used for optimization purpose. It defines at which block's height this transaction belong as long as the block is with Tip blocks. These can be fetch by the corresponding Transaction ID.
|
||||
txsprunabletip, u64, u64);
|
||||
|
||||
impl_duptable!(
|
||||
/// `txsoutputs` is a table storing output indices used in a transaction. These can be fetch by the corresponding Transaction ID.
|
||||
txsoutputs, u64, (), TxOutputIdx);
|
||||
/// `txsprunable` is a table storing the Prunable part of transactions (Signatures and RctSig), stored as raw bytes. These can be fetch by the corresponding Transaction ID.
|
||||
txsprunable,
|
||||
u64,
|
||||
Vec<u8>
|
||||
);
|
||||
|
||||
impl_duptable!(
|
||||
/// `txsidentifier` is a table defining a relation between the hash of a transaction and its transaction Indexes. Its primarly used to quickly find tx's ID by its hash.
|
||||
txsidentifier, Compat<Hash>, (), TxIndex);
|
||||
|
||||
/// `txsprunablehash` is a table storing hashes of prunable part of transactions. These hash can be fetch by the corresponding Transaction ID.
|
||||
txsprunablehash,
|
||||
u64,
|
||||
(),
|
||||
Compat<Hash>
|
||||
);
|
||||
|
||||
impl_table!(
|
||||
/// `txsprunabletip` is a table used for optimization purpose. It defines at which block's height this transaction belong as long as the block is with Tip blocks. These can be fetch by the corresponding Transaction ID.
|
||||
txsprunabletip,
|
||||
u64,
|
||||
u64
|
||||
);
|
||||
|
||||
impl_duptable!(
|
||||
/// `txsoutputs` is a table storing output indices used in a transaction. These can be fetch by the corresponding Transaction ID.
|
||||
txsoutputs,
|
||||
u64,
|
||||
(),
|
||||
TxOutputIdx
|
||||
);
|
||||
|
||||
impl_duptable!(
|
||||
/// `txsidentifier` is a table defining a relation between the hash of a transaction and its transaction Indexes. Its primarly used to quickly find tx's ID by its hash.
|
||||
txsidentifier,
|
||||
Compat<Hash>,
|
||||
(),
|
||||
TxIndex
|
||||
);
|
||||
|
||||
// ---- OUTPUTS ----
|
||||
|
||||
impl_duptable!(
|
||||
/// `prerctoutputmetadata` is a duplicated table storing Pre-RingCT output's metadata. The key is the amount of this output, and the subkey is its amount idx.
|
||||
prerctoutputmetadata, u64, u64, OutputMetadata);
|
||||
/// `prerctoutputmetadata` is a duplicated table storing Pre-RingCT output's metadata. The key is the amount of this output, and the subkey is its amount idx.
|
||||
prerctoutputmetadata,
|
||||
u64,
|
||||
u64,
|
||||
OutputMetadata
|
||||
);
|
||||
impl_duptable!(
|
||||
/// `prerctoutputmetadata` is a table storing RingCT output's metadata. The key is the amount idx of this output since amount is always 0 for RingCT outputs.
|
||||
outputmetadata, (), u64, OutputMetadata);
|
||||
/// `prerctoutputmetadata` is a table storing RingCT output's metadata. The key is the amount idx of this output since amount is always 0 for RingCT outputs.
|
||||
outputmetadata,
|
||||
(),
|
||||
u64,
|
||||
OutputMetadata
|
||||
);
|
||||
|
||||
// ---- SPT KEYS ----
|
||||
|
||||
impl_duptable!(
|
||||
/// `spentkeys`is a table storing every KeyImage that have been used to create decoys input. As these KeyImage can't be re used they need to marked.
|
||||
spentkeys, (), Compat<KeyImage>, ());
|
||||
/// `spentkeys`is a table storing every KeyImage that have been used to create decoys input. As these KeyImage can't be re used they need to marked.
|
||||
spentkeys,
|
||||
(),
|
||||
Compat<KeyImage>,
|
||||
()
|
||||
);
|
||||
|
||||
// ---- PROPERTIES ----
|
||||
|
||||
impl_table!(
|
||||
/// `spentkeys`is a table storing every KeyImage that have been used to create decoys input. As these KeyImage can't be re used they need to marked.
|
||||
properties, u32, u32);
|
||||
/// `spentkeys`is a table storing every KeyImage that have been used to create decoys input. As these KeyImage can't be re used they need to marked.
|
||||
properties,
|
||||
u32,
|
||||
u32
|
||||
);
|
||||
|
|
|
@ -31,12 +31,22 @@ use crate::{Bucket, BucketError};
|
|||
|
||||
/// A BucketSink writes Bucket instances to the provided AsyncWrite target.
|
||||
#[pin_project]
|
||||
pub struct BucketSink<W: AsyncWrite + std::marker::Unpin> {
|
||||
pub struct BucketSink<W> {
|
||||
#[pin]
|
||||
writer: W,
|
||||
buffer: VecDeque<BytesMut>,
|
||||
}
|
||||
|
||||
impl<W: AsyncWrite + std::marker::Unpin> BucketSink<W> {
|
||||
/// Creates a new [`BucketSink`] from the given [`AsyncWrite`] writer.
|
||||
pub fn new(writer: W) -> Self {
|
||||
BucketSink {
|
||||
writer,
|
||||
buffer: VecDeque::with_capacity(2),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<W: AsyncWrite + std::marker::Unpin> Sink<Bucket> for BucketSink<W> {
|
||||
type Error = BucketError;
|
||||
|
||||
|
|
|
@ -40,6 +40,8 @@ pub mod message_sink;
|
|||
pub mod message_stream;
|
||||
|
||||
pub use header::BucketHead;
|
||||
pub use message_sink::MessageSink;
|
||||
pub use message_stream::MessageStream;
|
||||
|
||||
use std::fmt::Debug;
|
||||
|
||||
|
@ -103,23 +105,29 @@ pub enum MessageType {
|
|||
Request,
|
||||
/// Response
|
||||
Response,
|
||||
/// Notification
|
||||
Notification,
|
||||
}
|
||||
|
||||
impl From<MessageType> for header::Flags {
|
||||
fn from(val: MessageType) -> Self {
|
||||
match val {
|
||||
MessageType::Request => header::REQUEST,
|
||||
MessageType::Response => header::RESPONSE,
|
||||
impl MessageType {
|
||||
/// Returns if the message requires a response
|
||||
pub fn have_to_return_data(&self) -> bool {
|
||||
match self {
|
||||
MessageType::Request => true,
|
||||
MessageType::Response | MessageType::Notification => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryInto<MessageType> for header::Flags {
|
||||
type Error = BucketError;
|
||||
fn try_into(self) -> Result<MessageType, Self::Error> {
|
||||
if self.is_request() {
|
||||
/// Returns the `MessageType` given the flags and have_to_return_data fields
|
||||
pub fn from_flags_and_have_to_return(
|
||||
flags: header::Flags,
|
||||
have_to_return: bool,
|
||||
) -> Result<Self, BucketError> {
|
||||
if flags.is_request() && have_to_return {
|
||||
Ok(MessageType::Request)
|
||||
} else if self.is_response() {
|
||||
} else if flags.is_request() {
|
||||
Ok(MessageType::Notification)
|
||||
} else if flags.is_response() && !have_to_return {
|
||||
Ok(MessageType::Response)
|
||||
} else {
|
||||
Err(BucketError::UnknownFlags)
|
||||
|
@ -127,23 +135,26 @@ impl TryInto<MessageType> for header::Flags {
|
|||
}
|
||||
}
|
||||
|
||||
impl From<MessageType> for header::Flags {
|
||||
fn from(val: MessageType) -> Self {
|
||||
match val {
|
||||
MessageType::Request | MessageType::Notification => header::REQUEST,
|
||||
MessageType::Response => header::RESPONSE,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A levin body
|
||||
pub trait LevinBody: Sized {
|
||||
/// Decodes the message from the data in the header
|
||||
fn decode_message(
|
||||
buf: &[u8],
|
||||
typ: MessageType,
|
||||
have_to_return: bool,
|
||||
command: u32,
|
||||
) -> Result<Self, BucketError>;
|
||||
fn decode_message(buf: &[u8], typ: MessageType, command: u32) -> Result<Self, BucketError>;
|
||||
|
||||
/// Encodes the message
|
||||
///
|
||||
/// returns:
|
||||
/// return_code: i32,
|
||||
/// command: u32,
|
||||
/// have_to_return: bool,
|
||||
/// message_type: MessageType
|
||||
/// bytes: Bytes
|
||||
fn encode(&self) -> Result<(i32, u32, bool, MessageType, Bytes), BucketError>;
|
||||
/// bytes: Vec<u8>
|
||||
fn encode(&self) -> Result<(i32, u32, MessageType, Vec<u8>), BucketError>;
|
||||
}
|
||||
|
|
|
@ -32,12 +32,22 @@ use crate::LevinBody;
|
|||
|
||||
/// A Sink that converts levin messages to buckets and passes them onto the `BucketSink`
|
||||
#[pin_project]
|
||||
pub struct MessageSink<W: AsyncWrite + std::marker::Unpin, E: LevinBody> {
|
||||
pub struct MessageSink<W, E> {
|
||||
#[pin]
|
||||
bucket_sink: BucketSink<W>,
|
||||
phantom: PhantomData<E>,
|
||||
}
|
||||
|
||||
impl<W: AsyncWrite + std::marker::Unpin, E: LevinBody> MessageSink<W, E> {
|
||||
/// Creates a new sink from the provided [`AsyncWrite`]
|
||||
pub fn new(writer: W) -> Self {
|
||||
MessageSink {
|
||||
bucket_sink: BucketSink::new(writer),
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<W: AsyncWrite + std::marker::Unpin, E: LevinBody> Sink<E> for MessageSink<W, E> {
|
||||
type Error = BucketError;
|
||||
|
||||
|
@ -49,16 +59,19 @@ impl<W: AsyncWrite + std::marker::Unpin, E: LevinBody> Sink<E> for MessageSink<W
|
|||
}
|
||||
|
||||
fn start_send(self: Pin<&mut Self>, item: E) -> Result<(), Self::Error> {
|
||||
let (return_code, command, have_to_return_data, flags, body) = item.encode()?;
|
||||
let (return_code, command, message_type, body) = item.encode()?;
|
||||
let header = BucketHead::build(
|
||||
body.len() as u64,
|
||||
have_to_return_data,
|
||||
message_type.have_to_return_data(),
|
||||
command,
|
||||
flags.into(),
|
||||
message_type.into(),
|
||||
return_code,
|
||||
);
|
||||
|
||||
let bucket = Bucket { header, body };
|
||||
let bucket = Bucket {
|
||||
header,
|
||||
body: body.into(),
|
||||
};
|
||||
|
||||
self.project().bucket_sink.start_send(bucket)
|
||||
}
|
||||
|
|
|
@ -27,19 +27,20 @@ use pin_project::pin_project;
|
|||
use crate::bucket_stream::BucketStream;
|
||||
use crate::BucketError;
|
||||
use crate::LevinBody;
|
||||
use crate::MessageType;
|
||||
use crate::LEVIN_SIGNATURE;
|
||||
use crate::PROTOCOL_VERSION;
|
||||
|
||||
/// A stream that reads from the underlying `BucketStream` and uses the the
|
||||
/// methods on the `LevinBody` trait to decode the inner messages(bodies)
|
||||
#[pin_project]
|
||||
pub struct MessageStream<D: LevinBody, S: AsyncRead + std::marker::Unpin> {
|
||||
pub struct MessageStream<S, D> {
|
||||
#[pin]
|
||||
bucket_stream: BucketStream<S>,
|
||||
phantom: PhantomData<D>,
|
||||
}
|
||||
|
||||
impl<D: LevinBody, S: AsyncRead + std::marker::Unpin> MessageStream<D, S> {
|
||||
impl<D: LevinBody, S: AsyncRead + std::marker::Unpin> MessageStream<S, D> {
|
||||
/// Creates a new stream from the provided `AsyncRead`
|
||||
pub fn new(stream: S) -> Self {
|
||||
MessageStream {
|
||||
|
@ -49,7 +50,7 @@ impl<D: LevinBody, S: AsyncRead + std::marker::Unpin> MessageStream<D, S> {
|
|||
}
|
||||
}
|
||||
|
||||
impl<D: LevinBody, S: AsyncRead + std::marker::Unpin> Stream for MessageStream<D, S> {
|
||||
impl<D: LevinBody, S: AsyncRead + std::marker::Unpin> Stream for MessageStream<S, D> {
|
||||
type Item = Result<D, BucketError>;
|
||||
|
||||
fn poll_next(
|
||||
|
@ -71,6 +72,8 @@ impl<D: LevinBody, S: AsyncRead + std::marker::Unpin> Stream for MessageStream<D
|
|||
))?;
|
||||
}
|
||||
|
||||
// TODO: we shouldn't return an error if the peer sends an error response we should define a new network
|
||||
// message: Error.
|
||||
if bucket.header.return_code < 0
|
||||
|| (bucket.header.return_code == 0 && bucket.header.flags.is_response())
|
||||
{
|
||||
|
@ -84,8 +87,10 @@ impl<D: LevinBody, S: AsyncRead + std::marker::Unpin> Stream for MessageStream<D
|
|||
|
||||
Poll::Ready(Some(D::decode_message(
|
||||
&bucket.body,
|
||||
bucket.header.flags.try_into()?,
|
||||
bucket.header.have_to_return_data,
|
||||
MessageType::from_flags_and_have_to_return(
|
||||
bucket.header.flags,
|
||||
bucket.header.have_to_return_data,
|
||||
)?,
|
||||
bucket.header.command,
|
||||
)))
|
||||
}
|
||||
|
|
|
@ -8,12 +8,14 @@ repository = "https://github.com/SyntheticBird45/cuprate/tree/main/net/monero-wi
|
|||
|
||||
|
||||
[dependencies]
|
||||
levin = {path="../levin"}
|
||||
serde = {version = "1.0", features =["derive"]}
|
||||
serde_with = "2.2.0"
|
||||
epee-serde = {git="https://github.com/Boog900/epee_serde.git"}
|
||||
monero = "0.18.2"
|
||||
levin = {path="../levin"}
|
||||
sealed = "0.4"
|
||||
epee-serde = {git="https://github.com/Cuprate/epee_serde.git"}
|
||||
#monero = {git="https://github.com/Boog900/monero-rs.git", branch="db", features=["database"]}
|
||||
byteorder = "1.4.3"
|
||||
bytes = "1"
|
||||
thiserror = "1.0.24"
|
||||
hex = "0.4.3"
|
||||
|
||||
|
|
|
@ -13,34 +13,80 @@
|
|||
// copies or substantial portions of the Software.
|
||||
//
|
||||
|
||||
macro_rules! get_field_from_map {
|
||||
($map:ident, $field_name:expr) => {
|
||||
$map.get($field_name)
|
||||
.ok_or_else(|| serde::de::Error::missing_field($field_name))?
|
||||
};
|
||||
}
|
||||
macro_rules! message {
|
||||
(
|
||||
Admin,
|
||||
Name: $name:ident,
|
||||
ID: $id:expr,
|
||||
Request: $req:ident {
|
||||
EncodingError: $req_enc_err:path,
|
||||
Encode: $req_enc:path,
|
||||
Decode: $req_dec:path,
|
||||
},
|
||||
Response: $res:ident {
|
||||
EncodingError: $res_enc_err:path,
|
||||
Encode: $res_enc:path,
|
||||
Decode: $res_dec:path,
|
||||
},
|
||||
) => {
|
||||
#[sealed::sealed]
|
||||
impl crate::messages::NetworkMessage for $req {
|
||||
type EncodingError = $req_enc_err;
|
||||
fn decode(buf: &[u8]) -> Result<Self, Self::EncodingError> {
|
||||
$req_dec(buf)
|
||||
}
|
||||
fn encode(&self) -> Result<Vec<u8>, Self::EncodingError> {
|
||||
$req_enc(self)
|
||||
}
|
||||
}
|
||||
#[sealed::sealed]
|
||||
impl crate::messages::NetworkMessage for $res {
|
||||
type EncodingError = $res_enc_err;
|
||||
fn decode(buf: &[u8]) -> Result<Self, Self::EncodingError> {
|
||||
$res_dec(buf)
|
||||
}
|
||||
fn encode(&self) -> Result<Vec<u8>, Self::EncodingError> {
|
||||
$res_enc(self)
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! get_val_from_map {
|
||||
($map:ident, $field_name:expr, $get_fn:ident, $expected_ty:expr) => {
|
||||
$map.get($field_name)
|
||||
.ok_or_else(|| serde::de::Error::missing_field($field_name))?
|
||||
.$get_fn()
|
||||
.ok_or_else(|| {
|
||||
serde::de::Error::invalid_type($map.get_value_type_as_unexpected(), &$expected_ty)
|
||||
})?
|
||||
};
|
||||
}
|
||||
pub struct $name;
|
||||
|
||||
macro_rules! get_internal_val {
|
||||
($value:ident, $get_fn:ident, $expected_ty:expr) => {
|
||||
$value.$get_fn().ok_or_else(|| {
|
||||
serde::de::Error::invalid_type($value.get_value_type_as_unexpected(), &$expected_ty)
|
||||
})?
|
||||
};
|
||||
}
|
||||
#[sealed::sealed]
|
||||
impl crate::messages::AdminMessage for $name {
|
||||
const ID: u32 = $id;
|
||||
const NAME: &'static str = stringify!($name);
|
||||
|
||||
macro_rules! monero_decode_into_serde_err {
|
||||
($ty:ty, $buf:ident) => {
|
||||
monero::consensus::deserialize::<$ty>($buf).map_err(serde::de::Error::custom)?
|
||||
type Request = $req;
|
||||
type Response = $res;
|
||||
}
|
||||
};
|
||||
(
|
||||
Protocol,
|
||||
Name: $name:ident {
|
||||
EncodingError: $enc_err:path,
|
||||
Encode: $enc:path,
|
||||
Decode: $dec:path,
|
||||
},
|
||||
ID: $id:expr,
|
||||
) => {
|
||||
#[sealed::sealed]
|
||||
impl crate::messages::NetworkMessage for $name {
|
||||
type EncodingError = $enc_err;
|
||||
fn decode(buf: &[u8]) -> Result<Self, Self::EncodingError> {
|
||||
$dec(buf)
|
||||
}
|
||||
fn encode(&self) -> Result<Vec<u8>, Self::EncodingError> {
|
||||
$enc(self)
|
||||
}
|
||||
}
|
||||
|
||||
#[sealed::sealed]
|
||||
impl crate::messages::ProtocolMessage for $name {
|
||||
const ID: u32 = $id;
|
||||
const NAME: &'static str = stringify!($name);
|
||||
|
||||
type Notification = Self;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
|
|
@ -27,98 +27,15 @@
|
|||
#![deny(non_upper_case_globals)]
|
||||
#![deny(non_camel_case_types)]
|
||||
#![deny(unused_mut)]
|
||||
#![deny(missing_docs)]
|
||||
//#![deny(missing_docs)]
|
||||
|
||||
#[macro_use]
|
||||
mod internal_macros;
|
||||
pub mod messages;
|
||||
pub mod network_address;
|
||||
mod utils;
|
||||
|
||||
pub use messages::{Message, P2pCommand};
|
||||
pub use network_address::NetworkAddress;
|
||||
|
||||
// re-exports
|
||||
pub use levin;
|
||||
pub use levin::message_sink::MessageSink;
|
||||
pub use levin::message_stream::MessageStream;
|
||||
|
||||
use levin::BucketError;
|
||||
|
||||
/// The possible commands that can be in a levin header
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
|
||||
pub enum P2pCommand {
|
||||
// 100* admin commands
|
||||
/// Handshake
|
||||
Handshake,
|
||||
/// TimedSync
|
||||
TimedSync,
|
||||
/// Ping
|
||||
Ping,
|
||||
/// SupportFlags
|
||||
SupportFlags,
|
||||
|
||||
// 200* protocol commands
|
||||
/// NewBlock
|
||||
NewBlock,
|
||||
/// NewTransactions
|
||||
NewTransactions,
|
||||
/// RequestGetObject
|
||||
RequestGetObject,
|
||||
/// ResponseGetObject
|
||||
ResponseGetObject,
|
||||
/// RequestChain
|
||||
RequestChain,
|
||||
/// ResponseChainEntry
|
||||
ResponseChainEntry,
|
||||
/// NewFluffyBlock
|
||||
NewFluffyBlock,
|
||||
/// RequestFluffyMissingTx
|
||||
RequestFluffyMissingTx,
|
||||
/// GetTxPoolComplement
|
||||
GetTxPoolComplement,
|
||||
}
|
||||
|
||||
impl TryFrom<u32> for P2pCommand {
|
||||
type Error = BucketError;
|
||||
|
||||
fn try_from(value: u32) -> Result<Self, Self::Error> {
|
||||
match value {
|
||||
1001 => Ok(P2pCommand::Handshake),
|
||||
1002 => Ok(P2pCommand::TimedSync),
|
||||
1003 => Ok(P2pCommand::Ping),
|
||||
1007 => Ok(P2pCommand::SupportFlags),
|
||||
|
||||
2001 => Ok(P2pCommand::NewBlock),
|
||||
2002 => Ok(P2pCommand::NewTransactions),
|
||||
2003 => Ok(P2pCommand::RequestGetObject),
|
||||
2004 => Ok(P2pCommand::ResponseGetObject),
|
||||
2006 => Ok(P2pCommand::RequestChain),
|
||||
2007 => Ok(P2pCommand::ResponseChainEntry),
|
||||
2008 => Ok(P2pCommand::NewFluffyBlock),
|
||||
2009 => Ok(P2pCommand::RequestFluffyMissingTx),
|
||||
2010 => Ok(P2pCommand::GetTxPoolComplement),
|
||||
|
||||
_ => Err(BucketError::UnsupportedP2pCommand(value)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<P2pCommand> for u32 {
|
||||
fn from(val: P2pCommand) -> Self {
|
||||
match val {
|
||||
P2pCommand::Handshake => 1001,
|
||||
P2pCommand::TimedSync => 1002,
|
||||
P2pCommand::Ping => 1003,
|
||||
P2pCommand::SupportFlags => 1007,
|
||||
|
||||
P2pCommand::NewBlock => 2001,
|
||||
P2pCommand::NewTransactions => 2002,
|
||||
P2pCommand::RequestGetObject => 2003,
|
||||
P2pCommand::ResponseGetObject => 2004,
|
||||
P2pCommand::RequestChain => 2006,
|
||||
P2pCommand::ResponseChainEntry => 2007,
|
||||
P2pCommand::NewFluffyBlock => 2008,
|
||||
P2pCommand::RequestFluffyMissingTx => 2009,
|
||||
P2pCommand::GetTxPoolComplement => 2010,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,319 +20,289 @@ pub mod admin;
|
|||
pub mod common;
|
||||
pub mod protocol;
|
||||
|
||||
pub use admin::{Handshake, Ping, SupportFlags, TimedSync};
|
||||
pub use common::{BasicNodeData, CoreSyncData, PeerID, PeerListEntryBase};
|
||||
pub use protocol::{
|
||||
ChainRequest, ChainResponse, FluffyMissingTransactionsRequest, GetObjectsRequest,
|
||||
GetObjectsResponse, GetTxPoolCompliment, NewBlock, NewFluffyBlock, NewTransactions,
|
||||
};
|
||||
|
||||
use bytes::Bytes;
|
||||
use levin::BucketError;
|
||||
use levin::MessageType;
|
||||
use levin::{BucketError, MessageType};
|
||||
|
||||
use crate::P2pCommand;
|
||||
|
||||
fn zero_val<T: From<u8>>() -> T {
|
||||
T::from(0_u8)
|
||||
#[sealed::sealed]
|
||||
pub trait NetworkMessage: Sized {
|
||||
type EncodingError: std::fmt::Debug;
|
||||
fn decode(buf: &[u8]) -> Result<Self, Self::EncodingError>;
|
||||
fn encode(&self) -> Result<Vec<u8>, Self::EncodingError>;
|
||||
}
|
||||
|
||||
fn default_true() -> bool {
|
||||
true
|
||||
#[sealed::sealed]
|
||||
pub trait AdminMessage {
|
||||
const ID: u32;
|
||||
const NAME: &'static str;
|
||||
type Request: NetworkMessage;
|
||||
type Response: NetworkMessage;
|
||||
}
|
||||
|
||||
fn default_false() -> bool {
|
||||
false
|
||||
#[sealed::sealed]
|
||||
pub trait ProtocolMessage {
|
||||
const ID: u32;
|
||||
const NAME: &'static str;
|
||||
type Notification: NetworkMessage;
|
||||
}
|
||||
|
||||
/// A message request
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum MessageRequest {
|
||||
/// Handshake
|
||||
Handshake(admin::HandshakeRequest),
|
||||
/// TimedSync
|
||||
TimedSync(admin::TimedSyncRequest),
|
||||
/// Ping
|
||||
Ping(admin::PingRequest),
|
||||
/// SupportFlags
|
||||
SupportFlags(admin::SupportFlagsRequest),
|
||||
}
|
||||
macro_rules! p2p_command {
|
||||
($($message:ident),+) => {
|
||||
pub enum P2pCommand {
|
||||
$($message,)+
|
||||
}
|
||||
|
||||
/// A message response
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum MessageResponse {
|
||||
/// Handshake
|
||||
Handshake(admin::HandshakeResponse),
|
||||
/// TimedSync
|
||||
TimedSync(admin::TimedSyncResponse),
|
||||
/// Ping
|
||||
Ping(admin::PingResponse),
|
||||
/// SupportFlags
|
||||
SupportFlags(admin::SupportFlagsResponse),
|
||||
}
|
||||
|
||||
/// A messages notification
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum MessageNotification {
|
||||
/// NewBlock
|
||||
NewBlock(protocol::NewBlock),
|
||||
/// NewTransactions
|
||||
NewTransactions(protocol::NewTransactions),
|
||||
/// RequestGetObject
|
||||
RequestGetObject(protocol::GetObjectsRequest),
|
||||
/// ResponseGetObject
|
||||
ResponseGetObject(protocol::GetObjectsResponse),
|
||||
/// RequestChain
|
||||
RequestChain(protocol::ChainRequest),
|
||||
/// ResponseChainEntry
|
||||
ResponseChainEntry(protocol::ChainResponse),
|
||||
/// NewFluffyBlock
|
||||
NewFluffyBlock(protocol::NewFluffyBlock),
|
||||
/// RequestFluffyMissingTx
|
||||
RequestFluffyMissingTx(protocol::FluffyMissingTransactionsRequest),
|
||||
/// GetTxPoolComplement
|
||||
GetTxPoolComplement(protocol::TxPoolCompliment),
|
||||
}
|
||||
|
||||
/// A Monero Message (levin body)
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum Message {
|
||||
/// Request
|
||||
Request(MessageRequest),
|
||||
/// Response
|
||||
Response(MessageResponse),
|
||||
/// Notification
|
||||
Notification(Box<MessageNotification>), // check benefits/ drawbacks of doing this, im just boxing it for now to satisfy clippy
|
||||
}
|
||||
|
||||
fn epee_encode_error_to_levin(err: epee_serde::Error) -> BucketError {
|
||||
BucketError::FailedToEncodeBucketBody(err.to_string())
|
||||
}
|
||||
|
||||
fn encode_message<T: serde::ser::Serialize>(message: &T) -> Result<Vec<u8>, BucketError> {
|
||||
epee_serde::to_bytes(message).map_err(epee_encode_error_to_levin)
|
||||
}
|
||||
|
||||
fn epee_decode_error_to_levin(err: epee_serde::Error) -> BucketError {
|
||||
BucketError::FailedToDecodeBucketBody(err.to_string())
|
||||
}
|
||||
|
||||
fn decode_message<T: serde::de::DeserializeOwned>(buf: &[u8]) -> Result<T, BucketError> {
|
||||
epee_serde::from_bytes(buf).map_err(epee_decode_error_to_levin)
|
||||
}
|
||||
|
||||
impl levin::LevinBody for Message {
|
||||
fn decode_message(
|
||||
buf: &[u8],
|
||||
typ: MessageType,
|
||||
have_to_return: bool,
|
||||
command: u32,
|
||||
) -> Result<Self, BucketError> {
|
||||
let command = P2pCommand::try_from(command)?;
|
||||
|
||||
Ok(match typ {
|
||||
MessageType::Response => Message::Response(match command {
|
||||
P2pCommand::Handshake => MessageResponse::Handshake(decode_message(buf)?),
|
||||
P2pCommand::TimedSync => MessageResponse::TimedSync(decode_message(buf)?),
|
||||
P2pCommand::Ping => MessageResponse::Ping(decode_message(buf)?),
|
||||
P2pCommand::SupportFlags => MessageResponse::SupportFlags(decode_message(buf)?),
|
||||
_ => {
|
||||
return Err(levin::BucketError::FailedToDecodeBucketBody(
|
||||
"Invalid header flag/command/have_to_return combination".to_string(),
|
||||
))
|
||||
}
|
||||
}),
|
||||
|
||||
MessageType::Request if have_to_return => Message::Request(match command {
|
||||
P2pCommand::Handshake => MessageRequest::Handshake(decode_message(buf)?),
|
||||
P2pCommand::TimedSync => MessageRequest::TimedSync(decode_message(buf)?),
|
||||
P2pCommand::Ping => MessageRequest::Ping(admin::PingRequest),
|
||||
P2pCommand::SupportFlags => {
|
||||
MessageRequest::SupportFlags(admin::SupportFlagsRequest)
|
||||
}
|
||||
_ => {
|
||||
return Err(levin::BucketError::FailedToDecodeBucketBody(
|
||||
"Invalid header flag/command/have_to_return combination".to_string(),
|
||||
))
|
||||
}
|
||||
}),
|
||||
|
||||
MessageType::Request if !have_to_return => {
|
||||
Message::Notification(Box::new(match command {
|
||||
P2pCommand::NewBlock => MessageNotification::NewBlock(decode_message(buf)?),
|
||||
P2pCommand::NewTransactions => {
|
||||
MessageNotification::NewTransactions(decode_message(buf)?)
|
||||
}
|
||||
P2pCommand::RequestGetObject => {
|
||||
MessageNotification::RequestGetObject(decode_message(buf)?)
|
||||
}
|
||||
P2pCommand::ResponseGetObject => {
|
||||
MessageNotification::ResponseGetObject(decode_message(buf)?)
|
||||
}
|
||||
P2pCommand::RequestChain => {
|
||||
MessageNotification::RequestChain(decode_message(buf)?)
|
||||
}
|
||||
P2pCommand::ResponseChainEntry => {
|
||||
MessageNotification::ResponseChainEntry(decode_message(buf)?)
|
||||
}
|
||||
P2pCommand::NewFluffyBlock => {
|
||||
MessageNotification::NewFluffyBlock(decode_message(buf)?)
|
||||
}
|
||||
P2pCommand::RequestFluffyMissingTx => {
|
||||
MessageNotification::RequestFluffyMissingTx(decode_message(buf)?)
|
||||
}
|
||||
P2pCommand::GetTxPoolComplement => {
|
||||
MessageNotification::GetTxPoolComplement(decode_message(buf)?)
|
||||
}
|
||||
_ => {
|
||||
return Err(levin::BucketError::FailedToDecodeBucketBody(
|
||||
"Invalid header flag/command/have_to_return combination".to_string(),
|
||||
))
|
||||
}
|
||||
}))
|
||||
}
|
||||
_ => unreachable!("All typs are handleded"),
|
||||
})
|
||||
}
|
||||
|
||||
fn encode(&self) -> Result<(i32, u32, bool, MessageType, Bytes), BucketError> {
|
||||
let return_code;
|
||||
let command;
|
||||
let have_to_return_data;
|
||||
let flag;
|
||||
let bytes;
|
||||
|
||||
match self {
|
||||
Message::Request(req) => {
|
||||
return_code = 0;
|
||||
have_to_return_data = true;
|
||||
flag = MessageType::Request;
|
||||
match req {
|
||||
MessageRequest::Handshake(handshake) => {
|
||||
command = P2pCommand::Handshake;
|
||||
bytes = encode_message(handshake)?;
|
||||
}
|
||||
MessageRequest::TimedSync(timedsync) => {
|
||||
command = P2pCommand::TimedSync;
|
||||
bytes = encode_message(timedsync)?;
|
||||
}
|
||||
MessageRequest::Ping(_) => {
|
||||
command = P2pCommand::Ping;
|
||||
bytes = Vec::new();
|
||||
}
|
||||
MessageRequest::SupportFlags(_) => {
|
||||
command = P2pCommand::SupportFlags;
|
||||
bytes = Vec::new();
|
||||
}
|
||||
}
|
||||
}
|
||||
Message::Response(res) => {
|
||||
return_code = 1;
|
||||
have_to_return_data = false;
|
||||
flag = MessageType::Response;
|
||||
match res {
|
||||
MessageResponse::Handshake(handshake) => {
|
||||
command = P2pCommand::Handshake;
|
||||
bytes = encode_message(handshake)?;
|
||||
}
|
||||
MessageResponse::TimedSync(timed_sync) => {
|
||||
command = P2pCommand::TimedSync;
|
||||
bytes = encode_message(timed_sync)?;
|
||||
}
|
||||
MessageResponse::Ping(ping) => {
|
||||
command = P2pCommand::Ping;
|
||||
bytes = encode_message(ping)?;
|
||||
}
|
||||
MessageResponse::SupportFlags(support_flags) => {
|
||||
command = P2pCommand::SupportFlags;
|
||||
bytes = encode_message(support_flags)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Message::Notification(noti) => {
|
||||
return_code = 0;
|
||||
have_to_return_data = false;
|
||||
flag = MessageType::Response;
|
||||
match noti.as_ref() {
|
||||
MessageNotification::NewBlock(new_block) => {
|
||||
command = P2pCommand::NewBlock;
|
||||
bytes = encode_message(new_block)?;
|
||||
}
|
||||
MessageNotification::NewTransactions(new_txs) => {
|
||||
command = P2pCommand::NewTransactions;
|
||||
bytes = encode_message(new_txs)?;
|
||||
}
|
||||
MessageNotification::RequestGetObject(obj) => {
|
||||
command = P2pCommand::RequestGetObject;
|
||||
bytes = encode_message(obj)?;
|
||||
}
|
||||
MessageNotification::ResponseGetObject(obj) => {
|
||||
command = P2pCommand::ResponseGetObject;
|
||||
bytes = encode_message(obj)?;
|
||||
}
|
||||
MessageNotification::RequestChain(chain) => {
|
||||
command = P2pCommand::RequestChain;
|
||||
bytes = encode_message(chain)?;
|
||||
}
|
||||
MessageNotification::ResponseChainEntry(chain_entry) => {
|
||||
command = P2pCommand::ResponseChainEntry;
|
||||
bytes = encode_message(chain_entry)?;
|
||||
}
|
||||
MessageNotification::NewFluffyBlock(fluffy_block) => {
|
||||
command = P2pCommand::NewFluffyBlock;
|
||||
bytes = encode_message(fluffy_block)?;
|
||||
}
|
||||
MessageNotification::RequestFluffyMissingTx(txs) => {
|
||||
command = P2pCommand::RequestFluffyMissingTx;
|
||||
bytes = encode_message(txs)?;
|
||||
}
|
||||
MessageNotification::GetTxPoolComplement(txpool) => {
|
||||
command = P2pCommand::GetTxPoolComplement;
|
||||
bytes = encode_message(txpool)?;
|
||||
}
|
||||
pub struct P2pCommandFromU32Err;
|
||||
impl TryFrom<u32> for P2pCommand {
|
||||
type Error = P2pCommandFromU32Err;
|
||||
fn try_from(value: u32) -> Result<Self, Self::Error> {
|
||||
match value {
|
||||
$($message::ID => Ok(P2pCommand::$message),)+
|
||||
_ => Err(P2pCommandFromU32Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return Ok((
|
||||
return_code,
|
||||
command.into(),
|
||||
have_to_return_data,
|
||||
flag,
|
||||
bytes.into(),
|
||||
));
|
||||
}
|
||||
impl From<P2pCommand> for u32 {
|
||||
fn from(val: P2pCommand) -> Self {
|
||||
match val {
|
||||
$(P2pCommand::$message => $message::ID,)+
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::Message;
|
||||
use levin::{LevinBody, MessageType};
|
||||
macro_rules! levin_body {
|
||||
(
|
||||
Admin:
|
||||
$($admin_mes:ident),+
|
||||
Protocol:
|
||||
$($protocol_mes:ident),+
|
||||
) => {
|
||||
|
||||
#[test]
|
||||
fn decode_handshake_request() {
|
||||
let buf = [
|
||||
1, 17, 1, 1, 1, 1, 2, 1, 1, 12, 9, 110, 111, 100, 101, 95, 100, 97, 116, 97, 12, 24, 7,
|
||||
109, 121, 95, 112, 111, 114, 116, 6, 168, 70, 0, 0, 10, 110, 101, 116, 119, 111, 114,
|
||||
107, 95, 105, 100, 10, 64, 18, 48, 241, 113, 97, 4, 65, 97, 23, 49, 0, 130, 22, 161,
|
||||
161, 16, 7, 112, 101, 101, 114, 95, 105, 100, 5, 153, 5, 227, 61, 188, 214, 159, 10,
|
||||
13, 115, 117, 112, 112, 111, 114, 116, 95, 102, 108, 97, 103, 115, 6, 1, 0, 0, 0, 8,
|
||||
114, 112, 99, 95, 112, 111, 114, 116, 7, 0, 0, 20, 114, 112, 99, 95, 99, 114, 101, 100,
|
||||
105, 116, 115, 95, 112, 101, 114, 95, 104, 97, 115, 104, 6, 0, 0, 0, 0, 12, 112, 97,
|
||||
121, 108, 111, 97, 100, 95, 100, 97, 116, 97, 12, 24, 21, 99, 117, 109, 117, 108, 97,
|
||||
116, 105, 118, 101, 95, 100, 105, 102, 102, 105, 99, 117, 108, 116, 121, 5, 59, 90,
|
||||
163, 153, 0, 0, 0, 0, 27, 99, 117, 109, 117, 108, 97, 116, 105, 118, 101, 95, 100, 105,
|
||||
102, 102, 105, 99, 117, 108, 116, 121, 95, 116, 111, 112, 54, 52, 5, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 14, 99, 117, 114, 114, 101, 110, 116, 95, 104, 101, 105, 103, 104, 116, 5, 190,
|
||||
50, 0, 0, 0, 0, 0, 0, 12, 112, 114, 117, 110, 105, 110, 103, 95, 115, 101, 101, 100, 6,
|
||||
0, 0, 0, 0, 6, 116, 111, 112, 95, 105, 100, 10, 128, 230, 40, 186, 45, 79, 79, 224,
|
||||
164, 117, 133, 84, 130, 185, 94, 4, 1, 57, 126, 74, 145, 238, 238, 122, 44, 214, 85,
|
||||
129, 237, 230, 14, 67, 218, 11, 116, 111, 112, 95, 118, 101, 114, 115, 105, 111, 110,
|
||||
8, 1, 18, 108, 111, 99, 97, 108, 95, 112, 101, 101, 114, 108, 105, 115, 116, 95, 110,
|
||||
101, 119, 140, 4, 24, 3, 97, 100, 114, 12, 8, 4, 116, 121, 112, 101, 8, 1, 4, 97, 100,
|
||||
100, 114, 12, 8, 4, 109, 95, 105, 112, 6, 225, 219, 21, 0, 6, 109, 95, 112, 111, 114,
|
||||
116, 7, 0, 0, 2, 105, 100, 5, 0, 0, 0, 0, 0, 0, 0, 0, 9, 108, 97, 115, 116, 95, 115,
|
||||
101, 101, 110, 1, 0, 0, 0, 0, 0, 0, 0, 0, 12, 112, 114, 117, 110, 105, 110, 103, 95,
|
||||
115, 101, 101, 100, 6, 0, 0, 0, 0, 8, 114, 112, 99, 95, 112, 111, 114, 116, 7, 0, 0,
|
||||
20, 114, 112, 99, 95, 99, 114, 101, 100, 105, 116, 115, 95, 112, 101, 114, 95, 104, 97,
|
||||
115, 104, 6, 0, 0, 0, 0,
|
||||
];
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum MessageRequest {
|
||||
$($admin_mes(<$admin_mes as AdminMessage>::Request),)+
|
||||
}
|
||||
|
||||
let message = Message::decode_message(&buf, MessageType::Request, true, 1001);
|
||||
println!("{:?}", message);
|
||||
}
|
||||
$(
|
||||
impl From<<$admin_mes as AdminMessage>::Request> for MessageRequest {
|
||||
fn from(value: <$admin_mes as AdminMessage>::Request) -> MessageRequest {
|
||||
MessageRequest::$admin_mes(value)
|
||||
}
|
||||
}
|
||||
)+
|
||||
|
||||
impl MessageRequest {
|
||||
pub fn id(&self) -> u32 {
|
||||
match self {
|
||||
$(MessageRequest::$admin_mes(_) => $admin_mes::ID,)+
|
||||
}
|
||||
}
|
||||
pub fn decode(buf: &[u8], command: u32) -> Result<Self, BucketError> {
|
||||
match command {
|
||||
$($admin_mes::ID => Ok(
|
||||
MessageRequest::$admin_mes(<$admin_mes as AdminMessage>::Request::decode(buf)
|
||||
.map_err(|e| BucketError::FailedToDecodeBucketBody(e.to_string()))?)),)+
|
||||
_ => Err(BucketError::UnsupportedP2pCommand(command))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn encode(&self) -> Result<(u32, Vec<u8>), BucketError> {
|
||||
match self {
|
||||
$(MessageRequest::$admin_mes(mes) => Ok(($admin_mes::ID, mes.encode()
|
||||
.map_err(|e| BucketError::FailedToEncodeBucketBody(e.to_string()))?)),)+
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum MessageResponse {
|
||||
$($admin_mes(<$admin_mes as AdminMessage>::Response),)+
|
||||
}
|
||||
|
||||
$(
|
||||
impl From<<$admin_mes as AdminMessage>::Response> for MessageResponse {
|
||||
fn from(value: <$admin_mes as AdminMessage>::Response) -> MessageResponse {
|
||||
MessageResponse::$admin_mes(value)
|
||||
}
|
||||
}
|
||||
)+
|
||||
|
||||
impl MessageResponse {
|
||||
pub fn id(&self) -> u32 {
|
||||
match self {
|
||||
$(MessageResponse::$admin_mes(_) => $admin_mes::ID,)+
|
||||
}
|
||||
}
|
||||
|
||||
pub fn decode(buf: &[u8], command: u32) -> Result<Self, BucketError> {
|
||||
match command {
|
||||
$($admin_mes::ID => Ok(
|
||||
MessageResponse::$admin_mes(<$admin_mes as AdminMessage>::Response::decode(buf)
|
||||
.map_err(|e| BucketError::FailedToDecodeBucketBody(e.to_string()))?)),)+
|
||||
_ => Err(BucketError::UnsupportedP2pCommand(command))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn encode(&self) -> Result<(u32, Vec<u8>), BucketError> {
|
||||
match self {
|
||||
$(MessageResponse::$admin_mes(mes) => Ok(($admin_mes::ID, mes.encode()
|
||||
.map_err(|e| BucketError::FailedToEncodeBucketBody(e.to_string()))?)),)+
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum MessageNotification {
|
||||
$($protocol_mes(<$protocol_mes as ProtocolMessage>::Notification),)+
|
||||
}
|
||||
|
||||
$(
|
||||
impl From<<$protocol_mes as ProtocolMessage>::Notification> for MessageNotification {
|
||||
fn from(value: <$protocol_mes as ProtocolMessage>::Notification) -> MessageNotification {
|
||||
MessageNotification::$protocol_mes(value)
|
||||
}
|
||||
}
|
||||
)+
|
||||
|
||||
|
||||
impl MessageNotification {
|
||||
pub fn id(&self) -> u32 {
|
||||
match self {
|
||||
$(MessageNotification::$protocol_mes(_) => $protocol_mes::ID,)+
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
pub fn decode(buf: &[u8], command: u32) -> Result<Self, BucketError> {
|
||||
match command {
|
||||
$($protocol_mes::ID => Ok(
|
||||
MessageNotification::$protocol_mes(<$protocol_mes as ProtocolMessage>::Notification::decode(buf)
|
||||
.map_err(|e| BucketError::FailedToDecodeBucketBody(e.to_string()))?)),)+
|
||||
_ => Err(BucketError::UnsupportedP2pCommand(command))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn encode(&self) -> Result<(u32, Vec<u8>), BucketError> {
|
||||
match self {
|
||||
$(MessageNotification::$protocol_mes(mes) => Ok(($protocol_mes::ID, mes.encode()
|
||||
.map_err(|e| BucketError::FailedToEncodeBucketBody(e.to_string()))?)),)+
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Message {
|
||||
Request(MessageRequest),
|
||||
Response(MessageResponse),
|
||||
Notification(MessageNotification)
|
||||
}
|
||||
|
||||
impl From<MessageResponse> for Message {
|
||||
fn from(value: MessageResponse) -> Message {
|
||||
Message::Response(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<MessageRequest> for Message {
|
||||
fn from(value: MessageRequest) -> Message {
|
||||
Message::Request(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<MessageNotification> for Message {
|
||||
fn from(value: MessageNotification) -> Message {
|
||||
Message::Notification(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl Message {
|
||||
pub fn id(&self) -> u32 {
|
||||
match self {
|
||||
Message::Request(req) => req.id(),
|
||||
Message::Response(res) => res.id(),
|
||||
Message::Notification(noti) => noti.id(),
|
||||
}
|
||||
}
|
||||
pub fn is_request(&self) -> bool {
|
||||
matches!(self, Self::Request(_))
|
||||
}
|
||||
pub fn is_response(&self) -> bool {
|
||||
matches!(self, Self::Response(_))
|
||||
}
|
||||
pub fn is_notification(&self) -> bool {
|
||||
matches!(self, Self::Notification(_))
|
||||
}
|
||||
}
|
||||
|
||||
impl levin::LevinBody for Message {
|
||||
fn decode_message(buf: &[u8], typ: MessageType, command: u32) -> Result<Self, BucketError> {
|
||||
Ok(match typ {
|
||||
MessageType::Response => Message::Response(MessageResponse::decode(buf, command)?),
|
||||
MessageType::Request => Message::Request(MessageRequest::decode(buf, command)?),
|
||||
MessageType::Notification => Message::Notification(MessageNotification::decode(buf, command)?),
|
||||
})
|
||||
}
|
||||
|
||||
fn encode(&self) -> Result<(i32, u32, MessageType, Vec<u8>), BucketError> {
|
||||
match self {
|
||||
Message::Response(mes) => {
|
||||
let (command, bytes)= mes.encode()?;
|
||||
Ok((1, command, MessageType::Response, bytes))
|
||||
},
|
||||
Message::Request(mes) => {
|
||||
let (command, bytes)= mes.encode()?;
|
||||
Ok((0, command, MessageType::Request, bytes))
|
||||
},
|
||||
Message::Notification(mes) => {
|
||||
let (command, bytes)= mes.encode()?;
|
||||
Ok((0, command, MessageType::Notification, bytes))
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
p2p_command!(
|
||||
Handshake,
|
||||
TimedSync,
|
||||
Ping,
|
||||
SupportFlags,
|
||||
NewBlock,
|
||||
NewTransactions,
|
||||
GetObjectsRequest,
|
||||
GetObjectsResponse,
|
||||
ChainRequest,
|
||||
ChainResponse,
|
||||
NewFluffyBlock,
|
||||
FluffyMissingTransactionsRequest,
|
||||
GetTxPoolCompliment
|
||||
);
|
||||
|
||||
levin_body!(
|
||||
Admin:
|
||||
Handshake,
|
||||
TimedSync,
|
||||
Ping,
|
||||
SupportFlags
|
||||
Protocol:
|
||||
NewBlock,
|
||||
NewTransactions,
|
||||
GetObjectsRequest,
|
||||
GetObjectsResponse,
|
||||
ChainRequest,
|
||||
ChainResponse,
|
||||
NewFluffyBlock,
|
||||
FluffyMissingTransactionsRequest,
|
||||
GetTxPoolCompliment
|
||||
);
|
||||
|
|
|
@ -18,13 +18,34 @@
|
|||
//! Admin message requests must be responded to in order unlike
|
||||
//! protocol messages.
|
||||
|
||||
use std::fmt::Display;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::{
|
||||
common::{BasicNodeData, CoreSyncData, PeerListEntryBase},
|
||||
common::{BasicNodeData, CoreSyncData, PeerListEntryBase, PeerSupportFlags},
|
||||
PeerID,
|
||||
};
|
||||
|
||||
const P2P_ADMIN_BASE: u32 = 1000;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct SillyEncodingError;
|
||||
|
||||
impl Display for SillyEncodingError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str("Literally impossible to get this error")
|
||||
}
|
||||
}
|
||||
|
||||
fn silly_encode<T>(_: &T) -> Result<Vec<u8>, SillyEncodingError> {
|
||||
Ok(vec![])
|
||||
}
|
||||
|
||||
fn silly_decode<T: Default>(_: &[u8]) -> Result<T, SillyEncodingError> {
|
||||
Ok(T::default())
|
||||
}
|
||||
|
||||
/// A Handshake Request
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
|
||||
pub struct HandshakeRequest {
|
||||
|
@ -34,6 +55,10 @@ pub struct HandshakeRequest {
|
|||
pub payload_data: CoreSyncData,
|
||||
}
|
||||
|
||||
fn empty_vec<T>() -> Vec<T> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
/// A Handshake Response
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
|
||||
pub struct HandshakeResponse {
|
||||
|
@ -42,9 +67,26 @@ pub struct HandshakeResponse {
|
|||
/// Core Sync Data
|
||||
pub payload_data: CoreSyncData,
|
||||
/// PeerList
|
||||
#[serde(default = "empty_vec")]
|
||||
pub local_peerlist_new: Vec<PeerListEntryBase>,
|
||||
}
|
||||
|
||||
message!(
|
||||
Admin,
|
||||
Name: Handshake,
|
||||
ID: P2P_ADMIN_BASE + 1,
|
||||
Request: HandshakeRequest {
|
||||
EncodingError: epee_serde::Error,
|
||||
Encode: epee_serde::to_bytes,
|
||||
Decode: epee_serde::from_bytes,
|
||||
},
|
||||
Response: HandshakeResponse {
|
||||
EncodingError: epee_serde::Error,
|
||||
Encode: epee_serde::to_bytes,
|
||||
Decode: epee_serde::from_bytes,
|
||||
},
|
||||
);
|
||||
|
||||
/// A TimedSync Request
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
|
||||
pub struct TimedSyncRequest {
|
||||
|
@ -61,11 +103,27 @@ pub struct TimedSyncResponse {
|
|||
pub local_peerlist_new: Vec<PeerListEntryBase>,
|
||||
}
|
||||
|
||||
message!(
|
||||
Admin,
|
||||
Name: TimedSync,
|
||||
ID: P2P_ADMIN_BASE + 2,
|
||||
Request: TimedSyncRequest {
|
||||
EncodingError: epee_serde::Error,
|
||||
Encode: epee_serde::to_bytes,
|
||||
Decode: epee_serde::from_bytes,
|
||||
},
|
||||
Response: TimedSyncResponse {
|
||||
EncodingError: epee_serde::Error,
|
||||
Encode: epee_serde::to_bytes,
|
||||
Decode: epee_serde::from_bytes,
|
||||
},
|
||||
);
|
||||
|
||||
/// The status field of an okay ping response
|
||||
pub const PING_OK_RESPONSE_STATUS_TEXT: &str = "OK";
|
||||
|
||||
/// A Ping Request
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq)]
|
||||
pub struct PingRequest;
|
||||
|
||||
/// A Ping Response
|
||||
|
@ -77,25 +135,54 @@ pub struct PingResponse {
|
|||
pub peer_id: PeerID,
|
||||
}
|
||||
|
||||
message!(
|
||||
Admin,
|
||||
Name: Ping,
|
||||
ID: P2P_ADMIN_BASE + 3,
|
||||
Request: PingRequest {
|
||||
EncodingError: SillyEncodingError,
|
||||
Encode: silly_encode,
|
||||
Decode: silly_decode,
|
||||
},
|
||||
Response: PingResponse {
|
||||
EncodingError: epee_serde::Error,
|
||||
Encode: epee_serde::to_bytes,
|
||||
Decode: epee_serde::from_bytes,
|
||||
},
|
||||
);
|
||||
|
||||
/// A Support Flags Request
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq)]
|
||||
pub struct SupportFlagsRequest;
|
||||
|
||||
/// A Support Flags Response
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
|
||||
pub struct SupportFlagsResponse {
|
||||
/// Support Flags
|
||||
pub support_flags: u32,
|
||||
pub support_flags: PeerSupportFlags,
|
||||
}
|
||||
|
||||
message!(
|
||||
Admin,
|
||||
Name: SupportFlags,
|
||||
ID: P2P_ADMIN_BASE + 7,
|
||||
Request: SupportFlagsRequest {
|
||||
EncodingError: SillyEncodingError,
|
||||
Encode: silly_encode,
|
||||
Decode: silly_decode,
|
||||
},
|
||||
Response: SupportFlagsResponse {
|
||||
EncodingError: epee_serde::Error,
|
||||
Encode: epee_serde::to_bytes,
|
||||
Decode: epee_serde::from_bytes,
|
||||
},
|
||||
);
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::str::FromStr;
|
||||
|
||||
use monero::Hash;
|
||||
|
||||
use super::{BasicNodeData, CoreSyncData, HandshakeRequest, HandshakeResponse};
|
||||
use crate::messages::common::PeerID;
|
||||
use crate::messages::common::{PeerID, PeerSupportFlags};
|
||||
|
||||
#[test]
|
||||
fn serde_handshake_req() {
|
||||
|
@ -120,7 +207,7 @@ mod tests {
|
|||
18, 48, 241, 113, 97, 4, 65, 97, 23, 49, 0, 130, 22, 161, 161, 16,
|
||||
],
|
||||
peer_id: PeerID(9671405426614699871),
|
||||
support_flags: 1,
|
||||
support_flags: PeerSupportFlags::from(1_u32),
|
||||
rpc_port: 0,
|
||||
rpc_credits_per_hash: 0,
|
||||
};
|
||||
|
@ -130,9 +217,11 @@ mod tests {
|
|||
cumulative_difficulty_top64: 0,
|
||||
current_height: 0,
|
||||
pruning_seed: 0,
|
||||
top_id: Hash::from_str(
|
||||
top_id: hex::decode(
|
||||
"0x418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3",
|
||||
)
|
||||
.unwrap()
|
||||
.try_into()
|
||||
.unwrap(),
|
||||
top_version: 1,
|
||||
};
|
||||
|
@ -925,7 +1014,7 @@ mod tests {
|
|||
18, 48, 241, 113, 97, 4, 65, 97, 23, 49, 0, 130, 22, 161, 161, 16,
|
||||
],
|
||||
peer_id: PeerID(6037804360359455404),
|
||||
support_flags: 1,
|
||||
support_flags: PeerSupportFlags::from(1_u32),
|
||||
rpc_port: 18089,
|
||||
rpc_credits_per_hash: 0,
|
||||
};
|
||||
|
@ -935,9 +1024,11 @@ mod tests {
|
|||
cumulative_difficulty_top64: 0,
|
||||
current_height: 2775167,
|
||||
pruning_seed: 386,
|
||||
top_id: Hash::from_str(
|
||||
top_id: hex::decode(
|
||||
"0x40780072dae9123108599a9f6585f2474d03f7b6dbb5d8c18717baa8cf7756eb",
|
||||
)
|
||||
.unwrap()
|
||||
.try_into()
|
||||
.unwrap(),
|
||||
top_version: 16,
|
||||
};
|
||||
|
|
|
@ -17,19 +17,52 @@
|
|||
//
|
||||
|
||||
use epee_serde::Value;
|
||||
use monero::{Block, Hash, Transaction};
|
||||
use serde::de;
|
||||
use serde::ser::SerializeStruct;
|
||||
use serde::ser::{SerializeSeq, SerializeStruct};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::serde_as;
|
||||
use serde_with::TryFromInto;
|
||||
|
||||
use super::zero_val;
|
||||
use crate::utils;
|
||||
use crate::NetworkAddress;
|
||||
|
||||
/// A PeerID, different from a `NetworkAddress`
|
||||
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq)]
|
||||
#[serde(transparent)]
|
||||
pub struct PeerSupportFlags(u32); // had to name it this to avoid conflict
|
||||
|
||||
impl PeerSupportFlags {
|
||||
const FLUFFY_BLOCKS: u32 = 0b0000_0001;
|
||||
/// checks if `self` has all the flags that `other` has
|
||||
pub fn contains(&self, other: &PeerSupportFlags) -> bool {
|
||||
self.0 & other.0 == other.0
|
||||
}
|
||||
pub fn supports_fluffy_blocks(&self) -> bool {
|
||||
self.0 & Self::FLUFFY_BLOCKS == Self::FLUFFY_BLOCKS
|
||||
}
|
||||
pub fn get_support_flag_fluffy_blocks() -> Self {
|
||||
PeerSupportFlags(Self::FLUFFY_BLOCKS)
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.0 == 0
|
||||
}
|
||||
}
|
||||
|
||||
impl From<u8> for PeerSupportFlags {
|
||||
fn from(value: u8) -> Self {
|
||||
PeerSupportFlags(value as u32)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<u32> for PeerSupportFlags {
|
||||
fn from(value: u32) -> Self {
|
||||
PeerSupportFlags(value)
|
||||
}
|
||||
}
|
||||
|
||||
/// A PeerID, different from a `NetworkAddress`
|
||||
#[derive(Debug, Clone, Default, Copy, Deserialize, Serialize, PartialEq, Eq)]
|
||||
#[serde(transparent)]
|
||||
pub struct PeerID(pub u64);
|
||||
|
||||
/// Basic Node Data, information on the connected peer
|
||||
|
@ -43,15 +76,15 @@ pub struct BasicNodeData {
|
|||
pub peer_id: PeerID,
|
||||
/// The Peers Support Flags
|
||||
/// (If this is not in the message the default is 0)
|
||||
#[serde(default = "zero_val")]
|
||||
pub support_flags: u32,
|
||||
#[serde(default = "utils::zero_val")]
|
||||
pub support_flags: PeerSupportFlags,
|
||||
/// RPC Port
|
||||
/// (If this is not in the message the default is 0)
|
||||
#[serde(default = "zero_val")]
|
||||
#[serde(default = "utils::zero_val")]
|
||||
pub rpc_port: u16,
|
||||
/// RPC Credits Per Hash
|
||||
/// (If this is not in the message the default is 0)
|
||||
#[serde(default = "zero_val")]
|
||||
#[serde(default = "utils::zero_val")]
|
||||
pub rpc_credits_per_hash: u32,
|
||||
}
|
||||
|
||||
|
@ -64,23 +97,41 @@ pub struct CoreSyncData {
|
|||
pub cumulative_difficulty: u64,
|
||||
/// Cumulative Difficulty High
|
||||
/// The upper 64 bits of the 128 bit cumulative difficulty
|
||||
#[serde(default = "zero_val")]
|
||||
#[serde(default = "utils::zero_val")]
|
||||
pub cumulative_difficulty_top64: u64,
|
||||
/// Current Height of the peer
|
||||
pub current_height: u64,
|
||||
/// Pruning Seed of the peer
|
||||
/// (If this is not in the message the default is 0)
|
||||
#[serde(default = "zero_val")]
|
||||
#[serde(default = "utils::zero_val")]
|
||||
pub pruning_seed: u32,
|
||||
/// Hash of the top block
|
||||
#[serde_as(as = "TryFromInto<[u8; 32]>")]
|
||||
pub top_id: Hash,
|
||||
pub top_id: [u8; 32],
|
||||
/// Version of the top block
|
||||
#[serde(default = "zero_val")]
|
||||
#[serde(default = "utils::zero_val")]
|
||||
pub top_version: u8,
|
||||
}
|
||||
|
||||
impl CoreSyncData {
|
||||
pub fn new(
|
||||
cumulative_difficulty_128: u128,
|
||||
current_height: u64,
|
||||
pruning_seed: u32,
|
||||
top_id: [u8; 32],
|
||||
top_version: u8,
|
||||
) -> CoreSyncData {
|
||||
let cumulative_difficulty = cumulative_difficulty_128 as u64;
|
||||
let cumulative_difficulty_top64 = (cumulative_difficulty_128 >> 64) as u64;
|
||||
CoreSyncData {
|
||||
cumulative_difficulty,
|
||||
cumulative_difficulty_top64,
|
||||
current_height,
|
||||
pruning_seed,
|
||||
top_id,
|
||||
top_version,
|
||||
}
|
||||
}
|
||||
/// Returns the 128 bit cumulative difficulty of the peers blockchain
|
||||
pub fn cumulative_difficulty(&self) -> u128 {
|
||||
let mut ret: u128 = self.cumulative_difficulty_top64 as u128;
|
||||
|
@ -91,76 +142,150 @@ impl CoreSyncData {
|
|||
|
||||
/// PeerListEntryBase, information kept on a peer which will be entered
|
||||
/// in a peer list/store.
|
||||
#[derive(Clone, Copy, Deserialize, Serialize, Debug, Eq, PartialEq)]
|
||||
#[derive(Clone, Copy, Default, Deserialize, Serialize, Debug, Eq, PartialEq)]
|
||||
pub struct PeerListEntryBase {
|
||||
/// The Peer Address
|
||||
pub adr: NetworkAddress,
|
||||
/// The Peer ID
|
||||
pub id: PeerID,
|
||||
/// The last Time The Peer Was Seen
|
||||
#[serde(default = "zero_val")]
|
||||
#[serde(default = "utils::zero_val")]
|
||||
pub last_seen: i64,
|
||||
/// The Pruning Seed
|
||||
#[serde(default = "zero_val")]
|
||||
#[serde(default = "utils::zero_val")]
|
||||
pub pruning_seed: u32,
|
||||
/// The RPC port
|
||||
#[serde(default = "zero_val")]
|
||||
#[serde(default = "utils::zero_val")]
|
||||
pub rpc_port: u16,
|
||||
/// The RPC credits per hash
|
||||
#[serde(default = "zero_val")]
|
||||
#[serde(default = "utils::zero_val")]
|
||||
pub rpc_credits_per_hash: u32,
|
||||
}
|
||||
|
||||
impl std::hash::Hash for PeerListEntryBase {
|
||||
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
|
||||
// We only hash the adr so we can look this up in a HashSet.
|
||||
self.adr.hash(state)
|
||||
}
|
||||
}
|
||||
|
||||
/// A pruned tx with the hash of the missing prunable data
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub struct TxBlobEntry {
|
||||
pub struct PrunedTxBlobEntry {
|
||||
/// The Tx
|
||||
pub tx: Transaction, // ########### use pruned transaction when PR is merged ##############
|
||||
pub tx: Vec<u8>,
|
||||
/// The Prunable Tx Hash
|
||||
pub prunable_hash: Hash,
|
||||
pub prunable_hash: [u8; 32],
|
||||
}
|
||||
|
||||
impl TxBlobEntry {
|
||||
fn from_epee_value<E: de::Error>(value: &Value) -> Result<Self, E> {
|
||||
let tx_blob = get_val_from_map!(value, "blob", get_bytes, "Vec<u8>");
|
||||
impl PrunedTxBlobEntry {
|
||||
fn from_epee_value<E: de::Error>(mut value: Value) -> Result<Self, E> {
|
||||
let tx = utils::get_internal_val_from_map(&mut value, "blob", Value::get_bytes, "Vec<u8>")?;
|
||||
|
||||
let tx = monero_decode_into_serde_err!(Transaction, tx_blob);
|
||||
let prunable_hash = utils::get_internal_val_from_map(
|
||||
&mut value,
|
||||
"prunable_hash",
|
||||
Value::get_bytes,
|
||||
"Vec<u8>",
|
||||
)?;
|
||||
let prunable_hash_len = prunable_hash.len();
|
||||
|
||||
let prunable_hash_blob = get_val_from_map!(value, "prunable_hash", get_bytes, "Vec<u8>");
|
||||
|
||||
let prunable_hash = Hash::from_slice(prunable_hash_blob);
|
||||
|
||||
Ok(Self { tx, prunable_hash })
|
||||
Ok(PrunedTxBlobEntry {
|
||||
tx,
|
||||
prunable_hash: prunable_hash
|
||||
.try_into()
|
||||
.map_err(|_| E::invalid_length(prunable_hash_len, &"a 16-byte array"))?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for TxBlobEntry {
|
||||
impl Serialize for PrunedTxBlobEntry {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
let mut state = serializer.serialize_struct("", 2)?;
|
||||
let tx_blob = monero::consensus::serialize(&self.tx);
|
||||
state.serialize_field("blob", &tx_blob)?;
|
||||
let prunable_hash = self.prunable_hash.as_bytes();
|
||||
state.serialize_field("prunable_hash", prunable_hash)?;
|
||||
state.serialize_field("blob", &self.tx)?;
|
||||
state.serialize_field("prunable_hash", &self.prunable_hash)?;
|
||||
state.end()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub enum TransactionBlobs {
|
||||
Pruned(Vec<PrunedTxBlobEntry>),
|
||||
Normal(Vec<Vec<u8>>),
|
||||
}
|
||||
|
||||
impl TransactionBlobs {
|
||||
pub fn len(&self) -> usize {
|
||||
match self {
|
||||
TransactionBlobs::Normal(txs) => txs.len(),
|
||||
TransactionBlobs::Pruned(txs) => txs.len(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len() == 0
|
||||
}
|
||||
|
||||
fn from_epee_value<E: de::Error>(value: Value, pruned: bool) -> Result<Self, E> {
|
||||
let txs = utils::get_internal_val(value, Value::get_seq, "A sequence")?;
|
||||
if pruned {
|
||||
let mut decoded_txs = Vec::with_capacity(txs.len());
|
||||
for tx in txs {
|
||||
decoded_txs.push(PrunedTxBlobEntry::from_epee_value(tx)?);
|
||||
}
|
||||
Ok(TransactionBlobs::Pruned(decoded_txs))
|
||||
} else {
|
||||
let mut decoded_txs = Vec::with_capacity(txs.len());
|
||||
for tx in txs {
|
||||
decoded_txs.push(utils::get_internal_val(tx, Value::get_bytes, "Vec<u8>")?);
|
||||
}
|
||||
Ok(TransactionBlobs::Normal(decoded_txs))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for TransactionBlobs {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
match self {
|
||||
TransactionBlobs::Pruned(txs) => {
|
||||
let mut seq = serializer.serialize_seq(Some(txs.len()))?;
|
||||
|
||||
for tx in txs {
|
||||
seq.serialize_element(tx)?;
|
||||
}
|
||||
|
||||
seq.end()
|
||||
}
|
||||
TransactionBlobs::Normal(txs) => {
|
||||
let mut seq = serializer.serialize_seq(Some(txs.len()))?;
|
||||
|
||||
for tx in txs {
|
||||
seq.serialize_element(tx)?;
|
||||
}
|
||||
|
||||
seq.end()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A Block that can contain transactions
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub struct BlockCompleteEntry {
|
||||
/// True if tx data is pruned
|
||||
pub pruned: bool,
|
||||
/// The Block
|
||||
pub block: Block,
|
||||
pub block: Vec<u8>,
|
||||
/// The Block Weight/Size
|
||||
pub block_weight: u64,
|
||||
/// If the Block is pruned the txs will be here
|
||||
pub txs_pruned: Vec<TxBlobEntry>,
|
||||
/// If the Block is not pruned the txs will be here
|
||||
pub txs: Vec<Transaction>,
|
||||
/// The blocks txs
|
||||
pub txs: TransactionBlobs,
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for BlockCompleteEntry {
|
||||
|
@ -168,46 +293,38 @@ impl<'de> Deserialize<'de> for BlockCompleteEntry {
|
|||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let value = Value::deserialize(deserializer)?;
|
||||
let mut value = Value::deserialize(deserializer)?;
|
||||
let mut pruned = false;
|
||||
if let Some(val) = value.get("pruned") {
|
||||
pruned = *get_internal_val!(val, get_bool, "bool");
|
||||
if let Some(val) = value.get_and_remove("pruned") {
|
||||
pruned = utils::get_internal_val(val, Value::get_bool, "bool")?;
|
||||
}
|
||||
|
||||
let block_bytes = get_val_from_map!(value, "block", get_bytes, "Vec<u8>");
|
||||
|
||||
let block = monero_decode_into_serde_err!(Block, block_bytes);
|
||||
let block =
|
||||
utils::get_internal_val_from_map(&mut value, "block", Value::get_bytes, "Vec<u8>")?;
|
||||
|
||||
let mut block_weight = 0;
|
||||
|
||||
let mut txs_pruned = vec![];
|
||||
let mut txs = vec![];
|
||||
let txs_value = value.get_and_remove("txs");
|
||||
|
||||
let mut txs = TransactionBlobs::Normal(vec![]);
|
||||
|
||||
if let Some(txs_value) = txs_value {
|
||||
txs = TransactionBlobs::from_epee_value(txs_value, true)?;
|
||||
}
|
||||
|
||||
if pruned {
|
||||
block_weight = *get_val_from_map!(value, "block_weight", get_u64, "u64");
|
||||
|
||||
if let Some(v) = value.get("txs") {
|
||||
let v = get_internal_val!(v, get_seq, "a sequence");
|
||||
|
||||
txs_pruned.reserve(v.len());
|
||||
for val in v {
|
||||
txs_pruned.push(TxBlobEntry::from_epee_value(val)?);
|
||||
}
|
||||
}
|
||||
} else if let Some(v) = value.get("txs") {
|
||||
let v = get_internal_val!(v, get_seq, "a sequence");
|
||||
|
||||
txs.reserve(v.len());
|
||||
for val in v {
|
||||
let tx_buf = get_internal_val!(val, get_bytes, "Vec<u8>");
|
||||
txs.push(monero_decode_into_serde_err!(Transaction, tx_buf));
|
||||
}
|
||||
block_weight = utils::get_internal_val_from_map(
|
||||
&mut value,
|
||||
"block_weight",
|
||||
Value::get_u64,
|
||||
"u64",
|
||||
)?;
|
||||
}
|
||||
|
||||
Ok(BlockCompleteEntry {
|
||||
pruned,
|
||||
block,
|
||||
block_weight,
|
||||
txs_pruned,
|
||||
txs,
|
||||
})
|
||||
}
|
||||
|
@ -219,7 +336,7 @@ impl Serialize for BlockCompleteEntry {
|
|||
S: serde::Serializer,
|
||||
{
|
||||
let mut len = 1;
|
||||
if !self.txs.is_empty() || !self.txs_pruned.is_empty() {
|
||||
if !self.txs.is_empty() {
|
||||
len += 1;
|
||||
}
|
||||
if self.pruned {
|
||||
|
@ -230,24 +347,31 @@ impl Serialize for BlockCompleteEntry {
|
|||
|
||||
let mut state = serializer.serialize_struct("", len)?;
|
||||
|
||||
let block = monero::consensus::serialize(&self.block);
|
||||
state.serialize_field("block", &block)?;
|
||||
state.serialize_field("block", &self.block)?;
|
||||
|
||||
if self.pruned {
|
||||
state.serialize_field("pruned", &true)?;
|
||||
state.serialize_field("block_weight", &self.block_weight)?;
|
||||
}
|
||||
|
||||
if !self.txs_pruned.is_empty() {
|
||||
state.serialize_field("txs", &self.txs_pruned)?;
|
||||
}
|
||||
} else if !self.txs.is_empty() {
|
||||
let mut tx_blobs = vec![];
|
||||
for tx in self.txs.iter() {
|
||||
tx_blobs.push(monero::consensus::serialize(tx));
|
||||
}
|
||||
state.serialize_field("txs", &tx_blobs)?;
|
||||
if !self.txs.is_empty() {
|
||||
state.serialize_field("txs", &self.txs)?;
|
||||
}
|
||||
|
||||
state.end()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::CoreSyncData;
|
||||
|
||||
#[test]
|
||||
fn core_sync_cumulative_difficulty() {
|
||||
let core_sync = CoreSyncData::new(u128::MAX, 80085, 200, [0; 32], 21);
|
||||
assert_eq!(core_sync.cumulative_difficulty(), u128::MAX);
|
||||
let core_sync = CoreSyncData::new(21, 80085, 200, [0; 32], 21);
|
||||
assert_eq!(core_sync.cumulative_difficulty(), 21);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,16 +18,15 @@
|
|||
//! Protocol message requests don't have to be responded to in order unlike
|
||||
//! admin messages.
|
||||
|
||||
use monero::Hash;
|
||||
use monero::Transaction;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use serde_with::serde_as;
|
||||
use serde_with::Bytes;
|
||||
use serde_with::TryFromInto;
|
||||
|
||||
use super::common::BlockCompleteEntry;
|
||||
use super::{default_false, default_true};
|
||||
use crate::utils::{default_false, default_true};
|
||||
|
||||
const P2P_PROTOCOL_BASE: u32 = 2000;
|
||||
|
||||
/// A block that SHOULD have transactions
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
|
||||
|
@ -38,14 +37,15 @@ pub struct NewBlock {
|
|||
pub current_blockchain_height: u64,
|
||||
}
|
||||
|
||||
/// A Block that doesn't have transactions unless requested
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
|
||||
pub struct NewFluffyBlock {
|
||||
/// Block which might have transactions
|
||||
pub b: BlockCompleteEntry,
|
||||
/// The Block height
|
||||
pub current_blockchain_height: u64,
|
||||
}
|
||||
message!(
|
||||
Protocol,
|
||||
Name: NewBlock {
|
||||
EncodingError: epee_serde::Error,
|
||||
Encode: epee_serde::to_bytes,
|
||||
Decode: epee_serde::from_bytes,
|
||||
},
|
||||
ID: P2P_PROTOCOL_BASE + 1,
|
||||
);
|
||||
|
||||
/// A Tx Pool transaction blob
|
||||
#[serde_as]
|
||||
|
@ -53,13 +53,6 @@ pub struct NewFluffyBlock {
|
|||
#[serde(transparent)]
|
||||
pub struct TxBlob(#[serde_as(as = "Bytes")] pub Vec<u8>);
|
||||
|
||||
impl TxBlob {
|
||||
/// Deserialize the transaction
|
||||
pub fn deserialize(&self) -> Result<Transaction, monero::consensus::encode::Error> {
|
||||
monero::consensus::deserialize(&self.0)
|
||||
}
|
||||
}
|
||||
|
||||
/// New Tx Pool Transactions
|
||||
#[serde_as]
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
|
||||
|
@ -75,18 +68,37 @@ pub struct NewTransactions {
|
|||
pub padding: Vec<u8>,
|
||||
}
|
||||
|
||||
message!(
|
||||
Protocol,
|
||||
Name: NewTransactions {
|
||||
EncodingError: epee_serde::Error,
|
||||
Encode: epee_serde::to_bytes,
|
||||
Decode: epee_serde::from_bytes,
|
||||
},
|
||||
ID: P2P_PROTOCOL_BASE + 2,
|
||||
);
|
||||
|
||||
/// A Request For Blocks
|
||||
#[serde_as]
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
|
||||
pub struct GetObjectsRequest {
|
||||
/// Blocks
|
||||
#[serde_as(as = "Vec<TryFromInto<[u8; 32]>>")]
|
||||
pub blocks: Vec<Hash>,
|
||||
pub blocks: Vec<[u8; 32]>,
|
||||
/// Pruned
|
||||
#[serde(default = "default_false")]
|
||||
pub pruned: bool,
|
||||
}
|
||||
|
||||
message!(
|
||||
Protocol,
|
||||
Name: GetObjectsRequest {
|
||||
EncodingError: epee_serde::Error,
|
||||
Encode: epee_serde::to_bytes,
|
||||
Decode: epee_serde::from_bytes,
|
||||
},
|
||||
ID: P2P_PROTOCOL_BASE + 3,
|
||||
);
|
||||
|
||||
/// A Blocks Response
|
||||
#[serde_as]
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
|
||||
|
@ -94,24 +106,42 @@ pub struct GetObjectsResponse {
|
|||
/// Blocks
|
||||
pub blocks: Vec<BlockCompleteEntry>,
|
||||
/// Missed IDs
|
||||
#[serde_as(as = "Vec<TryFromInto<[u8; 32]>>")]
|
||||
pub missed_ids: Vec<Hash>,
|
||||
pub missed_ids: Vec<[u8; 32]>,
|
||||
/// The height of the peers blockchain
|
||||
pub current_blockchain_height: u64,
|
||||
}
|
||||
|
||||
message!(
|
||||
Protocol,
|
||||
Name: GetObjectsResponse {
|
||||
EncodingError: epee_serde::Error,
|
||||
Encode: epee_serde::to_bytes,
|
||||
Decode: epee_serde::from_bytes,
|
||||
},
|
||||
ID: P2P_PROTOCOL_BASE + 4,
|
||||
);
|
||||
|
||||
/// A Chain Request
|
||||
#[serde_as]
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
|
||||
pub struct ChainRequest {
|
||||
/// Block IDs
|
||||
#[serde_as(as = "Vec<TryFromInto<[u8; 32]>>")]
|
||||
pub block_ids: Vec<Hash>,
|
||||
pub block_ids: Vec<[u8; 32]>,
|
||||
/// Prune
|
||||
#[serde(default = "default_false")]
|
||||
pub prune: bool,
|
||||
}
|
||||
|
||||
message!(
|
||||
Protocol,
|
||||
Name: ChainRequest {
|
||||
EncodingError: epee_serde::Error,
|
||||
Encode: epee_serde::to_bytes,
|
||||
Decode: epee_serde::from_bytes,
|
||||
},
|
||||
ID: P2P_PROTOCOL_BASE + 6,
|
||||
);
|
||||
|
||||
/// A Chain Response
|
||||
#[serde_as]
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
|
||||
|
@ -125,42 +155,113 @@ pub struct ChainResponse {
|
|||
/// Cumulative Difficulty High
|
||||
pub cumulative_difficulty_high: u64,
|
||||
/// Block IDs
|
||||
#[serde_as(as = "Vec<TryFromInto<[u8; 32]>>")]
|
||||
pub m_block_ids: Vec<Hash>,
|
||||
pub m_block_ids: Vec<[u8; 32]>,
|
||||
/// Block Weights
|
||||
pub m_block_weights: Vec<u64>,
|
||||
/// The first Block in the blockchain
|
||||
/// The first Block in the response
|
||||
#[serde_as(as = "Bytes")]
|
||||
pub first_block: Vec<u8>,
|
||||
}
|
||||
|
||||
impl ChainResponse {
|
||||
pub fn new(
|
||||
start_height: u64,
|
||||
total_height: u64,
|
||||
cumulative_difficulty_128: u128,
|
||||
m_block_ids: Vec<[u8; 32]>,
|
||||
m_block_weights: Vec<u64>,
|
||||
first_block: Vec<u8>,
|
||||
) -> Self {
|
||||
let cumulative_difficulty_low = cumulative_difficulty_128 as u64;
|
||||
let cumulative_difficulty_high = (cumulative_difficulty_128 >> 64) as u64;
|
||||
Self {
|
||||
start_height,
|
||||
total_height,
|
||||
cumulative_difficulty_low,
|
||||
cumulative_difficulty_high,
|
||||
m_block_ids,
|
||||
m_block_weights,
|
||||
first_block,
|
||||
}
|
||||
}
|
||||
pub fn cumulative_difficulty(&self) -> u128 {
|
||||
let mut ret: u128 = self.cumulative_difficulty_high as u128;
|
||||
ret <<= 64;
|
||||
ret | self.cumulative_difficulty_low as u128
|
||||
}
|
||||
}
|
||||
|
||||
message!(
|
||||
Protocol,
|
||||
Name: ChainResponse {
|
||||
EncodingError: epee_serde::Error,
|
||||
Encode: epee_serde::to_bytes,
|
||||
Decode: epee_serde::from_bytes,
|
||||
},
|
||||
ID: P2P_PROTOCOL_BASE + 7,
|
||||
);
|
||||
|
||||
/// A Block that doesn't have transactions unless requested
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
|
||||
pub struct NewFluffyBlock {
|
||||
/// Block which might have transactions
|
||||
pub b: BlockCompleteEntry,
|
||||
/// The Block height
|
||||
pub current_blockchain_height: u64,
|
||||
}
|
||||
|
||||
message!(
|
||||
Protocol,
|
||||
Name: NewFluffyBlock {
|
||||
EncodingError: epee_serde::Error,
|
||||
Encode: epee_serde::to_bytes,
|
||||
Decode: epee_serde::from_bytes,
|
||||
},
|
||||
ID: P2P_PROTOCOL_BASE + 8,
|
||||
);
|
||||
|
||||
/// A request for Txs we are missing from our TxPool
|
||||
#[serde_as]
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
|
||||
pub struct FluffyMissingTransactionsRequest {
|
||||
/// The Block we are missing the Txs in
|
||||
#[serde_as(as = "TryFromInto<[u8; 32]>")]
|
||||
pub block_hash: Hash,
|
||||
pub block_hash: [u8; 32],
|
||||
/// The current blockchain height
|
||||
pub current_blockchain_height: u64,
|
||||
/// The Tx Indices
|
||||
pub missing_tx_indices: Vec<u64>,
|
||||
}
|
||||
|
||||
message!(
|
||||
Protocol,
|
||||
Name: FluffyMissingTransactionsRequest {
|
||||
EncodingError: epee_serde::Error,
|
||||
Encode: epee_serde::to_bytes,
|
||||
Decode: epee_serde::from_bytes,
|
||||
},
|
||||
ID: P2P_PROTOCOL_BASE + 9,
|
||||
);
|
||||
|
||||
/// TxPoolCompliment
|
||||
#[serde_as]
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
|
||||
pub struct TxPoolCompliment {
|
||||
pub struct GetTxPoolCompliment {
|
||||
/// Tx Hashes
|
||||
#[serde_as(as = "Vec<TryFromInto<[u8; 32]>>")]
|
||||
pub hashes: Vec<Hash>,
|
||||
pub hashes: Vec<[u8; 32]>,
|
||||
}
|
||||
|
||||
message!(
|
||||
Protocol,
|
||||
Name: GetTxPoolCompliment {
|
||||
EncodingError: epee_serde::Error,
|
||||
Encode: epee_serde::to_bytes,
|
||||
Decode: epee_serde::from_bytes,
|
||||
},
|
||||
ID: P2P_PROTOCOL_BASE + 10,
|
||||
);
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::str::FromStr;
|
||||
|
||||
use monero::Hash;
|
||||
|
||||
use super::{NewFluffyBlock, NewTransactions};
|
||||
|
||||
|
@ -675,14 +776,16 @@ mod tests {
|
|||
248, 248, 91, 110, 107, 144, 12, 175, 253, 21, 121, 28,
|
||||
];
|
||||
|
||||
let now = std::time::Instant::now();
|
||||
for _ in 0..1000 {
|
||||
let _new_transactions: NewTransactions = epee_serde::from_bytes(bytes).unwrap();
|
||||
}
|
||||
println!("in: {}ms", now.elapsed().as_millis());
|
||||
|
||||
let new_transactions: NewTransactions = epee_serde::from_bytes(bytes).unwrap();
|
||||
|
||||
assert_eq!(4, new_transactions.txs.len());
|
||||
|
||||
for transaction in new_transactions.txs.iter() {
|
||||
transaction.deserialize().unwrap();
|
||||
}
|
||||
|
||||
let encoded_bytes = epee_serde::to_bytes(&new_transactions).unwrap();
|
||||
let new_transactions_2: NewTransactions = epee_serde::from_bytes(encoded_bytes).unwrap();
|
||||
|
||||
|
@ -1031,11 +1134,6 @@ mod tests {
|
|||
103, 104, 116, 5, 209, 45, 42, 0, 0, 0, 0, 0,
|
||||
];
|
||||
let fluffy_block: NewFluffyBlock = epee_serde::from_bytes(bytes).unwrap();
|
||||
let hash =
|
||||
Hash::from_str("0x0bb7f7cfc8fcf55d3da64093a9ef7e9efb57e14249ef6a392b407aeecb1cd844")
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(hash, fluffy_block.b.block.id());
|
||||
|
||||
let encoded_bytes = epee_serde::to_bytes(&fluffy_block).unwrap();
|
||||
let fluffy_block_2: NewFluffyBlock = epee_serde::from_bytes(encoded_bytes).unwrap();
|
||||
|
|
|
@ -22,8 +22,17 @@ use std::{hash::Hash, net};
|
|||
use epee_serde::Value;
|
||||
use serde::{de, ser::SerializeStruct, Deserialize, Serialize};
|
||||
|
||||
use super::utils;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
|
||||
pub enum NetZone {
|
||||
Public,
|
||||
Tor,
|
||||
I2p,
|
||||
}
|
||||
|
||||
/// An IPv4 address with a port
|
||||
#[derive(Clone, Copy, Serialize, Debug, PartialEq, Eq, Hash)]
|
||||
#[derive(Clone, Copy, Serialize, Debug, Default, PartialEq, Eq, Hash)]
|
||||
pub struct IPv4Address {
|
||||
/// IP address
|
||||
pub m_ip: u32,
|
||||
|
@ -41,20 +50,20 @@ impl From<net::SocketAddrV4> for IPv4Address {
|
|||
}
|
||||
|
||||
impl IPv4Address {
|
||||
fn from_value<E: de::Error>(value: &Value) -> Result<Self, E> {
|
||||
let m_ip = get_val_from_map!(value, "m_ip", get_u32, "u32");
|
||||
fn from_value<E: de::Error>(mut value: Value) -> Result<Self, E> {
|
||||
let m_ip = utils::get_internal_val_from_map(&mut value, "m_ip", Value::get_u32, "u32")?;
|
||||
|
||||
let m_port = get_val_from_map!(value, "m_port", get_u16, "u16");
|
||||
let m_port = utils::get_internal_val_from_map(&mut value, "m_port", Value::get_u16, "u16")?;
|
||||
|
||||
Ok(IPv4Address {
|
||||
m_ip: *m_ip,
|
||||
m_port: *m_port,
|
||||
m_ip: m_ip,
|
||||
m_port: m_port,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// An IPv6 address with a port
|
||||
#[derive(Clone, Copy, Serialize, Debug, PartialEq, Eq, Hash)]
|
||||
#[derive(Clone, Copy, Serialize, Debug, Default, PartialEq, Eq, Hash)]
|
||||
pub struct IPv6Address {
|
||||
/// Address
|
||||
pub addr: [u8; 16],
|
||||
|
@ -72,17 +81,18 @@ impl From<net::SocketAddrV6> for IPv6Address {
|
|||
}
|
||||
|
||||
impl IPv6Address {
|
||||
fn from_value<E: de::Error>(value: &Value) -> Result<Self, E> {
|
||||
let addr = get_val_from_map!(value, "addr", get_bytes, "Vec<u8>");
|
||||
fn from_value<E: de::Error>(mut value: Value) -> Result<Self, E> {
|
||||
let addr =
|
||||
utils::get_internal_val_from_map(&mut value, "addr", Value::get_bytes, "Vec<u8>")?;
|
||||
let addr_len = addr.len();
|
||||
|
||||
let m_port = get_val_from_map!(value, "m_port", get_u16, "u16");
|
||||
let m_port = utils::get_internal_val_from_map(&mut value, "m_port", Value::get_u16, "u16")?;
|
||||
|
||||
Ok(IPv6Address {
|
||||
addr: addr
|
||||
.clone()
|
||||
.try_into()
|
||||
.map_err(|_| E::invalid_length(addr.len(), &"a 16-byte array"))?,
|
||||
m_port: *m_port,
|
||||
.map_err(|_| E::invalid_length(addr_len, &"a 16-byte array"))?,
|
||||
m_port,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -97,6 +107,37 @@ pub enum NetworkAddress {
|
|||
IPv6(IPv6Address),
|
||||
}
|
||||
|
||||
impl NetworkAddress {
|
||||
pub fn get_zone(&self) -> NetZone {
|
||||
match self {
|
||||
NetworkAddress::IPv4(_) | NetworkAddress::IPv6(_) => NetZone::Public,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_loopback(&self) -> bool {
|
||||
// TODO
|
||||
false
|
||||
}
|
||||
|
||||
pub fn is_local(&self) -> bool {
|
||||
// TODO
|
||||
false
|
||||
}
|
||||
|
||||
pub fn port(&self) -> u16 {
|
||||
match self {
|
||||
NetworkAddress::IPv4(ip) => ip.m_port,
|
||||
NetworkAddress::IPv6(ip) => ip.m_port,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for NetworkAddress {
|
||||
fn default() -> Self {
|
||||
Self::IPv4(IPv4Address::default())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<net::SocketAddrV4> for NetworkAddress {
|
||||
fn from(value: net::SocketAddrV4) -> Self {
|
||||
NetworkAddress::IPv4(value.into())
|
||||
|
@ -123,12 +164,16 @@ impl<'de> Deserialize<'de> for NetworkAddress {
|
|||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let value = Value::deserialize(deserializer)?;
|
||||
let addr_type = get_val_from_map!(value, "type", get_u8, "u8");
|
||||
let mut value = Value::deserialize(deserializer)?;
|
||||
let addr_type = utils::get_internal_val_from_map(&mut value, "type", Value::get_u8, "u8")?;
|
||||
|
||||
Ok(match addr_type {
|
||||
1 => NetworkAddress::IPv4(IPv4Address::from_value(get_field_from_map!(value, "addr"))?),
|
||||
2 => NetworkAddress::IPv6(IPv6Address::from_value(get_field_from_map!(value, "addr"))?),
|
||||
1 => NetworkAddress::IPv4(IPv4Address::from_value(utils::get_field_from_map(
|
||||
&mut value, "addr",
|
||||
)?)?),
|
||||
2 => NetworkAddress::IPv6(IPv6Address::from_value(utils::get_field_from_map(
|
||||
&mut value, "addr",
|
||||
)?)?),
|
||||
_ => {
|
||||
return Err(de::Error::custom(
|
||||
"Network address type currently unsupported",
|
||||
|
|
45
net/monero-wire/src/utils.rs
Normal file
45
net/monero-wire/src/utils.rs
Normal file
|
@ -0,0 +1,45 @@
|
|||
use epee_serde::Value;
|
||||
|
||||
pub(crate) fn zero_val<T: From<u8>>() -> T {
|
||||
T::from(0_u8)
|
||||
}
|
||||
|
||||
pub(crate) fn default_true() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_false() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub(crate) fn get_field_from_map<E: serde::de::Error>(
|
||||
value: &mut Value,
|
||||
field_name: &'static str,
|
||||
) -> Result<Value, E> {
|
||||
value
|
||||
.get_and_remove(field_name)
|
||||
.ok_or(serde::de::Error::missing_field(field_name))
|
||||
}
|
||||
|
||||
pub(crate) fn get_internal_val<E, F, T>(value: Value, get_fn: F, expected_ty: &str) -> Result<T, E>
|
||||
where
|
||||
E: serde::de::Error,
|
||||
F: Fn(Value) -> Option<T>,
|
||||
{
|
||||
let err = serde::de::Error::invalid_type(value.get_value_type_as_unexpected(), &expected_ty);
|
||||
get_fn(value).ok_or(err)
|
||||
}
|
||||
|
||||
pub(crate) fn get_internal_val_from_map<E, F, T>(
|
||||
value: &mut Value,
|
||||
field_name: &'static str,
|
||||
get_fn: F,
|
||||
expected_ty: &str,
|
||||
) -> Result<T, E>
|
||||
where
|
||||
E: serde::de::Error,
|
||||
F: Fn(Value) -> Option<T>,
|
||||
{
|
||||
let val = get_field_from_map(value, field_name)?;
|
||||
get_internal_val(val, get_fn, expected_ty)
|
||||
}
|
19
p2p/Cargo.toml
Normal file
19
p2p/Cargo.toml
Normal file
|
@ -0,0 +1,19 @@
|
|||
[package]
|
||||
name = "cuprate-peer"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
license = "AGPL-3.0-only"
|
||||
authors = ["Boog900"]
|
||||
|
||||
|
||||
[dependencies]
|
||||
chrono = "0.4.24"
|
||||
thiserror = "1.0.39"
|
||||
cuprate-common = {path = "../common"}
|
||||
monero-wire = {path= "../net/monero-wire"}
|
||||
futures = "0.3.26"
|
||||
tower = {version = "0.4.13", features = ["util", "steer"]}
|
||||
tokio = {version= "1.27", features=["rt", "time"]}
|
||||
async-trait = "0.1.68"
|
||||
tracing = "0.1.37"
|
||||
rand = "0.8.5"
|
14
p2p/LICENSE
Normal file
14
p2p/LICENSE
Normal file
|
@ -0,0 +1,14 @@
|
|||
Copyright (C) 2023 Cuprate Contributors
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
120
p2p/src/address_book.rs
Normal file
120
p2p/src/address_book.rs
Normal file
|
@ -0,0 +1,120 @@
|
|||
mod addr_book_client;
|
||||
pub(crate) mod address_book;
|
||||
|
||||
pub use addr_book_client::start_address_book;
|
||||
|
||||
use monero_wire::{messages::PeerListEntryBase, network_address::NetZone, NetworkAddress};
|
||||
|
||||
const MAX_WHITE_LIST_PEERS: usize = 1000;
|
||||
const MAX_GRAY_LIST_PEERS: usize = 5000;
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum AddressBookError {
|
||||
#[error("Peer was not found in book")]
|
||||
PeerNotFound,
|
||||
#[error("The peer list is empty")]
|
||||
PeerListEmpty,
|
||||
#[error("Peer sent an address out of it's net-zone")]
|
||||
PeerSentAnAddressOutOfZone,
|
||||
#[error("The address books channel has closed.")]
|
||||
AddressBooksChannelClosed,
|
||||
#[error("Peer Store Error: {0}")]
|
||||
PeerStoreError(&'static str),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum AddressBookRequest {
|
||||
HandleNewPeerList(Vec<PeerListEntryBase>, NetZone),
|
||||
SetPeerSeen(NetworkAddress, i64),
|
||||
BanPeer(NetworkAddress, chrono::NaiveDateTime),
|
||||
AddPeerToAnchor(NetworkAddress),
|
||||
RemovePeerFromAnchor(NetworkAddress),
|
||||
UpdatePeerInfo(PeerListEntryBase),
|
||||
|
||||
GetRandomGrayPeer(NetZone),
|
||||
GetRandomWhitePeer(NetZone),
|
||||
}
|
||||
|
||||
impl std::fmt::Display for AddressBookRequest {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::HandleNewPeerList(_, _) => f.write_str("HandleNewPeerList"),
|
||||
Self::SetPeerSeen(_, _) => f.write_str("SetPeerSeen"),
|
||||
Self::BanPeer(_, _) => f.write_str("BanPeer"),
|
||||
Self::AddPeerToAnchor(_) => f.write_str("AddPeerToAnchor"),
|
||||
Self::RemovePeerFromAnchor(_) => f.write_str("RemovePeerFromAnchor"),
|
||||
Self::UpdatePeerInfo(_) => f.write_str("UpdatePeerInfo"),
|
||||
Self::GetRandomGrayPeer(_) => f.write_str("GetRandomGrayPeer"),
|
||||
Self::GetRandomWhitePeer(_) => f.write_str("GetRandomWhitePeer"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AddressBookRequest {
|
||||
pub fn get_zone(&self) -> NetZone {
|
||||
match self {
|
||||
Self::HandleNewPeerList(_, zone) => *zone,
|
||||
Self::SetPeerSeen(peer, _) => peer.get_zone(),
|
||||
Self::BanPeer(peer, _) => peer.get_zone(),
|
||||
Self::AddPeerToAnchor(peer) => peer.get_zone(),
|
||||
Self::RemovePeerFromAnchor(peer) => peer.get_zone(),
|
||||
Self::UpdatePeerInfo(peer) => peer.adr.get_zone(),
|
||||
|
||||
Self::GetRandomGrayPeer(zone) => *zone,
|
||||
Self::GetRandomWhitePeer(zone) => *zone,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum AddressBookResponse {
|
||||
Ok,
|
||||
Peer(PeerListEntryBase),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct AddressBookConfig {
|
||||
max_white_peers: usize,
|
||||
max_gray_peers: usize,
|
||||
}
|
||||
|
||||
impl Default for AddressBookConfig {
|
||||
fn default() -> Self {
|
||||
AddressBookConfig {
|
||||
max_white_peers: MAX_WHITE_LIST_PEERS,
|
||||
max_gray_peers: MAX_GRAY_LIST_PEERS,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait AddressBookStore: Clone {
|
||||
type Error: Into<AddressBookError>;
|
||||
/// Loads the peers from the peer store.
|
||||
/// returns (in order):
|
||||
/// the white list,
|
||||
/// the gray list,
|
||||
/// the anchor list,
|
||||
/// the ban list
|
||||
async fn load_peers(
|
||||
&mut self,
|
||||
zone: NetZone,
|
||||
) -> Result<
|
||||
(
|
||||
Vec<PeerListEntryBase>, // white list
|
||||
Vec<PeerListEntryBase>, // gray list
|
||||
Vec<NetworkAddress>, // anchor list
|
||||
Vec<(NetworkAddress, chrono::NaiveDateTime)>, // ban list
|
||||
),
|
||||
Self::Error,
|
||||
>;
|
||||
|
||||
async fn save_peers(
|
||||
&mut self,
|
||||
zone: NetZone,
|
||||
white: Vec<PeerListEntryBase>,
|
||||
gray: Vec<PeerListEntryBase>,
|
||||
anchor: Vec<NetworkAddress>,
|
||||
bans: Vec<(NetworkAddress, chrono::NaiveDateTime)>, // ban lists
|
||||
) -> Result<(), Self::Error>;
|
||||
}
|
121
p2p/src/address_book/addr_book_client.rs
Normal file
121
p2p/src/address_book/addr_book_client.rs
Normal file
|
@ -0,0 +1,121 @@
|
|||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
|
||||
use futures::channel::{mpsc, oneshot};
|
||||
use futures::FutureExt;
|
||||
use tokio::task::spawn;
|
||||
use tower::steer::Steer;
|
||||
|
||||
use monero_wire::network_address::NetZone;
|
||||
|
||||
use super::address_book::{AddressBook, AddressBookClientRequest};
|
||||
use super::{
|
||||
AddressBookConfig, AddressBookError, AddressBookRequest, AddressBookResponse, AddressBookStore,
|
||||
};
|
||||
|
||||
pub async fn start_address_book<S>(
|
||||
peer_store: S,
|
||||
config: AddressBookConfig,
|
||||
) -> Result<
|
||||
impl tower::Service<
|
||||
AddressBookRequest,
|
||||
Response = AddressBookResponse,
|
||||
Error = AddressBookError,
|
||||
Future = Pin<
|
||||
Box<
|
||||
dyn Future<Output = Result<AddressBookResponse, AddressBookError>>
|
||||
+ Send
|
||||
+ 'static,
|
||||
>,
|
||||
>,
|
||||
> + Clone,
|
||||
AddressBookError,
|
||||
>
|
||||
where
|
||||
S: AddressBookStore,
|
||||
{
|
||||
let mut builder = AddressBookBuilder::new(peer_store, config);
|
||||
|
||||
let public = builder.build(NetZone::Public).await?;
|
||||
let tor = builder.build(NetZone::Tor).await?;
|
||||
let i2p = builder.build(NetZone::I2p).await?;
|
||||
|
||||
let books = vec![public, tor, i2p];
|
||||
|
||||
Ok(Steer::new(
|
||||
books,
|
||||
|req: &AddressBookRequest, _: &[_]| match req.get_zone() {
|
||||
NetZone::Public => 0,
|
||||
NetZone::Tor => 1,
|
||||
NetZone::I2p => 2,
|
||||
},
|
||||
))
|
||||
}
|
||||
|
||||
pub struct AddressBookBuilder<S> {
|
||||
peer_store: S,
|
||||
config: AddressBookConfig,
|
||||
}
|
||||
|
||||
impl<S> AddressBookBuilder<S>
|
||||
where
|
||||
S: AddressBookStore,
|
||||
{
|
||||
fn new(peer_store: S, config: AddressBookConfig) -> Self {
|
||||
AddressBookBuilder { peer_store, config }
|
||||
}
|
||||
|
||||
async fn build(&mut self, zone: NetZone) -> Result<AddressBookClient, AddressBookError> {
|
||||
let (white, gray, anchor, bans) =
|
||||
self.peer_store.load_peers(zone).await.map_err(Into::into)?;
|
||||
|
||||
let book = AddressBook::new(self.config.clone(), zone, white, gray, anchor, bans);
|
||||
|
||||
let (tx, rx) = mpsc::channel(5);
|
||||
|
||||
spawn(book.run(rx));
|
||||
|
||||
Ok(AddressBookClient { book: tx })
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct AddressBookClient {
|
||||
book: mpsc::Sender<AddressBookClientRequest>,
|
||||
}
|
||||
|
||||
impl tower::Service<AddressBookRequest> for AddressBookClient {
|
||||
type Error = AddressBookError;
|
||||
type Response = AddressBookResponse;
|
||||
type Future =
|
||||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||
|
||||
fn poll_ready(
|
||||
&mut self,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<Result<(), Self::Error>> {
|
||||
self.book
|
||||
.poll_ready(cx)
|
||||
.map_err(|_| AddressBookError::AddressBooksChannelClosed)
|
||||
}
|
||||
|
||||
fn call(&mut self, req: AddressBookRequest) -> Self::Future {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
// get the callers span
|
||||
let span = tracing::span::Span::current();
|
||||
|
||||
let req = AddressBookClientRequest { req, tx, span };
|
||||
|
||||
match self.book.try_send(req) {
|
||||
Err(_e) => {
|
||||
// I'm assuming all callers will call `poll_ready` first (which they are supposed to)
|
||||
futures::future::ready(Err(AddressBookError::AddressBooksChannelClosed)).boxed()
|
||||
}
|
||||
Ok(()) => async move {
|
||||
rx.await
|
||||
.expect("Address Book will not drop requests until completed")
|
||||
}
|
||||
.boxed(),
|
||||
}
|
||||
}
|
||||
}
|
274
p2p/src/address_book/address_book.rs
Normal file
274
p2p/src/address_book/address_book.rs
Normal file
|
@ -0,0 +1,274 @@
|
|||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
use futures::{
|
||||
channel::{mpsc, oneshot},
|
||||
StreamExt,
|
||||
};
|
||||
use rand::{Rng, SeedableRng};
|
||||
use std::time::Duration;
|
||||
|
||||
use cuprate_common::PruningSeed;
|
||||
use monero_wire::{messages::PeerListEntryBase, network_address::NetZone, NetworkAddress};
|
||||
|
||||
use super::{AddressBookConfig, AddressBookError, AddressBookRequest, AddressBookResponse};
|
||||
|
||||
mod peer_list;
|
||||
use peer_list::PeerList;
|
||||
|
||||
pub(crate) struct AddressBookClientRequest {
|
||||
pub req: AddressBookRequest,
|
||||
pub tx: oneshot::Sender<Result<AddressBookResponse, AddressBookError>>,
|
||||
|
||||
pub span: tracing::Span,
|
||||
}
|
||||
|
||||
pub struct AddressBook {
|
||||
zone: NetZone,
|
||||
config: AddressBookConfig,
|
||||
white_list: PeerList,
|
||||
gray_list: PeerList,
|
||||
anchor_list: HashSet<NetworkAddress>,
|
||||
|
||||
baned_peers: HashMap<NetworkAddress, chrono::NaiveDateTime>,
|
||||
|
||||
rng: rand::rngs::StdRng,
|
||||
//banned_subnets:,
|
||||
}
|
||||
|
||||
impl AddressBook {
|
||||
pub fn new(
|
||||
config: AddressBookConfig,
|
||||
zone: NetZone,
|
||||
white_peers: Vec<PeerListEntryBase>,
|
||||
gray_peers: Vec<PeerListEntryBase>,
|
||||
anchor_peers: Vec<NetworkAddress>,
|
||||
baned_peers: Vec<(NetworkAddress, chrono::NaiveDateTime)>,
|
||||
) -> AddressBook {
|
||||
let rng = rand::prelude::StdRng::from_entropy();
|
||||
let white_list = PeerList::new(white_peers);
|
||||
let gray_list = PeerList::new(gray_peers);
|
||||
let anchor_list = HashSet::from_iter(anchor_peers);
|
||||
let baned_peers = HashMap::from_iter(baned_peers);
|
||||
|
||||
let mut book = AddressBook {
|
||||
zone,
|
||||
config,
|
||||
white_list,
|
||||
gray_list,
|
||||
anchor_list,
|
||||
baned_peers,
|
||||
rng,
|
||||
};
|
||||
|
||||
book.check_unban_peers();
|
||||
|
||||
book
|
||||
}
|
||||
|
||||
pub const fn book_name(&self) -> &'static str {
|
||||
match self.zone {
|
||||
NetZone::Public => "PublicAddressBook",
|
||||
NetZone::Tor => "TorAddressBook",
|
||||
NetZone::I2p => "I2pAddressBook",
|
||||
}
|
||||
}
|
||||
|
||||
fn len_white_list(&self) -> usize {
|
||||
self.white_list.len()
|
||||
}
|
||||
|
||||
fn len_gray_list(&self) -> usize {
|
||||
self.gray_list.len()
|
||||
}
|
||||
|
||||
fn max_white_peers(&self) -> usize {
|
||||
self.config.max_white_peers
|
||||
}
|
||||
|
||||
fn max_gray_peers(&self) -> usize {
|
||||
self.config.max_gray_peers
|
||||
}
|
||||
|
||||
fn is_peer_banned(&self, peer: &NetworkAddress) -> bool {
|
||||
self.baned_peers.contains_key(peer)
|
||||
}
|
||||
|
||||
fn check_unban_peers(&mut self) {
|
||||
let mut now = chrono::Utc::now().naive_utc();
|
||||
self.baned_peers.retain(|_, time| time > &mut now)
|
||||
}
|
||||
|
||||
fn ban_peer(&mut self, peer: NetworkAddress, till: chrono::NaiveDateTime) {
|
||||
let now = chrono::Utc::now().naive_utc();
|
||||
if now > till {
|
||||
return;
|
||||
}
|
||||
|
||||
tracing::debug!("Banning peer: {peer:?} until: {till}");
|
||||
|
||||
self.baned_peers.insert(peer, till);
|
||||
}
|
||||
|
||||
fn add_peer_to_anchor(&mut self, peer: NetworkAddress) -> Result<(), AddressBookError> {
|
||||
tracing::debug!("Adding peer: {peer:?} to anchor list");
|
||||
// is peer in gray list
|
||||
if let Some(peer_eb) = self.gray_list.remove_peer(&peer) {
|
||||
self.white_list.add_new_peer(peer_eb);
|
||||
self.anchor_list.insert(peer);
|
||||
Ok(())
|
||||
} else {
|
||||
if !self.white_list.contains_peer(&peer) {
|
||||
return Err(AddressBookError::PeerNotFound);
|
||||
}
|
||||
self.anchor_list.insert(peer);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn remove_peer_from_anchor(&mut self, peer: NetworkAddress) {
|
||||
let _ = self.anchor_list.remove(&peer);
|
||||
}
|
||||
|
||||
fn set_peer_seen(
|
||||
&mut self,
|
||||
peer: NetworkAddress,
|
||||
last_seen: i64,
|
||||
) -> Result<(), AddressBookError> {
|
||||
if let Some(mut peer) = self.gray_list.remove_peer(&peer) {
|
||||
peer.last_seen = last_seen;
|
||||
self.white_list.add_new_peer(peer);
|
||||
} else {
|
||||
let peer = self
|
||||
.white_list
|
||||
.get_peer_mut(&peer)
|
||||
.ok_or(AddressBookError::PeerNotFound)?;
|
||||
peer.last_seen = last_seen;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn add_peer_to_gray_list(&mut self, mut peer: PeerListEntryBase) {
|
||||
if self.white_list.contains_peer(&peer.adr) {
|
||||
return;
|
||||
};
|
||||
if !self.gray_list.contains_peer(&peer.adr) {
|
||||
peer.last_seen = 0;
|
||||
self.gray_list.add_new_peer(peer);
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_new_peerlist(
|
||||
&mut self,
|
||||
mut peers: Vec<PeerListEntryBase>,
|
||||
) -> Result<(), AddressBookError> {
|
||||
let length = peers.len();
|
||||
|
||||
tracing::debug!("Received new peer list, length: {length}");
|
||||
|
||||
let mut err = None;
|
||||
peers.retain(|peer| {
|
||||
if err.is_some() {
|
||||
false
|
||||
} else if peer.adr.is_local() || peer.adr.is_loopback() {
|
||||
false
|
||||
} else if peer.adr.port() == peer.rpc_port {
|
||||
false
|
||||
} else if PruningSeed::try_from(peer.pruning_seed).is_err() {
|
||||
false
|
||||
} else if peer.adr.get_zone() != self.zone {
|
||||
tracing::info!("Received an address from a different network zone, ignoring list.");
|
||||
err = Some(AddressBookError::PeerSentAnAddressOutOfZone);
|
||||
false
|
||||
} else if self.is_peer_banned(&peer.adr) {
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
});
|
||||
|
||||
if let Some(e) = err {
|
||||
return Err(e);
|
||||
} else {
|
||||
for peer in peers {
|
||||
self.add_peer_to_gray_list(peer);
|
||||
}
|
||||
self.gray_list
|
||||
.reduce_list(&HashSet::new(), self.max_gray_peers());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn get_random_gray_peer(&mut self) -> Option<PeerListEntryBase> {
|
||||
self.gray_list.get_random_peer(&mut self.rng).map(|p| *p)
|
||||
}
|
||||
|
||||
fn get_random_white_peer(&mut self) -> Option<PeerListEntryBase> {
|
||||
self.white_list.get_random_peer(&mut self.rng).map(|p| *p)
|
||||
}
|
||||
|
||||
fn update_peer_info(&mut self, peer: PeerListEntryBase) -> Result<(), AddressBookError> {
|
||||
if let Some(peer_stored) = self.gray_list.get_peer_mut(&peer.adr) {
|
||||
*peer_stored = peer;
|
||||
Ok(())
|
||||
} else if let Some(peer_stored) = self.white_list.get_peer_mut(&peer.adr) {
|
||||
*peer_stored = peer;
|
||||
Ok(())
|
||||
} else {
|
||||
return Err(AddressBookError::PeerNotFound);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn run(mut self, mut rx: mpsc::Receiver<AddressBookClientRequest>) {
|
||||
loop {
|
||||
let Some(req) = rx.next().await else {
|
||||
// the client has been dropped the node has *possibly* shut down
|
||||
return;
|
||||
};
|
||||
|
||||
self.check_unban_peers();
|
||||
|
||||
let span = tracing::debug_span!(parent: &req.span, "AddressBook");
|
||||
let _guard = span.enter();
|
||||
|
||||
tracing::debug!("{} received request: {}", self.book_name(), req.req);
|
||||
|
||||
let res = match req.req {
|
||||
AddressBookRequest::HandleNewPeerList(new_peers, _) => self
|
||||
.handle_new_peerlist(new_peers)
|
||||
.map(|_| AddressBookResponse::Ok),
|
||||
AddressBookRequest::SetPeerSeen(peer, last_seen) => self
|
||||
.set_peer_seen(peer, last_seen)
|
||||
.map(|_| AddressBookResponse::Ok),
|
||||
AddressBookRequest::BanPeer(peer, till) => {
|
||||
self.ban_peer(peer, till);
|
||||
Ok(AddressBookResponse::Ok)
|
||||
}
|
||||
AddressBookRequest::AddPeerToAnchor(peer) => self
|
||||
.add_peer_to_anchor(peer)
|
||||
.map(|_| AddressBookResponse::Ok),
|
||||
AddressBookRequest::RemovePeerFromAnchor(peer) => {
|
||||
self.remove_peer_from_anchor(peer);
|
||||
Ok(AddressBookResponse::Ok)
|
||||
}
|
||||
AddressBookRequest::UpdatePeerInfo(peer) => {
|
||||
self.update_peer_info(peer).map(|_| AddressBookResponse::Ok)
|
||||
}
|
||||
|
||||
AddressBookRequest::GetRandomGrayPeer(_) => match self.get_random_gray_peer() {
|
||||
Some(peer) => Ok(AddressBookResponse::Peer(peer)),
|
||||
None => Err(AddressBookError::PeerListEmpty),
|
||||
},
|
||||
AddressBookRequest::GetRandomWhitePeer(_) => match self.get_random_white_peer() {
|
||||
Some(peer) => Ok(AddressBookResponse::Peer(peer)),
|
||||
None => Err(AddressBookError::PeerListEmpty),
|
||||
},
|
||||
};
|
||||
|
||||
if let Err(e) = &res {
|
||||
tracing::debug!("Error when handling request, err: {e}")
|
||||
}
|
||||
|
||||
let _ = req.tx.send(res);
|
||||
}
|
||||
}
|
||||
}
|
282
p2p/src/address_book/address_book/peer_list.rs
Normal file
282
p2p/src/address_book/address_book/peer_list.rs
Normal file
|
@ -0,0 +1,282 @@
|
|||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
use monero_wire::{messages::PeerListEntryBase, NetworkAddress};
|
||||
use rand::Rng;
|
||||
|
||||
pub struct PeerList {
|
||||
peers: HashMap<NetworkAddress, PeerListEntryBase>,
|
||||
pruning_idxs: HashMap<u32, Vec<NetworkAddress>>,
|
||||
}
|
||||
|
||||
impl PeerList {
|
||||
pub fn new(list: Vec<PeerListEntryBase>) -> PeerList {
|
||||
let mut peers = HashMap::with_capacity(list.len());
|
||||
let mut pruning_idxs = HashMap::with_capacity(8);
|
||||
|
||||
for peer in list {
|
||||
peers.insert(peer.adr, peer);
|
||||
|
||||
pruning_idxs
|
||||
.entry(peer.pruning_seed)
|
||||
.or_insert_with(Vec::new)
|
||||
.push(peer.adr);
|
||||
}
|
||||
PeerList {
|
||||
peers,
|
||||
pruning_idxs,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.peers.len()
|
||||
}
|
||||
|
||||
pub fn add_new_peer(&mut self, peer: PeerListEntryBase) {
|
||||
if self.peers.insert(peer.adr, peer.clone()).is_none() {
|
||||
self.pruning_idxs
|
||||
.entry(peer.pruning_seed)
|
||||
.or_insert_with(Vec::new)
|
||||
.push(peer.adr);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_peer(&self, peer: &NetworkAddress) -> Option<&PeerListEntryBase> {
|
||||
self.peers.get(peer)
|
||||
}
|
||||
|
||||
pub fn get_peer_by_idx(&self, n: usize) -> Option<&PeerListEntryBase> {
|
||||
self.peers.iter().nth(n).map(|(_, ret)| ret)
|
||||
}
|
||||
|
||||
pub fn get_random_peer<R: Rng>(&self, r: &mut R) -> Option<&PeerListEntryBase> {
|
||||
let len = self.len();
|
||||
if len == 0 {
|
||||
None
|
||||
} else {
|
||||
let n = r.gen_range(0..len);
|
||||
|
||||
self.get_peer_by_idx(n)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_peer_mut(&mut self, peer: &NetworkAddress) -> Option<&mut PeerListEntryBase> {
|
||||
self.peers.get_mut(peer)
|
||||
}
|
||||
|
||||
pub fn contains_peer(&self, peer: &NetworkAddress) -> bool {
|
||||
self.peers.contains_key(peer)
|
||||
}
|
||||
|
||||
pub fn get_peers_by_pruning_seed(
|
||||
&self,
|
||||
seed: &u32,
|
||||
) -> Option<impl Iterator<Item = &PeerListEntryBase>> {
|
||||
let addrs = self.pruning_idxs.get(seed)?;
|
||||
Some(addrs.iter().filter_map(move |addr| self.peers.get(addr)))
|
||||
}
|
||||
|
||||
fn remove_peer_pruning_idx(&mut self, peer: &PeerListEntryBase) {
|
||||
if let Some(peer_list) = self.pruning_idxs.get_mut(&peer.pruning_seed) {
|
||||
if let Some(idx) = peer_list.iter().position(|peer_adr| peer_adr == &peer.adr) {
|
||||
peer_list.remove(idx);
|
||||
} else {
|
||||
unreachable!("This function will only be called when the peer exists.");
|
||||
}
|
||||
} else {
|
||||
unreachable!("Pruning seed must exist if a peer has that seed.");
|
||||
}
|
||||
}
|
||||
|
||||
pub fn remove_peer(&mut self, peer: &NetworkAddress) -> Option<PeerListEntryBase> {
|
||||
let peer_eb = self.peers.remove(peer)?;
|
||||
self.remove_peer_pruning_idx(&peer_eb);
|
||||
Some(peer_eb)
|
||||
}
|
||||
|
||||
pub fn reduce_list(&mut self, must_keep_peers: &HashSet<NetworkAddress>, new_len: usize) {
|
||||
if new_len >= self.len() {
|
||||
return;
|
||||
}
|
||||
|
||||
let target_removed = self.len() - new_len;
|
||||
let mut removed_count = 0;
|
||||
let mut peers_to_remove: Vec<NetworkAddress> = Vec::with_capacity(target_removed);
|
||||
|
||||
for (peer_adr, _) in &self.peers {
|
||||
if removed_count >= target_removed {
|
||||
break;
|
||||
}
|
||||
if !must_keep_peers.contains(peer_adr) {
|
||||
peers_to_remove.push(*peer_adr);
|
||||
removed_count += 1;
|
||||
}
|
||||
}
|
||||
|
||||
for peer_adr in peers_to_remove {
|
||||
let _ = self.remove_peer(&peer_adr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use std::{collections::HashSet, vec};

    use monero_wire::{messages::PeerListEntryBase, NetworkAddress};
    use rand::Rng;

    use super::PeerList;

    /// Builds a peer list of `numb_o_peers` default peers, giving each a
    /// unique IPv4 address so they occupy distinct map entries.
    fn make_fake_peer_list(numb_o_peers: usize) -> PeerList {
        let mut peer_list = vec![PeerListEntryBase::default(); numb_o_peers];
        for (idx, peer) in peer_list.iter_mut().enumerate() {
            let NetworkAddress::IPv4(ip) = &mut peer.adr else {panic!("this test requires default to be ipv4")};
            // Offset the IP by the index to make every address unique.
            ip.m_ip += idx as u32;
        }

        PeerList::new(peer_list)
    }

    /// Like `make_fake_peer_list` but also gives each peer a random pruning
    /// seed: 0 roughly 40% of the time, otherwise a seed in 384..=391.
    fn make_fake_peer_list_with_random_pruning_seeds(numb_o_peers: usize) -> PeerList {
        let mut r = rand::thread_rng();

        let mut peer_list = vec![PeerListEntryBase::default(); numb_o_peers];
        for (idx, peer) in peer_list.iter_mut().enumerate() {
            let NetworkAddress::IPv4(ip) = &mut peer.adr else {panic!("this test requires default to be ipv4")};
            ip.m_ip += idx as u32;

            peer.pruning_seed = if r.gen_bool(0.4) {
                0
            } else {
                r.gen_range(384..=391)
            };
        }

        PeerList::new(peer_list)
    }

    /// `reduce_list` shrinks the list to exactly the target length when no
    /// peers are pinned.
    #[test]
    fn peer_list_reduce_length() {
        let mut peer_list = make_fake_peer_list(2090);
        let must_keep_peers = HashSet::new();

        let target_len = 2000;

        peer_list.reduce_list(&must_keep_peers, target_len);

        assert_eq!(peer_list.len(), target_len);
    }

    /// `reduce_list` never removes peers listed in `must_keep_peers`, even if
    /// that means the target length cannot be reached.
    #[test]
    fn peer_list_reduce_length_with_peers_we_need() {
        let mut peer_list = make_fake_peer_list(500);
        let must_keep_peers = HashSet::from_iter(peer_list.peers.iter().map(|(adr, _)| *adr));

        let target_len = 49;

        peer_list.reduce_list(&must_keep_peers, target_len);

        // we can't remove any of the peers we said we need them all
        assert_eq!(peer_list.len(), 500);
    }

    /// Peers returned by `get_peers_by_pruning_seed` all carry the requested
    /// seed, and the lookup does not mutate the list.
    #[test]
    fn peer_list_get_peers_by_pruning_seed() {
        let mut r = rand::thread_rng();

        let peer_list = make_fake_peer_list_with_random_pruning_seeds(1000);
        // Pick a seed with the same distribution the list was built with.
        let seed = if r.gen_bool(0.4) {
            0
        } else {
            r.gen_range(384..=391)
        };

        let peers_with_seed = peer_list
            .get_peers_by_pruning_seed(&seed)
            .expect("If you hit this buy a lottery ticket");

        for peer in peers_with_seed {
            assert_eq!(peer.pruning_seed, seed);
        }

        assert_eq!(peer_list.len(), 1000);
    }

    /// Removing a peer deletes it from both the peer map and every
    /// pruning-seed bucket.
    #[test]
    fn peer_list_remove_specific_peer() {
        let mut peer_list = make_fake_peer_list_with_random_pruning_seeds(100);

        // generate peer at a random point in the list
        let mut peer = NetworkAddress::default();
        let NetworkAddress::IPv4(ip) = &mut peer else {panic!("this test requires default to be ipv4")};
        ip.m_ip += 50;

        assert!(peer_list.remove_peer(&peer).is_some());

        let pruning_idxs = peer_list.pruning_idxs;
        let peers = peer_list.peers;

        for (_, addrs) in pruning_idxs {
            addrs.iter().for_each(|adr| assert!(adr != &peer))
        }

        assert!(!peers.contains_key(&peer));
    }

    /// Every address in a pruning bucket must belong to a peer with that
    /// seed, and the buckets together must cover every peer exactly once.
    #[test]
    fn peer_list_pruning_idxs_are_correct() {
        let peer_list = make_fake_peer_list_with_random_pruning_seeds(100);
        let mut total_len = 0;

        for (seed, list) in peer_list.pruning_idxs {
            for peer in list.iter() {
                assert_eq!(peer_list.peers.get(peer).unwrap().pruning_seed, seed);
                total_len += 1;
            }
        }

        assert_eq!(total_len, peer_list.peers.len())
    }

    /// Adding a brand-new peer grows the list and indexes it under its seed.
    #[test]
    fn peer_list_add_new_peer() {
        let mut peer_list = make_fake_peer_list(10);
        let mut new_peer = PeerListEntryBase::default();
        let NetworkAddress::IPv4(ip) = &mut new_peer.adr else {panic!("this test requires default to be ipv4")};
        ip.m_ip += 50;

        peer_list.add_new_peer(new_peer.clone());

        assert_eq!(peer_list.len(), 11);
        assert_eq!(peer_list.get_peer(&new_peer.adr), Some(&new_peer));
        assert!(peer_list
            .pruning_idxs
            .get(&new_peer.pruning_seed)
            .unwrap()
            .contains(&new_peer.adr));
    }

    /// Re-adding an existing peer must not grow the list.
    #[test]
    fn peer_list_add_existing_peer() {
        let mut peer_list = make_fake_peer_list(10);
        let existing_peer = peer_list
            .get_peer(&NetworkAddress::default())
            .unwrap()
            .clone();

        peer_list.add_new_peer(existing_peer.clone());

        assert_eq!(peer_list.len(), 10);
        assert_eq!(peer_list.get_peer(&existing_peer.adr), Some(&existing_peer));
    }

    /// Looking up an address that was never added yields `None`.
    #[test]
    fn peer_list_get_non_existent_peer() {
        let peer_list = make_fake_peer_list(10);
        let mut non_existent_peer = NetworkAddress::default();
        let NetworkAddress::IPv4(ip) = &mut non_existent_peer else {panic!("this test requires default to be ipv4")};
        ip.m_ip += 50;

        assert_eq!(peer_list.get_peer(&non_existent_peer), None);
    }
}
|
3
p2p/src/lib.rs
Normal file
3
p2p/src/lib.rs
Normal file
|
@ -0,0 +1,3 @@
|
|||
/// Peer address book: tracking, storing and selecting known peers.
pub mod address_book;
/// Peer connection handling: client handle, connection task and handshakes.
pub mod peer;
/// Internal message types and protocol constants (crate-private).
mod protocol;
|
42
p2p/src/peer.rs
Normal file
42
p2p/src/peer.rs
Normal file
|
@ -0,0 +1,42 @@
|
|||
pub mod client;
|
||||
pub mod connection;
|
||||
pub mod handshaker;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
use monero_wire::levin::BucketError;
|
||||
use thiserror::Error;
|
||||
|
||||
/// Errors returned by the internal peer-request service.
///
/// Currently empty; variants will be added as the service grows.
#[derive(Debug, Error, Clone, Copy)]
pub enum RequestServiceError {}
|
||||
|
||||
/// Errors that can occur on a peer connection or its `Client` handle.
#[derive(Debug, Error, Clone, Copy)]
pub enum PeerError {
    #[error("Peer is on a different network")]
    PeerIsOnAnotherNetwork,
    // NOTE(review): overlaps with `PeerSentUnexpectedResponse` below — consider merging.
    #[error("Peer sent an unexpected response")]
    PeerSentUnSolicitedResponse,
    #[error("Internal service did not respond when required")]
    InternalServiceDidNotRespond,
    #[error("Connection to peer has been terminated")]
    PeerConnectionClosed,
    #[error("The Client `internal` channel was closed")]
    ClientChannelClosed,
    #[error("The Peer sent an unexpected response")]
    PeerSentUnexpectedResponse,
    #[error("The peer sent a bad response: {0}")]
    ResponseError(&'static str),
    #[error("Internal service error: {0}")]
    InternalService(#[from] RequestServiceError),
    #[error("Internal peer sync channel closed")]
    InternalPeerSyncChannelClosed,
    #[error("Levin Error")]
    LevinError, // remove me, this is just temporary
}
|
||||
|
||||
impl From<BucketError> for PeerError {
    /// Collapses any levin bucket error into the temporary `LevinError` variant.
    fn from(_: BucketError) -> Self {
        // TODO: carry the underlying bucket error instead of discarding it.
        Self::LevinError
    }
}
|
70
p2p/src/peer/client.rs
Normal file
70
p2p/src/peer/client.rs
Normal file
|
@ -0,0 +1,70 @@
|
|||
use std::pin::Pin;
|
||||
use std::{future::Future, sync::Arc};
|
||||
|
||||
use crate::protocol::{InternalMessageRequest, InternalMessageResponse};
|
||||
use futures::{
|
||||
channel::{mpsc, oneshot},
|
||||
FutureExt,
|
||||
};
|
||||
use monero_wire::messages::PeerID;
|
||||
use monero_wire::{messages::common::PeerSupportFlags, NetworkAddress};
|
||||
|
||||
use super::{connection::ClientRequest, PeerError};
|
||||
|
||||
/// Static information about a connected peer, gathered during the handshake.
pub struct ConnectionInfo {
    /// The peer's network address.
    pub addr: NetworkAddress,
    /// Support flags the peer advertised.
    pub support_flags: PeerSupportFlags,
    /// Peer ID
    pub peer_id: PeerID,
    /// Port of the peer's RPC server — presumably 0 when none; confirm.
    pub rpc_port: u16,
    /// RPC credits charged per hash — assumed pay-for-RPC related; TODO confirm.
    pub rpc_credits_per_hash: u32,
}
|
||||
|
||||
/// A handle to a peer connection task.
///
/// Requests sent through this handle are forwarded over `server_tx` to the
/// connection task driving the peer's socket.
pub struct Client {
    /// Handshake-derived information about the peer.
    pub connection_info: Arc<ConnectionInfo>,
    /// Channel to the connection task servicing this peer.
    server_tx: mpsc::Sender<ClientRequest>,
}
|
||||
|
||||
impl Client {
|
||||
pub fn new(
|
||||
connection_info: Arc<ConnectionInfo>,
|
||||
server_tx: mpsc::Sender<ClientRequest>,
|
||||
) -> Self {
|
||||
Client {
|
||||
connection_info,
|
||||
server_tx,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl tower::Service<InternalMessageRequest> for Client {
|
||||
type Error = PeerError;
|
||||
type Response = InternalMessageResponse;
|
||||
type Future =
|
||||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||
|
||||
fn poll_ready(
|
||||
&mut self,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<Result<(), Self::Error>> {
|
||||
self.server_tx
|
||||
.poll_ready(cx)
|
||||
.map_err(|e| PeerError::ClientChannelClosed)
|
||||
}
|
||||
fn call(&mut self, req: InternalMessageRequest) -> Self::Future {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
match self.server_tx.try_send(ClientRequest { req, tx }) {
|
||||
Ok(()) => rx
|
||||
.map(|recv_result| {
|
||||
recv_result
|
||||
.expect("ClientRequest oneshot sender must not be dropped before send")
|
||||
})
|
||||
.boxed(),
|
||||
Err(_e) => {
|
||||
// TODO: better error handling
|
||||
futures::future::ready(Err(PeerError::ClientChannelClosed)).boxed()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
196
p2p/src/peer/connection.rs
Normal file
196
p2p/src/peer/connection.rs
Normal file
|
@ -0,0 +1,196 @@
|
|||
use std::collections::HashSet;
|
||||
|
||||
use futures::channel::{mpsc, oneshot};
|
||||
use futures::stream::Fuse;
|
||||
use futures::{AsyncRead, AsyncWrite, SinkExt, StreamExt};
|
||||
|
||||
use levin::{MessageSink, MessageStream};
|
||||
use monero_wire::messages::CoreSyncData;
|
||||
use monero_wire::{levin, Message, NetworkAddress};
|
||||
use tower::{Service, ServiceExt};
|
||||
|
||||
use crate::protocol::{
|
||||
InternalMessageRequest, InternalMessageResponse, BLOCKS_IDS_SYNCHRONIZING_MAX_COUNT,
|
||||
P2P_MAX_PEERS_IN_HANDSHAKE,
|
||||
};
|
||||
|
||||
use super::PeerError;
|
||||
|
||||
/// Updates sent to the peer sync-state tracker.
pub enum PeerSyncChange {
    /// The peer at this address sent new core sync data.
    CoreSyncData(NetworkAddress, CoreSyncData),
    /// The peer answered an objects request with these block hashes and a
    /// `u64` — presumably its claimed height; TODO confirm field meaning.
    ObjectsResponse(NetworkAddress, Vec<[u8; 32]>, u64),
    /// The connection to the peer at this address was closed.
    PeerDisconnected(NetworkAddress),
}
|
||||
|
||||
/// A request from a `Client` handle to its connection task.
pub struct ClientRequest {
    /// The message to send to the peer.
    pub req: InternalMessageRequest,
    /// Channel used to hand the peer's response (or an error) back to the client.
    pub tx: oneshot::Sender<Result<InternalMessageResponse, PeerError>>,
}
|
||||
|
||||
/// The request/response state of a connection task.
pub enum State {
    /// No request in flight; waiting on the peer or a local client request.
    WaitingForRequest,
    /// A request was sent to the peer; waiting for the matching response.
    WaitingForResponse {
        /// The request awaiting a response.
        request: InternalMessageRequest,
        /// Channel the response will be delivered on.
        tx: oneshot::Sender<Result<InternalMessageResponse, PeerError>>,
    },
}
|
||||
|
||||
impl State {
|
||||
pub fn expected_response_id(&self) -> Option<u32> {
|
||||
match self {
|
||||
Self::WaitingForRequest => None,
|
||||
Self::WaitingForResponse { request, tx: _ } => request.expected_id(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The task side of a peer connection: drives the socket, answers peer
/// requests via `svc` and forwards client requests out to the peer.
pub struct Connection<Svc, Aw, Ar> {
    /// The peer's network address.
    address: NetworkAddress,
    /// Current request/response state.
    state: State,
    /// Outgoing message sink to the peer.
    sink: MessageSink<Aw, Message>,
    /// Incoming message stream from the peer (fused so polling after end is safe).
    stream: Fuse<MessageStream<Ar, Message>>,
    /// Requests arriving from the `Client` handle.
    client_rx: mpsc::Receiver<ClientRequest>,
    /// Channel for reporting peer sync-state changes.
    sync_state_tx: mpsc::Sender<PeerSyncChange>,
    /// Service that answers requests initiated by the peer.
    svc: Svc,
}
|
||||
|
||||
impl<Svc, Aw, Ar> Connection<Svc, Aw, Ar>
|
||||
where
|
||||
Svc: Service<InternalMessageRequest, Response = InternalMessageResponse, Error = PeerError>,
|
||||
Aw: AsyncWrite + std::marker::Unpin,
|
||||
Ar: AsyncRead + std::marker::Unpin,
|
||||
{
|
||||
pub fn new(
|
||||
address: NetworkAddress,
|
||||
sink: MessageSink<Aw, Message>,
|
||||
stream: MessageStream<Ar, Message>,
|
||||
client_rx: mpsc::Receiver<ClientRequest>,
|
||||
sync_state_tx: mpsc::Sender<PeerSyncChange>,
|
||||
svc: Svc,
|
||||
) -> Connection<Svc, Aw, Ar> {
|
||||
Connection {
|
||||
address,
|
||||
state: State::WaitingForRequest,
|
||||
sink,
|
||||
stream: stream.fuse(),
|
||||
client_rx,
|
||||
sync_state_tx,
|
||||
svc,
|
||||
}
|
||||
}
|
||||
async fn handle_response(&mut self, res: InternalMessageResponse) -> Result<(), PeerError> {
|
||||
let state = std::mem::replace(&mut self.state, State::WaitingForRequest);
|
||||
if let State::WaitingForResponse { request, tx } = state {
|
||||
match (request, &res) {
|
||||
(InternalMessageRequest::Handshake(_), InternalMessageResponse::Handshake(_)) => {}
|
||||
(
|
||||
InternalMessageRequest::SupportFlags(_),
|
||||
InternalMessageResponse::SupportFlags(_),
|
||||
) => {}
|
||||
(InternalMessageRequest::TimedSync(_), InternalMessageResponse::TimedSync(res)) => {
|
||||
}
|
||||
(
|
||||
InternalMessageRequest::GetObjectsRequest(req),
|
||||
InternalMessageResponse::GetObjectsResponse(res),
|
||||
) => {}
|
||||
(
|
||||
InternalMessageRequest::ChainRequest(_),
|
||||
InternalMessageResponse::ChainResponse(res),
|
||||
) => {}
|
||||
(
|
||||
InternalMessageRequest::FluffyMissingTransactionsRequest(req),
|
||||
InternalMessageResponse::NewFluffyBlock(blk),
|
||||
) => {}
|
||||
(
|
||||
InternalMessageRequest::GetTxPoolCompliment(_),
|
||||
InternalMessageResponse::NewTransactions(_),
|
||||
) => {
|
||||
// we could check we received no transactions that we said we knew about but thats going to happen later anyway when they get added to our
|
||||
// mempool
|
||||
}
|
||||
_ => return Err(PeerError::ResponseError("Peer sent incorrect response")),
|
||||
}
|
||||
// response passed our tests we can send it to the requestor
|
||||
let _ = tx.send(Ok(res));
|
||||
Ok(())
|
||||
} else {
|
||||
unreachable!("This will only be called when in state WaitingForResponse");
|
||||
}
|
||||
}
|
||||
|
||||
async fn send_message_to_peer(&mut self, mes: impl Into<Message>) -> Result<(), PeerError> {
|
||||
Ok(self.sink.send(mes.into()).await?)
|
||||
}
|
||||
|
||||
async fn handle_peer_request(&mut self, req: InternalMessageRequest) -> Result<(), PeerError> {
|
||||
// we should check contents of peer requests for obvious errors like we do with responses
|
||||
let ready_svc = self.svc.ready().await?;
|
||||
let res = ready_svc.call(req).await?;
|
||||
self.send_message_to_peer(res).await
|
||||
}
|
||||
|
||||
async fn handle_client_request(&mut self, req: ClientRequest) -> Result<(), PeerError> {
|
||||
// check we need a response
|
||||
if let Some(_) = req.req.expected_id() {
|
||||
self.state = State::WaitingForResponse {
|
||||
request: req.req.clone(),
|
||||
tx: req.tx,
|
||||
};
|
||||
}
|
||||
self.send_message_to_peer(req.req).await
|
||||
}
|
||||
|
||||
async fn state_waiting_for_request(&mut self) -> Result<(), PeerError> {
|
||||
futures::select! {
|
||||
peer_message = self.stream.next() => {
|
||||
match peer_message.expect("MessageStream will never return None") {
|
||||
Ok(message) => {
|
||||
self.handle_peer_request(message.try_into().map_err(|_| PeerError::PeerSentUnexpectedResponse)?).await
|
||||
},
|
||||
Err(e) => Err(e.into()),
|
||||
}
|
||||
},
|
||||
client_req = self.client_rx.next() => {
|
||||
self.handle_client_request(client_req.ok_or(PeerError::ClientChannelClosed)?).await
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
async fn state_waiting_for_response(&mut self) -> Result<(), PeerError> {
|
||||
// put a timeout on this
|
||||
let peer_message = self
|
||||
.stream
|
||||
.next()
|
||||
.await
|
||||
.expect("MessageStream will never return None")?;
|
||||
|
||||
if !peer_message.is_request()
|
||||
&& self.state.expected_response_id() == Some(peer_message.id())
|
||||
{
|
||||
if let Ok(res) = peer_message.try_into() {
|
||||
Ok(self.handle_response(res).await?)
|
||||
} else {
|
||||
// im almost certain this is impossible to hit, but im not certain enough to use unreachable!()
|
||||
Err(PeerError::ResponseError("Peer sent incorrect response"))
|
||||
}
|
||||
} else {
|
||||
if let Ok(req) = peer_message.try_into() {
|
||||
self.handle_peer_request(req).await
|
||||
} else {
|
||||
// this can be hit if the peer sends a protocol response with the wrong id
|
||||
Err(PeerError::ResponseError("Peer sent incorrect response"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn run(mut self) {
|
||||
loop {
|
||||
let _res = match self.state {
|
||||
State::WaitingForRequest => self.state_waiting_for_request().await,
|
||||
State::WaitingForResponse { request: _, tx: _ } => {
|
||||
self.state_waiting_for_response().await
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
454
p2p/src/peer/handshaker.rs
Normal file
454
p2p/src/peer/handshaker.rs
Normal file
|
@ -0,0 +1,454 @@
|
|||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::FutureExt;
|
||||
use futures::{channel::mpsc, AsyncRead, AsyncWrite, SinkExt, StreamExt};
|
||||
use monero_wire::messages::admin::{SupportFlagsRequest, SupportFlagsResponse};
|
||||
use monero_wire::messages::MessageRequest;
|
||||
use thiserror::Error;
|
||||
use tokio::time;
|
||||
use tower::{Service, ServiceExt};
|
||||
|
||||
use crate::address_book::{AddressBookError, AddressBookRequest, AddressBookResponse};
|
||||
use crate::protocol::temp_database::{DataBaseRequest, DataBaseResponse, DatabaseError};
|
||||
use crate::protocol::{
|
||||
Direction, InternalMessageRequest, InternalMessageResponse, P2P_MAX_PEERS_IN_HANDSHAKE,
|
||||
};
|
||||
use cuprate_common::{HardForks, Network, PruningSeed};
|
||||
use monero_wire::{
|
||||
levin::{BucketError, MessageSink, MessageStream},
|
||||
messages::{
|
||||
admin::{HandshakeRequest, HandshakeResponse},
|
||||
common::PeerSupportFlags,
|
||||
BasicNodeData, CoreSyncData, MessageResponse, PeerID, PeerListEntryBase,
|
||||
},
|
||||
Message, NetworkAddress,
|
||||
};
|
||||
use tracing::Instrument;
|
||||
|
||||
use super::client::Client;
|
||||
use super::{
|
||||
client::ConnectionInfo,
|
||||
connection::{ClientRequest, Connection, PeerSyncChange},
|
||||
PeerError,
|
||||
};
|
||||
|
||||
/// Errors that can abort a peer handshake.
#[derive(Debug, Error)]
pub enum HandShakeError {
    #[error("The peer did not complete the handshake fast enough")]
    PeerTimedOut,
    #[error("The peer has a weird pruning scheme")]
    PeerClaimedWeirdPruning,
    #[error("The peer has an unexpected top version")]
    PeerHasUnexpectedTopVersion,
    #[error("The peer does not have the minimum support flags")]
    PeerDoesNotHaveTheMinimumSupportFlags,
    #[error("The peer is on a different network")]
    PeerIsOnADifferentNetwork,
    #[error("Address book err: {0}")]
    AddressBookError(#[from] AddressBookError),
    #[error("The peer sent too many peers, considered spamming")]
    PeerSentTooManyPeers,
    #[error("The peer sent a wrong response to our handshake")]
    PeerSentWrongResponse,
    #[error("The syncer returned an error")]
    DataBaseError(#[from] DatabaseError),
    #[error("Bucket error while communicating with peer: {0}")]
    BucketError(#[from] BucketError),
}
|
||||
|
||||
/// Configuration for this node's P2P behaviour.
pub struct NetworkConfig {
    /// Port
    my_port: u32,
    /// The Network
    network: Network,
    /// Peer ID
    peer_id: PeerID,
    /// RPC Port
    rpc_port: u16,
    /// RPC Credits Per Hash
    rpc_credits_per_hash: u32,
    /// Support flags we advertise to peers.
    our_support_flags: PeerSupportFlags,
    /// Minimum support flags a peer must advertise during the handshake.
    minimum_peer_support_flags: PeerSupportFlags,
    /// How long a handshake may take before it is aborted.
    handshake_timeout: time::Duration,
    /// Maximum number of inbound peers — presumably a connection cap; confirm.
    max_in_peers: u32,
    /// Target number of outbound peers — presumably a dial target; confirm.
    target_out_peers: u32,
}
|
||||
|
||||
impl Default for NetworkConfig {
|
||||
fn default() -> Self {
|
||||
NetworkConfig {
|
||||
my_port: 18080,
|
||||
network: Network::MainNet,
|
||||
peer_id: PeerID(21),
|
||||
rpc_port: 0,
|
||||
rpc_credits_per_hash: 0,
|
||||
our_support_flags: PeerSupportFlags::get_support_flag_fluffy_blocks(),
|
||||
minimum_peer_support_flags: PeerSupportFlags::from(0_u32),
|
||||
handshake_timeout: time::Duration::from_secs(5),
|
||||
max_in_peers: 13,
|
||||
target_out_peers: 21,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl NetworkConfig {
|
||||
pub fn basic_node_data(&self) -> BasicNodeData {
|
||||
BasicNodeData {
|
||||
my_port: self.my_port,
|
||||
network_id: self.network.network_id(),
|
||||
peer_id: self.peer_id,
|
||||
support_flags: self.our_support_flags,
|
||||
rpc_port: self.rpc_port,
|
||||
rpc_credits_per_hash: self.rpc_credits_per_hash,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Everything needed to perform a handshake with one peer.
pub struct Handshake<W, R> {
    /// Sink for messages to the peer.
    sink: MessageSink<W, Message>,
    /// Stream of messages from the peer.
    stream: MessageStream<R, Message>,
    /// Whether we or the peer opened the connection.
    direction: Direction,
    /// The peer's network address.
    addr: NetworkAddress,
}
|
||||
|
||||
/// A `tower::Service` that turns raw peer streams into `Client` handles by
/// performing the P2P handshake.
pub struct Handshaker<Bc, Svc, AdrBook> {
    /// Our network configuration.
    config: NetworkConfig,
    /// Span that per-connection spans are parented to.
    parent_span: tracing::Span,
    /// Service managing known peers.
    address_book: AdrBook,
    /// Service answering blockchain/database queries.
    blockchain: Bc,
    /// Channel for reporting peer sync-state changes.
    peer_sync_states: mpsc::Sender<PeerSyncChange>,
    /// Service that answers requests initiated by peers.
    peer_request_service: Svc,
}
|
||||
|
||||
impl<Bc, Svc, AdrBook, W, R> tower::Service<Handshake<W, R>> for Handshaker<Bc, Svc, AdrBook>
|
||||
where
|
||||
Bc: Service<DataBaseRequest, Response = DataBaseResponse, Error = DatabaseError>
|
||||
+ Clone
|
||||
+ Send
|
||||
+ 'static,
|
||||
Bc::Future: Send,
|
||||
|
||||
Svc: Service<InternalMessageRequest, Response = InternalMessageResponse, Error = PeerError>
|
||||
+ Clone
|
||||
+ Send
|
||||
+ 'static,
|
||||
Svc::Future: Send,
|
||||
|
||||
AdrBook: Service<AddressBookRequest, Response = AddressBookResponse, Error = AddressBookError>
|
||||
+ Clone
|
||||
+ Send
|
||||
+ 'static,
|
||||
AdrBook::Future: Send,
|
||||
|
||||
W: AsyncWrite + std::marker::Unpin + Send + 'static,
|
||||
R: AsyncRead + std::marker::Unpin + Send + 'static,
|
||||
{
|
||||
type Error = HandShakeError;
|
||||
type Response = Client;
|
||||
type Future =
|
||||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||
|
||||
fn poll_ready(
|
||||
&mut self,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<Result<(), Self::Error>> {
|
||||
std::task::Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Handshake<W, R>) -> Self::Future {
|
||||
let Handshake {
|
||||
sink: mut peer_sink,
|
||||
stream: mut peer_stream,
|
||||
direction,
|
||||
addr,
|
||||
} = req;
|
||||
|
||||
let span = tracing::debug_span!("Handshaker");
|
||||
|
||||
let connection_span = tracing::debug_span!(parent: &self.parent_span, "Connection");
|
||||
|
||||
let blockchain = self.blockchain.clone();
|
||||
let address_book = self.address_book.clone();
|
||||
let syncer_tx = self.peer_sync_states.clone();
|
||||
let peer_request_service = self.peer_request_service.clone();
|
||||
|
||||
let state_machine = HandshakeSM {
|
||||
peer_sink,
|
||||
peer_stream,
|
||||
direction,
|
||||
addr,
|
||||
network: self.config.network,
|
||||
basic_node_data: self.config.basic_node_data(),
|
||||
minimum_support_flags: self.config.minimum_peer_support_flags,
|
||||
address_book,
|
||||
blockchain,
|
||||
peer_request_service,
|
||||
connection_span,
|
||||
state: HandshakeState::Start,
|
||||
};
|
||||
|
||||
let ret = time::timeout(self.config.handshake_timeout, state_machine.do_handshake());
|
||||
|
||||
async move {
|
||||
match ret.await {
|
||||
Ok(handshake) => handshake,
|
||||
Err(_) => Err(HandShakeError::PeerTimedOut),
|
||||
}
|
||||
}
|
||||
.boxed()
|
||||
}
|
||||
}
|
||||
|
||||
/// Progress of an in-flight handshake.
enum HandshakeState {
    /// Nothing sent yet.
    Start,
    /// Our handshake request was sent; waiting for the peer's response.
    WaitingForHandshakeResponse,
    /// The peer sent no support flags, so we asked for them; holds the
    /// peer's `BasicNodeData` received so far.
    WaitingForSupportFlagResponse(BasicNodeData),
    /// Handshake finished; holds the peer's final `BasicNodeData`.
    Complete(BasicNodeData),
}
|
||||
|
||||
impl HandshakeState {
|
||||
pub fn is_complete(&self) -> bool {
|
||||
matches!(self, HandshakeState::Complete(_))
|
||||
}
|
||||
|
||||
pub fn peer_basic_node_data(self) -> Option<BasicNodeData> {
|
||||
match self {
|
||||
HandshakeState::Complete(sup) => Some(sup),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// State machine that performs the handshake for a single peer.
struct HandshakeSM<Bc, Svc, AdrBook, W, R> {
    /// Sink for messages to the peer.
    peer_sink: MessageSink<W, Message>,
    /// Stream of messages from the peer.
    peer_stream: MessageStream<R, Message>,
    /// Whether we or the peer opened the connection.
    direction: Direction,
    /// The peer's network address.
    addr: NetworkAddress,
    /// The network we are on (used to check the peer's network id).
    network: Network,

    /// The node data we send in our handshake request.
    basic_node_data: BasicNodeData,
    /// Minimum support flags we require from the peer.
    minimum_support_flags: PeerSupportFlags,
    /// Service managing known peers.
    address_book: AdrBook,
    /// Service answering blockchain/database queries.
    blockchain: Bc,
    /// Service that answers requests initiated by the peer.
    peer_request_service: Svc,
    /// Span the eventual connection task will run under.
    connection_span: tracing::Span,

    /// Current progress of the handshake.
    state: HandshakeState,
}
|
||||
|
||||
impl<Bc, Svc, AdrBook, W, R> HandshakeSM<Bc, Svc, AdrBook, W, R>
|
||||
where
|
||||
Bc: Service<DataBaseRequest, Response = DataBaseResponse, Error = DatabaseError>
|
||||
+ Clone
|
||||
+ Send
|
||||
+ 'static,
|
||||
Bc::Future: Send,
|
||||
|
||||
Svc: Service<InternalMessageRequest, Response = InternalMessageResponse, Error = PeerError>
|
||||
+ Clone
|
||||
+ Send
|
||||
+ 'static,
|
||||
Svc::Future: Send,
|
||||
|
||||
AdrBook: Service<AddressBookRequest, Response = AddressBookResponse, Error = AddressBookError>
|
||||
+ Clone
|
||||
+ Send
|
||||
+ 'static,
|
||||
AdrBook::Future: Send,
|
||||
|
||||
W: AsyncWrite + std::marker::Unpin + Send + 'static,
|
||||
R: AsyncRead + std::marker::Unpin + Send + 'static,
|
||||
{
|
||||
async fn get_our_core_sync(&mut self) -> Result<CoreSyncData, DatabaseError> {
|
||||
let blockchain = self.blockchain.ready().await?;
|
||||
let DataBaseResponse::CoreSyncData(core_sync) = blockchain.call(DataBaseRequest::CoreSyncData).await? else {
|
||||
unreachable!("Database will always return the requested item")
|
||||
};
|
||||
Ok(core_sync)
|
||||
}
|
||||
|
||||
async fn send_handshake_req(
|
||||
&mut self,
|
||||
node_data: BasicNodeData,
|
||||
payload_data: CoreSyncData,
|
||||
) -> Result<(), HandShakeError> {
|
||||
let handshake_req = HandshakeRequest {
|
||||
node_data,
|
||||
payload_data,
|
||||
};
|
||||
|
||||
tracing::trace!("Sending handshake request: {handshake_req:?}");
|
||||
|
||||
let message: Message = Message::Request(handshake_req.into());
|
||||
self.peer_sink.send(message).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_handshake_res(&mut self) -> Result<HandshakeResponse, HandShakeError> {
|
||||
// put a timeout on this
|
||||
let Message::Response(MessageResponse::Handshake(handshake_res)) = self.peer_stream.next().await.expect("MessageSink will not return None")? else {
|
||||
return Err(HandShakeError::PeerSentWrongResponse);
|
||||
};
|
||||
|
||||
tracing::trace!("Received handshake response: {handshake_res:?}");
|
||||
|
||||
Ok(handshake_res)
|
||||
}
|
||||
|
||||
async fn send_support_flag_req(&mut self) -> Result<(), HandShakeError> {
|
||||
tracing::trace!("Peer sent no support flags, sending request");
|
||||
|
||||
let message: Message = Message::Request(SupportFlagsRequest.into());
|
||||
self.peer_sink.send(message).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn handle_handshake_response(
|
||||
&mut self,
|
||||
res: HandshakeResponse,
|
||||
) -> Result<(), HandShakeError> {
|
||||
let HandshakeResponse {
|
||||
node_data: peer_node_data,
|
||||
payload_data: peer_core_sync,
|
||||
local_peerlist_new,
|
||||
} = res;
|
||||
|
||||
if !peer_node_data
|
||||
.support_flags
|
||||
.contains(&self.minimum_support_flags)
|
||||
{
|
||||
tracing::debug!("Handshake failed: peer does not have minimum support flags");
|
||||
return Err(HandShakeError::PeerDoesNotHaveTheMinimumSupportFlags);
|
||||
}
|
||||
|
||||
if peer_node_data.network_id != self.network.network_id() {
|
||||
tracing::debug!("Handshake failed: peer is on a different network");
|
||||
return Err(HandShakeError::PeerIsOnADifferentNetwork);
|
||||
}
|
||||
|
||||
if local_peerlist_new.len() > P2P_MAX_PEERS_IN_HANDSHAKE {
|
||||
tracing::debug!("Handshake failed: peer sent too many peers in response");
|
||||
return Err(HandShakeError::PeerSentTooManyPeers);
|
||||
}
|
||||
|
||||
// Tell the address book about the new peers
|
||||
self.address_book
|
||||
.ready()
|
||||
.await?
|
||||
.call(AddressBookRequest::HandleNewPeerList(
|
||||
local_peerlist_new,
|
||||
self.addr.get_zone(),
|
||||
))
|
||||
.await?;
|
||||
|
||||
// coresync, pruning seed
|
||||
|
||||
if peer_node_data.support_flags.is_empty() {
|
||||
self.send_support_flag_req().await?;
|
||||
self.state = HandshakeState::WaitingForSupportFlagResponse(peer_node_data);
|
||||
} else {
|
||||
self.state = HandshakeState::Complete(peer_node_data);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn handle_message_response(
|
||||
&mut self,
|
||||
response: MessageResponse,
|
||||
) -> Result<(), HandShakeError> {
|
||||
match (&mut self.state, response) {
|
||||
(
|
||||
HandshakeState::WaitingForHandshakeResponse,
|
||||
MessageResponse::Handshake(handshake),
|
||||
) => self.handle_handshake_response(handshake).await,
|
||||
(
|
||||
HandshakeState::WaitingForSupportFlagResponse(bnd),
|
||||
MessageResponse::SupportFlags(support_flags),
|
||||
) => {
|
||||
bnd.support_flags = support_flags.support_flags;
|
||||
self.state = HandshakeState::Complete(bnd.clone());
|
||||
Ok(())
|
||||
}
|
||||
_ => Err(HandShakeError::PeerSentWrongResponse),
|
||||
}
|
||||
}
|
||||
|
||||
async fn send_support_flags(
|
||||
&mut self,
|
||||
support_flags: PeerSupportFlags,
|
||||
) -> Result<(), HandShakeError> {
|
||||
let message = Message::Response(SupportFlagsResponse { support_flags }.into());
|
||||
self.peer_sink.send(message).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
    /// Performs the outbound side of the handshake: sends our handshake
    /// request, then processes incoming messages until the state machine
    /// reaches [`HandshakeState::Complete`].
    ///
    /// While waiting for responses the peer may ask for our support flags;
    /// that request is answered inline. Any other request fails the handshake.
    async fn do_outbound_handshake(&mut self) -> Result<(), HandShakeError> {
        let core_sync = self.get_our_core_sync().await?;
        self.send_handshake_req(self.basic_node_data.clone(), core_sync)
            .await?;
        self.state = HandshakeState::WaitingForHandshakeResponse;

        // Pump the stream until a response (possibly two, if support flags had
        // to be requested separately) completes the state machine.
        while !self.state.is_complete() {
            match self.peer_stream.next().await {
                Some(mes) => {
                    let mes = mes?;
                    match mes {
                        // The peer may ask for our support flags mid-handshake.
                        Message::Request(MessageRequest::SupportFlags(_)) => {
                            self.send_support_flags(self.basic_node_data.support_flags)
                                .await?
                        }
                        Message::Response(response) => {
                            self.handle_message_response(response).await?
                        }
                        // Notifications / other requests are not valid here.
                        _ => return Err(HandShakeError::PeerSentWrongResponse),
                    }
                }
                // NOTE(review): relies on the stream never terminating on its
                // own — confirm the framed peer stream guarantees this.
                None => unreachable!("peer_stream wont return None"),
            }
        }

        Ok(())
    }
|
||||
|
||||
    /// Runs the full handshake for this connection and, on success, spawns the
    /// connection task and returns a [`Client`] handle to it.
    async fn do_handshake(mut self) -> Result<Client, HandShakeError> {
        match self.direction {
            Direction::Outbound => self.do_outbound_handshake().await?,
            // NOTE(review): inbound handshakes are not implemented yet.
            Direction::Inbound => todo!(),
        }

        // Channel the client handle uses to send requests to the connection task.
        let (server_tx, server_rx) = mpsc::channel(3);

        // TODO(review): `replace_me` is a placeholder — its receiving half is
        // dropped immediately, so anything sent into it is lost.
        let (replace_me, replace_me_rx) = mpsc::channel(3);

        let peer_node_data = self
            .state
            .peer_basic_node_data()
            .expect("We must be in state complete to be here");
        let connection_info = ConnectionInfo {
            addr: self.addr,
            support_flags: peer_node_data.support_flags,
            peer_id: peer_node_data.peer_id,
            rpc_port: peer_node_data.rpc_port,
            rpc_credits_per_hash: peer_node_data.rpc_credits_per_hash,
        };

        let connection = Connection::new(
            self.addr,
            self.peer_sink,
            self.peer_stream,
            server_rx,
            replace_me,
            self.peer_request_service,
        );

        let client = Client::new(connection_info.into(), server_tx);

        // Drive the connection on its own task, tagged with this handshake's
        // tracing span.
        tokio::task::spawn(connection.run().instrument(self.connection_span));

        Ok(client)
    }
|
||||
}
|
1
p2p/src/peer/tests.rs
Normal file
1
p2p/src/peer/tests.rs
Normal file
|
@ -0,0 +1 @@
|
|||
mod handshake;
|
1
p2p/src/peer/tests/handshake.rs
Normal file
1
p2p/src/peer/tests/handshake.rs
Normal file
|
@ -0,0 +1 @@
|
|||
pub use crate::peer::handshaker::{Handshake, Handshaker};
|
13
p2p/src/protocol.rs
Normal file
13
p2p/src/protocol.rs
Normal file
|
@ -0,0 +1,13 @@
|
|||
pub mod internal_network;
|
||||
pub mod temp_database;
|
||||
|
||||
pub use internal_network::{InternalMessageRequest, InternalMessageResponse};
|
||||
|
||||
pub const BLOCKS_IDS_SYNCHRONIZING_DEFAULT_COUNT: usize = 10000;
|
||||
pub const BLOCKS_IDS_SYNCHRONIZING_MAX_COUNT: usize = 25000;
|
||||
pub const P2P_MAX_PEERS_IN_HANDSHAKE: usize = 250;
|
||||
|
||||
/// The direction of a P2P connection.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Direction {
    /// The peer initiated the connection to us.
    Inbound,
    /// We initiated the connection to the peer.
    Outbound,
}
|
183
p2p/src/protocol/internal_network.rs
Normal file
183
p2p/src/protocol/internal_network.rs
Normal file
|
@ -0,0 +1,183 @@
|
|||
//! This module defines InternalRequests and InternalResponses. Cuprate's P2P works by translating network messages into an internal
//! request/ response, this is easy for levin "requests" and "responses" (admin messages) but takes a bit more work with "notifications"
//! (protocol messages).
//!
//! Some notifications are easy to translate, like `GetObjectsRequest` is obviously a request but others like `NewFluffyBlock` are a
//! bit tricker. To translate a `NewFluffyBlock` into a request/ response we will have to look to see if we asked for `FluffyMissingTransactionsRequest`
//! if we have we interpret `NewFluffyBlock` as a response if not its a request that doesn't require a response.
//!
//! Here is every P2P request/ response. *note admin messages are already request/ response so "Handshake" is actually made of a HandshakeRequest & HandshakeResponse
//!
//! Admin:
//!     Handshake,
//!     TimedSync,
//!     Ping,
//!     SupportFlags
//! Protocol:
//!     Request: GetObjectsRequest,                 Response: GetObjectsResponse,
//!     Request: ChainRequest,                      Response: ChainResponse,
//!     Request: FluffyMissingTransactionsRequest,  Response: NewFluffyBlock,   <- these 2 could be requests or responses
//!     Request: GetTxPoolCompliment,               Response: NewTransactions,  <-
//!     Request: NewBlock,                          Response: None,
//!     Request: NewFluffyBlock,                    Response: None,
//!     Request: NewTransactions,                   Response: None

// These were `///` doc comments attached to the `use` item below, where they
// are invisible in rustdoc; they are module-level docs and now use `//!`.
use monero_wire::messages::{
    AdminMessage, ChainRequest, ChainResponse, FluffyMissingTransactionsRequest, GetObjectsRequest,
    GetObjectsResponse, GetTxPoolCompliment, Handshake, Message, MessageNotification,
    MessageRequest, MessageResponse, NewBlock, NewFluffyBlock, NewTransactions, Ping,
    ProtocolMessage, SupportFlags, TimedSync,
};
||||
|
||||
// Generates `InternalMessageRequest` / `InternalMessageResponse` plus every
// conversion between them and the wire-level `Message` types.
//
// Input grammar:
//   Admin:    comma-separated admin message types; each contributes a variant
//             wrapping its `AdminMessage::Request` / `::Response` type.
//   Protocol: pairs of `Request: <notif>, Response: SOME: <notif>` when the
//             request expects a reply, or `Request: <notif>, Response: NULL:
//             None` when it is fire-and-forget.
macro_rules! client_request_peer_response {
    (
    Admin:
    $($admin_mes:ident),+
    Protocol:
    $(Request: $protocol_req:ident, Response: $(SOME: $protocol_res:ident)? $(NULL: $none:expr)? ),+
    ) => {

    #[derive(Debug, Clone)]
    pub enum InternalMessageRequest {
        $($admin_mes(<$admin_mes as AdminMessage>::Request),)+
        $($protocol_req(<$protocol_req as ProtocolMessage>::Notification),)+
    }

    impl InternalMessageRequest {
        // The levin command name of this request.
        pub fn get_str_name(&self) -> &'static str {
            match self {
                $(InternalMessageRequest::$admin_mes(_) => $admin_mes::NAME,)+
                $(InternalMessageRequest::$protocol_req(_) => $protocol_req::NAME,)+
            }
        }
        // The levin command id of this request.
        pub fn id(&self) -> u32 {
            match self {
                $(InternalMessageRequest::$admin_mes(_) => $admin_mes::ID,)+
                $(InternalMessageRequest::$protocol_req(_) => $protocol_req::ID,)+
            }
        }
        // The command id the response should carry; `None` for notifications
        // that have no response.
        pub fn expected_id(&self) -> Option<u32> {
            match self {
                $(InternalMessageRequest::$admin_mes(_) => Some($admin_mes::ID),)+
                $(InternalMessageRequest::$protocol_req(_) => $(Some($protocol_res::ID))? $($none)?,)+
            }
        }
        // True for admin (levin request/response) messages, false for
        // protocol notifications.
        pub fn is_levin_request(&self) -> bool {
            match self {
                $(InternalMessageRequest::$admin_mes(_) => true,)+
                $(InternalMessageRequest::$protocol_req(_) => false,)+
            }
        }
    }

    impl From<MessageRequest> for InternalMessageRequest {
        fn from(value: MessageRequest) -> Self {
            match value {
                $(MessageRequest::$admin_mes(mes) => InternalMessageRequest::$admin_mes(mes),)+
            }
        }
    }

    // NOTE(review): idiomatic Rust implements `From<InternalMessageRequest>
    // for Message` instead — the blanket impl then supplies `Into` for free.
    impl Into<Message> for InternalMessageRequest {
        fn into(self) -> Message {
            match self {
                $(InternalMessageRequest::$admin_mes(mes) => Message::Request(MessageRequest::$admin_mes(mes)),)+
                $(InternalMessageRequest::$protocol_req(mes) => Message::Notification(MessageNotification::$protocol_req(mes)),)+
            }
        }
    }

    // Error for the fallible `Message` -> request conversion below.
    #[derive(Debug)]
    pub struct NotAnInternalRequest;

    impl TryFrom<Message> for InternalMessageRequest {
        type Error = NotAnInternalRequest;
        fn try_from(value: Message) -> Result<Self, Self::Error> {
            match value {
                Message::Response(_) => Err(NotAnInternalRequest),
                Message::Request(req) => Ok(req.into()),
                Message::Notification(noti) => {
                    match noti {
                        $(MessageNotification::$protocol_req(noti) => Ok(InternalMessageRequest::$protocol_req(noti)),)+
                        _ => Err(NotAnInternalRequest),
                    }
                }
            }
        }
    }

    #[derive(Debug, Clone)]
    pub enum InternalMessageResponse {
        $($admin_mes(<$admin_mes as AdminMessage>::Response),)+
        $($($protocol_res(<$protocol_res as ProtocolMessage>::Notification),)?)+
    }

    impl InternalMessageResponse {
        // The levin command name of this response.
        pub fn get_str_name(&self) -> &'static str {
            match self {
                $(InternalMessageResponse::$admin_mes(_) => $admin_mes::NAME,)+
                $($(InternalMessageResponse::$protocol_res(_) => $protocol_res::NAME,)?)+
            }
        }
        // The levin command id of this response.
        pub fn id(&self) -> u32 {
            match self{
                $(InternalMessageResponse::$admin_mes(_) => $admin_mes::ID,)+
                $($(InternalMessageResponse::$protocol_res(_) => $protocol_res::ID,)?)+
            }
        }
    }

    impl From<MessageResponse> for InternalMessageResponse {
        fn from(value: MessageResponse) -> Self {
            match value {
                $(MessageResponse::$admin_mes(mes) => InternalMessageResponse::$admin_mes(mes),)+
            }
        }
    }

    impl Into<Message> for InternalMessageResponse {
        fn into(self) -> Message {
            match self {
                $(InternalMessageResponse::$admin_mes(mes) => Message::Response(MessageResponse::$admin_mes(mes)),)+
                $($(InternalMessageResponse::$protocol_res(mes) => Message::Notification(MessageNotification::$protocol_res(mes)),)?)+
            }
        }
    }

    // Error for the fallible `Message` -> response conversion below.
    #[derive(Debug)]
    pub struct NotAnInternalResponse;

    impl TryFrom<Message> for InternalMessageResponse {
        type Error = NotAnInternalResponse;
        fn try_from(value: Message) -> Result<Self, Self::Error> {
            match value {
                Message::Response(res) => Ok(res.into()),
                Message::Request(_) => Err(NotAnInternalResponse),
                Message::Notification(noti) => {
                    match noti {
                        $($(MessageNotification::$protocol_res(noti) => Ok(InternalMessageResponse::$protocol_res(noti)),)?)+
                        _ => Err(NotAnInternalResponse),
                    }
                }
            }
        }
    }
    };
}
|
||||
|
||||
// Instantiate the internal request/response types for the full Monero P2P
// message set (see the module docs for how notifications are mapped).
client_request_peer_response!(
    Admin:
    Handshake,
    TimedSync,
    Ping,
    SupportFlags
    Protocol:
    Request: GetObjectsRequest, Response: SOME: GetObjectsResponse,
    Request: ChainRequest, Response: SOME: ChainResponse,
    Request: FluffyMissingTransactionsRequest, Response: SOME: NewFluffyBlock, // these 2 could be requests or responses
    Request: GetTxPoolCompliment, Response: SOME: NewTransactions, //
    // these don't need to be responded to
    Request: NewBlock, Response: NULL: None,
    Request: NewFluffyBlock, Response: NULL: None,
    Request: NewTransactions, Response: NULL: None
);
|
13
p2p/src/protocol/lib.rs
Normal file
13
p2p/src/protocol/lib.rs
Normal file
|
@ -0,0 +1,13 @@
|
|||
// NOTE(review): this file's contents duplicate `p2p/src/protocol.rs` added in
// the same commit — confirm whether one of the two should be removed.
pub mod internal_network;
pub mod temp_database;

pub use internal_network::{InternalMessageRequest, InternalMessageResponse};

pub const BLOCKS_IDS_SYNCHRONIZING_DEFAULT_COUNT: usize = 10000;
pub const BLOCKS_IDS_SYNCHRONIZING_MAX_COUNT: usize = 25000;
/// Maximum number of peers accepted in a handshake's peer list.
pub const P2P_MAX_PEERS_IN_HANDSHAKE: usize = 250;

/// The direction of a P2P connection.
pub enum Direction {
    Inbound,
    Outbound,
}
|
36
p2p/src/protocol/temp_database.rs
Normal file
36
p2p/src/protocol/temp_database.rs
Normal file
|
@ -0,0 +1,36 @@
|
|||
use monero_wire::messages::CoreSyncData;
|
||||
use thiserror::Error;
|
||||
|
||||
/// How a block id relates to our local chain state.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BlockKnown {
    /// We have never seen this block.
    No,
    /// The block is part of our main chain.
    OnMainChain,
    /// The block is on a known side chain.
    OnSideChain,
    /// The block is known to be invalid.
    KnownBad,
}

impl BlockKnown {
    /// Returns `true` if we have seen this block in any form (including bad).
    pub fn is_known(&self) -> bool {
        !matches!(self, BlockKnown::No)
    }
}
|
||||
|
||||
/// Requests that can be made to the (temporary) blockchain database service.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DataBaseRequest {
    /// The height of our chain tip.
    CurrentHeight,
    /// The cumulative difficulty of our chain.
    CumulativeDifficulty,
    /// Our current core sync data.
    CoreSyncData,
    /// The block ids used to build a chain request.
    Chain,
    /// The main-chain height of the block with this hash, if we have it.
    BlockHeight([u8; 32]),
    /// Whether (and how) we know the block with this hash.
    BlockKnown([u8; 32]),
}
|
||||
|
||||
/// Responses from the blockchain database, mirroring [`DataBaseRequest`]
/// variant-for-variant.
pub enum DataBaseResponse {
    CurrentHeight(u64),
    CumulativeDifficulty(u128),
    CoreSyncData(CoreSyncData),
    /// Block ids used to build a chain request.
    Chain(Vec<[u8; 32]>),
    /// `None` when the block is not in our main chain.
    BlockHeight(Option<u64>),
    BlockKnown(BlockKnown),
}

/// Errors the (temporary) database service can return.
// NOTE(review): currently empty — variants will presumably be added with the
// real database implementation.
#[derive(Debug, Error, PartialEq, Eq)]
pub enum DatabaseError {}
|
21
p2p/sync-states/Cargo.toml
Normal file
21
p2p/sync-states/Cargo.toml
Normal file
|
@ -0,0 +1,21 @@
|
|||
[package]
|
||||
name = "cuprate-sync-states"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
cuprate-common = {path = "../../common"}
|
||||
cuprate-peer = {path = "../peer"}
|
||||
cuprate-protocol = {path = "../protocol"}
|
||||
monero = {git="https://github.com/Boog900/monero-rs.git", branch="db", features=["database"]}
|
||||
monero-wire = {path= "../../net/monero-wire"}
|
||||
futures = "0.3.26"
|
||||
tower = {version = "0.4.13", features = ["util"]}
|
||||
thiserror = "1.0.39"
|
||||
|
||||
|
||||
tokio = {version="1.1", features=["full"]}
|
||||
tokio-util = {version ="0.7", features=["compat"]}
|
||||
|
538
p2p/sync-states/src/lib.rs
Normal file
538
p2p/sync-states/src/lib.rs
Normal file
|
@ -0,0 +1,538 @@
|
|||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use futures::channel::mpsc;
|
||||
use futures::StreamExt;
|
||||
use monero::Hash;
|
||||
use thiserror::Error;
|
||||
use tower::{Service, ServiceExt};
|
||||
|
||||
use cuprate_common::{hardforks, HardForks};
|
||||
use cuprate_peer::connection::PeerSyncChange;
|
||||
use cuprate_protocol::temp_database::{
|
||||
BlockKnown, DataBaseRequest, DataBaseResponse, DatabaseError,
|
||||
};
|
||||
use cuprate_protocol::{InternalMessageRequest, InternalMessageResponse};
|
||||
use monero_wire::messages::protocol::ChainResponse;
|
||||
use monero_wire::messages::{ChainRequest, CoreSyncData};
|
||||
use monero_wire::{Message, NetworkAddress};
|
||||
|
||||
// TODO: Move this!!!!!!!
|
||||
// ********************************
|
||||
|
||||
/// A request to the set of connected peers.
pub enum PeerSetRequest {
    /// Disconnect the given peer.
    DisconnectPeer(NetworkAddress),
    /// Ban the given peer.
    BanPeer(NetworkAddress),
    /// Send a request, to a specific peer when `Some(addr)` is given.
    SendRequest(InternalMessageRequest, Option<NetworkAddress>),
}

/// A response from the peer set: the answering peer's address and, when the
/// request expected one, its response.
pub struct PeerSetResponse {
    // NOTE(review): `peer` is currently unread in this crate — confirm it is
    // needed by the eventual peer-set implementation.
    peer: NetworkAddress,
    response: Option<InternalMessageResponse>,
}
|
||||
|
||||
// *******************************
|
||||
/// Sync information tracked for a single peer.
#[derive(Debug, Default)]
pub struct IndividualPeerSync {
    /// The chain height the peer last claimed.
    height: u64,
    // no guarantee this is the same block as height
    top_id: Hash,
    top_version: u8,
    cumulative_difficulty: u128,
    /// the height the list of needed blocks starts at
    start_height: u64,
    /// list of block hashes our node does not have.
    needed_blocks: Vec<(Hash, Option<u64>)>,
}

/// Per-peer sync data, keyed by the peer's network address.
#[derive(Debug, Default)]
pub struct PeersSyncData {
    peers: HashMap<NetworkAddress, IndividualPeerSync>,
}
|
||||
|
||||
impl PeersSyncData {
|
||||
pub fn new_core_sync_data(
|
||||
&mut self,
|
||||
id: &NetworkAddress,
|
||||
core_sync: CoreSyncData,
|
||||
) -> Result<(), SyncStatesError> {
|
||||
let peer_data = self.peers.get_mut(&id);
|
||||
if peer_data.is_none() {
|
||||
let ips = IndividualPeerSync {
|
||||
height: core_sync.current_height,
|
||||
top_id: core_sync.top_id,
|
||||
top_version: core_sync.top_version,
|
||||
cumulative_difficulty: core_sync.cumulative_difficulty(),
|
||||
start_height: 0,
|
||||
needed_blocks: vec![],
|
||||
};
|
||||
self.peers.insert(*id, ips);
|
||||
} else {
|
||||
let peer_data = peer_data.unwrap();
|
||||
if peer_data.height > core_sync.current_height {
|
||||
return Err(SyncStatesError::PeersHeightHasDropped);
|
||||
}
|
||||
if peer_data.cumulative_difficulty > core_sync.cumulative_difficulty() {
|
||||
return Err(SyncStatesError::PeersCumulativeDifficultyDropped);
|
||||
}
|
||||
peer_data.height = core_sync.current_height;
|
||||
peer_data.cumulative_difficulty = core_sync.cumulative_difficulty();
|
||||
peer_data.top_id = core_sync.top_id;
|
||||
peer_data.top_version = core_sync.top_version;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
    /// Records a validated [`ChainResponse`] for a peer: updates its height
    /// and cumulative difficulty and stores the blocks we need from it.
    ///
    /// `needed_blocks` is computed by the caller from the response's block ids.
    ///
    /// # Panics
    /// Panics if the peer has not previously provided core sync data.
    pub fn new_chain_response(
        &mut self,
        id: &NetworkAddress,
        chain_response: ChainResponse,
        needed_blocks: Vec<(Hash, Option<u64>)>,
    ) -> Result<(), SyncStatesError> {
        let peer_data = self
            .peers
            .get_mut(&id)
            .expect("Peers must give use their core sync before chain response");

        // it's sad we have to do this so late in the response validation process
        if peer_data.height > chain_response.total_height {
            return Err(SyncStatesError::PeersHeightHasDropped);
        }
        if peer_data.cumulative_difficulty > chain_response.cumulative_difficulty() {
            return Err(SyncStatesError::PeersCumulativeDifficultyDropped);
        }

        peer_data.cumulative_difficulty = chain_response.cumulative_difficulty();
        peer_data.height = chain_response.total_height;
        // Offset the start height by the returned ids that are NOT in the
        // needed list (i.e. the ones we already have).
        peer_data.start_height = chain_response.start_height
            + chain_response.m_block_ids.len() as u64
            - needed_blocks.len() as u64;
        peer_data.needed_blocks = needed_blocks;
        Ok(())
    }
|
||||
// returns true if we have ran out of known blocks for that peer
|
||||
pub fn new_objects_response(
|
||||
&mut self,
|
||||
id: &NetworkAddress,
|
||||
mut block_ids: HashSet<Hash>,
|
||||
) -> Result<bool, SyncStatesError> {
|
||||
let peer_data = self
|
||||
.peers
|
||||
.get_mut(id)
|
||||
.expect("Peers must give use their core sync before objects response");
|
||||
let mut i = 0;
|
||||
if peer_data.needed_blocks.is_empty() {
|
||||
return Ok(true);
|
||||
}
|
||||
while !block_ids.contains(&peer_data.needed_blocks[i].0) {
|
||||
i += 1;
|
||||
if i == peer_data.needed_blocks.len() {
|
||||
peer_data.needed_blocks = vec![];
|
||||
peer_data.start_height = 0;
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
for _ in 0..block_ids.len() {
|
||||
if !block_ids.remove(&peer_data.needed_blocks[i].0) {
|
||||
return Err(SyncStatesError::PeerSentAnUnexpectedBlockId);
|
||||
}
|
||||
i += 1;
|
||||
if i == peer_data.needed_blocks.len() {
|
||||
peer_data.needed_blocks = vec![];
|
||||
peer_data.start_height = 0;
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
peer_data.needed_blocks = peer_data.needed_blocks[i..].to_vec();
|
||||
peer_data.start_height = peer_data.start_height + i as u64;
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
    /// Stops tracking sync data for a disconnected peer (no-op if unknown).
    pub fn peer_disconnected(&mut self, id: &NetworkAddress) {
        let _ = self.peers.remove(id);
    }
|
||||
}
|
||||
|
||||
/// Errors produced while validating peers' sync data.
#[derive(Debug, Error, PartialEq, Eq)]
pub enum SyncStatesError {
    #[error("Peer sent a block id we know is bad")]
    PeerSentKnownBadBlock,
    #[error("Peer sent a block id we weren't expecting")]
    PeerSentAnUnexpectedBlockId,
    #[error("Peer sent a chain entry where we don't know the start")]
    PeerSentNoneOverlappingFirstBlock,
    #[error("We have the peers block just at a different height")]
    WeHaveBlockAtDifferentHeight,
    #[error("The peer sent a top version we weren't expecting")]
    PeerSentBadTopVersion,
    #[error("The peer sent a weird pruning seed")]
    PeerSentBadPruningSeed,
    #[error("The peer height has dropped")]
    PeersHeightHasDropped,
    #[error("The peers cumulative difficulty has dropped")]
    PeersCumulativeDifficultyDropped,
    /// Wraps errors coming back from the blockchain database service.
    #[error("Our database returned an error: {0}")]
    DataBaseError(#[from] DatabaseError),
}
|
||||
|
||||
/// Tracks every connected peer's claimed chain state and decides when we
/// should request chain entries from them.
pub struct SyncStates<Db> {
    /// Stream of sync-related events coming from peer connections.
    peer_sync_rx: mpsc::Receiver<PeerSyncChange>,
    /// Hard-fork schedule, used to sanity-check peers' top block versions.
    hardforks: HardForks,
    /// Per-peer sync data, shared with other components.
    peer_sync_states: Arc<Mutex<PeersSyncData>>,
    /// Service handle to the blockchain database.
    blockchain: Db,
}
|
||||
|
||||
impl<Db> SyncStates<Db>
|
||||
where
|
||||
Db: Service<DataBaseRequest, Response = DataBaseResponse, Error = DatabaseError>,
|
||||
{
|
||||
    /// Creates a new [`SyncStates`] from its parts.
    pub fn new(
        peer_sync_rx: mpsc::Receiver<PeerSyncChange>,
        hardforks: HardForks,
        peer_sync_states: Arc<Mutex<PeersSyncData>>,
        blockchain: Db,
    ) -> Self {
        SyncStates {
            peer_sync_rx,
            hardforks,
            peer_sync_states,
            blockchain,
        }
    }
|
||||
async fn send_database_request(
|
||||
&mut self,
|
||||
req: DataBaseRequest,
|
||||
) -> Result<DataBaseResponse, DatabaseError> {
|
||||
let ready_blockchain = self.blockchain.ready().await?;
|
||||
ready_blockchain.call(req).await
|
||||
}
|
||||
|
||||
async fn handle_core_sync_change(
|
||||
&mut self,
|
||||
id: &NetworkAddress,
|
||||
core_sync: CoreSyncData,
|
||||
) -> Result<bool, SyncStatesError> {
|
||||
if core_sync.current_height > 0 {
|
||||
let version = self
|
||||
.hardforks
|
||||
.get_ideal_version_from_height(core_sync.current_height - 1);
|
||||
if version >= 6 && version != core_sync.top_version {
|
||||
return Err(SyncStatesError::PeerSentBadTopVersion);
|
||||
}
|
||||
}
|
||||
if core_sync.pruning_seed != 0 {
|
||||
let log_stripes =
|
||||
monero::database::pruning::get_pruning_log_stripes(core_sync.pruning_seed);
|
||||
let stripe =
|
||||
monero::database::pruning::get_pruning_stripe_for_seed(core_sync.pruning_seed);
|
||||
if stripe != monero::database::pruning::CRYPTONOTE_PRUNING_LOG_STRIPES
|
||||
|| stripe > (1 << log_stripes)
|
||||
{
|
||||
return Err(SyncStatesError::PeerSentBadPruningSeed);
|
||||
}
|
||||
}
|
||||
//if core_sync.current_height > max block numb
|
||||
let DataBaseResponse::BlockHeight(height) = self.send_database_request(DataBaseRequest::BlockHeight(core_sync.top_id)).await? else {
|
||||
unreachable!("the blockchain won't send the wrong response");
|
||||
};
|
||||
|
||||
let behind: bool;
|
||||
|
||||
if let Some(height) = height {
|
||||
if height != core_sync.current_height {
|
||||
return Err(SyncStatesError::WeHaveBlockAtDifferentHeight);
|
||||
}
|
||||
behind = false;
|
||||
} else {
|
||||
let DataBaseResponse::CumulativeDifficulty(cumulative_diff) = self.send_database_request(DataBaseRequest::CumulativeDifficulty).await? else {
|
||||
unreachable!("the blockchain won't send the wrong response");
|
||||
};
|
||||
// if their chain has more POW we want it
|
||||
if cumulative_diff < core_sync.cumulative_difficulty() {
|
||||
behind = true;
|
||||
} else {
|
||||
behind = false;
|
||||
}
|
||||
}
|
||||
|
||||
let mut sync_states = self.peer_sync_states.lock().unwrap();
|
||||
sync_states.new_core_sync_data(id, core_sync)?;
|
||||
|
||||
Ok(behind)
|
||||
}
|
||||
|
||||
    /// Validates a peer's [`ChainResponse`] against our own chain and records
    /// the blocks to fetch from it.
    ///
    /// Rules enforced over the returned ids, in order: the first block must be
    /// known to us (the chains must overlap); once an unknown block appears,
    /// all later blocks must also be unknown; a main-chain block must sit at
    /// the height the response implies; known-bad blocks fail immediately.
    async fn handle_chain_entry_response(
        &mut self,
        id: &NetworkAddress,
        chain_response: ChainResponse,
    ) -> Result<(), SyncStatesError> {
        // Set when we see the first block we don't have; any known block after
        // that means the peer's chain entry is inconsistent.
        let mut expect_unknown = false;
        let mut needed_blocks = Vec::with_capacity(chain_response.m_block_ids.len());

        for (index, block_id) in chain_response.m_block_ids.iter().enumerate() {
            let DataBaseResponse::BlockKnown(known) = self.send_database_request(DataBaseRequest::BlockKnown(*block_id)).await? else {
                unreachable!("the blockchain won't send the wrong response");
            };
            if index == 0 {
                if !known.is_known() {
                    return Err(SyncStatesError::PeerSentNoneOverlappingFirstBlock);
                }
            } else {
                match known {
                    BlockKnown::No => expect_unknown = true,
                    BlockKnown::OnMainChain => {
                        if expect_unknown {
                            return Err(SyncStatesError::PeerSentAnUnexpectedBlockId);
                        } else {
                            // Cross-check the height implied by the response
                            // against where the block sits in our main chain.
                            let DataBaseResponse::BlockHeight(height) = self.send_database_request(DataBaseRequest::BlockHeight(*block_id)).await? else {
                                unreachable!("the blockchain won't send the wrong response");
                            };
                            if chain_response.start_height + index as u64
                                != height.expect("We already know this block is in our main chain.")
                            {
                                return Err(SyncStatesError::WeHaveBlockAtDifferentHeight);
                            }
                        }
                    }
                    BlockKnown::OnSideChain => {
                        if expect_unknown {
                            return Err(SyncStatesError::PeerSentAnUnexpectedBlockId);
                        }
                    }
                    BlockKnown::KnownBad => return Err(SyncStatesError::PeerSentKnownBadBlock),
                }
            }
            // NOTE(review): every returned id (even ones we already have) is
            // pushed into `needed_blocks` — confirm this is intended, since
            // `new_chain_response` derives the start height from its length.
            let block_weight = chain_response.m_block_weights.get(index).map(|f| f.clone());
            needed_blocks.push((*block_id, block_weight));
        }
        let mut sync_states = self.peer_sync_states.lock().unwrap();
        sync_states.new_chain_response(id, chain_response, needed_blocks)?;
        Ok(())
    }
|
||||
|
||||
    /// Builds a [`ChainRequest`] from the block ids the database gives us.
    async fn build_chain_request(&mut self) -> Result<ChainRequest, DatabaseError> {
        let DataBaseResponse::Chain(ids) = self.send_database_request(DataBaseRequest::Chain).await? else {
            unreachable!("the blockchain won't send the wrong response");
        };

        Ok(ChainRequest {
            block_ids: ids,
            prune: false,
        })
    }
|
||||
|
||||
    /// Sends a chain request to the peer at `id` via the peer set and returns
    /// its [`ChainResponse`].
    async fn get_peers_chain_entry<Svc>(
        &mut self,
        peer_set: &mut Svc,
        id: &NetworkAddress,
    ) -> Result<ChainResponse, DatabaseError>
    where
        Svc: Service<PeerSetRequest, Response = PeerSetResponse, Error = DatabaseError>,
    {
        let chain_req = self.build_chain_request().await?;
        // NOTE(review): `unwrap` on readiness — confirm the peer-set service
        // really cannot fail to become ready.
        let ready_set = peer_set.ready().await.unwrap();
        let response: PeerSetResponse = ready_set
            .call(PeerSetRequest::SendRequest(
                Message::Notification(chain_req.into())
                    .try_into()
                    .expect("Chain request can always be converted to IMR"),
                Some(*id),
            ))
            .await?;
        let InternalMessageResponse::ChainResponse(response) = response.response.expect("peer set will return a result for a chain request") else {
            unreachable!("peer set will return correct response");
        };

        Ok(response)
    }
|
||||
|
||||
async fn get_and_handle_chain_entry<Svc>(
|
||||
&mut self,
|
||||
peer_set: &mut Svc,
|
||||
id: NetworkAddress,
|
||||
) -> Result<(), SyncStatesError>
|
||||
where
|
||||
Svc: Service<PeerSetRequest, Response = PeerSetResponse, Error = DatabaseError>,
|
||||
{
|
||||
let chain_response = self.get_peers_chain_entry(peer_set, &id).await?;
|
||||
self.handle_chain_entry_response(&id, chain_response).await
|
||||
}
|
||||
|
||||
    /// Records which of a peer's needed blocks arrived in an objects response.
    ///
    /// Returns `Ok(true)` when we have exhausted the blocks tracked for this
    /// peer but its claimed height is still above ours — meaning we should
    /// request another chain entry from it.
    async fn handle_objects_response(
        &mut self,
        id: NetworkAddress,
        block_ids: Vec<Hash>,
        peers_height: u64,
    ) -> Result<bool, SyncStatesError> {
        let mut sync_states = self.peer_sync_states.lock().unwrap();
        let ran_out_of_blocks =
            sync_states.new_objects_response(&id, HashSet::from_iter(block_ids))?;
        // Release the mutex before awaiting the database call below.
        drop(sync_states);
        if ran_out_of_blocks {
            let DataBaseResponse::CurrentHeight(our_height) = self.send_database_request(DataBaseRequest::CurrentHeight).await? else {
                unreachable!("the blockchain won't send the wrong response");
            };
            if our_height < peers_height {
                return Ok(true);
            }
        }
        Ok(false)
    }
|
||||
|
||||
    /// Drops a disconnected peer from the shared sync-state map.
    fn handle_peer_disconnect(&mut self, id: NetworkAddress) {
        let mut sync_states = self.peer_sync_states.lock().unwrap();
        sync_states.peer_disconnected(&id);
    }
|
||||
|
||||
    /// Drives the sync-state service: consumes [`PeerSyncChange`] events until
    /// the channel closes, updating per-peer state and banning peers whose
    /// data fails validation.
    pub async fn run<Svc>(mut self, mut peer_set: Svc)
    where
        Svc: Service<PeerSetRequest, Response = PeerSetResponse, Error = DatabaseError>,
    {
        loop {
            let Some(change) = self.peer_sync_rx.next().await else {
                // is this best?
                return;
            };

            match change {
                PeerSyncChange::CoreSyncData(id, csd) => {
                    match self.handle_core_sync_change(&id, csd).await {
                        Err(_) => {
                            // TODO: check if error needs ban or forget
                            let ready_set = peer_set.ready().await.unwrap();
                            // NOTE(review): the ban result is discarded.
                            let res = ready_set.call(PeerSetRequest::BanPeer(id)).await;
                        }
                        Ok(request_chain) => {
                            if request_chain {
                                // NOTE(review): the Result of this call is
                                // ignored — errors here go unhandled.
                                self.get_and_handle_chain_entry(&mut peer_set, id).await;
                            }
                        }
                    }
                }
                PeerSyncChange::ObjectsResponse(id, block_ids, height) => {
                    match self.handle_objects_response(id, block_ids, height).await {
                        Err(_) => {
                            // TODO: check if error needs ban or forget
                            let ready_set = peer_set.ready().await.unwrap();
                            // NOTE(review): the ban result is discarded.
                            let res = ready_set.call(PeerSetRequest::BanPeer(id)).await;
                        }
                        Ok(res) => {
                            if res {
                                // NOTE(review): Result ignored, as above.
                                self.get_and_handle_chain_entry(&mut peer_set, id).await;
                            }
                        }
                    }
                }
                PeerSyncChange::PeerDisconnected(id) => {
                    self.handle_peer_disconnect(id);
                }
            }
        }
    }
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use monero::Hash;
    use monero_wire::messages::{ChainResponse, CoreSyncData};

    use crate::{PeersSyncData, SyncStatesError};

    // A peer's sync data can be inserted, then updated with a higher
    // height/difficulty, and the latest values are kept.
    #[test]
    fn peer_sync_data_good_core_sync() {
        let mut peer_sync_states = PeersSyncData::default();
        let core_sync = CoreSyncData::new(65346753, 1232, 389, Hash::null(), 1);

        peer_sync_states
            .new_core_sync_data(&monero_wire::NetworkAddress::default(), core_sync)
            .unwrap();

        let new_core_sync = CoreSyncData::new(65346754, 1233, 389, Hash::null(), 1);

        peer_sync_states
            .new_core_sync_data(&monero_wire::NetworkAddress::default(), new_core_sync)
            .unwrap();

        let peer = peer_sync_states
            .peers
            .get(&monero_wire::NetworkAddress::default())
            .unwrap();
        assert_eq!(peer.height, 1233);
        assert_eq!(peer.cumulative_difficulty, 65346754);
    }

    // An update that claims a LOWER height than before must be rejected.
    #[test]
    fn peer_sync_data_peer_height_dropped() {
        let mut peer_sync_states = PeersSyncData::default();
        let core_sync = CoreSyncData::new(65346753, 1232, 389, Hash::null(), 1);

        peer_sync_states
            .new_core_sync_data(&monero_wire::NetworkAddress::default(), core_sync)
            .unwrap();

        let new_core_sync = CoreSyncData::new(65346754, 1231, 389, Hash::null(), 1);

        let res = peer_sync_states
            .new_core_sync_data(&monero_wire::NetworkAddress::default(), new_core_sync)
            .unwrap_err();

        assert_eq!(res, SyncStatesError::PeersHeightHasDropped);
    }

    // An update that claims LOWER cumulative difficulty must be rejected.
    #[test]
    fn peer_sync_data_peer_cumulative_difficulty_dropped() {
        let mut peer_sync_states = PeersSyncData::default();
        let core_sync = CoreSyncData::new(65346753, 1232, 389, Hash::null(), 1);

        peer_sync_states
            .new_core_sync_data(&monero_wire::NetworkAddress::default(), core_sync)
            .unwrap();

        let new_core_sync = CoreSyncData::new(65346752, 1233, 389, Hash::null(), 1);

        let res = peer_sync_states
            .new_core_sync_data(&monero_wire::NetworkAddress::default(), new_core_sync)
            .unwrap_err();

        assert_eq!(res, SyncStatesError::PeersCumulativeDifficultyDropped);
    }

    // A chain response advances start_height past the ids we already hold and
    // stores only the remaining needed blocks.
    #[test]
    fn peer_sync_new_chain_response() {
        let mut peer_sync_states = PeersSyncData::default();
        let core_sync = CoreSyncData::new(65346753, 1232, 389, Hash::null(), 1);

        peer_sync_states
            .new_core_sync_data(&monero_wire::NetworkAddress::default(), core_sync)
            .unwrap();

        let chain_response = ChainResponse::new(
            10,
            1233,
            65346754,
            vec![Hash::new(&[1]), Hash::new(&[2])],
            vec![],
            vec![],
        );

        let needed_blocks = vec![(Hash::new(&[2]), None)];

        peer_sync_states
            .new_chain_response(
                &monero_wire::NetworkAddress::default(),
                chain_response,
                needed_blocks,
            )
            .unwrap();

        let peer = peer_sync_states
            .peers
            .get(&monero_wire::NetworkAddress::default())
            .unwrap();

        // start 10 + 2 returned ids - 1 needed = 11.
        assert_eq!(peer.start_height, 11);
        assert_eq!(peer.height, 1233);
        assert_eq!(peer.cumulative_difficulty, 65346754);
        assert_eq!(peer.needed_blocks, vec![(Hash::new(&[2]), None)]);
    }
}
|
109
p2p/sync-states/tests/mod.rs
Normal file
109
p2p/sync-states/tests/mod.rs
Normal file
|
@ -0,0 +1,109 @@
|
|||
use std::{
|
||||
pin::Pin,
|
||||
str::FromStr,
|
||||
sync::{Arc, Mutex},
|
||||
};
|
||||
|
||||
use cuprate_common::{HardForks, Network};
|
||||
use cuprate_peer::PeerError;
|
||||
use cuprate_protocol::{
|
||||
temp_database::{BlockKnown, DataBaseRequest, DataBaseResponse, DatabaseError},
|
||||
Direction, InternalMessageRequest, InternalMessageResponse,
|
||||
};
|
||||
use cuprate_sync_states::SyncStates;
|
||||
use futures::{channel::mpsc, Future, FutureExt};
|
||||
use monero::Hash;
|
||||
use monero_wire::messages::{admin::HandshakeResponse, CoreSyncData};
|
||||
use tower::ServiceExt;
|
||||
|
||||
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
||||
|
||||
struct TestBlockchain;
|
||||
|
||||
impl tower::Service<DataBaseRequest> for TestBlockchain {
|
||||
type Error = DatabaseError;
|
||||
type Response = DataBaseResponse;
|
||||
type Future =
|
||||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||
fn poll_ready(
|
||||
&mut self,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<Result<(), Self::Error>> {
|
||||
std::task::Poll::Ready(Ok(()))
|
||||
}
|
||||
fn call(&mut self, req: DataBaseRequest) -> Self::Future {
|
||||
let res = match req {
|
||||
DataBaseRequest::BlockHeight(h) => DataBaseResponse::BlockHeight(Some(221)),
|
||||
DataBaseRequest::BlockKnown(_) => DataBaseResponse::BlockKnown(BlockKnown::OnMainChain),
|
||||
DataBaseRequest::Chain => todo!(),
|
||||
DataBaseRequest::CoreSyncData => {
|
||||
DataBaseResponse::CoreSyncData(CoreSyncData::new(0, 0, 0, Hash::null(), 0))
|
||||
}
|
||||
DataBaseRequest::CumulativeDifficulty => DataBaseResponse::CumulativeDifficulty(0),
|
||||
DataBaseRequest::CurrentHeight => DataBaseResponse::CurrentHeight(0),
|
||||
};
|
||||
|
||||
async { Ok(res) }.boxed()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct TestPeerRequest;
|
||||
|
||||
impl tower::Service<InternalMessageRequest> for TestPeerRequest {
|
||||
type Error = PeerError;
|
||||
type Response = InternalMessageResponse;
|
||||
type Future =
|
||||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||
fn poll_ready(
|
||||
&mut self,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<Result<(), Self::Error>> {
|
||||
todo!()
|
||||
}
|
||||
fn call(&mut self, req: InternalMessageRequest) -> Self::Future {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_p2p_conn() {
|
||||
let conf = cuprate_peer::handshaker::NetworkConfig::default();
|
||||
let (addr_tx, addr_rx) = mpsc::channel(21);
|
||||
let (sync_tx, sync_rx) = mpsc::channel(21);
|
||||
let peer_sync_states = Arc::new(Mutex::default());
|
||||
|
||||
let peer_sync_states = SyncStates::new(
|
||||
sync_rx,
|
||||
HardForks::new(Network::MainNet),
|
||||
peer_sync_states,
|
||||
TestBlockchain,
|
||||
);
|
||||
|
||||
let mut handshaker = cuprate_peer::handshaker::Handshaker::new(
|
||||
conf,
|
||||
addr_tx,
|
||||
TestBlockchain,
|
||||
sync_tx,
|
||||
TestPeerRequest.boxed_clone(),
|
||||
);
|
||||
|
||||
let soc = tokio::net::TcpSocket::new_v4().unwrap();
|
||||
let addr = std::net::SocketAddr::from_str("127.0.0.1:18080").unwrap();
|
||||
|
||||
let mut con = soc.connect(addr).await.unwrap();
|
||||
|
||||
let (r_h, w_h) = con.split();
|
||||
|
||||
let (client, conn) = handshaker
|
||||
.complete_handshake(
|
||||
r_h.compat(),
|
||||
w_h.compat_write(),
|
||||
Direction::Outbound,
|
||||
monero_wire::NetworkAddress::default(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
//conn.run().await;
|
||||
}
|
|
@ -120,4 +120,4 @@ If you wish to contact contributors privately, you can import our pgp keys from
|
|||
|
||||
We're working on Cuprate in our free time, it take times & effort to make progress. We greatly appreciate your support, it really means a lot and encourage us to continue. If you wanna buy us a coffee (or tea for some of us) you can send your kindness at this address : </br><p align=center><strong>82rrTEtqbEa7GJkk7WeRXn67wC3acqG5mc7k6ce1b37jTdv5uM15gJa3vw7s4fDuA31BEufjBj2DzZUb42UqBaP23APEujL</strong></p>
|
||||
|
||||
<div align=center><img src="https://github.com/Cuprate/cuprate/raw/best-readme-ever/qr-code.png"></img></div>
|
||||
<div align=center><img src="https://raw.githubusercontent.com/Cuprate/cuprate/main/qr-code.png"></img></div>
|
||||
|
|
Loading…
Reference in a new issue