diff --git a/Cargo.toml b/Cargo.toml
index c78d4bc5..0012894f 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -18,10 +18,13 @@ authors=[
[workspace]
members = [
- "cuprate",
+ "common",
+ "cuprate",
"database",
"net/levin",
- "net/monero-wire"
+ "net/monero-wire",
+ "p2p",
+ # "p2p/sync-states"
]
[workspace.dependencies]
diff --git a/clippy.toml b/clippy.toml
deleted file mode 100644
index 5f5ce64e..00000000
--- a/clippy.toml
+++ /dev/null
@@ -1,2 +0,0 @@
-avoid-breaking-exported-api = false
-msrv = "1.67.0"
\ No newline at end of file
diff --git a/common/Cargo.toml b/common/Cargo.toml
new file mode 100644
index 00000000..b46325c2
--- /dev/null
+++ b/common/Cargo.toml
@@ -0,0 +1,11 @@
+[package]
+name = "cuprate-common"
+version = "0.1.0"
+edition = "2021"
+license = "AGPL-3.0-only"
+authors = ["Boog900"]
+
+
+[dependencies]
+chrono = "0.4.24"
+thiserror = "1"
\ No newline at end of file
diff --git a/common/LICENSE b/common/LICENSE
new file mode 100644
index 00000000..e19903e6
--- /dev/null
+++ b/common/LICENSE
@@ -0,0 +1,14 @@
+ Copyright (C) 2023 Cuprate Contributors
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
\ No newline at end of file
diff --git a/common/src/hardforks.rs b/common/src/hardforks.rs
new file mode 100644
index 00000000..ca014cb8
--- /dev/null
+++ b/common/src/hardforks.rs
@@ -0,0 +1,250 @@
+use chrono::NaiveDateTime;
+
+use crate::network::Network;
+
+// this function blindly unwraps
+// SAFETY: only call when you know the timestamp is good
+fn time_from_timestamp(stamp: i64) -> NaiveDateTime {
+ NaiveDateTime::from_timestamp_opt(stamp, 0).unwrap()
+}
+
+fn get_hard_forks(network: Network) -> [(u8, u64, NaiveDateTime); 16] {
+ match network {
+ Network::MainNet => {
+ [
+ // | version | Height | TimeStamp | *timestamp is when fork height was decided
+ (1, 1, time_from_timestamp(1341378000)),
+ (2, 1009827, time_from_timestamp(1442763710)),
+ (3, 1141317, time_from_timestamp(1458558528)),
+ (4, 1220516, time_from_timestamp(1483574400)),
+ (5, 1288616, time_from_timestamp(1489520158)),
+ (6, 1400000, time_from_timestamp(1503046577)),
+ (7, 1546000, time_from_timestamp(1521303150)),
+ (8, 1685555, time_from_timestamp(1535889547)),
+ (9, 1686275, time_from_timestamp(1535889548)),
+ (10, 1788000, time_from_timestamp(1549792439)),
+ (11, 1788720, time_from_timestamp(1550225678)),
+ (12, 1978433, time_from_timestamp(1571419280)),
+ (13, 2210000, time_from_timestamp(1598180817)),
+ (14, 2210720, time_from_timestamp(1598180818)),
+ (15, 2688888, time_from_timestamp(1656629117)),
+ (16, 2689608, time_from_timestamp(1656629118)),
+ ]
+ }
+ Network::TestNet => [
+ (1, 1, time_from_timestamp(1341378000)),
+ (2, 624634, time_from_timestamp(1445355000)),
+ (3, 800500, time_from_timestamp(1472415034)),
+ (4, 801219, time_from_timestamp(1472415035)),
+ (5, 802660, time_from_timestamp(1472415036 + 86400 * 180)),
+ (6, 971400, time_from_timestamp(1501709789)),
+ (7, 1057027, time_from_timestamp(1512211236)),
+ (8, 1057058, time_from_timestamp(1533211200)),
+ (9, 1057778, time_from_timestamp(1533297600)),
+ (10, 1154318, time_from_timestamp(1550153694)),
+ (11, 1155038, time_from_timestamp(1550225678)),
+ (12, 1308737, time_from_timestamp(1569582000)),
+ (13, 1543939, time_from_timestamp(1599069376)),
+ (14, 1544659, time_from_timestamp(1599069377)),
+ (15, 1982800, time_from_timestamp(1652727000)),
+ (16, 1983520, time_from_timestamp(1652813400)),
+ ],
+ Network::StageNet => [
+ (1, 1, time_from_timestamp(1341378000)),
+ (2, 32000, time_from_timestamp(1521000000)),
+ (3, 33000, time_from_timestamp(1521120000)),
+ (4, 34000, time_from_timestamp(1521240000)),
+ (5, 35000, time_from_timestamp(1521360000)),
+ (6, 36000, time_from_timestamp(1521480000)),
+ (7, 37000, time_from_timestamp(1521600000)),
+ (8, 176456, time_from_timestamp(1537821770)),
+ (9, 177176, time_from_timestamp(1537821771)),
+ (10, 269000, time_from_timestamp(1550153694)),
+ (11, 269720, time_from_timestamp(1550225678)),
+ (12, 454721, time_from_timestamp(1571419280)),
+ (13, 675405, time_from_timestamp(1598180817)),
+ (14, 676125, time_from_timestamp(1598180818)),
+ (15, 1151000, time_from_timestamp(1656629117)),
+ (16, 1151720, time_from_timestamp(1656629118)),
+ ],
+ }
+}
+
+pub struct HardForks {
+ hard_forks: [(u8, u64, NaiveDateTime); 16],
+}
+
+impl HardForks {
+ pub fn new(network: Network) -> Self {
+ HardForks {
+ hard_forks: get_hard_forks(network),
+ }
+ }
+
+ pub fn get_ideal_version_from_height(&self, height: u64) -> u8 {
+ for hf in self.hard_forks.iter().rev() {
+ if height >= hf.1 {
+ return hf.0;
+ }
+ }
+ 0
+ }
+
+ pub fn get_earliest_ideal_height_for_version(&self, version: u8) -> Option<u64> {
+ if self.hard_forks.len() < version as usize {
+ None
+ } else if version == 0 {
+ Some(0)
+ } else {
+ Some(self.hard_forks[(version - 1) as usize].1)
+ }
+ }
+
+ pub fn get_ideal_version(&self) -> u8 {
+ self.hard_forks.last().expect("This is not empty").0
+ }
+}
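+
+// Illustrative usage sketch (values taken from the mainnet table above):
+//
+//     let hfs = HardForks::new(Network::MainNet);
+//     assert_eq!(hfs.get_ideal_version(), 16);
+//     assert_eq!(hfs.get_ideal_version_from_height(1009827), 2);
+//     assert_eq!(hfs.get_earliest_ideal_height_for_version(2), Some(1009827));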
+
+#[cfg(test)]
+mod tests {
+ use crate::network::Network;
+
+ use super::HardForks;
+
+ const MAIN_NET_FORKS: [u64; 16] = [
+ 1, 1009827, 1141317, 1220516, 1288616, 1400000, 1546000, 1685555, 1686275, 1788000,
+ 1788720, 1978433, 2210000, 2210720, 2688888, 2689608,
+ ];
+ const TEST_NET_FORKS: [u64; 16] = [
+ 1, 624634, 800500, 801219, 802660, 971400, 1057027, 1057058, 1057778, 1154318, 1155038,
+ 1308737, 1543939, 1544659, 1982800, 1983520,
+ ];
+ const STAGE_NET_FORKS: [u64; 16] = [
+ 1, 32000, 33000, 34000, 35000, 36000, 37000, 176456, 177176, 269000, 269720, 454721,
+ 675405, 676125, 1151000, 1151720,
+ ];
+
+ #[test]
+ fn get_ideal_version() {
+ let hardforks = HardForks::new(Network::MainNet);
+
+ let version = hardforks.get_ideal_version();
+ assert_eq!(version as usize, MAIN_NET_FORKS.len());
+ assert_eq!(version as usize, TEST_NET_FORKS.len());
+ assert_eq!(version as usize, STAGE_NET_FORKS.len());
+
+ let height = hardforks
+ .get_earliest_ideal_height_for_version(version)
+ .unwrap();
+ let got_version = hardforks.get_ideal_version_from_height(height);
+ assert_eq!(version, got_version);
+ }
+
+ #[test]
+ fn get_earliest_ideal_height_for_version_mainnet() {
+ let hardforks = HardForks::new(Network::MainNet);
+
+ for (height, version) in MAIN_NET_FORKS.iter().zip(1..MAIN_NET_FORKS.len() as u8) {
+ assert_eq!(
+ hardforks
+ .get_earliest_ideal_height_for_version(version)
+ .unwrap(),
+ *height
+ );
+ assert_eq!(
+ hardforks
+ .get_earliest_ideal_height_for_version(version)
+ .unwrap(),
+ *height
+ );
+ }
+ assert!(hardforks
+ .get_earliest_ideal_height_for_version(MAIN_NET_FORKS.len() as u8 + 1)
+ .is_none())
+ }
+
+ #[test]
+ fn get_earliest_ideal_height_for_version_testnet() {
+ let hardforks = HardForks::new(Network::TestNet);
+
+ for (height, version) in TEST_NET_FORKS.iter().zip(1..TEST_NET_FORKS.len() as u8) {
+ assert_eq!(
+ hardforks
+ .get_earliest_ideal_height_for_version(version)
+ .unwrap(),
+ *height
+ );
+ assert_eq!(
+ hardforks
+ .get_earliest_ideal_height_for_version(version)
+ .unwrap(),
+ *height
+ );
+ }
+ assert!(hardforks
+ .get_earliest_ideal_height_for_version(TEST_NET_FORKS.len() as u8 + 1)
+ .is_none())
+ }
+
+ #[test]
+ fn get_earliest_ideal_height_for_version_stagenet() {
+ let hardforks = HardForks::new(Network::StageNet);
+
+ for (height, version) in STAGE_NET_FORKS.iter().zip(1..STAGE_NET_FORKS.len() as u8) {
+ assert_eq!(
+ hardforks
+ .get_earliest_ideal_height_for_version(version)
+ .unwrap(),
+ *height
+ );
+ assert_eq!(
+ hardforks
+ .get_earliest_ideal_height_for_version(version)
+ .unwrap(),
+ *height
+ );
+ }
+ assert!(hardforks
+ .get_earliest_ideal_height_for_version(STAGE_NET_FORKS.len() as u8 + 1)
+ .is_none())
+ }
+
+ #[test]
+ fn get_ideal_version_from_height_mainnet() {
+ let hardforks = HardForks::new(Network::MainNet);
+
+ for (height, version) in MAIN_NET_FORKS.iter().zip(1..MAIN_NET_FORKS.len() as u8) {
+ assert_eq!(hardforks.get_ideal_version_from_height(*height), version);
+ assert_eq!(
+ hardforks.get_ideal_version_from_height(*height - 1),
+ version - 1
+ );
+ }
+ }
+
+ #[test]
+ fn get_ideal_version_from_height_testnet() {
+ let hardforks = HardForks::new(Network::TestNet);
+
+ for (height, version) in TEST_NET_FORKS.iter().zip(1..TEST_NET_FORKS.len() as u8) {
+ assert_eq!(hardforks.get_ideal_version_from_height(*height), version);
+ assert_eq!(
+ hardforks.get_ideal_version_from_height(*height - 1),
+ version - 1
+ );
+ }
+ }
+
+ #[test]
+ fn get_ideal_version_from_height_stagenet() {
+ let hardforks = HardForks::new(Network::StageNet);
+
+ for (height, version) in STAGE_NET_FORKS.iter().zip(1..STAGE_NET_FORKS.len() as u8) {
+ assert_eq!(hardforks.get_ideal_version_from_height(*height), version);
+ assert_eq!(
+ hardforks.get_ideal_version_from_height(*height - 1),
+ version - 1
+ );
+ }
+ }
+}
diff --git a/common/src/lib.rs b/common/src/lib.rs
new file mode 100644
index 00000000..14fa114b
--- /dev/null
+++ b/common/src/lib.rs
@@ -0,0 +1,14 @@
+pub mod hardforks;
+pub mod network;
+pub mod pruning;
+
+pub use hardforks::HardForks;
+pub use network::Network;
+pub use pruning::{PruningError, PruningSeed};
+
+pub const CRYPTONOTE_MAX_BLOCK_NUMBER: u64 = 500000000;
+
+// pruning
+pub const CRYPTONOTE_PRUNING_LOG_STRIPES: u32 = 3;
+pub const CRYPTONOTE_PRUNING_STRIPE_SIZE: u64 = 4096;
+pub const CRYPTONOTE_PRUNING_TIP_BLOCKS: u64 = 5500;
diff --git a/common/src/network.rs b/common/src/network.rs
new file mode 100644
index 00000000..0e6c4e1e
--- /dev/null
+++ b/common/src/network.rs
@@ -0,0 +1,26 @@
+const MAINNET_NETWORK_ID: [u8; 16] = [
+ 0x12, 0x30, 0xF1, 0x71, 0x61, 0x04, 0x41, 0x61, 0x17, 0x31, 0x00, 0x82, 0x16, 0xA1, 0xA1, 0x10,
+];
+const TESTNET_NETWORK_ID: [u8; 16] = [
+ 0x12, 0x30, 0xF1, 0x71, 0x61, 0x04, 0x41, 0x61, 0x17, 0x31, 0x00, 0x82, 0x16, 0xA1, 0xA1, 0x11,
+];
+const STAGENET_NETWORK_ID: [u8; 16] = [
+ 0x12, 0x30, 0xF1, 0x71, 0x61, 0x04, 0x41, 0x61, 0x17, 0x31, 0x00, 0x82, 0x16, 0xA1, 0xA1, 0x12,
+];
+
+#[derive(Debug, Clone, Copy)]
+pub enum Network {
+ MainNet,
+ TestNet,
+ StageNet,
+}
+
+impl Network {
+ pub fn network_id(&self) -> [u8; 16] {
+ match self {
+ Network::MainNet => MAINNET_NETWORK_ID,
+ Network::TestNet => TESTNET_NETWORK_ID,
+ Network::StageNet => STAGENET_NETWORK_ID,
+ }
+ }
+}
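+
+// Illustrative sketch: the three networks share the same network ID apart from the
+// last byte (0x10 / 0x11 / 0x12, per the constants above), e.g.
+//
+//     assert_eq!(Network::MainNet.network_id()[15], 0x10);
+//     assert_eq!(Network::StageNet.network_id()[15], 0x12);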
diff --git a/common/src/pruning.rs b/common/src/pruning.rs
new file mode 100644
index 00000000..13e85506
--- /dev/null
+++ b/common/src/pruning.rs
@@ -0,0 +1,406 @@
+//! # Pruning Mechanism for Monero
+//!
+//! This module provides an implementation of the pruning mechanism used in Monero.
+//! The main data structure, `PruningSeed`, encapsulates the logic for creating and manipulating pruning seeds,
+//! which determine the set of blocks to be pruned from the blockchain.
+//!
+//! `PruningSeed` also contains a method for checking that a pruning seed is valid under Monero's
+//! rules (the blockchain must be split into exactly 8 stripes):
+//!
+//! ```rust
+//! use cuprate_common::pruning::PruningSeed;
+//!
+//! let seed: u32 = 386; // the seed you want to check
+//! match PruningSeed::try_from(seed) {
+//! Ok(seed) => seed, // seed is valid
+//! Err(e) => panic!("seed is invalid")
+//! };
+//! ```
+//!
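+//! A pruning seed of 0 means no pruning. Under the current Monero rules the valid
+//! non-zero seeds are 384..=391 (`log_stripes` = 3, stripes 1 to 8), for example:
+//!
+//! ```rust
+//! use cuprate_common::pruning::PruningSeed;
+//!
+//! assert!(PruningSeed::try_from(0_u32).is_ok()); // 0: no pruning
+//! assert!(PruningSeed::try_from(384_u32).is_ok()); // stripe 1
+//! assert!(PruningSeed::try_from(391_u32).is_ok()); // stripe 8
+//! assert!(PruningSeed::try_from(392_u32).is_err()); // stripe out of range
+//! ```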
+
+use thiserror::Error;
+
+use super::{
+ CRYPTONOTE_MAX_BLOCK_NUMBER, CRYPTONOTE_PRUNING_LOG_STRIPES, CRYPTONOTE_PRUNING_STRIPE_SIZE,
+ CRYPTONOTE_PRUNING_TIP_BLOCKS,
+};
+
+const PRUNING_SEED_LOG_STRIPES_SHIFT: u32 = 7;
+const PRUNING_SEED_STRIPE_SHIFT: u32 = 0;
+const PRUNING_SEED_LOG_STRIPES_MASK: u32 = 0x7;
+const PRUNING_SEED_STRIPE_MASK: u32 = 127;
+
+#[derive(Debug, Error)]
+pub enum PruningError {
+ #[error("log_stripes is out of range")]
+ LogStripesOutOfRange,
+ #[error("Stripe is out of range")]
+ StripeOutOfRange,
+ #[error("The block height is greater than `CRYPTONOTE_MAX_BLOCK_NUMBER`")]
+ BlockHeightTooLarge,
+ #[error("The blockchain height is greater than `CRYPTONOTE_MAX_BLOCK_NUMBER`")]
+ BlockChainHeightTooLarge,
+ #[error("The calculated height is smaller than the block height entered")]
+ CalculatedHeightSmallerThanEnteredBlock,
+ #[error("The entered seed has incorrect log stripes")]
+ SeedDoesNotHaveCorrectLogStripes,
+}
+
+/// A Monero pruning seed which has methods to get the next pruned/unpruned block.
+///
+// Internally we use an `Option<u32>`: `None` represents a seed of 0, which means
+// no pruning will take place.
+pub struct PruningSeed(Option<u32>);
+
+impl PruningSeed {
+ /// Creates a new pruning seed from a `stripe` and `log_stripes`
+ ///
+ /// ### What is a `stripe`
+ ///
+ /// A stripe is the part of the blockchain this peer will keep.
+ ///
+ /// Monero, when pruning, will split the blockchain into multiple
+ /// "stripes"; that amount is currently 8, and each pruned peer
+ /// will keep one of those 8 stripes.
+ ///
+ /// ### What is `log_stripes`
+ ///
+ /// `log_stripes` is log2 of the amount of stripes used.
+ ///
+ /// For Monero, currently, that amount is 8 so `log_stripes` will
+ /// be 3.
+ ///
+ /// ---------------------------------------------------------------
+ ///
+ /// *Note: this function allows you to make invalid seeds; this is done
+ /// to allow the specifics of pruning to change in the future. To make
+ /// a valid seed you currently MUST pass in a number 1 to 8 for `stripe`
+ /// and 3 for `log_stripes`.*
+ ///
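+ /// ### Example (illustrative)
+ ///
+ /// ```rust
+ /// use cuprate_common::pruning::PruningSeed;
+ ///
+ /// // a valid seed: keep stripe 4 of the 8 stripes (log_stripes = 3)
+ /// let seed = PruningSeed::new(4, 3).unwrap();
+ /// ```
+ ///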
+ pub fn new(stripe: u32, log_stripes: u32) -> Result<PruningSeed, PruningError> {
+ if !(log_stripes <= PRUNING_SEED_LOG_STRIPES_MASK) {
+ Err(PruningError::LogStripesOutOfRange)
+ } else if !(stripe > 0 && stripe <= (1 << log_stripes)) {
+ Err(PruningError::StripeOutOfRange)
+ } else {
+ Ok(PruningSeed(Some(
+ (log_stripes << PRUNING_SEED_LOG_STRIPES_SHIFT)
+ | ((stripe - 1) << PRUNING_SEED_STRIPE_SHIFT),
+ )))
+ }
+ }
+
+ // Gets log2 of the total amount of stripes this seed is using.
+ fn get_log_stripes(&self) -> Option<u32> {
+ let seed: u32 = self.0?;
+ Some((seed >> PRUNING_SEED_LOG_STRIPES_SHIFT) & PRUNING_SEED_LOG_STRIPES_MASK)
+ }
+
+ // Gets the specific stripe of this seed.
+ fn get_stripe(&self) -> Option<u32> {
+ let seed: u32 = self.0?;
+ Some(1 + ((seed >> PRUNING_SEED_STRIPE_SHIFT) & PRUNING_SEED_STRIPE_MASK))
+ }
+
+ /// Gets the next unpruned block for a given `block_height` and `blockchain_height`
+ ///
+ /// Each seed will store, in a cyclic manner, a portion of blocks while discarding
+ /// the ones that are outside its stripe. This function finds the next height at
+ /// which this seed will start storing blocks.
+ ///
+ /// ### Errors
+ ///
+ /// This function will return an Error if the inputted `block_height` or
+ /// `blockchain_height` is greater than [`CRYPTONOTE_MAX_BLOCK_NUMBER`].
+ ///
+ /// This function will also error if `block_height` > `blockchain_height`
+ ///
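+ /// ### Example (illustrative)
+ ///
+ /// ```rust
+ /// use cuprate_common::pruning::PruningSeed;
+ ///
+ /// let seed = PruningSeed::new(2, 3).unwrap();
+ /// // block 0 is in stripe 1, which this seed prunes, so the next un-pruned
+ /// // block is the start of stripe 2: height 4096.
+ /// assert_eq!(seed.get_next_unpruned_block(0, 76437863).unwrap(), 4096);
+ /// ```
+ ///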
+ pub fn get_next_unpruned_block(
+ &self,
+ block_height: u64,
+ blockchain_height: u64,
+ ) -> Result<u64, PruningError> {
+ if block_height > CRYPTONOTE_MAX_BLOCK_NUMBER || block_height > blockchain_height {
+ Err(PruningError::BlockHeightTooLarge)
+ } else if blockchain_height > CRYPTONOTE_MAX_BLOCK_NUMBER {
+ Err(PruningError::BlockChainHeightTooLarge)
+ } else {
+ let Some(seed_stripe) = self.get_stripe() else {
+ // If `get_stripe` returns None, this seed does no pruning, so the next
+ // unpruned block is the one passed in.
+ return Ok(block_height);
+ };
+ if block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height {
+ // If we are within `CRYPTONOTE_PRUNING_TIP_BLOCKS` of the chain we should
+ // not prune blocks.
+ return Ok(block_height);
+ }
+ let seed_log_stripes = self
+ .get_log_stripes()
+ .unwrap_or(CRYPTONOTE_PRUNING_LOG_STRIPES);
+ let block_pruning_stripe = get_block_pruning_stripe(block_height, blockchain_height, seed_log_stripes)
+ .expect("We just checked if `block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height`");
+ if seed_stripe == block_pruning_stripe {
+ // if we have the same stripe as a block that means we keep the block so
+ // the entered block is the next un-pruned one.
+ return Ok(block_height);
+ }
+
+ // cycles: how many times each seed has stored blocks; when all seeds have
+ // stored blocks, that's 1 cycle
+ let cycles = (block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) >> seed_log_stripes;
+ // if our seed comes before the block's seed in a cycle, we have already passed our
+ // seed this cycle and need to start the next one
+ let cycles_start = cycles
+ + if seed_stripe > block_pruning_stripe {
+ 0
+ } else {
+ 1
+ };
+
+ // amt_of_cycles * blocks in a cycle + how many blocks through a cycle until the seed starts storing blocks
+ let calculated_height = cycles_start
+ * (CRYPTONOTE_PRUNING_STRIPE_SIZE << seed_log_stripes)
+ + (seed_stripe as u64 - 1) * CRYPTONOTE_PRUNING_STRIPE_SIZE;
+ if calculated_height + CRYPTONOTE_PRUNING_TIP_BLOCKS > blockchain_height {
+ // if our calculated height is within the tip blocks, the start of the tip blocks will be the next un-pruned
+ return Ok(blockchain_height.saturating_sub(CRYPTONOTE_PRUNING_TIP_BLOCKS));
+ }
+ if calculated_height < block_height {
+ return Err(PruningError::CalculatedHeightSmallerThanEnteredBlock);
+ }
+ Ok(calculated_height)
+ }
+ }
+
+ /// Gets the next pruned block for a given `block_height` and `blockchain_height`
+ ///
+ /// Each seed will store, in a cyclic manner, a portion of blocks while discarding
+ /// the ones that are outside its stripe. This function finds the next height at
+ /// which this seed will start pruning blocks.
+ ///
+ /// ### Errors
+ ///
+ /// This function will return an Error if the inputted `block_height` or
+ /// `blockchain_height` is greater than [`CRYPTONOTE_MAX_BLOCK_NUMBER`].
+ ///
+ /// This function will also error if `block_height` > `blockchain_height`
+ ///
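+ /// ### Example (illustrative)
+ ///
+ /// ```rust
+ /// use cuprate_common::pruning::PruningSeed;
+ ///
+ /// let seed = PruningSeed::new(2, 3).unwrap();
+ /// // block 0 is in stripe 1, which this seed does not keep, so it is
+ /// // already the next pruned block.
+ /// assert_eq!(seed.get_next_pruned_block(0, 76437863).unwrap(), 0);
+ /// ```
+ ///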
+ pub fn get_next_pruned_block(
+ &self,
+ block_height: u64,
+ blockchain_height: u64,
+ ) -> Result<u64, PruningError> {
+ let Some(seed_stripe) = self.get_stripe() else {
+ // If `get_stripe` returns None, this seed does no pruning, so there is no
+ // next pruned block and we return the blockchain_height.
+ return Ok(blockchain_height);
+ };
+ if block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height {
+ // If we are within `CRYPTONOTE_PRUNING_TIP_BLOCKS` of the chain we should
+ // not prune blocks.
+ return Ok(blockchain_height);
+ }
+ let seed_log_stripes = self
+ .get_log_stripes()
+ .unwrap_or(CRYPTONOTE_PRUNING_LOG_STRIPES);
+ let block_pruning_stripe = get_block_pruning_stripe(block_height, blockchain_height, seed_log_stripes)
+ .expect("We just checked if `block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height`");
+ if seed_stripe != block_pruning_stripe {
+ // if our stripe != the block's stripe that means we prune that block
+ return Ok(block_height);
+ }
+
+ // We can get the end of our "non-pruning" cycle by getting the first un-pruned block height of the stripe after ours,
+ // so we calculate the next un-pruned block for the next stripe and return it as our next pruned block
+ let next_stripe = 1 + (seed_stripe & ((1 << seed_log_stripes) - 1));
+ let seed = PruningSeed::new(next_stripe, seed_log_stripes)?;
+ seed.get_next_unpruned_block(block_height, blockchain_height)
+ }
+}
+
+impl TryFrom<u32> for PruningSeed {
+ type Error = PruningError;
+
+ fn try_from(value: u32) -> Result<Self, Self::Error> {
+ if value == 0 {
+ Ok(PruningSeed(None))
+ } else {
+ let seed = Self(Some(value));
+ let log_stripes = seed.get_log_stripes().expect("This will only return None if the inner value is None which will only happen if the seed is 0 but we checked for that");
+ if log_stripes != CRYPTONOTE_PRUNING_LOG_STRIPES {
+ return Err(PruningError::SeedDoesNotHaveCorrectLogStripes);
+ }
+ if seed.get_stripe().expect("same as above") > (1 << log_stripes) {
+ return Err(PruningError::StripeOutOfRange);
+ }
+ Ok(seed)
+ }
+ }
+}
+
+fn get_block_pruning_stripe(
+ block_height: u64,
+ blockchain_height: u64,
+ log_stripe: u32,
+) -> Option<u32> {
+ if block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height {
+ None
+ } else {
+ Some(
+ (((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) & ((1 << log_stripe) as u64 - 1)) + 1)
+ as u32, // it's trivial to prove it's ok to use `as` here
+ )
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::pruning::{get_block_pruning_stripe, PruningSeed};
+
+ use super::CRYPTONOTE_PRUNING_LOG_STRIPES;
+
+ fn make_all_pruning_seeds() -> Vec<PruningSeed> {
+ let possible_stripes = 1..(1 << CRYPTONOTE_PRUNING_LOG_STRIPES);
+ possible_stripes
+ .map(|stripe| PruningSeed::new(stripe, CRYPTONOTE_PRUNING_LOG_STRIPES).unwrap())
+ .collect()
+ }
+
+ #[test]
+ fn from_u32_for_pruning_seed() {
+ let good_seeds = 384..=391;
+ for seed in good_seeds {
+ assert!(PruningSeed::try_from(seed).is_ok());
+ }
+ let bad_seeds = [383, 392];
+ for seed in bad_seeds {
+ assert!(PruningSeed::try_from(seed).is_err());
+ }
+ }
+
+ #[test]
+ fn make_invalid_pruning_seeds() {
+ let invalid_stripes = [0, (1 << CRYPTONOTE_PRUNING_LOG_STRIPES) + 1];
+
+ for stripe in invalid_stripes {
+ assert!(PruningSeed::new(stripe, CRYPTONOTE_PRUNING_LOG_STRIPES).is_err());
+ }
+ }
+
+ #[test]
+ fn get_pruning_log_stripe() {
+ let all_valid_seeds = make_all_pruning_seeds();
+ for seed in all_valid_seeds.iter() {
+ assert_eq!(seed.get_log_stripes().unwrap(), 3)
+ }
+ }
+
+ #[test]
+ fn get_pruning_stripe() {
+ let all_valid_seeds = make_all_pruning_seeds();
+ for (i, seed) in all_valid_seeds.iter().enumerate() {
+ assert_eq!(seed.get_stripe().unwrap(), i as u32 + 1)
+ }
+ }
+
+ #[test]
+ fn blocks_pruning_stripe() {
+ let blockchain_height = 76437863;
+
+ for i in 0_u32..8 {
+ assert_eq!(
+ get_block_pruning_stripe(
+ (i * 4096) as u64,
+ blockchain_height,
+ CRYPTONOTE_PRUNING_LOG_STRIPES
+ )
+ .unwrap(),
+ i + 1
+ );
+ }
+
+ for i in 0_u32..8 {
+ assert_eq!(
+ get_block_pruning_stripe(
+ 32768 + (i * 4096) as u64,
+ blockchain_height,
+ CRYPTONOTE_PRUNING_LOG_STRIPES
+ )
+ .unwrap(),
+ i + 1
+ );
+ }
+
+ for i in 1_u32..8 {
+ assert_eq!(
+ get_block_pruning_stripe(
+ 32767 + (i * 4096) as u64,
+ blockchain_height,
+ CRYPTONOTE_PRUNING_LOG_STRIPES
+ )
+ .unwrap(),
+ i
+ );
+ }
+
+ // Block shouldn't be pruned
+ assert!(get_block_pruning_stripe(
+ blockchain_height - 5500,
+ blockchain_height,
+ CRYPTONOTE_PRUNING_LOG_STRIPES
+ )
+ .is_none());
+ }
+
+ #[test]
+ fn next_unpruned_block() {
+ let all_valid_seeds = make_all_pruning_seeds();
+ let blockchain_height = 76437863;
+
+ for (i, seed) in all_valid_seeds.iter().enumerate() {
+ assert_eq!(
+ seed.get_next_unpruned_block(0, blockchain_height).unwrap(),
+ i as u64 * 4096
+ )
+ }
+
+ for (i, seed) in all_valid_seeds.iter().enumerate() {
+ assert_eq!(
+ seed.get_next_unpruned_block((i as u64 + 1) * 4096, blockchain_height)
+ .unwrap(),
+ i as u64 * 4096 + 32768
+ )
+ }
+
+ for (i, seed) in all_valid_seeds.iter().enumerate() {
+ assert_eq!(
+ seed.get_next_unpruned_block((i as u64 + 8) * 4096, blockchain_height)
+ .unwrap(),
+ i as u64 * 4096 + 32768
+ )
+ }
+
+ for (_, seed) in all_valid_seeds.iter().enumerate() {
+ assert_eq!(
+ seed.get_next_unpruned_block(76437863 - 1, blockchain_height)
+ .unwrap(),
+ 76437863 - 1
+ )
+ }
+
+ let zero_seed = PruningSeed(None);
+
+ assert_eq!(
+ zero_seed.get_next_unpruned_block(33443, 5565445).unwrap(),
+ 33443
+ );
+
+ let seed = PruningSeed(Some(384));
+
+ // the next unpruned block is the first tip block
+ assert_eq!(seed.get_next_unpruned_block(5000, 11000).unwrap(), 5500)
+ }
+
+ // TODO: next_pruned_block
+}
diff --git a/database/src/table.rs b/database/src/table.rs
index ab1e4935..aaa71a28 100644
--- a/database/src/table.rs
+++ b/database/src/table.rs
@@ -1,33 +1,37 @@
//! ### Table module
-//! This module contains the definition of the [`Table`] and [`DupTable`] trait, and the actual tables used in the database.
+//! This module contains the definition of the [`Table`] and [`DupTable`] trait, and the actual tables used in the database.
//! [`DupTable`] are just a trait used to define that they support DUPSORT|DUPFIXED operation (as of now we don't know the equivalent for HSE).
//! All tables are defined with docs explaining its purpose, what types are the key and data.
-//! For more details please look at Cuprate's book :
+//! For more details please look at Cuprate's book :
-use monero::{Hash, Block, blockdata::transaction::KeyImage};
-use bincode::{enc::Encode,de::Decode};
-use crate::{types::{BlockMetadata, /*OutAmountIdx,*/ /*KeyImage,*/ TxOutputIdx, /*OutTx,*/ AltBlock, TxIndex, TransactionPruned, /*RctOutkey,*/ OutputMetadata}, encoding::Compat};
+use crate::{
+ encoding::Compat,
+ types::{
+ /*OutTx,*/ AltBlock, BlockMetadata, /*RctOutkey,*/ OutputMetadata,
+ TransactionPruned, TxIndex, /*OutAmountIdx,*/ /*KeyImage,*/ TxOutputIdx,
+ },
+};
+use bincode::{de::Decode, enc::Encode};
+use monero::{blockdata::transaction::KeyImage, Block, Hash};
-/// A trait implementing a table interaction for the database. It is implemented to an empty struct to specify the name and table's associated types. These associated
+/// A trait implementing a table interaction for the database. It is implemented to an empty struct to specify the name and table's associated types. These associated
/// types are used to simplify deserialization process.
pub trait Table: Send + Sync + 'static + Clone {
-
- // name of the table
- const TABLE_NAME: &'static str;
+ // name of the table
+ const TABLE_NAME: &'static str;
- // Definition of a key & value types of the database
- type Key: Encode + Decode;
- type Value: Encode + Decode;
+ // Definition of a key & value types of the database
+ type Key: Encode + Decode;
+ type Value: Encode + Decode;
}
/// A trait implementing a table with duplicated data support.
pub trait DupTable: Table {
-
- // Subkey of the table (prefix of the data)
- type SubKey: Encode + Decode;
+ // Subkey of the table (prefix of the data)
+ type SubKey: Encode + Decode;
}
-/// This declarative macro declare a new empty struct and impl the specified name, and corresponding types.
+/// This declarative macro declares a new empty struct and implements the table trait on it with the specified name and corresponding types.
macro_rules! impl_table {
( $(#[$docs:meta])* $table:ident , $key:ty , $value:ty ) => {
#[derive(Clone)]
@@ -58,70 +62,120 @@ macro_rules! impl_duptable {
// ----- BLOCKS -----
impl_duptable!(
- /// `blockhash` is table defining a relation between the hash of a block and its height. Its primary use is to quickly find block's hash by its height.
- blockhash, (), Compat, u64);
+ /// `blockhash` is a table defining a relation between the hash of a block and its height. Its primary use is to quickly find a block's hash by its height.
+ blockhash,
+ (),
+ Compat<Hash>,
+ u64
+);
impl_duptable!(
- /// `blockmetadata` store block metadata alongside their corresponding Hash. The blocks metadata can contains the total_coins_generated, weight, long_term_block_weight & cumulative RingCT
- blockmetadata, (), u64, BlockMetadata);
-
+ /// `blockmetadata` stores block metadata alongside the corresponding block Hash. The block metadata can contain the total_coins_generated, weight, long_term_block_weight & cumulative RingCT
+ blockmetadata,
+ (),
+ u64,
+ BlockMetadata
+);
+
impl_table!(
- /// `blockbody` store blocks' bodies along their Hash. The blocks body contains the coinbase transaction and its corresponding mined transactions' hashes.
- blocks, u64, Compat);
+ /// `blockbody` stores blocks' bodies along with their Hash. The block body contains the coinbase transaction and its corresponding mined transactions' hashes.
+ blocks,
+ u64,
+ Compat<Block>
+);
/*
impl_table!(
- /// `blockhfversion` keep track of block's hard fork version. If an outdated node continue to run after a hard fork, it needs to know, after updating, what blocks needs to be update.
- blockhfversion, u64, u8);
+ /// `blockhfversion` keep track of block's hard fork version. If an outdated node continue to run after a hard fork, it needs to know, after updating, what blocks needs to be update.
+ blockhfversion, u64, u8);
*/
-
-impl_table!(
- /// `altblock` is a table that permits the storage of blocks from an alternative chain, which may cause a re-org. These blocks can be fetch by their corresponding hash.
- altblock, Compat, AltBlock);
+
+impl_table!(
+ /// `altblock` is a table that permits the storage of blocks from an alternative chain, which may cause a re-org. These blocks can be fetched by their corresponding hash.
+ altblock,
+ Compat<Hash>,
+ AltBlock
+);
// ------- TXNs -------
impl_table!(
- /// `txspruned` is table storing TransactionPruned (or Pruned Tx). These can be fetch by the corresponding Transaction ID.
- txspruned, u64, TransactionPruned);
-
-impl_table!(
- /// `txsprunable` is a table storing the Prunable part of transactions (Signatures and RctSig), stored as raw bytes. These can be fetch by the corresponding Transaction ID.
- txsprunable, u64, Vec);
-
-impl_duptable!(
- /// `txsprunablehash` is a table storing hashes of prunable part of transactions. These hash can be fetch by the corresponding Transaction ID.
- txsprunablehash, u64, (), Compat);
+ /// `txspruned` is a table storing TransactionPruned (or Pruned Tx). These can be fetched by the corresponding Transaction ID.
+ txspruned,
+ u64,
+ TransactionPruned
+);
impl_table!(
- /// `txsprunabletip` is a table used for optimization purpose. It defines at which block's height this transaction belong as long as the block is with Tip blocks. These can be fetch by the corresponding Transaction ID.
- txsprunabletip, u64, u64);
-
-impl_duptable!(
- /// `txsoutputs` is a table storing output indices used in a transaction. These can be fetch by the corresponding Transaction ID.
- txsoutputs, u64, (), TxOutputIdx);
+ /// `txsprunable` is a table storing the Prunable part of transactions (Signatures and RctSig), stored as raw bytes. These can be fetched by the corresponding Transaction ID.
+ txsprunable,
+ u64,
+ Vec<u8>
+);
impl_duptable!(
- /// `txsidentifier` is a table defining a relation between the hash of a transaction and its transaction Indexes. Its primarly used to quickly find tx's ID by its hash.
- txsidentifier, Compat, (), TxIndex);
-
+ /// `txsprunablehash` is a table storing hashes of the prunable part of transactions. These hashes can be fetched by the corresponding Transaction ID.
+ txsprunablehash,
+ u64,
+ (),
+ Compat<Hash>
+);
+
+impl_table!(
+ /// `txsprunabletip` is a table used for optimization purposes. It defines at which block's height this transaction belongs, as long as the block is within the Tip blocks. These can be fetched by the corresponding Transaction ID.
+ txsprunabletip,
+ u64,
+ u64
+);
+
+impl_duptable!(
+ /// `txsoutputs` is a table storing output indices used in a transaction. These can be fetched by the corresponding Transaction ID.
+ txsoutputs,
+ u64,
+ (),
+ TxOutputIdx
+);
+
+impl_duptable!(
+ /// `txsidentifier` is a table defining a relation between the hash of a transaction and its transaction Indexes. It is primarily used to quickly find a tx's ID by its hash.
+ txsidentifier,
+ Compat<Hash>,
+ (),
+ TxIndex
+);
+
// ---- OUTPUTS ----
impl_duptable!(
- /// `prerctoutputmetadata` is a duplicated table storing Pre-RingCT output's metadata. The key is the amount of this output, and the subkey is its amount idx.
- prerctoutputmetadata, u64, u64, OutputMetadata);
+ /// `prerctoutputmetadata` is a duplicated table storing Pre-RingCT output's metadata. The key is the amount of this output, and the subkey is its amount idx.
+ prerctoutputmetadata,
+ u64,
+ u64,
+ OutputMetadata
+);
impl_duptable!(
- /// `prerctoutputmetadata` is a table storing RingCT output's metadata. The key is the amount idx of this output since amount is always 0 for RingCT outputs.
- outputmetadata, (), u64, OutputMetadata);
+ /// `outputmetadata` is a table storing RingCT outputs' metadata. The key is the amount idx of this output, since the amount is always 0 for RingCT outputs.
+ outputmetadata,
+ (),
+ u64,
+ OutputMetadata
+);
// ---- SPT KEYS ----
impl_duptable!(
- /// `spentkeys`is a table storing every KeyImage that have been used to create decoys input. As these KeyImage can't be re used they need to marked.
- spentkeys, (), Compat, ());
+ /// `spentkeys` is a table storing every KeyImage that has been used to create a decoy input. As these KeyImages can't be reused, they need to be marked.
+ spentkeys,
+ (),
+ Compat<KeyImage>,
+ ()
+);
// ---- PROPERTIES ----
impl_table!(
- /// `spentkeys`is a table storing every KeyImage that have been used to create decoys input. As these KeyImage can't be re used they need to marked.
- properties, u32, u32);
\ No newline at end of file
+ /// `spentkeys` is a table storing every KeyImage that has been used to create a decoy input. As these KeyImages can't be reused, they need to be marked.
+ properties,
+ u32,
+ u32
+);
diff --git a/net/levin/src/bucket_sink.rs b/net/levin/src/bucket_sink.rs
index 0cbec964..50f82251 100644
--- a/net/levin/src/bucket_sink.rs
+++ b/net/levin/src/bucket_sink.rs
@@ -31,12 +31,22 @@ use crate::{Bucket, BucketError};
/// A BucketSink writes Bucket instances to the provided AsyncWrite target.
#[pin_project]
-pub struct BucketSink {
+pub struct BucketSink {
#[pin]
writer: W,
buffer: VecDeque,
}
+impl BucketSink {
+ /// Creates a new [`BucketSink`] from the given [`AsyncWrite`] writer.
+ pub fn new(writer: W) -> Self {
+ BucketSink {
+ writer,
+ buffer: VecDeque::with_capacity(2),
+ }
+ }
+}
+
impl Sink for BucketSink {
type Error = BucketError;
diff --git a/net/levin/src/lib.rs b/net/levin/src/lib.rs
index 72f45738..c9f08fdd 100644
--- a/net/levin/src/lib.rs
+++ b/net/levin/src/lib.rs
@@ -40,6 +40,8 @@ pub mod message_sink;
pub mod message_stream;
pub use header::BucketHead;
+pub use message_sink::MessageSink;
+pub use message_stream::MessageStream;
use std::fmt::Debug;
@@ -103,23 +105,29 @@ pub enum MessageType {
Request,
/// Response
Response,
+ /// Notification
+ Notification,
}
-impl From for header::Flags {
- fn from(val: MessageType) -> Self {
- match val {
- MessageType::Request => header::REQUEST,
- MessageType::Response => header::RESPONSE,
+impl MessageType {
+ /// Returns if the message requires a response
+ pub fn have_to_return_data(&self) -> bool {
+ match self {
+ MessageType::Request => true,
+ MessageType::Response | MessageType::Notification => false,
}
}
-}
-impl TryInto for header::Flags {
- type Error = BucketError;
- fn try_into(self) -> Result {
- if self.is_request() {
+ /// Returns the `MessageType` given the flags and have_to_return_data fields
+ pub fn from_flags_and_have_to_return(
+ flags: header::Flags,
+ have_to_return: bool,
+ ) -> Result<MessageType, BucketError> {
+ if flags.is_request() && have_to_return {
Ok(MessageType::Request)
- } else if self.is_response() {
+ } else if flags.is_request() {
+ Ok(MessageType::Notification)
+ } else if flags.is_response() && !have_to_return {
Ok(MessageType::Response)
} else {
Err(BucketError::UnknownFlags)
@@ -127,23 +135,26 @@ impl TryInto for header::Flags {
}
}
+impl From<MessageType> for header::Flags {
+ fn from(val: MessageType) -> Self {
+ match val {
+ MessageType::Request | MessageType::Notification => header::REQUEST,
+ MessageType::Response => header::RESPONSE,
+ }
+ }
+}
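+
+// Illustrative sketch of the mapping above (`REQUEST`/`RESPONSE` are the flag
+// constants from the `header` module):
+//
+//     let typ = MessageType::from_flags_and_have_to_return(header::REQUEST, false)?;
+//     assert!(matches!(typ, MessageType::Notification));
+//     assert!(!typ.have_to_return_data());
+//     let _flags: header::Flags = typ.into(); // maps back to header::REQUEST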
+
/// A levin body
pub trait LevinBody: Sized {
/// Decodes the message from the data in the header
- fn decode_message(
- buf: &[u8],
- typ: MessageType,
- have_to_return: bool,
- command: u32,
- ) -> Result;
+ fn decode_message(buf: &[u8], typ: MessageType, command: u32) -> Result<Self, BucketError>;
/// Encodes the message
///
/// returns:
/// return_code: i32,
/// command: u32,
- /// have_to_return: bool,
/// message_type: MessageType
- /// bytes: Bytes
- fn encode(&self) -> Result<(i32, u32, bool, MessageType, Bytes), BucketError>;
+ /// bytes: Vec<u8>
+ fn encode(&self) -> Result<(i32, u32, MessageType, Vec<u8>), BucketError>;
}
diff --git a/net/levin/src/message_sink.rs b/net/levin/src/message_sink.rs
index 1fef30cf..dfcf2765 100644
--- a/net/levin/src/message_sink.rs
+++ b/net/levin/src/message_sink.rs
@@ -32,12 +32,22 @@ use crate::LevinBody;
/// A Sink that converts levin messages to buckets and passes them onto the `BucketSink`
#[pin_project]
-pub struct MessageSink {
+pub struct MessageSink {
#[pin]
bucket_sink: BucketSink,
phantom: PhantomData,
}
+impl MessageSink {
+ /// Creates a new sink from the provided [`AsyncWrite`]
+ pub fn new(writer: W) -> Self {
+ MessageSink {
+ bucket_sink: BucketSink::new(writer),
+ phantom: PhantomData,
+ }
+ }
+}
+
impl Sink for MessageSink {
type Error = BucketError;
@@ -49,16 +59,19 @@ impl Sink for MessageSink
 fn start_send(self: Pin<&mut Self>, item: E) -> Result<(), Self::Error> {
- let (return_code, command, have_to_return_data, flags, body) = item.encode()?;
+ let (return_code, command, message_type, body) = item.encode()?;
let header = BucketHead::build(
body.len() as u64,
- have_to_return_data,
+ message_type.have_to_return_data(),
command,
- flags.into(),
+ message_type.into(),
return_code,
);
- let bucket = Bucket { header, body };
+ let bucket = Bucket {
+ header,
+ body: body.into(),
+ };
self.project().bucket_sink.start_send(bucket)
}
diff --git a/net/levin/src/message_stream.rs b/net/levin/src/message_stream.rs
index b5b351b0..ce680747 100644
--- a/net/levin/src/message_stream.rs
+++ b/net/levin/src/message_stream.rs
@@ -27,19 +27,20 @@ use pin_project::pin_project;
use crate::bucket_stream::BucketStream;
use crate::BucketError;
use crate::LevinBody;
+use crate::MessageType;
use crate::LEVIN_SIGNATURE;
use crate::PROTOCOL_VERSION;
/// A stream that reads from the underlying `BucketStream` and uses the the
/// methods on the `LevinBody` trait to decode the inner messages(bodies)
#[pin_project]
-pub struct MessageStream {
+pub struct MessageStream {
#[pin]
bucket_stream: BucketStream,
phantom: PhantomData,
}
-impl MessageStream {
+impl MessageStream {
/// Creates a new stream from the provided `AsyncRead`
pub fn new(stream: S) -> Self {
MessageStream {
@@ -49,7 +50,7 @@ impl MessageStream {
}
}
-impl Stream for MessageStream {
+impl Stream for MessageStream {
type Item = Result;
fn poll_next(
@@ -71,6 +72,8 @@ impl Stream for MessageStream
diff --git a/net/monero-wire/src/internal_macros.rs b/net/monero-wire/src/internal_macros.rs
--- a/net/monero-wire/src/internal_macros.rs
+++ b/net/monero-wire/src/internal_macros.rs
- $map.get($field_name)
- .ok_or_else(|| serde::de::Error::missing_field($field_name))?
- };
-}
+macro_rules! message {
+ (
+ Admin,
+ Name: $name:ident,
+ ID: $id:expr,
+ Request: $req:ident {
+ EncodingError: $req_enc_err:path,
+ Encode: $req_enc:path,
+ Decode: $req_dec:path,
+ },
+ Response: $res:ident {
+ EncodingError: $res_enc_err:path,
+ Encode: $res_enc:path,
+ Decode: $res_dec:path,
+ },
+ ) => {
+ #[sealed::sealed]
+ impl crate::messages::NetworkMessage for $req {
+ type EncodingError = $req_enc_err;
+ fn decode(buf: &[u8]) -> Result<Self, Self::EncodingError> {
+ $req_dec(buf)
+ }
+ fn encode(&self) -> Result<Vec<u8>, Self::EncodingError> {
+ $req_enc(self)
+ }
+ }
+ #[sealed::sealed]
+ impl crate::messages::NetworkMessage for $res {
+ type EncodingError = $res_enc_err;
+ fn decode(buf: &[u8]) -> Result<Self, Self::EncodingError> {
+ $res_dec(buf)
+ }
+ fn encode(&self) -> Result<Vec<u8>, Self::EncodingError> {
+ $res_enc(self)
+ }
+ }
-macro_rules! get_val_from_map {
- ($map:ident, $field_name:expr, $get_fn:ident, $expected_ty:expr) => {
- $map.get($field_name)
- .ok_or_else(|| serde::de::Error::missing_field($field_name))?
- .$get_fn()
- .ok_or_else(|| {
- serde::de::Error::invalid_type($map.get_value_type_as_unexpected(), &$expected_ty)
- })?
- };
-}
+ pub struct $name;
-macro_rules! get_internal_val {
- ($value:ident, $get_fn:ident, $expected_ty:expr) => {
- $value.$get_fn().ok_or_else(|| {
- serde::de::Error::invalid_type($value.get_value_type_as_unexpected(), &$expected_ty)
- })?
- };
-}
+ #[sealed::sealed]
+ impl crate::messages::AdminMessage for $name {
+ const ID: u32 = $id;
+ const NAME: &'static str = stringify!($name);
-macro_rules! monero_decode_into_serde_err {
- ($ty:ty, $buf:ident) => {
- monero::consensus::deserialize::<$ty>($buf).map_err(serde::de::Error::custom)?
+ type Request = $req;
+ type Response = $res;
+ }
+ };
+ (
+ Protocol,
+ Name: $name:ident {
+ EncodingError: $enc_err:path,
+ Encode: $enc:path,
+ Decode: $dec:path,
+ },
+ ID: $id:expr,
+ ) => {
+ #[sealed::sealed]
+ impl crate::messages::NetworkMessage for $name {
+ type EncodingError = $enc_err;
+ fn decode(buf: &[u8]) -> Result<Self, Self::EncodingError> {
+ $dec(buf)
+ }
+ fn encode(&self) -> Result<Vec<u8>, Self::EncodingError> {
+ $enc(self)
+ }
+ }
+
+ #[sealed::sealed]
+ impl crate::messages::ProtocolMessage for $name {
+ const ID: u32 = $id;
+ const NAME: &'static str = stringify!($name);
+
+ type Notification = Self;
+ }
};
}
diff --git a/net/monero-wire/src/lib.rs b/net/monero-wire/src/lib.rs
index 742e4ea4..05c20bcd 100644
--- a/net/monero-wire/src/lib.rs
+++ b/net/monero-wire/src/lib.rs
@@ -27,98 +27,15 @@
#![deny(non_upper_case_globals)]
#![deny(non_camel_case_types)]
#![deny(unused_mut)]
-#![deny(missing_docs)]
+//#![deny(missing_docs)]
#[macro_use]
mod internal_macros;
pub mod messages;
pub mod network_address;
+mod utils;
+pub use messages::{Message, P2pCommand};
pub use network_address::NetworkAddress;
-
// re-exports
pub use levin;
-pub use levin::message_sink::MessageSink;
-pub use levin::message_stream::MessageStream;
-
-use levin::BucketError;
-
-/// The possible commands that can be in a levin header
-#[derive(Debug, PartialEq, Eq, Clone, Copy)]
-pub enum P2pCommand {
- // 100* admin commands
- /// Handshake
- Handshake,
- /// TimedSync
- TimedSync,
- /// Ping
- Ping,
- /// SupportFlags
- SupportFlags,
-
- // 200* protocol commands
- /// NewBlock
- NewBlock,
- /// NewTransactions
- NewTransactions,
- /// RequestGetObject
- RequestGetObject,
- /// ResponseGetObject
- ResponseGetObject,
- /// RequestChain
- RequestChain,
- /// ResponseChainEntry
- ResponseChainEntry,
- /// NewFluffyBlock
- NewFluffyBlock,
- /// RequestFluffyMissingTx
- RequestFluffyMissingTx,
- /// GetTxPoolComplement
- GetTxPoolComplement,
-}
-
-impl TryFrom for P2pCommand {
- type Error = BucketError;
-
- fn try_from(value: u32) -> Result {
- match value {
- 1001 => Ok(P2pCommand::Handshake),
- 1002 => Ok(P2pCommand::TimedSync),
- 1003 => Ok(P2pCommand::Ping),
- 1007 => Ok(P2pCommand::SupportFlags),
-
- 2001 => Ok(P2pCommand::NewBlock),
- 2002 => Ok(P2pCommand::NewTransactions),
- 2003 => Ok(P2pCommand::RequestGetObject),
- 2004 => Ok(P2pCommand::ResponseGetObject),
- 2006 => Ok(P2pCommand::RequestChain),
- 2007 => Ok(P2pCommand::ResponseChainEntry),
- 2008 => Ok(P2pCommand::NewFluffyBlock),
- 2009 => Ok(P2pCommand::RequestFluffyMissingTx),
- 2010 => Ok(P2pCommand::GetTxPoolComplement),
-
- _ => Err(BucketError::UnsupportedP2pCommand(value)),
- }
- }
-}
-
-impl From for u32 {
- fn from(val: P2pCommand) -> Self {
- match val {
- P2pCommand::Handshake => 1001,
- P2pCommand::TimedSync => 1002,
- P2pCommand::Ping => 1003,
- P2pCommand::SupportFlags => 1007,
-
- P2pCommand::NewBlock => 2001,
- P2pCommand::NewTransactions => 2002,
- P2pCommand::RequestGetObject => 2003,
- P2pCommand::ResponseGetObject => 2004,
- P2pCommand::RequestChain => 2006,
- P2pCommand::ResponseChainEntry => 2007,
- P2pCommand::NewFluffyBlock => 2008,
- P2pCommand::RequestFluffyMissingTx => 2009,
- P2pCommand::GetTxPoolComplement => 2010,
- }
- }
-}
diff --git a/net/monero-wire/src/messages.rs b/net/monero-wire/src/messages.rs
index 62658538..c278c617 100644
--- a/net/monero-wire/src/messages.rs
+++ b/net/monero-wire/src/messages.rs
@@ -20,319 +20,289 @@ pub mod admin;
pub mod common;
pub mod protocol;
+pub use admin::{Handshake, Ping, SupportFlags, TimedSync};
pub use common::{BasicNodeData, CoreSyncData, PeerID, PeerListEntryBase};
+pub use protocol::{
+ ChainRequest, ChainResponse, FluffyMissingTransactionsRequest, GetObjectsRequest,
+ GetObjectsResponse, GetTxPoolCompliment, NewBlock, NewFluffyBlock, NewTransactions,
+};
-use bytes::Bytes;
-use levin::BucketError;
-use levin::MessageType;
+use levin::{BucketError, MessageType};
-use crate::P2pCommand;
-
-fn zero_val>() -> T {
- T::from(0_u8)
+#[sealed::sealed]
+pub trait NetworkMessage: Sized {
+ type EncodingError: std::fmt::Debug;
+ fn decode(buf: &[u8]) -> Result<Self, Self::EncodingError>;
+ fn encode(&self) -> Result<Vec<u8>, Self::EncodingError>;
}
-fn default_true() -> bool {
- true
+#[sealed::sealed]
+pub trait AdminMessage {
+ const ID: u32;
+ const NAME: &'static str;
+ type Request: NetworkMessage;
+ type Response: NetworkMessage;
}
-fn default_false() -> bool {
- false
+#[sealed::sealed]
+pub trait ProtocolMessage {
+ const ID: u32;
+ const NAME: &'static str;
+ type Notification: NetworkMessage;
}
-/// A message request
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum MessageRequest {
- /// Handshake
- Handshake(admin::HandshakeRequest),
- /// TimedSync
- TimedSync(admin::TimedSyncRequest),
- /// Ping
- Ping(admin::PingRequest),
- /// SupportFlags
- SupportFlags(admin::SupportFlagsRequest),
-}
+macro_rules! p2p_command {
+ ($($message:ident),+) => {
+ pub enum P2pCommand {
+ $($message,)+
+ }
-/// A message response
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum MessageResponse {
- /// Handshake
- Handshake(admin::HandshakeResponse),
- /// TimedSync
- TimedSync(admin::TimedSyncResponse),
- /// Ping
- Ping(admin::PingResponse),
- /// SupportFlags
- SupportFlags(admin::SupportFlagsResponse),
-}
-
-/// A messages notification
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum MessageNotification {
- /// NewBlock
- NewBlock(protocol::NewBlock),
- /// NewTransactions
- NewTransactions(protocol::NewTransactions),
- /// RequestGetObject
- RequestGetObject(protocol::GetObjectsRequest),
- /// ResponseGetObject
- ResponseGetObject(protocol::GetObjectsResponse),
- /// RequestChain
- RequestChain(protocol::ChainRequest),
- /// ResponseChainEntry
- ResponseChainEntry(protocol::ChainResponse),
- /// NewFluffyBlock
- NewFluffyBlock(protocol::NewFluffyBlock),
- /// RequestFluffyMissingTx
- RequestFluffyMissingTx(protocol::FluffyMissingTransactionsRequest),
- /// GetTxPoolComplement
- GetTxPoolComplement(protocol::TxPoolCompliment),
-}
-
-/// A Monero Message (levin body)
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum Message {
- /// Request
- Request(MessageRequest),
- /// Response
- Response(MessageResponse),
- /// Notification
- Notification(Box), // check benefits/ drawbacks of doing this, im just boxing it for now to satisfy clippy
-}
-
-fn epee_encode_error_to_levin(err: epee_serde::Error) -> BucketError {
- BucketError::FailedToEncodeBucketBody(err.to_string())
-}
-
-fn encode_message(message: &T) -> Result, BucketError> {
- epee_serde::to_bytes(message).map_err(epee_encode_error_to_levin)
-}
-
-fn epee_decode_error_to_levin(err: epee_serde::Error) -> BucketError {
- BucketError::FailedToDecodeBucketBody(err.to_string())
-}
-
-fn decode_message(buf: &[u8]) -> Result {
- epee_serde::from_bytes(buf).map_err(epee_decode_error_to_levin)
-}
-
-impl levin::LevinBody for Message {
- fn decode_message(
- buf: &[u8],
- typ: MessageType,
- have_to_return: bool,
- command: u32,
- ) -> Result {
- let command = P2pCommand::try_from(command)?;
-
- Ok(match typ {
- MessageType::Response => Message::Response(match command {
- P2pCommand::Handshake => MessageResponse::Handshake(decode_message(buf)?),
- P2pCommand::TimedSync => MessageResponse::TimedSync(decode_message(buf)?),
- P2pCommand::Ping => MessageResponse::Ping(decode_message(buf)?),
- P2pCommand::SupportFlags => MessageResponse::SupportFlags(decode_message(buf)?),
- _ => {
- return Err(levin::BucketError::FailedToDecodeBucketBody(
- "Invalid header flag/command/have_to_return combination".to_string(),
- ))
- }
- }),
-
- MessageType::Request if have_to_return => Message::Request(match command {
- P2pCommand::Handshake => MessageRequest::Handshake(decode_message(buf)?),
- P2pCommand::TimedSync => MessageRequest::TimedSync(decode_message(buf)?),
- P2pCommand::Ping => MessageRequest::Ping(admin::PingRequest),
- P2pCommand::SupportFlags => {
- MessageRequest::SupportFlags(admin::SupportFlagsRequest)
- }
- _ => {
- return Err(levin::BucketError::FailedToDecodeBucketBody(
- "Invalid header flag/command/have_to_return combination".to_string(),
- ))
- }
- }),
-
- MessageType::Request if !have_to_return => {
- Message::Notification(Box::new(match command {
- P2pCommand::NewBlock => MessageNotification::NewBlock(decode_message(buf)?),
- P2pCommand::NewTransactions => {
- MessageNotification::NewTransactions(decode_message(buf)?)
- }
- P2pCommand::RequestGetObject => {
- MessageNotification::RequestGetObject(decode_message(buf)?)
- }
- P2pCommand::ResponseGetObject => {
- MessageNotification::ResponseGetObject(decode_message(buf)?)
- }
- P2pCommand::RequestChain => {
- MessageNotification::RequestChain(decode_message(buf)?)
- }
- P2pCommand::ResponseChainEntry => {
- MessageNotification::ResponseChainEntry(decode_message(buf)?)
- }
- P2pCommand::NewFluffyBlock => {
- MessageNotification::NewFluffyBlock(decode_message(buf)?)
- }
- P2pCommand::RequestFluffyMissingTx => {
- MessageNotification::RequestFluffyMissingTx(decode_message(buf)?)
- }
- P2pCommand::GetTxPoolComplement => {
- MessageNotification::GetTxPoolComplement(decode_message(buf)?)
- }
- _ => {
- return Err(levin::BucketError::FailedToDecodeBucketBody(
- "Invalid header flag/command/have_to_return combination".to_string(),
- ))
- }
- }))
- }
- _ => unreachable!("All typs are handleded"),
- })
- }
-
- fn encode(&self) -> Result<(i32, u32, bool, MessageType, Bytes), BucketError> {
- let return_code;
- let command;
- let have_to_return_data;
- let flag;
- let bytes;
-
- match self {
- Message::Request(req) => {
- return_code = 0;
- have_to_return_data = true;
- flag = MessageType::Request;
- match req {
- MessageRequest::Handshake(handshake) => {
- command = P2pCommand::Handshake;
- bytes = encode_message(handshake)?;
- }
- MessageRequest::TimedSync(timedsync) => {
- command = P2pCommand::TimedSync;
- bytes = encode_message(timedsync)?;
- }
- MessageRequest::Ping(_) => {
- command = P2pCommand::Ping;
- bytes = Vec::new();
- }
- MessageRequest::SupportFlags(_) => {
- command = P2pCommand::SupportFlags;
- bytes = Vec::new();
- }
- }
- }
- Message::Response(res) => {
- return_code = 1;
- have_to_return_data = false;
- flag = MessageType::Response;
- match res {
- MessageResponse::Handshake(handshake) => {
- command = P2pCommand::Handshake;
- bytes = encode_message(handshake)?;
- }
- MessageResponse::TimedSync(timed_sync) => {
- command = P2pCommand::TimedSync;
- bytes = encode_message(timed_sync)?;
- }
- MessageResponse::Ping(ping) => {
- command = P2pCommand::Ping;
- bytes = encode_message(ping)?;
- }
- MessageResponse::SupportFlags(support_flags) => {
- command = P2pCommand::SupportFlags;
- bytes = encode_message(support_flags)?;
- }
- }
- }
- Message::Notification(noti) => {
- return_code = 0;
- have_to_return_data = false;
- flag = MessageType::Response;
- match noti.as_ref() {
- MessageNotification::NewBlock(new_block) => {
- command = P2pCommand::NewBlock;
- bytes = encode_message(new_block)?;
- }
- MessageNotification::NewTransactions(new_txs) => {
- command = P2pCommand::NewTransactions;
- bytes = encode_message(new_txs)?;
- }
- MessageNotification::RequestGetObject(obj) => {
- command = P2pCommand::RequestGetObject;
- bytes = encode_message(obj)?;
- }
- MessageNotification::ResponseGetObject(obj) => {
- command = P2pCommand::ResponseGetObject;
- bytes = encode_message(obj)?;
- }
- MessageNotification::RequestChain(chain) => {
- command = P2pCommand::RequestChain;
- bytes = encode_message(chain)?;
- }
- MessageNotification::ResponseChainEntry(chain_entry) => {
- command = P2pCommand::ResponseChainEntry;
- bytes = encode_message(chain_entry)?;
- }
- MessageNotification::NewFluffyBlock(fluffy_block) => {
- command = P2pCommand::NewFluffyBlock;
- bytes = encode_message(fluffy_block)?;
- }
- MessageNotification::RequestFluffyMissingTx(txs) => {
- command = P2pCommand::RequestFluffyMissingTx;
- bytes = encode_message(txs)?;
- }
- MessageNotification::GetTxPoolComplement(txpool) => {
- command = P2pCommand::GetTxPoolComplement;
- bytes = encode_message(txpool)?;
- }
+ pub struct P2pCommandFromU32Err;
+ impl TryFrom<u32> for P2pCommand {
+ type Error = P2pCommandFromU32Err;
+ fn try_from(value: u32) -> Result<Self, Self::Error> {
+ match value {
+ $($message::ID => Ok(P2pCommand::$message),)+
+ _ => Err(P2pCommandFromU32Err)
}
}
}
- return Ok((
- return_code,
- command.into(),
- have_to_return_data,
- flag,
- bytes.into(),
- ));
- }
+ impl From<P2pCommand> for u32 {
+ fn from(val: P2pCommand) -> Self {
+ match val {
+ $(P2pCommand::$message => $message::ID,)+
+ }
+ }
+ }
+ };
}
-#[cfg(test)]
-mod tests {
- use super::Message;
- use levin::{LevinBody, MessageType};
+macro_rules! levin_body {
+ (
+ Admin:
+ $($admin_mes:ident),+
+ Protocol:
+ $($protocol_mes:ident),+
+ ) => {
- #[test]
- fn decode_handshake_request() {
- let buf = [
- 1, 17, 1, 1, 1, 1, 2, 1, 1, 12, 9, 110, 111, 100, 101, 95, 100, 97, 116, 97, 12, 24, 7,
- 109, 121, 95, 112, 111, 114, 116, 6, 168, 70, 0, 0, 10, 110, 101, 116, 119, 111, 114,
- 107, 95, 105, 100, 10, 64, 18, 48, 241, 113, 97, 4, 65, 97, 23, 49, 0, 130, 22, 161,
- 161, 16, 7, 112, 101, 101, 114, 95, 105, 100, 5, 153, 5, 227, 61, 188, 214, 159, 10,
- 13, 115, 117, 112, 112, 111, 114, 116, 95, 102, 108, 97, 103, 115, 6, 1, 0, 0, 0, 8,
- 114, 112, 99, 95, 112, 111, 114, 116, 7, 0, 0, 20, 114, 112, 99, 95, 99, 114, 101, 100,
- 105, 116, 115, 95, 112, 101, 114, 95, 104, 97, 115, 104, 6, 0, 0, 0, 0, 12, 112, 97,
- 121, 108, 111, 97, 100, 95, 100, 97, 116, 97, 12, 24, 21, 99, 117, 109, 117, 108, 97,
- 116, 105, 118, 101, 95, 100, 105, 102, 102, 105, 99, 117, 108, 116, 121, 5, 59, 90,
- 163, 153, 0, 0, 0, 0, 27, 99, 117, 109, 117, 108, 97, 116, 105, 118, 101, 95, 100, 105,
- 102, 102, 105, 99, 117, 108, 116, 121, 95, 116, 111, 112, 54, 52, 5, 0, 0, 0, 0, 0, 0,
- 0, 0, 14, 99, 117, 114, 114, 101, 110, 116, 95, 104, 101, 105, 103, 104, 116, 5, 190,
- 50, 0, 0, 0, 0, 0, 0, 12, 112, 114, 117, 110, 105, 110, 103, 95, 115, 101, 101, 100, 6,
- 0, 0, 0, 0, 6, 116, 111, 112, 95, 105, 100, 10, 128, 230, 40, 186, 45, 79, 79, 224,
- 164, 117, 133, 84, 130, 185, 94, 4, 1, 57, 126, 74, 145, 238, 238, 122, 44, 214, 85,
- 129, 237, 230, 14, 67, 218, 11, 116, 111, 112, 95, 118, 101, 114, 115, 105, 111, 110,
- 8, 1, 18, 108, 111, 99, 97, 108, 95, 112, 101, 101, 114, 108, 105, 115, 116, 95, 110,
- 101, 119, 140, 4, 24, 3, 97, 100, 114, 12, 8, 4, 116, 121, 112, 101, 8, 1, 4, 97, 100,
- 100, 114, 12, 8, 4, 109, 95, 105, 112, 6, 225, 219, 21, 0, 6, 109, 95, 112, 111, 114,
- 116, 7, 0, 0, 2, 105, 100, 5, 0, 0, 0, 0, 0, 0, 0, 0, 9, 108, 97, 115, 116, 95, 115,
- 101, 101, 110, 1, 0, 0, 0, 0, 0, 0, 0, 0, 12, 112, 114, 117, 110, 105, 110, 103, 95,
- 115, 101, 101, 100, 6, 0, 0, 0, 0, 8, 114, 112, 99, 95, 112, 111, 114, 116, 7, 0, 0,
- 20, 114, 112, 99, 95, 99, 114, 101, 100, 105, 116, 115, 95, 112, 101, 114, 95, 104, 97,
- 115, 104, 6, 0, 0, 0, 0,
- ];
+ #[derive(Debug, Clone)]
+ pub enum MessageRequest {
+ $($admin_mes(<$admin_mes as AdminMessage>::Request),)+
+ }
- let message = Message::decode_message(&buf, MessageType::Request, true, 1001);
- println!("{:?}", message);
- }
+ $(
+ impl From<<$admin_mes as AdminMessage>::Request> for MessageRequest {
+ fn from(value: <$admin_mes as AdminMessage>::Request) -> MessageRequest {
+ MessageRequest::$admin_mes(value)
+ }
+ }
+ )+
+
+ impl MessageRequest {
+ pub fn id(&self) -> u32 {
+ match self {
+ $(MessageRequest::$admin_mes(_) => $admin_mes::ID,)+
+ }
+ }
+ pub fn decode(buf: &[u8], command: u32) -> Result<Self, BucketError> {
+ match command {
+ $($admin_mes::ID => Ok(
+ MessageRequest::$admin_mes(<$admin_mes as AdminMessage>::Request::decode(buf)
+ .map_err(|e| BucketError::FailedToDecodeBucketBody(e.to_string()))?)),)+
+ _ => Err(BucketError::UnsupportedP2pCommand(command))
+ }
+ }
+
+ pub fn encode(&self) -> Result<(u32, Vec<u8>), BucketError> {
+ match self {
+ $(MessageRequest::$admin_mes(mes) => Ok(($admin_mes::ID, mes.encode()
+ .map_err(|e| BucketError::FailedToEncodeBucketBody(e.to_string()))?)),)+
+ }
+ }
+ }
+
+ #[derive(Debug, Clone)]
+ pub enum MessageResponse {
+ $($admin_mes(<$admin_mes as AdminMessage>::Response),)+
+ }
+
+ $(
+ impl From<<$admin_mes as AdminMessage>::Response> for MessageResponse {
+ fn from(value: <$admin_mes as AdminMessage>::Response) -> MessageResponse {
+ MessageResponse::$admin_mes(value)
+ }
+ }
+ )+
+
+ impl MessageResponse {
+ pub fn id(&self) -> u32 {
+ match self {
+ $(MessageResponse::$admin_mes(_) => $admin_mes::ID,)+
+ }
+ }
+
+ pub fn decode(buf: &[u8], command: u32) -> Result<Self, BucketError> {
+ match command {
+ $($admin_mes::ID => Ok(
+ MessageResponse::$admin_mes(<$admin_mes as AdminMessage>::Response::decode(buf)
+ .map_err(|e| BucketError::FailedToDecodeBucketBody(e.to_string()))?)),)+
+ _ => Err(BucketError::UnsupportedP2pCommand(command))
+ }
+ }
+
+ pub fn encode(&self) -> Result<(u32, Vec<u8>), BucketError> {
+ match self {
+ $(MessageResponse::$admin_mes(mes) => Ok(($admin_mes::ID, mes.encode()
+ .map_err(|e| BucketError::FailedToEncodeBucketBody(e.to_string()))?)),)+
+ }
+ }
+ }
+
+ #[derive(Debug, Clone)]
+ pub enum MessageNotification {
+ $($protocol_mes(<$protocol_mes as ProtocolMessage>::Notification),)+
+ }
+
+ $(
+ impl From<<$protocol_mes as ProtocolMessage>::Notification> for MessageNotification {
+ fn from(value: <$protocol_mes as ProtocolMessage>::Notification) -> MessageNotification {
+ MessageNotification::$protocol_mes(value)
+ }
+ }
+ )+
+
+
+ impl MessageNotification {
+ pub fn id(&self) -> u32 {
+ match self {
+ $(MessageNotification::$protocol_mes(_) => $protocol_mes::ID,)+
+ }
+ }
+
+
+        pub fn decode(buf: &[u8], command: u32) -> Result<Self, BucketError> {
+ match command {
+ $($protocol_mes::ID => Ok(
+ MessageNotification::$protocol_mes(<$protocol_mes as ProtocolMessage>::Notification::decode(buf)
+ .map_err(|e| BucketError::FailedToDecodeBucketBody(e.to_string()))?)),)+
+ _ => Err(BucketError::UnsupportedP2pCommand(command))
+ }
+ }
+
+        pub fn encode(&self) -> Result<(u32, Vec<u8>), BucketError> {
+ match self {
+ $(MessageNotification::$protocol_mes(mes) => Ok(($protocol_mes::ID, mes.encode()
+ .map_err(|e| BucketError::FailedToEncodeBucketBody(e.to_string()))?)),)+
+ }
+ }
+ }
+
+ #[derive(Debug, Clone)]
+ pub enum Message {
+ Request(MessageRequest),
+ Response(MessageResponse),
+ Notification(MessageNotification)
+ }
+
+    impl From<MessageResponse> for Message {
+ fn from(value: MessageResponse) -> Message {
+ Message::Response(value)
+ }
+ }
+
+    impl From<MessageRequest> for Message {
+ fn from(value: MessageRequest) -> Message {
+ Message::Request(value)
+ }
+ }
+
+    impl From<MessageNotification> for Message {
+ fn from(value: MessageNotification) -> Message {
+ Message::Notification(value)
+ }
+ }
+
+ impl Message {
+ pub fn id(&self) -> u32 {
+ match self {
+ Message::Request(req) => req.id(),
+ Message::Response(res) => res.id(),
+ Message::Notification(noti) => noti.id(),
+ }
+ }
+ pub fn is_request(&self) -> bool {
+ matches!(self, Self::Request(_))
+ }
+ pub fn is_response(&self) -> bool {
+ matches!(self, Self::Response(_))
+ }
+ pub fn is_notification(&self) -> bool {
+ matches!(self, Self::Notification(_))
+ }
+ }
+
+ impl levin::LevinBody for Message {
+        fn decode_message(buf: &[u8], typ: MessageType, command: u32) -> Result<Self, BucketError> {
+ Ok(match typ {
+ MessageType::Response => Message::Response(MessageResponse::decode(buf, command)?),
+ MessageType::Request => Message::Request(MessageRequest::decode(buf, command)?),
+ MessageType::Notification => Message::Notification(MessageNotification::decode(buf, command)?),
+ })
+ }
+
+        fn encode(&self) -> Result<(i32, u32, MessageType, Vec<u8>), BucketError> {
+ match self {
+ Message::Response(mes) => {
+                let (command, bytes) = mes.encode()?;
+ Ok((1, command, MessageType::Response, bytes))
+ },
+ Message::Request(mes) => {
+                let (command, bytes) = mes.encode()?;
+ Ok((0, command, MessageType::Request, bytes))
+ },
+ Message::Notification(mes) => {
+                let (command, bytes) = mes.encode()?;
+ Ok((0, command, MessageType::Notification, bytes))
+ },
+ }
+ }
+
+
+ }
+
+ };
}
+
+p2p_command!(
+ Handshake,
+ TimedSync,
+ Ping,
+ SupportFlags,
+ NewBlock,
+ NewTransactions,
+ GetObjectsRequest,
+ GetObjectsResponse,
+ ChainRequest,
+ ChainResponse,
+ NewFluffyBlock,
+ FluffyMissingTransactionsRequest,
+ GetTxPoolCompliment
+);
+
+levin_body!(
+ Admin:
+ Handshake,
+ TimedSync,
+ Ping,
+ SupportFlags
+ Protocol:
+ NewBlock,
+ NewTransactions,
+ GetObjectsRequest,
+ GetObjectsResponse,
+ ChainRequest,
+ ChainResponse,
+ NewFluffyBlock,
+ FluffyMissingTransactionsRequest,
+ GetTxPoolCompliment
+);
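+
+// Usage sketch (illustrative, not part of the original patch): assuming `Handshake`,
+// `AdminMessage` and `BucketError` are in scope here the same way they are for the macro
+// expansion above, the generated wrappers compose like this.
+#[cfg(test)]
+mod generated_message_sketch {
+    use super::*;
+
+    // Compile-only sketch: wrap a concrete admin request and run it through LevinBody.
+    fn _wrap_and_round_trip(
+        req: <Handshake as AdminMessage>::Request,
+    ) -> Result<Message, BucketError> {
+        // Generated From impls: concrete request -> MessageRequest -> Message.
+        let msg: Message = MessageRequest::from(req).into();
+        // Handshake was registered with ID: P2P_ADMIN_BASE + 1 = 1001.
+        assert_eq!(msg.id(), 1001);
+        // encode() yields (return_code, command, message type, body) for the levin bucket,
+        // and decode_message() reverses it.
+        let (_return_code, command, ty, bytes) = levin::LevinBody::encode(&msg)?;
+        <Message as levin::LevinBody>::decode_message(&bytes, ty, command)
+    }
+}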
diff --git a/net/monero-wire/src/messages/admin.rs b/net/monero-wire/src/messages/admin.rs
index 4128710d..b8888f14 100644
--- a/net/monero-wire/src/messages/admin.rs
+++ b/net/monero-wire/src/messages/admin.rs
@@ -18,13 +18,34 @@
//! Admin message requests must be responded to in order unlike
//! protocol messages.
+use std::fmt::Display;
+
use serde::{Deserialize, Serialize};
use super::{
- common::{BasicNodeData, CoreSyncData, PeerListEntryBase},
+ common::{BasicNodeData, CoreSyncData, PeerListEntryBase, PeerSupportFlags},
PeerID,
};
+const P2P_ADMIN_BASE: u32 = 1000;
+
+#[derive(Debug)]
+pub struct SillyEncodingError;
+
+impl Display for SillyEncodingError {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.write_str("Literally impossible to get this error")
+ }
+}
+
+fn silly_encode<T>(_: &T) -> Result<Vec<u8>, SillyEncodingError> {
+ Ok(vec![])
+}
+
+fn silly_decode<T: Default>(_: &[u8]) -> Result<T, SillyEncodingError> {
+ Ok(T::default())
+}
+
/// A Handshake Request
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct HandshakeRequest {
@@ -34,6 +55,10 @@ pub struct HandshakeRequest {
pub payload_data: CoreSyncData,
}
+fn empty_vec<T>() -> Vec<T> {
+ vec![]
+}
+
/// A Handshake Response
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct HandshakeResponse {
@@ -42,9 +67,26 @@ pub struct HandshakeResponse {
/// Core Sync Data
pub payload_data: CoreSyncData,
/// PeerList
+ #[serde(default = "empty_vec")]
     pub local_peerlist_new: Vec<PeerListEntryBase>,
}
+message!(
+ Admin,
+ Name: Handshake,
+ ID: P2P_ADMIN_BASE + 1,
+ Request: HandshakeRequest {
+ EncodingError: epee_serde::Error,
+ Encode: epee_serde::to_bytes,
+ Decode: epee_serde::from_bytes,
+ },
+ Response: HandshakeResponse {
+ EncodingError: epee_serde::Error,
+ Encode: epee_serde::to_bytes,
+ Decode: epee_serde::from_bytes,
+ },
+);
+
/// A TimedSync Request
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct TimedSyncRequest {
@@ -61,11 +103,27 @@ pub struct TimedSyncResponse {
     pub local_peerlist_new: Vec<PeerListEntryBase>,
}
+message!(
+ Admin,
+ Name: TimedSync,
+ ID: P2P_ADMIN_BASE + 2,
+ Request: TimedSyncRequest {
+ EncodingError: epee_serde::Error,
+ Encode: epee_serde::to_bytes,
+ Decode: epee_serde::from_bytes,
+ },
+ Response: TimedSyncResponse {
+ EncodingError: epee_serde::Error,
+ Encode: epee_serde::to_bytes,
+ Decode: epee_serde::from_bytes,
+ },
+);
+
/// The status field of an okay ping response
pub const PING_OK_RESPONSE_STATUS_TEXT: &str = "OK";
/// A Ping Request
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct PingRequest;
/// A Ping Response
@@ -77,25 +135,54 @@ pub struct PingResponse {
pub peer_id: PeerID,
}
+message!(
+ Admin,
+ Name: Ping,
+ ID: P2P_ADMIN_BASE + 3,
+ Request: PingRequest {
+ EncodingError: SillyEncodingError,
+ Encode: silly_encode,
+ Decode: silly_decode,
+ },
+ Response: PingResponse {
+ EncodingError: epee_serde::Error,
+ Encode: epee_serde::to_bytes,
+ Decode: epee_serde::from_bytes,
+ },
+);
+
/// A Support Flags Request
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct SupportFlagsRequest;
/// A Support Flags Response
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct SupportFlagsResponse {
/// Support Flags
- pub support_flags: u32,
+ pub support_flags: PeerSupportFlags,
}
+message!(
+ Admin,
+ Name: SupportFlags,
+ ID: P2P_ADMIN_BASE + 7,
+ Request: SupportFlagsRequest {
+ EncodingError: SillyEncodingError,
+ Encode: silly_encode,
+ Decode: silly_decode,
+ },
+ Response: SupportFlagsResponse {
+ EncodingError: epee_serde::Error,
+ Encode: epee_serde::to_bytes,
+ Decode: epee_serde::from_bytes,
+ },
+);
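+
+// Round-trip sketch (illustrative, not part of the original patch): with the registration
+// above, `SupportFlags` is ID 1007 (P2P_ADMIN_BASE + 7) and its response uses the epee
+// encode/decode pair configured in the `message!` invocation.
+#[cfg(test)]
+mod support_flags_sketch {
+    use super::*;
+
+    #[test]
+    fn support_flags_response_round_trips() {
+        let res = SupportFlagsResponse {
+            support_flags: PeerSupportFlags::from(1_u32),
+        };
+        let bytes = epee_serde::to_bytes(&res).unwrap();
+        let decoded: SupportFlagsResponse = epee_serde::from_bytes(bytes).unwrap();
+        assert_eq!(res, decoded);
+    }
+}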
+
#[cfg(test)]
mod tests {
- use std::str::FromStr;
-
- use monero::Hash;
use super::{BasicNodeData, CoreSyncData, HandshakeRequest, HandshakeResponse};
- use crate::messages::common::PeerID;
+ use crate::messages::common::{PeerID, PeerSupportFlags};
#[test]
fn serde_handshake_req() {
@@ -120,7 +207,7 @@ mod tests {
18, 48, 241, 113, 97, 4, 65, 97, 23, 49, 0, 130, 22, 161, 161, 16,
],
peer_id: PeerID(9671405426614699871),
- support_flags: 1,
+ support_flags: PeerSupportFlags::from(1_u32),
rpc_port: 0,
rpc_credits_per_hash: 0,
};
@@ -130,9 +217,11 @@ mod tests {
cumulative_difficulty_top64: 0,
current_height: 0,
pruning_seed: 0,
- top_id: Hash::from_str(
+ top_id: hex::decode(
"0x418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3",
)
+ .unwrap()
+ .try_into()
.unwrap(),
top_version: 1,
};
@@ -925,7 +1014,7 @@ mod tests {
18, 48, 241, 113, 97, 4, 65, 97, 23, 49, 0, 130, 22, 161, 161, 16,
],
peer_id: PeerID(6037804360359455404),
- support_flags: 1,
+ support_flags: PeerSupportFlags::from(1_u32),
rpc_port: 18089,
rpc_credits_per_hash: 0,
};
@@ -935,9 +1024,11 @@ mod tests {
cumulative_difficulty_top64: 0,
current_height: 2775167,
pruning_seed: 386,
- top_id: Hash::from_str(
+ top_id: hex::decode(
"0x40780072dae9123108599a9f6585f2474d03f7b6dbb5d8c18717baa8cf7756eb",
)
+ .unwrap()
+ .try_into()
.unwrap(),
top_version: 16,
};
diff --git a/net/monero-wire/src/messages/common.rs b/net/monero-wire/src/messages/common.rs
index 649c613a..8a20475b 100644
--- a/net/monero-wire/src/messages/common.rs
+++ b/net/monero-wire/src/messages/common.rs
@@ -17,19 +17,52 @@
//
use epee_serde::Value;
-use monero::{Block, Hash, Transaction};
use serde::de;
-use serde::ser::SerializeStruct;
+use serde::ser::{SerializeSeq, SerializeStruct};
use serde::{Deserialize, Serialize};
use serde_with::serde_as;
use serde_with::TryFromInto;
-use super::zero_val;
+use crate::utils;
use crate::NetworkAddress;
-/// A PeerID, different from a `NetworkAddress`
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq)]
#[serde(transparent)]
+pub struct PeerSupportFlags(u32); // named PeerSupportFlags to avoid clashing with the SupportFlags message type
+
+impl PeerSupportFlags {
+ const FLUFFY_BLOCKS: u32 = 0b0000_0001;
+ /// checks if `self` has all the flags that `other` has
+ pub fn contains(&self, other: &PeerSupportFlags) -> bool {
+ self.0 & other.0 == other.0
+ }
+ pub fn supports_fluffy_blocks(&self) -> bool {
+ self.0 & Self::FLUFFY_BLOCKS == Self::FLUFFY_BLOCKS
+ }
+ pub fn get_support_flag_fluffy_blocks() -> Self {
+ PeerSupportFlags(Self::FLUFFY_BLOCKS)
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.0 == 0
+ }
+}
+
+impl From<u8> for PeerSupportFlags {
+ fn from(value: u8) -> Self {
+ PeerSupportFlags(value as u32)
+ }
+}
+
+impl From<u32> for PeerSupportFlags {
+ fn from(value: u32) -> Self {
+ PeerSupportFlags(value)
+ }
+}
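+
+// Usage sketch (illustrative): the wrapper keeps the wire value a plain u32 while giving
+// named checks for individual support bits.
+#[cfg(test)]
+mod peer_support_flags_sketch {
+    use super::PeerSupportFlags;
+
+    #[test]
+    fn fluffy_block_bit() {
+        let flags = PeerSupportFlags::from(1_u32);
+        assert!(flags.supports_fluffy_blocks());
+        assert!(flags.contains(&PeerSupportFlags::get_support_flag_fluffy_blocks()));
+        assert!(PeerSupportFlags::from(0_u32).is_empty());
+    }
+}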
+
+/// A PeerID, different from a `NetworkAddress`
+#[derive(Debug, Clone, Default, Copy, Deserialize, Serialize, PartialEq, Eq)]
+#[serde(transparent)]
pub struct PeerID(pub u64);
/// Basic Node Data, information on the connected peer
@@ -43,15 +76,15 @@ pub struct BasicNodeData {
pub peer_id: PeerID,
/// The Peers Support Flags
/// (If this is not in the message the default is 0)
- #[serde(default = "zero_val")]
- pub support_flags: u32,
+ #[serde(default = "utils::zero_val")]
+ pub support_flags: PeerSupportFlags,
/// RPC Port
/// (If this is not in the message the default is 0)
- #[serde(default = "zero_val")]
+ #[serde(default = "utils::zero_val")]
pub rpc_port: u16,
/// RPC Credits Per Hash
/// (If this is not in the message the default is 0)
- #[serde(default = "zero_val")]
+ #[serde(default = "utils::zero_val")]
pub rpc_credits_per_hash: u32,
}
@@ -64,23 +97,41 @@ pub struct CoreSyncData {
pub cumulative_difficulty: u64,
/// Cumulative Difficulty High
/// The upper 64 bits of the 128 bit cumulative difficulty
- #[serde(default = "zero_val")]
+ #[serde(default = "utils::zero_val")]
pub cumulative_difficulty_top64: u64,
/// Current Height of the peer
pub current_height: u64,
/// Pruning Seed of the peer
/// (If this is not in the message the default is 0)
- #[serde(default = "zero_val")]
+ #[serde(default = "utils::zero_val")]
pub pruning_seed: u32,
/// Hash of the top block
#[serde_as(as = "TryFromInto<[u8; 32]>")]
- pub top_id: Hash,
+ pub top_id: [u8; 32],
/// Version of the top block
- #[serde(default = "zero_val")]
+ #[serde(default = "utils::zero_val")]
pub top_version: u8,
}
impl CoreSyncData {
+ pub fn new(
+ cumulative_difficulty_128: u128,
+ current_height: u64,
+ pruning_seed: u32,
+ top_id: [u8; 32],
+ top_version: u8,
+ ) -> CoreSyncData {
+ let cumulative_difficulty = cumulative_difficulty_128 as u64;
+ let cumulative_difficulty_top64 = (cumulative_difficulty_128 >> 64) as u64;
+ CoreSyncData {
+ cumulative_difficulty,
+ cumulative_difficulty_top64,
+ current_height,
+ pruning_seed,
+ top_id,
+ top_version,
+ }
+ }
/// Returns the 128 bit cumulative difficulty of the peers blockchain
pub fn cumulative_difficulty(&self) -> u128 {
let mut ret: u128 = self.cumulative_difficulty_top64 as u128;
@@ -91,76 +142,150 @@ impl CoreSyncData {
/// PeerListEntryBase, information kept on a peer which will be entered
/// in a peer list/store.
-#[derive(Clone, Copy, Deserialize, Serialize, Debug, Eq, PartialEq)]
+#[derive(Clone, Copy, Default, Deserialize, Serialize, Debug, Eq, PartialEq)]
pub struct PeerListEntryBase {
/// The Peer Address
pub adr: NetworkAddress,
/// The Peer ID
pub id: PeerID,
/// The last Time The Peer Was Seen
- #[serde(default = "zero_val")]
+ #[serde(default = "utils::zero_val")]
pub last_seen: i64,
/// The Pruning Seed
- #[serde(default = "zero_val")]
+ #[serde(default = "utils::zero_val")]
pub pruning_seed: u32,
/// The RPC port
- #[serde(default = "zero_val")]
+ #[serde(default = "utils::zero_val")]
pub rpc_port: u16,
/// The RPC credits per hash
- #[serde(default = "zero_val")]
+ #[serde(default = "utils::zero_val")]
pub rpc_credits_per_hash: u32,
}
+impl std::hash::Hash for PeerListEntryBase {
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+ // We only hash the adr so we can look this up in a HashSet.
+ self.adr.hash(state)
+ }
+}
+
/// A pruned tx with the hash of the missing prunable data
#[derive(Clone, Debug, PartialEq, Eq)]
-pub struct TxBlobEntry {
+pub struct PrunedTxBlobEntry {
/// The Tx
- pub tx: Transaction, // ########### use pruned transaction when PR is merged ##############
+    pub tx: Vec<u8>,
/// The Prunable Tx Hash
- pub prunable_hash: Hash,
+ pub prunable_hash: [u8; 32],
}
-impl TxBlobEntry {
- fn from_epee_value(value: &Value) -> Result {
- let tx_blob = get_val_from_map!(value, "blob", get_bytes, "Vec");
+impl PrunedTxBlobEntry {
+ fn from_epee_value(mut value: Value) -> Result {
+ let tx = utils::get_internal_val_from_map(&mut value, "blob", Value::get_bytes, "Vec")?;
- let tx = monero_decode_into_serde_err!(Transaction, tx_blob);
+ let prunable_hash = utils::get_internal_val_from_map(
+ &mut value,
+ "prunable_hash",
+ Value::get_bytes,
+            "Vec<u8>",
+ )?;
+ let prunable_hash_len = prunable_hash.len();
-        let prunable_hash_blob = get_val_from_map!(value, "prunable_hash", get_bytes, "Vec<u8>");
-
- let prunable_hash = Hash::from_slice(prunable_hash_blob);
-
- Ok(Self { tx, prunable_hash })
+ Ok(PrunedTxBlobEntry {
+ tx,
+ prunable_hash: prunable_hash
+ .try_into()
+                .map_err(|_| E::invalid_length(prunable_hash_len, &"a 32-byte array"))?,
+ })
}
}
-impl Serialize for TxBlobEntry {
+impl Serialize for PrunedTxBlobEntry {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let mut state = serializer.serialize_struct("", 2)?;
- let tx_blob = monero::consensus::serialize(&self.tx);
- state.serialize_field("blob", &tx_blob)?;
- let prunable_hash = self.prunable_hash.as_bytes();
- state.serialize_field("prunable_hash", prunable_hash)?;
+ state.serialize_field("blob", &self.tx)?;
+ state.serialize_field("prunable_hash", &self.prunable_hash)?;
state.end()
}
}
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum TransactionBlobs {
+    Pruned(Vec<PrunedTxBlobEntry>),
+    Normal(Vec<Vec<u8>>),
+}
+
+impl TransactionBlobs {
+ pub fn len(&self) -> usize {
+ match self {
+ TransactionBlobs::Normal(txs) => txs.len(),
+ TransactionBlobs::Pruned(txs) => txs.len(),
+ }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+    fn from_epee_value<E: de::Error>(value: Value, pruned: bool) -> Result<Self, E> {
+ let txs = utils::get_internal_val(value, Value::get_seq, "A sequence")?;
+ if pruned {
+ let mut decoded_txs = Vec::with_capacity(txs.len());
+ for tx in txs {
+ decoded_txs.push(PrunedTxBlobEntry::from_epee_value(tx)?);
+ }
+ Ok(TransactionBlobs::Pruned(decoded_txs))
+ } else {
+ let mut decoded_txs = Vec::with_capacity(txs.len());
+ for tx in txs {
+                decoded_txs.push(utils::get_internal_val(tx, Value::get_bytes, "Vec<u8>")?);
+ }
+ Ok(TransactionBlobs::Normal(decoded_txs))
+ }
+ }
+}
+
+impl Serialize for TransactionBlobs {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: serde::Serializer,
+ {
+ match self {
+ TransactionBlobs::Pruned(txs) => {
+ let mut seq = serializer.serialize_seq(Some(txs.len()))?;
+
+ for tx in txs {
+ seq.serialize_element(tx)?;
+ }
+
+ seq.end()
+ }
+ TransactionBlobs::Normal(txs) => {
+ let mut seq = serializer.serialize_seq(Some(txs.len()))?;
+
+ for tx in txs {
+ seq.serialize_element(tx)?;
+ }
+
+ seq.end()
+ }
+ }
+ }
+}
+
/// A Block that can contain transactions
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct BlockCompleteEntry {
/// True if tx data is pruned
pub pruned: bool,
/// The Block
- pub block: Block,
+    pub block: Vec<u8>,
/// The Block Weight/Size
pub block_weight: u64,
- /// If the Block is pruned the txs will be here
-    pub txs_pruned: Vec<TxBlobEntry>,
-    /// If the Block is not pruned the txs will be here
-    pub txs: Vec<Transaction>,
+    /// The block's transactions
+ pub txs: TransactionBlobs,
}
impl<'de> Deserialize<'de> for BlockCompleteEntry {
@@ -168,46 +293,38 @@ impl<'de> Deserialize<'de> for BlockCompleteEntry {
where
D: serde::Deserializer<'de>,
{
- let value = Value::deserialize(deserializer)?;
+ let mut value = Value::deserialize(deserializer)?;
let mut pruned = false;
- if let Some(val) = value.get("pruned") {
- pruned = *get_internal_val!(val, get_bool, "bool");
+ if let Some(val) = value.get_and_remove("pruned") {
+ pruned = utils::get_internal_val(val, Value::get_bool, "bool")?;
}
-        let block_bytes = get_val_from_map!(value, "block", get_bytes, "Vec<u8>");
-
- let block = monero_decode_into_serde_err!(Block, block_bytes);
+ let block =
+            utils::get_internal_val_from_map(&mut value, "block", Value::get_bytes, "Vec<u8>")?;
let mut block_weight = 0;
- let mut txs_pruned = vec![];
- let mut txs = vec![];
+ let txs_value = value.get_and_remove("txs");
+
+ let mut txs = TransactionBlobs::Normal(vec![]);
+
+ if let Some(txs_value) = txs_value {
+            txs = TransactionBlobs::from_epee_value(txs_value, pruned)?;
+ }
if pruned {
- block_weight = *get_val_from_map!(value, "block_weight", get_u64, "u64");
-
- if let Some(v) = value.get("txs") {
- let v = get_internal_val!(v, get_seq, "a sequence");
-
- txs_pruned.reserve(v.len());
- for val in v {
- txs_pruned.push(TxBlobEntry::from_epee_value(val)?);
- }
- }
- } else if let Some(v) = value.get("txs") {
- let v = get_internal_val!(v, get_seq, "a sequence");
-
- txs.reserve(v.len());
- for val in v {
-                let tx_buf = get_internal_val!(val, get_bytes, "Vec<u8>");
- txs.push(monero_decode_into_serde_err!(Transaction, tx_buf));
- }
+ block_weight = utils::get_internal_val_from_map(
+ &mut value,
+ "block_weight",
+ Value::get_u64,
+ "u64",
+ )?;
}
+
Ok(BlockCompleteEntry {
pruned,
block,
block_weight,
- txs_pruned,
txs,
})
}
@@ -219,7 +336,7 @@ impl Serialize for BlockCompleteEntry {
S: serde::Serializer,
{
let mut len = 1;
- if !self.txs.is_empty() || !self.txs_pruned.is_empty() {
+ if !self.txs.is_empty() {
len += 1;
}
if self.pruned {
@@ -230,24 +347,31 @@ impl Serialize for BlockCompleteEntry {
let mut state = serializer.serialize_struct("", len)?;
- let block = monero::consensus::serialize(&self.block);
- state.serialize_field("block", &block)?;
+ state.serialize_field("block", &self.block)?;
if self.pruned {
state.serialize_field("pruned", &true)?;
state.serialize_field("block_weight", &self.block_weight)?;
+ }
- if !self.txs_pruned.is_empty() {
- state.serialize_field("txs", &self.txs_pruned)?;
- }
- } else if !self.txs.is_empty() {
- let mut tx_blobs = vec![];
- for tx in self.txs.iter() {
- tx_blobs.push(monero::consensus::serialize(tx));
- }
- state.serialize_field("txs", &tx_blobs)?;
+ if !self.txs.is_empty() {
+ state.serialize_field("txs", &self.txs)?;
}
state.end()
}
}
+
+#[cfg(test)]
+mod tests {
+
+ use super::CoreSyncData;
+
+ #[test]
+ fn core_sync_cumulative_difficulty() {
+ let core_sync = CoreSyncData::new(u128::MAX, 80085, 200, [0; 32], 21);
+ assert_eq!(core_sync.cumulative_difficulty(), u128::MAX);
+ let core_sync = CoreSyncData::new(21, 80085, 200, [0; 32], 21);
+ assert_eq!(core_sync.cumulative_difficulty(), 21);
+ }
+}
diff --git a/net/monero-wire/src/messages/protocol.rs b/net/monero-wire/src/messages/protocol.rs
index f3e15056..0ff9ca9d 100644
--- a/net/monero-wire/src/messages/protocol.rs
+++ b/net/monero-wire/src/messages/protocol.rs
@@ -18,16 +18,15 @@
//! Protocol message requests don't have to be responded to in order unlike
//! admin messages.
-use monero::Hash;
-use monero::Transaction;
use serde::Deserialize;
use serde::Serialize;
use serde_with::serde_as;
use serde_with::Bytes;
-use serde_with::TryFromInto;
use super::common::BlockCompleteEntry;
-use super::{default_false, default_true};
+use crate::utils::{default_false, default_true};
+
+const P2P_PROTOCOL_BASE: u32 = 2000;
/// A block that SHOULD have transactions
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
@@ -38,14 +37,15 @@ pub struct NewBlock {
pub current_blockchain_height: u64,
}
-/// A Block that doesn't have transactions unless requested
-#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
-pub struct NewFluffyBlock {
- /// Block which might have transactions
- pub b: BlockCompleteEntry,
- /// The Block height
- pub current_blockchain_height: u64,
-}
+message!(
+ Protocol,
+ Name: NewBlock {
+ EncodingError: epee_serde::Error,
+ Encode: epee_serde::to_bytes,
+ Decode: epee_serde::from_bytes,
+ },
+ ID: P2P_PROTOCOL_BASE + 1,
+);
/// A Tx Pool transaction blob
#[serde_as]
@@ -53,13 +53,6 @@ pub struct NewFluffyBlock {
#[serde(transparent)]
 pub struct TxBlob(#[serde_as(as = "Bytes")] pub Vec<u8>);
-impl TxBlob {
- /// Deserialize the transaction
- pub fn deserialize(&self) -> Result {
- monero::consensus::deserialize(&self.0)
- }
-}
-
/// New Tx Pool Transactions
#[serde_as]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
@@ -75,18 +68,37 @@ pub struct NewTransactions {
     pub padding: Vec<u8>,
}
+message!(
+ Protocol,
+ Name: NewTransactions {
+ EncodingError: epee_serde::Error,
+ Encode: epee_serde::to_bytes,
+ Decode: epee_serde::from_bytes,
+ },
+ ID: P2P_PROTOCOL_BASE + 2,
+);
+
/// A Request For Blocks
#[serde_as]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct GetObjectsRequest {
/// Blocks
-    #[serde_as(as = "Vec<TryFromInto<[u8; 32]>>")]
-    pub blocks: Vec<Hash>,
+ pub blocks: Vec<[u8; 32]>,
/// Pruned
#[serde(default = "default_false")]
pub pruned: bool,
}
+message!(
+ Protocol,
+ Name: GetObjectsRequest {
+ EncodingError: epee_serde::Error,
+ Encode: epee_serde::to_bytes,
+ Decode: epee_serde::from_bytes,
+ },
+ ID: P2P_PROTOCOL_BASE + 3,
+);
+
/// A Blocks Response
#[serde_as]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
@@ -94,24 +106,42 @@ pub struct GetObjectsResponse {
/// Blocks
     pub blocks: Vec<BlockCompleteEntry>,
/// Missed IDs
-    #[serde_as(as = "Vec<TryFromInto<[u8; 32]>>")]
-    pub missed_ids: Vec<Hash>,
+ pub missed_ids: Vec<[u8; 32]>,
/// The height of the peers blockchain
pub current_blockchain_height: u64,
}
+message!(
+ Protocol,
+ Name: GetObjectsResponse {
+ EncodingError: epee_serde::Error,
+ Encode: epee_serde::to_bytes,
+ Decode: epee_serde::from_bytes,
+ },
+ ID: P2P_PROTOCOL_BASE + 4,
+);
+
/// A Chain Request
#[serde_as]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct ChainRequest {
/// Block IDs
-    #[serde_as(as = "Vec<TryFromInto<[u8; 32]>>")]
-    pub block_ids: Vec<Hash>,
+ pub block_ids: Vec<[u8; 32]>,
/// Prune
#[serde(default = "default_false")]
pub prune: bool,
}
+message!(
+ Protocol,
+ Name: ChainRequest {
+ EncodingError: epee_serde::Error,
+ Encode: epee_serde::to_bytes,
+ Decode: epee_serde::from_bytes,
+ },
+ ID: P2P_PROTOCOL_BASE + 6,
+);
+
/// A Chain Response
#[serde_as]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
@@ -125,42 +155,113 @@ pub struct ChainResponse {
/// Cumulative Difficulty High
pub cumulative_difficulty_high: u64,
/// Block IDs
-    #[serde_as(as = "Vec<TryFromInto<[u8; 32]>>")]
-    pub m_block_ids: Vec<Hash>,
+ pub m_block_ids: Vec<[u8; 32]>,
/// Block Weights
     pub m_block_weights: Vec<u64>,
- /// The first Block in the blockchain
+ /// The first Block in the response
#[serde_as(as = "Bytes")]
     pub first_block: Vec<u8>,
}
+impl ChainResponse {
+ pub fn new(
+ start_height: u64,
+ total_height: u64,
+ cumulative_difficulty_128: u128,
+ m_block_ids: Vec<[u8; 32]>,
+        m_block_weights: Vec<u64>,
+        first_block: Vec<u8>,
+ ) -> Self {
+ let cumulative_difficulty_low = cumulative_difficulty_128 as u64;
+ let cumulative_difficulty_high = (cumulative_difficulty_128 >> 64) as u64;
+ Self {
+ start_height,
+ total_height,
+ cumulative_difficulty_low,
+ cumulative_difficulty_high,
+ m_block_ids,
+ m_block_weights,
+ first_block,
+ }
+ }
+ pub fn cumulative_difficulty(&self) -> u128 {
+ let mut ret: u128 = self.cumulative_difficulty_high as u128;
+ ret <<= 64;
+ ret | self.cumulative_difficulty_low as u128
+ }
+}
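+
+// Worked example (illustrative): as with CoreSyncData, the 128-bit cumulative difficulty
+// is carried as low/high u64 halves on the wire; `new` splits it and
+// `cumulative_difficulty` recombines it, e.g. (1 << 64) + 5 becomes low = 5, high = 1.
+#[cfg(test)]
+mod chain_response_difficulty_sketch {
+    use super::ChainResponse;
+
+    #[test]
+    fn difficulty_split_round_trips() {
+        let difficulty: u128 = (1u128 << 64) + 5;
+        let res = ChainResponse::new(0, 0, difficulty, vec![], vec![], vec![]);
+        assert_eq!(res.cumulative_difficulty(), difficulty);
+    }
+}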
+
+message!(
+ Protocol,
+ Name: ChainResponse {
+ EncodingError: epee_serde::Error,
+ Encode: epee_serde::to_bytes,
+ Decode: epee_serde::from_bytes,
+ },
+ ID: P2P_PROTOCOL_BASE + 7,
+);
+
+/// A Block that doesn't have transactions unless requested
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
+pub struct NewFluffyBlock {
+ /// Block which might have transactions
+ pub b: BlockCompleteEntry,
+ /// The Block height
+ pub current_blockchain_height: u64,
+}
+
+message!(
+ Protocol,
+ Name: NewFluffyBlock {
+ EncodingError: epee_serde::Error,
+ Encode: epee_serde::to_bytes,
+ Decode: epee_serde::from_bytes,
+ },
+ ID: P2P_PROTOCOL_BASE + 8,
+);
+
/// A request for Txs we are missing from our TxPool
#[serde_as]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct FluffyMissingTransactionsRequest {
/// The Block we are missing the Txs in
- #[serde_as(as = "TryFromInto<[u8; 32]>")]
- pub block_hash: Hash,
+ pub block_hash: [u8; 32],
/// The current blockchain height
pub current_blockchain_height: u64,
/// The Tx Indices
     pub missing_tx_indices: Vec<u64>,
}
+message!(
+ Protocol,
+ Name: FluffyMissingTransactionsRequest {
+ EncodingError: epee_serde::Error,
+ Encode: epee_serde::to_bytes,
+ Decode: epee_serde::from_bytes,
+ },
+ ID: P2P_PROTOCOL_BASE + 9,
+);
+
/// TxPoolCompliment
#[serde_as]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
-pub struct TxPoolCompliment {
+pub struct GetTxPoolCompliment {
/// Tx Hashes
-    #[serde_as(as = "Vec<TryFromInto<[u8; 32]>>")]
-    pub hashes: Vec<Hash>,
+ pub hashes: Vec<[u8; 32]>,
}
+message!(
+ Protocol,
+ Name: GetTxPoolCompliment {
+ EncodingError: epee_serde::Error,
+ Encode: epee_serde::to_bytes,
+ Decode: epee_serde::from_bytes,
+ },
+ ID: P2P_PROTOCOL_BASE + 10,
+);
+
#[cfg(test)]
mod tests {
- use std::str::FromStr;
-
- use monero::Hash;
use super::{NewFluffyBlock, NewTransactions};
@@ -675,14 +776,16 @@ mod tests {
248, 248, 91, 110, 107, 144, 12, 175, 253, 21, 121, 28,
];
+ let now = std::time::Instant::now();
+ for _ in 0..1000 {
+ let _new_transactions: NewTransactions = epee_serde::from_bytes(bytes).unwrap();
+ }
+ println!("in: {}ms", now.elapsed().as_millis());
+
let new_transactions: NewTransactions = epee_serde::from_bytes(bytes).unwrap();
assert_eq!(4, new_transactions.txs.len());
- for transaction in new_transactions.txs.iter() {
- transaction.deserialize().unwrap();
- }
-
let encoded_bytes = epee_serde::to_bytes(&new_transactions).unwrap();
let new_transactions_2: NewTransactions = epee_serde::from_bytes(encoded_bytes).unwrap();
@@ -1031,11 +1134,6 @@ mod tests {
103, 104, 116, 5, 209, 45, 42, 0, 0, 0, 0, 0,
];
let fluffy_block: NewFluffyBlock = epee_serde::from_bytes(bytes).unwrap();
- let hash =
- Hash::from_str("0x0bb7f7cfc8fcf55d3da64093a9ef7e9efb57e14249ef6a392b407aeecb1cd844")
- .unwrap();
-
- assert_eq!(hash, fluffy_block.b.block.id());
let encoded_bytes = epee_serde::to_bytes(&fluffy_block).unwrap();
let fluffy_block_2: NewFluffyBlock = epee_serde::from_bytes(encoded_bytes).unwrap();
diff --git a/net/monero-wire/src/network_address.rs b/net/monero-wire/src/network_address.rs
index 31966fb5..5102b969 100644
--- a/net/monero-wire/src/network_address.rs
+++ b/net/monero-wire/src/network_address.rs
@@ -22,8 +22,17 @@ use std::{hash::Hash, net};
use epee_serde::Value;
use serde::{de, ser::SerializeStruct, Deserialize, Serialize};
+use super::utils;
+
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+pub enum NetZone {
+ Public,
+ Tor,
+ I2p,
+}
+
/// An IPv4 address with a port
-#[derive(Clone, Copy, Serialize, Debug, PartialEq, Eq, Hash)]
+#[derive(Clone, Copy, Serialize, Debug, Default, PartialEq, Eq, Hash)]
pub struct IPv4Address {
/// IP address
pub m_ip: u32,
@@ -41,20 +50,20 @@ impl From<net::SocketAddrV4> for IPv4Address {
}
impl IPv4Address {
-    fn from_value<E: de::Error>(value: &Value) -> Result<Self, E> {
- let m_ip = get_val_from_map!(value, "m_ip", get_u32, "u32");
+    fn from_value<E: de::Error>(mut value: Value) -> Result<Self, E> {
+ let m_ip = utils::get_internal_val_from_map(&mut value, "m_ip", Value::get_u32, "u32")?;
- let m_port = get_val_from_map!(value, "m_port", get_u16, "u16");
+ let m_port = utils::get_internal_val_from_map(&mut value, "m_port", Value::get_u16, "u16")?;
Ok(IPv4Address {
- m_ip: *m_ip,
- m_port: *m_port,
+            m_ip,
+            m_port,
})
}
}
/// An IPv6 address with a port
-#[derive(Clone, Copy, Serialize, Debug, PartialEq, Eq, Hash)]
+#[derive(Clone, Copy, Serialize, Debug, Default, PartialEq, Eq, Hash)]
pub struct IPv6Address {
/// Address
pub addr: [u8; 16],
@@ -72,17 +81,18 @@ impl From<net::SocketAddrV6> for IPv6Address {
}
impl IPv6Address {
-    fn from_value<E: de::Error>(value: &Value) -> Result<Self, E> {
-        let addr = get_val_from_map!(value, "addr", get_bytes, "Vec<u8>");
+    fn from_value<E: de::Error>(mut value: Value) -> Result<Self, E> {
+        let addr =
+            utils::get_internal_val_from_map(&mut value, "addr", Value::get_bytes, "Vec<u8>")?;
+ let addr_len = addr.len();
- let m_port = get_val_from_map!(value, "m_port", get_u16, "u16");
+ let m_port = utils::get_internal_val_from_map(&mut value, "m_port", Value::get_u16, "u16")?;
Ok(IPv6Address {
addr: addr
- .clone()
.try_into()
- .map_err(|_| E::invalid_length(addr.len(), &"a 16-byte array"))?,
- m_port: *m_port,
+ .map_err(|_| E::invalid_length(addr_len, &"a 16-byte array"))?,
+ m_port,
})
}
}
@@ -97,6 +107,37 @@ pub enum NetworkAddress {
IPv6(IPv6Address),
}
+impl NetworkAddress {
+ pub fn get_zone(&self) -> NetZone {
+ match self {
+ NetworkAddress::IPv4(_) | NetworkAddress::IPv6(_) => NetZone::Public,
+ }
+ }
+
+ pub fn is_loopback(&self) -> bool {
+ // TODO
+ false
+ }
+
+ pub fn is_local(&self) -> bool {
+ // TODO
+ false
+ }
+
+ pub fn port(&self) -> u16 {
+ match self {
+ NetworkAddress::IPv4(ip) => ip.m_port,
+ NetworkAddress::IPv6(ip) => ip.m_port,
+ }
+ }
+}
+
+impl Default for NetworkAddress {
+ fn default() -> Self {
+ Self::IPv4(IPv4Address::default())
+ }
+}
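+
+// Usage sketch (illustrative): every clear-net address currently maps to the public zone;
+// this assumes the `From<net::SocketAddrV4>` impl below copies the socket's port verbatim.
+#[cfg(test)]
+mod network_address_sketch {
+    use super::{NetZone, NetworkAddress};
+
+    #[test]
+    fn clearnet_peer_is_public() {
+        let sock: std::net::SocketAddrV4 = "203.0.113.7:18080".parse().unwrap();
+        let addr: NetworkAddress = sock.into();
+        assert_eq!(addr.get_zone(), NetZone::Public);
+        assert_eq!(addr.port(), 18080);
+    }
+}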
+
 impl From<net::SocketAddrV4> for NetworkAddress {
fn from(value: net::SocketAddrV4) -> Self {
NetworkAddress::IPv4(value.into())
@@ -123,12 +164,16 @@ impl<'de> Deserialize<'de> for NetworkAddress {
where
D: serde::Deserializer<'de>,
{
- let value = Value::deserialize(deserializer)?;
- let addr_type = get_val_from_map!(value, "type", get_u8, "u8");
+ let mut value = Value::deserialize(deserializer)?;
+ let addr_type = utils::get_internal_val_from_map(&mut value, "type", Value::get_u8, "u8")?;
Ok(match addr_type {
- 1 => NetworkAddress::IPv4(IPv4Address::from_value(get_field_from_map!(value, "addr"))?),
- 2 => NetworkAddress::IPv6(IPv6Address::from_value(get_field_from_map!(value, "addr"))?),
+ 1 => NetworkAddress::IPv4(IPv4Address::from_value(utils::get_field_from_map(
+ &mut value, "addr",
+ )?)?),
+ 2 => NetworkAddress::IPv6(IPv6Address::from_value(utils::get_field_from_map(
+ &mut value, "addr",
+ )?)?),
_ => {
return Err(de::Error::custom(
"Network address type currently unsupported",
diff --git a/net/monero-wire/src/utils.rs b/net/monero-wire/src/utils.rs
new file mode 100644
index 00000000..5c07af28
--- /dev/null
+++ b/net/monero-wire/src/utils.rs
@@ -0,0 +1,45 @@
+use epee_serde::Value;
+
+pub(crate) fn zero_val<T: From<u8>>() -> T {
+ T::from(0_u8)
+}
+
+pub(crate) fn default_true() -> bool {
+ true
+}
+
+pub(crate) fn default_false() -> bool {
+ false
+}
+
+pub(crate) fn get_field_from_map<E: serde::de::Error>(
+    value: &mut Value,
+    field_name: &'static str,
+) -> Result<Value, E> {
+ value
+ .get_and_remove(field_name)
+ .ok_or(serde::de::Error::missing_field(field_name))
+}
+
+pub(crate) fn get_internal_val<T, E, F>(value: Value, get_fn: F, expected_ty: &str) -> Result<T, E>
+where
+ E: serde::de::Error,
+    F: Fn(Value) -> Option<T>,
+{
+ let err = serde::de::Error::invalid_type(value.get_value_type_as_unexpected(), &expected_ty);
+ get_fn(value).ok_or(err)
+}
+
+pub(crate) fn get_internal_val_from_map<T, E, F>(
+ value: &mut Value,
+ field_name: &'static str,
+ get_fn: F,
+ expected_ty: &str,
+) -> Result<T, E>
+where
+ E: serde::de::Error,
+    F: Fn(Value) -> Option<T>,
+{
+ let val = get_field_from_map(value, field_name)?;
+ get_internal_val(val, get_fn, expected_ty)
+}
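+
+// Usage sketch (illustrative, hypothetical caller): how the helpers above combine inside a
+// manual Deserialize impl, mirroring the pattern in network_address.rs.
+#[cfg(test)]
+mod helper_usage_sketch {
+    use super::*;
+
+    // Compile-only sketch: pull the "m_port" entry out of an epee map and type-check it.
+    fn _port_from_value<E: serde::de::Error>(value: &mut Value) -> Result<u16, E> {
+        get_internal_val_from_map(value, "m_port", Value::get_u16, "u16")
+    }
+}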
diff --git a/p2p/Cargo.toml b/p2p/Cargo.toml
new file mode 100644
index 00000000..594c9d75
--- /dev/null
+++ b/p2p/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "cuprate-peer"
+version = "0.1.0"
+edition = "2021"
+license = "AGPL-3.0-only"
+authors = ["Boog900"]
+
+
+[dependencies]
+chrono = "0.4.24"
+thiserror = "1.0.39"
+cuprate-common = {path = "../common"}
+monero-wire = {path= "../net/monero-wire"}
+futures = "0.3.26"
+tower = {version = "0.4.13", features = ["util", "steer"]}
+tokio = {version= "1.27", features=["rt", "time"]}
+async-trait = "0.1.68"
+tracing = "0.1.37"
+rand = "0.8.5"
\ No newline at end of file
diff --git a/p2p/LICENSE b/p2p/LICENSE
new file mode 100644
index 00000000..e19903e6
--- /dev/null
+++ b/p2p/LICENSE
@@ -0,0 +1,14 @@
+ Copyright (C) 2023 Cuprate Contributors
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see .
\ No newline at end of file
diff --git a/p2p/src/address_book.rs b/p2p/src/address_book.rs
new file mode 100644
index 00000000..c7b465ba
--- /dev/null
+++ b/p2p/src/address_book.rs
@@ -0,0 +1,120 @@
+mod addr_book_client;
+pub(crate) mod address_book;
+
+pub use addr_book_client::start_address_book;
+
+use monero_wire::{messages::PeerListEntryBase, network_address::NetZone, NetworkAddress};
+
+const MAX_WHITE_LIST_PEERS: usize = 1000;
+const MAX_GRAY_LIST_PEERS: usize = 5000;
+
+#[derive(Debug, thiserror::Error)]
+pub enum AddressBookError {
+ #[error("Peer was not found in book")]
+ PeerNotFound,
+ #[error("The peer list is empty")]
+ PeerListEmpty,
+ #[error("Peer sent an address out of it's net-zone")]
+    #[error("Peer sent an address out of its net-zone")]
+ #[error("The address books channel has closed.")]
+ AddressBooksChannelClosed,
+ #[error("Peer Store Error: {0}")]
+ PeerStoreError(&'static str),
+}
+
+#[derive(Debug)]
+pub enum AddressBookRequest {
+    HandleNewPeerList(Vec<PeerListEntryBase>, NetZone),
+ SetPeerSeen(NetworkAddress, i64),
+ BanPeer(NetworkAddress, chrono::NaiveDateTime),
+ AddPeerToAnchor(NetworkAddress),
+ RemovePeerFromAnchor(NetworkAddress),
+ UpdatePeerInfo(PeerListEntryBase),
+
+ GetRandomGrayPeer(NetZone),
+ GetRandomWhitePeer(NetZone),
+}
+
+impl std::fmt::Display for AddressBookRequest {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ Self::HandleNewPeerList(_, _) => f.write_str("HandleNewPeerList"),
+ Self::SetPeerSeen(_, _) => f.write_str("SetPeerSeen"),
+ Self::BanPeer(_, _) => f.write_str("BanPeer"),
+ Self::AddPeerToAnchor(_) => f.write_str("AddPeerToAnchor"),
+ Self::RemovePeerFromAnchor(_) => f.write_str("RemovePeerFromAnchor"),
+ Self::UpdatePeerInfo(_) => f.write_str("UpdatePeerInfo"),
+ Self::GetRandomGrayPeer(_) => f.write_str("GetRandomGrayPeer"),
+ Self::GetRandomWhitePeer(_) => f.write_str("GetRandomWhitePeer"),
+ }
+ }
+}
+
+impl AddressBookRequest {
+ pub fn get_zone(&self) -> NetZone {
+ match self {
+ Self::HandleNewPeerList(_, zone) => *zone,
+ Self::SetPeerSeen(peer, _) => peer.get_zone(),
+ Self::BanPeer(peer, _) => peer.get_zone(),
+ Self::AddPeerToAnchor(peer) => peer.get_zone(),
+ Self::RemovePeerFromAnchor(peer) => peer.get_zone(),
+ Self::UpdatePeerInfo(peer) => peer.adr.get_zone(),
+
+ Self::GetRandomGrayPeer(zone) => *zone,
+ Self::GetRandomWhitePeer(zone) => *zone,
+ }
+ }
+}
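+
+// Sketch (illustrative): `get_zone()` is what a router (e.g. the Steer service built in
+// addr_book_client) uses to dispatch a request to the matching zone's book.
+#[cfg(test)]
+mod request_zone_sketch {
+    use super::{AddressBookRequest, NetZone};
+
+    #[test]
+    fn request_keeps_its_zone() {
+        let req = AddressBookRequest::GetRandomWhitePeer(NetZone::Tor);
+        assert_eq!(req.get_zone(), NetZone::Tor);
+    }
+}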
+
+#[derive(Debug)]
+pub enum AddressBookResponse {
+ Ok,
+ Peer(PeerListEntryBase),
+}
+
+#[derive(Debug, Clone)]
+pub struct AddressBookConfig {
+ max_white_peers: usize,
+ max_gray_peers: usize,
+}
+
+impl Default for AddressBookConfig {
+ fn default() -> Self {
+ AddressBookConfig {
+ max_white_peers: MAX_WHITE_LIST_PEERS,
+ max_gray_peers: MAX_GRAY_LIST_PEERS,
+ }
+ }
+}
+
+#[async_trait::async_trait]
+pub trait AddressBookStore: Clone {
+    type Error: Into<AddressBookError>;
+ /// Loads the peers from the peer store.
+    /// Returns (in order):
+ /// the white list,
+ /// the gray list,
+ /// the anchor list,
+ /// the ban list
+ async fn load_peers(
+ &mut self,
+ zone: NetZone,
+ ) -> Result<
+ (
+            Vec<PeerListEntryBase>, // white list
+            Vec<PeerListEntryBase>, // gray list
+            Vec<NetworkAddress>, // anchor list
+ Vec<(NetworkAddress, chrono::NaiveDateTime)>, // ban list
+ ),
+ Self::Error,
+ >;
+
+ async fn save_peers(
+ &mut self,
+ zone: NetZone,
+        white: Vec<PeerListEntryBase>,
+        gray: Vec<PeerListEntryBase>,
+        anchor: Vec<NetworkAddress>,
+ bans: Vec<(NetworkAddress, chrono::NaiveDateTime)>, // ban lists
+ ) -> Result<(), Self::Error>;
+}
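+
+// Hypothetical store sketch (illustrative, not part of this patch): the simplest possible
+// `AddressBookStore`, which keeps nothing between runs. A real implementation would
+// persist the lists to disk or a database.
+#[derive(Debug, Clone, Default)]
+pub struct EphemeralPeerStore;
+
+#[async_trait::async_trait]
+impl AddressBookStore for EphemeralPeerStore {
+    type Error = AddressBookError;
+
+    async fn load_peers(
+        &mut self,
+        _zone: NetZone,
+    ) -> Result<
+        (
+            Vec<PeerListEntryBase>,
+            Vec<PeerListEntryBase>,
+            Vec<NetworkAddress>,
+            Vec<(NetworkAddress, chrono::NaiveDateTime)>,
+        ),
+        Self::Error,
+    > {
+        // Nothing was persisted, so every list starts out empty.
+        Ok((vec![], vec![], vec![], vec![]))
+    }
+
+    async fn save_peers(
+        &mut self,
+        _zone: NetZone,
+        _white: Vec<PeerListEntryBase>,
+        _gray: Vec<PeerListEntryBase>,
+        _anchor: Vec<NetworkAddress>,
+        _bans: Vec<(NetworkAddress, chrono::NaiveDateTime)>,
+    ) -> Result<(), Self::Error> {
+        // Discard everything; this store is purely in-memory and always empty.
+        Ok(())
+    }
+}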
diff --git a/p2p/src/address_book/addr_book_client.rs b/p2p/src/address_book/addr_book_client.rs
new file mode 100644
index 00000000..5101cd24
--- /dev/null
+++ b/p2p/src/address_book/addr_book_client.rs
@@ -0,0 +1,121 @@
+use std::future::Future;
+use std::pin::Pin;
+
+use futures::channel::{mpsc, oneshot};
+use futures::FutureExt;
+use tokio::task::spawn;
+use tower::steer::Steer;
+
+use monero_wire::network_address::NetZone;
+
+use super::address_book::{AddressBook, AddressBookClientRequest};
+use super::{
+ AddressBookConfig, AddressBookError, AddressBookRequest, AddressBookResponse, AddressBookStore,
+};
+
+pub async fn start_address_book<S>(
+ peer_store: S,
+ config: AddressBookConfig,
+) -> Result<
+ impl tower::Service<
+ AddressBookRequest,
+ Response = AddressBookResponse,
+ Error = AddressBookError,
+ Future = Pin<
+ Box<
+ dyn Future