From bef2a2cbd4e1194991751d1fbc96603cba8c7a51 Mon Sep 17 00:00:00 2001 From: hinto-janai <hinto.janai@protonmail.com> Date: Thu, 20 Jun 2024 18:20:13 -0400 Subject: [PATCH 01/11] epee: unseal `trait EpeeValue` (#184) * unseal `trait EpeeValue` * fix `container_as_blob.rs` * clippy * epee-encoding: remove `sealed` --- Cargo.lock | 21 +--------------- net/epee-encoding/Cargo.toml | 1 - net/epee-encoding/src/container_as_blob.rs | 4 +-- net/epee-encoding/src/value.rs | 29 +++++----------------- 4 files changed, 8 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1363cd05..d262d17d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -341,7 +341,7 @@ version = "4.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c780290ccf4fb26629baa7a1081e68ced113f1d3ec302fa5948f1c381ebf06c6" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", "syn 2.0.66", @@ -815,7 +815,6 @@ dependencies = [ "hex", "paste", "ref-cast", - "sealed", "thiserror", ] @@ -1062,12 +1061,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - [[package]] name = "heck" version = "0.5.0" @@ -2161,18 +2154,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sealed" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a8caec23b7800fb97971a1c6ae365b6239aaeddfb934d6265f8505e795699d" -dependencies = [ - "heck 0.4.1", - "proc-macro2", - "quote", - "syn 2.0.66", -] - [[package]] name = "security-framework" version = "2.11.0" diff --git a/net/epee-encoding/Cargo.toml b/net/epee-encoding/Cargo.toml index 523ee3ee..8bae8579 100644 --- a/net/epee-encoding/Cargo.toml +++ b/net/epee-encoding/Cargo.toml @@ -17,7 +17,6 @@ std = ["dep:thiserror", "bytes/std", "fixed-bytes/std"] [dependencies] fixed-bytes = { path = "../fixed-bytes", default-features = false } -sealed = "0.5.0" paste = "1.0.14" ref-cast = "1.0.22" bytes = { workspace = true } diff --git a/net/epee-encoding/src/container_as_blob.rs b/net/epee-encoding/src/container_as_blob.rs index 084b43bb..a4e88085 100644 --- a/net/epee-encoding/src/container_as_blob.rs +++ b/net/epee-encoding/src/container_as_blob.rs @@ -1,8 +1,7 @@ use bytes::{Buf, BufMut, Bytes, BytesMut}; use ref_cast::RefCast; -use sealed::sealed; -use crate::{error::*, value::*, EpeeValue, InnerMarker, Marker}; +use crate::{error::*, EpeeValue, InnerMarker, Marker}; #[derive(RefCast)] #[repr(transparent)] @@ -26,7 +25,6 @@ impl<'a, T: Containerable + EpeeValue> From<&'a Vec<T>> for &'a ContainerAsBlob< } } -#[sealed] impl<T: Containerable + EpeeValue> EpeeValue for ContainerAsBlob<T> { const MARKER: Marker = Marker::new(InnerMarker::String); diff --git a/net/epee-encoding/src/value.rs b/net/epee-encoding/src/value.rs index ef42241b..74dea35c 100644 --- a/net/epee-encoding/src/value.rs +++ b/net/epee-encoding/src/value.rs @@ -1,10 +1,10 @@ +//! This module contains a [`EpeeValue`] trait and +//! impls for some possible base epee values. + use alloc::{string::String, vec::Vec}; -/// This module contains a `sealed` [`EpeeValue`] trait and different impls for -/// the different possible base epee values. 
use core::fmt::Debug; use bytes::{Buf, BufMut, Bytes, BytesMut}; -use sealed::sealed; use fixed_bytes::{ByteArray, ByteArrayVec}; @@ -12,10 +12,9 @@ use crate::{ io::*, varint::*, EpeeObject, Error, InnerMarker, Marker, Result, MAX_STRING_LEN_POSSIBLE, }; -/// A trait for epee values, this trait is sealed as all possible epee values are -/// defined in the lib, to make an [`EpeeValue`] outside the lib you will need to -/// use the trait [`EpeeObject`]. -#[sealed(pub(crate))] +/// A trait for epee values. +/// +/// All [`EpeeObject`] objects automatically implement [`EpeeValue`]. pub trait EpeeValue: Sized { const MARKER: Marker; @@ -37,7 +36,6 @@ pub trait EpeeValue: Sized { fn write<B: BufMut>(self, w: &mut B) -> Result<()>; } -#[sealed] impl<T: EpeeObject> EpeeValue for T { const MARKER: Marker = Marker::new(InnerMarker::Object); @@ -56,7 +54,6 @@ impl<T: EpeeObject> EpeeValue for T { } } -#[sealed] impl<T: EpeeObject> EpeeValue for Vec<T> { const MARKER: Marker = T::MARKER.into_seq(); @@ -94,7 +91,6 @@ impl<T: EpeeObject> EpeeValue for Vec<T> { } } -#[sealed] impl<T: EpeeObject + Debug, const N: usize> EpeeValue for [T; N] { const MARKER: Marker = <T>::MARKER.into_seq(); @@ -119,7 +115,6 @@ impl<T: EpeeObject + Debug, const N: usize> EpeeValue for [T; N] { macro_rules! epee_numb { ($numb:ty, $marker:ident, $read_fn:ident, $write_fn:ident) => { - #[sealed] impl EpeeValue for $numb { const MARKER: Marker = Marker::new(InnerMarker::$marker); @@ -148,7 +143,6 @@ epee_numb!(u32, U32, get_u32_le, put_u32_le); epee_numb!(u64, U64, get_u64_le, put_u64_le); epee_numb!(f64, F64, get_f64_le, put_f64_le); -#[sealed] impl EpeeValue for bool { const MARKER: Marker = Marker::new(InnerMarker::Bool); @@ -165,7 +159,6 @@ impl EpeeValue for bool { } } -#[sealed] impl EpeeValue for Vec<u8> { const MARKER: Marker = Marker::new(InnerMarker::String); @@ -209,7 +202,6 @@ impl EpeeValue for Vec<u8> { } } -#[sealed::sealed] impl EpeeValue for Bytes { const MARKER: Marker = Marker::new(InnerMarker::String); @@ -250,7 +242,6 @@ impl EpeeValue for Bytes { } } -#[sealed::sealed] impl EpeeValue for BytesMut { const MARKER: Marker = Marker::new(InnerMarker::String); @@ -294,7 +285,6 @@ impl EpeeValue for BytesMut { } } -#[sealed::sealed] impl<const N: usize> EpeeValue for ByteArrayVec<N> { const MARKER: Marker = Marker::new(InnerMarker::String); @@ -338,7 +328,6 @@ impl<const N: usize> EpeeValue for ByteArrayVec<N> { } } -#[sealed::sealed] impl<const N: usize> EpeeValue for ByteArray<N> { const MARKER: Marker = Marker::new(InnerMarker::String); @@ -374,7 +363,6 @@ impl<const N: usize> EpeeValue for ByteArray<N> { } } -#[sealed] impl EpeeValue for String { const MARKER: Marker = Marker::new(InnerMarker::String); @@ -403,7 +391,6 @@ impl EpeeValue for String { } } -#[sealed] impl<const N: usize> EpeeValue for [u8; N] { const MARKER: Marker = Marker::new(InnerMarker::String); @@ -429,7 +416,6 @@ impl<const N: usize> EpeeValue for [u8; N] { } } -#[sealed] impl<const N: usize> EpeeValue for Vec<[u8; N]> { const MARKER: Marker = <[u8; N]>::MARKER.into_seq(); @@ -470,7 +456,6 @@ impl<const N: usize> EpeeValue for Vec<[u8; N]> { macro_rules! epee_seq { ($val:ty) => { - #[sealed] impl EpeeValue for Vec<$val> { const MARKER: Marker = <$val>::MARKER.into_seq(); @@ -509,7 +494,6 @@ macro_rules! 
epee_seq { } } - #[sealed] impl<const N: usize> EpeeValue for [$val; N] { const MARKER: Marker = <$val>::MARKER.into_seq(); @@ -548,7 +532,6 @@ epee_seq!(String); epee_seq!(Bytes); epee_seq!(BytesMut); -#[sealed] impl<T: EpeeValue> EpeeValue for Option<T> { const MARKER: Marker = T::MARKER; From f6c4e4e9a8b0d2e5e6d6f0e20b2b3115b427c242 Mon Sep 17 00:00:00 2001 From: hinto-janai <hinto.janai@protonmail.com> Date: Thu, 20 Jun 2024 18:27:09 -0400 Subject: [PATCH 02/11] repo: add `Tracking Issue` issue template (#182) * add `tracking_issue.md` * fix `{bug,proposal}.md` * format `tracking_issue.md` --- .github/ISSUE_TEMPLATE/bug.md | 2 +- .github/ISSUE_TEMPLATE/proposal.md | 6 ++-- .github/ISSUE_TEMPLATE/tracking_issue.md | 41 ++++++++++++++++++++++++ 3 files changed, 45 insertions(+), 4 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/tracking_issue.md diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md index 404bd6ad..a0fdbcba 100644 --- a/.github/ISSUE_TEMPLATE/bug.md +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -24,7 +24,7 @@ Example: ## Bug What is the bug? -### Expected behavior +## Expected behavior What correct beahvior was expected to happen? ## Steps to reproduce diff --git a/.github/ISSUE_TEMPLATE/proposal.md b/.github/ISSUE_TEMPLATE/proposal.md index 132b2b07..777cf3e2 100644 --- a/.github/ISSUE_TEMPLATE/proposal.md +++ b/.github/ISSUE_TEMPLATE/proposal.md @@ -14,11 +14,11 @@ Note: Please search to see if an issue already exists for this proposal. ## What Describe your proposal. -## Where -Describe where your proposal will cause changes to. - ## Why Describe why the proposal is needed. +## Where +Describe where your proposal will cause changes to. + ## How Describe how the proposal could be implemented. diff --git a/.github/ISSUE_TEMPLATE/tracking_issue.md b/.github/ISSUE_TEMPLATE/tracking_issue.md new file mode 100644 index 00000000..d9423b41 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/tracking_issue.md @@ -0,0 +1,41 @@ +--- +name: 🔍 Tracking Issue +about: Create an issue that tracks a wider effort +title: 'Tracking Issue for ...' +labels: ["C-tracking-issue"] +assignees: '' + +--- + +<!-- Consider keeping the following section in the issue. --> +### About tracking issues +Tracking issues are used to record the overall progress of implementation. +They are also used as hubs connecting to other relevant issues, e.g., bugs or open design questions. +A tracking issue is however not meant for large scale discussion, questions, or bug reports about a feature. +Instead, open a dedicated issue for the specific matter. + +### What +This is a tracking issue for ... + +### Steps +<!-- +Describe the steps required to bring this effort to completion. + +For larger features, more steps might be involved. +If the feature is changed later, please add those PRs here as well. +--> + +- [ ] Initial implementation: #... +- [ ] Other code: #... +- [ ] Multi-PR effort + - #... + - #... + - #... +- [ ] Finalization PR: #... + +### Related +<!-- Link any related issues/PRs here. --> + +- #... +- #... +- #... 
From ff1172f2abfb840caf115056ff0b2ae95b92a6aa Mon Sep 17 00:00:00 2001 From: hinto-janai <hinto.janai@protonmail.com> Date: Fri, 21 Jun 2024 20:25:21 -0400 Subject: [PATCH 03/11] epee: make `{read,write}_varint` public, create `write_{bytes,container}` (#185) * make `{read,write}_varint` public, create `write_{container,bytes}` * add doc tests to varint functions * `write_container` -> `write_iterator` * add `write_{iterator,bytes}` doc test * fix `write_iterator()` doc --- net/epee-encoding/src/lib.rs | 83 +++++++++++++++++++++++++- net/epee-encoding/src/value.rs | 100 +++++--------------------------- net/epee-encoding/src/varint.rs | 32 ++++++++++ 3 files changed, 130 insertions(+), 85 deletions(-) diff --git a/net/epee-encoding/src/lib.rs b/net/epee-encoding/src/lib.rs index ffb0a1ea..92046d32 100644 --- a/net/epee-encoding/src/lib.rs +++ b/net/epee-encoding/src/lib.rs @@ -77,7 +77,7 @@ pub use error::*; use io::*; pub use marker::{InnerMarker, Marker}; pub use value::EpeeValue; -use varint::*; +pub use varint::{read_varint, write_varint}; /// Header that needs to be at the beginning of every binary blob that follows /// this binary serialization format. @@ -212,6 +212,87 @@ fn write_epee_value<T: EpeeValue, B: BufMut>(val: T, w: &mut B) -> Result<()> { val.write(w) } +/// Write a byte array to `w` with [`write_varint`]. +/// +/// This function: +/// - Writes the length of `t`'s bytes into `w` using [`write_varint`] +/// - Writes `t`'s bytes into `w` +/// +/// It is used as the internal [`EpeeValue::write`] +/// implementation of byte-like containers such as: +/// - [`EpeeValue::<Vec<u8>>::write`] +/// - [`EpeeValue::<String>::write`] +/// +/// # Errors +/// This will error if: +/// - [`write_varint`] fails +/// - `w` does not have enough capacity +/// +/// # Example +/// ```rust +/// let t: [u8; 8] = [3, 0, 0, 0, 1, 0, 0, 0]; +/// let mut w = vec![]; +/// +/// epee_encoding::write_bytes(t, &mut w).unwrap(); +/// +/// assert_eq!(w.len(), 9); // length of bytes + bytes +/// assert_eq!(w[1..], t); +/// ``` +pub fn write_bytes<T: AsRef<[u8]>, B: BufMut>(t: T, w: &mut B) -> Result<()> { + let bytes = t.as_ref(); + let len = bytes.len(); + + write_varint(len.try_into()?, w)?; + + if w.remaining_mut() < len { + return Err(Error::IO("Not enough capacity to write bytes")); + } + + w.put_slice(bytes); + + Ok(()) +} + +/// Write an [`Iterator`] of [`EpeeValue`]s to `w` with [`write_varint`]. +/// +/// This function: +/// - Writes the length of the `iterator`, into `w` using [`write_varint`] +/// - [`EpeeValue::write`]s each `T` of the iterator into `w` +/// +/// It is used as the internal [`EpeeValue::write`] +/// implementation of containers such as [`EpeeValue::<Vec<T>>::write`]. +/// +/// # Errors +/// This will error if: +/// - [`write_varint`] fails +/// - [`EpeeValue::<T>::write`] fails +/// +/// # Example +/// ```rust +/// let t: u64 = 3; +/// let vec: Vec<u64> = vec![t, t]; +/// let mut w = vec![]; +/// +/// let iter: std::vec::IntoIter<u64> = vec.into_iter(); +/// epee_encoding::write_iterator(iter, &mut w).unwrap(); +/// +/// assert_eq!(w.len(), 17); +/// assert_eq!(w[1..9], [3, 0, 0, 0, 0, 0, 0, 0]); +/// assert_eq!(w[9..], [3, 0, 0, 0, 0, 0, 0, 0]); +/// ``` +pub fn write_iterator<T, I, B>(iterator: I, w: &mut B) -> Result<()> +where + T: EpeeValue, + I: Iterator<Item = T> + ExactSizeIterator, + B: BufMut, +{ + write_varint(iterator.len().try_into()?, w)?; + for item in iterator.into_iter() { + item.write(w)?; + } + Ok(()) +} + /// A helper object builder that just skips every field. 
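The two helpers above compose with the now-public varint functions. A minimal sketch, assuming the crate is consumed as `epee_encoding` exactly as in the doc tests above: a length-prefixed byte string written with `write_bytes` can be read back with `read_varint`.

```rust
use epee_encoding::{read_varint, write_bytes};

// `write_bytes` length-prefixes the data: one varint length byte, then the raw bytes.
let mut buf: Vec<u8> = vec![];
write_bytes(b"hello", &mut buf).unwrap();
assert_eq!(buf.len(), 6);

// The length comes back out with the now-public `read_varint`.
let mut reader = buf.as_slice();
assert_eq!(read_varint(&mut reader).unwrap(), 5);
assert_eq!(reader, b"hello".as_slice());
```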
#[derive(Default)] struct SkipObjectBuilder; diff --git a/net/epee-encoding/src/value.rs b/net/epee-encoding/src/value.rs index 74dea35c..0dcd45a8 100644 --- a/net/epee-encoding/src/value.rs +++ b/net/epee-encoding/src/value.rs @@ -9,7 +9,10 @@ use bytes::{Buf, BufMut, Bytes, BytesMut}; use fixed_bytes::{ByteArray, ByteArrayVec}; use crate::{ - io::*, varint::*, EpeeObject, Error, InnerMarker, Marker, Result, MAX_STRING_LEN_POSSIBLE, + io::{checked_read_primitive, checked_write_primitive}, + varint::{read_varint, write_varint}, + write_bytes, write_iterator, EpeeObject, Error, InnerMarker, Marker, Result, + MAX_STRING_LEN_POSSIBLE, }; /// A trait for epee values. @@ -83,11 +86,7 @@ impl<T: EpeeObject> EpeeValue for Vec<T> { } fn write<B: BufMut>(self, w: &mut B) -> Result<()> { - write_varint(self.len().try_into()?, w)?; - for item in self.into_iter() { - item.write(w)?; - } - Ok(()) + write_iterator(self.into_iter(), w) } } @@ -105,11 +104,7 @@ impl<T: EpeeObject + Debug, const N: usize> EpeeValue for [T; N] { } fn write<B: BufMut>(self, w: &mut B) -> Result<()> { - write_varint(self.len().try_into()?, w)?; - for item in self.into_iter() { - item.write(w)?; - } - Ok(()) + write_iterator(self.into_iter(), w) } } @@ -191,14 +186,7 @@ impl EpeeValue for Vec<u8> { } fn write<B: BufMut>(self, w: &mut B) -> Result<()> { - write_varint(self.len().try_into()?, w)?; - - if w.remaining_mut() < self.len() { - return Err(Error::IO("Not enough capacity to write bytes")); - } - - w.put_slice(&self); - Ok(()) + write_bytes(self, w) } } @@ -231,14 +219,7 @@ impl EpeeValue for Bytes { } fn write<B: BufMut>(self, w: &mut B) -> Result<()> { - write_varint(self.len().try_into()?, w)?; - - if w.remaining_mut() < self.len() { - return Err(Error::IO("Not enough capacity to write bytes")); - } - - w.put(self); - Ok(()) + write_bytes(self, w) } } @@ -274,14 +255,7 @@ impl EpeeValue for BytesMut { } fn write<B: BufMut>(self, w: &mut B) -> Result<()> { - write_varint(self.len().try_into()?, w)?; - - if w.remaining_mut() < self.len() { - return Err(Error::IO("Not enough capacity to write bytes")); - } - - w.put(self); - Ok(()) + write_bytes(self, w) } } @@ -316,15 +290,7 @@ impl<const N: usize> EpeeValue for ByteArrayVec<N> { fn write<B: BufMut>(self, w: &mut B) -> Result<()> { let bytes = self.take_bytes(); - - write_varint(bytes.len().try_into()?, w)?; - - if w.remaining_mut() < bytes.len() { - return Err(Error::IO("Not enough capacity to write bytes")); - } - - w.put(bytes); - Ok(()) + write_bytes(bytes, w) } } @@ -351,15 +317,7 @@ impl<const N: usize> EpeeValue for ByteArray<N> { fn write<B: BufMut>(self, w: &mut B) -> Result<()> { let bytes = self.take_bytes(); - - write_varint(N.try_into().unwrap(), w)?; - - if w.remaining_mut() < N { - return Err(Error::IO("Not enough capacity to write bytes")); - } - - w.put(bytes); - Ok(()) + write_bytes(bytes, w) } } @@ -380,14 +338,7 @@ impl EpeeValue for String { } fn write<B: BufMut>(self, w: &mut B) -> Result<()> { - write_varint(self.len().try_into()?, w)?; - - if w.remaining_mut() < self.len() { - return Err(Error::IO("Not enough capacity to write bytes")); - } - - w.put_slice(self.as_bytes()); - Ok(()) + write_bytes(self, w) } } @@ -405,14 +356,7 @@ impl<const N: usize> EpeeValue for [u8; N] { } fn write<B: BufMut>(self, w: &mut B) -> Result<()> { - write_varint(self.len().try_into()?, w)?; - - if w.remaining_mut() < self.len() { - return Err(Error::IO("Not enough capacity to write bytes")); - } - - w.put_slice(&self); - Ok(()) + write_bytes(self, w) } } @@ -446,11 
+390,7 @@ impl<const N: usize> EpeeValue for Vec<[u8; N]> { } fn write<B: BufMut>(self, w: &mut B) -> Result<()> { - write_varint(self.len().try_into()?, w)?; - for item in self.into_iter() { - item.write(w)?; - } - Ok(()) + write_iterator(self.into_iter(), w) } } @@ -486,11 +426,7 @@ macro_rules! epee_seq { } fn write<B: BufMut>(self, w: &mut B) -> Result<()> { - write_varint(self.len().try_into()?, w)?; - for item in self.into_iter() { - item.write(w)?; - } - Ok(()) + write_iterator(self.into_iter(), w) } } @@ -508,11 +444,7 @@ macro_rules! epee_seq { } fn write<B: BufMut>(self, w: &mut B) -> Result<()> { - write_varint(self.len().try_into()?, w)?; - for item in self.into_iter() { - item.write(w)?; - } - Ok(()) + write_iterator(self.into_iter(), w) } } }; diff --git a/net/epee-encoding/src/varint.rs b/net/epee-encoding/src/varint.rs index 5de4eedc..e574ba85 100644 --- a/net/epee-encoding/src/varint.rs +++ b/net/epee-encoding/src/varint.rs @@ -7,6 +7,18 @@ const FITS_IN_ONE_BYTE: u64 = 2_u64.pow(8 - SIZE_OF_SIZE_MARKER) - 1; const FITS_IN_TWO_BYTES: u64 = 2_u64.pow(16 - SIZE_OF_SIZE_MARKER) - 1; const FITS_IN_FOUR_BYTES: u64 = 2_u64.pow(32 - SIZE_OF_SIZE_MARKER) - 1; +/// Read an epee variable sized number from `r`. +/// +/// ```rust +/// use epee_encoding::read_varint; +/// +/// assert_eq!(read_varint(&mut [252].as_slice()).unwrap(), 63); +/// assert_eq!(read_varint(&mut [1, 1].as_slice()).unwrap(), 64); +/// assert_eq!(read_varint(&mut [253, 255].as_slice()).unwrap(), 16_383); +/// assert_eq!(read_varint(&mut [2, 0, 1, 0].as_slice()).unwrap(), 16_384); +/// assert_eq!(read_varint(&mut [254, 255, 255, 255].as_slice()).unwrap(), 1_073_741_823); +/// assert_eq!(read_varint(&mut [3, 0, 0, 0, 1, 0, 0, 0].as_slice()).unwrap(), 1_073_741_824); +/// ``` pub fn read_varint<B: Buf>(r: &mut B) -> Result<u64> { if !r.has_remaining() { Err(Error::IO("Not enough bytes to build VarInt"))? @@ -26,6 +38,26 @@ pub fn read_varint<B: Buf>(r: &mut B) -> Result<u64> { Ok(vi) } +/// Write an epee variable sized number into `w`. 
+/// +/// ```rust +/// use epee_encoding::write_varint; +/// +/// let mut buf = vec![]; +/// +/// for (number, expected_bytes) in [ +/// (63, [252].as_slice()), +/// (64, [1, 1].as_slice()), +/// (16_383, [253, 255].as_slice()), +/// (16_384, [2, 0, 1, 0].as_slice()), +/// (1_073_741_823, [254, 255, 255, 255].as_slice()), +/// (1_073_741_824, [3, 0, 0, 0, 1, 0, 0, 0].as_slice()), +/// ] { +/// buf.clear(); +/// write_varint(number, &mut buf); +/// assert_eq!(buf.as_slice(), expected_bytes); +/// } +/// ``` pub fn write_varint<B: BufMut>(number: u64, w: &mut B) -> Result<()> { let size_marker = match number { 0..=FITS_IN_ONE_BYTE => 0, From 10aac8cbb206a3868f822132254ada6ae1a4e16a Mon Sep 17 00:00:00 2001 From: Boog900 <boog900@tutanota.com> Date: Sat, 22 Jun 2024 00:29:40 +0000 Subject: [PATCH 04/11] P2P: Block downloader (#132) * impl async buffer * clippy * p2p changes * clippy * a few more docs * init cuprate-p2p * remove some unrelated code and add some docs * start documenting client_pool.rs * add more docs * typo * fix docs * use JoinSet in connection maintainer * small changes * add peer sync state svc * add broadcast svc * add more docs * add some tests * add a test * fix merge * add another test * unify PeerDisconnectFut and add more docs * start network init * add an inbound connection server * remove crate doc for now * fix address book docs * fix leak in client pool * correct comment * fix merge + add some docs * review comments * init block downloader * fix doc * initial chain search * add chain_tracker * move block downloader to struct * spawn task whe getting blocks * check for free peers and handle batch response * add test bin * working block downloader * dynamic batch sizes * dandelion_tower -> dandelion-tower * fix async-buffer builds * check if incoming peers are banned * add interface methods * update docs * use a JoinSet for background network tasks * dynamic batch size changes * Keep a longer of queue of blocks to get * more checks on incoming data * fix merge * fix imports * add more docs * add some limits on messages * keep peers that dont have the current need data * fix clippy * fix .lock * fix stopping the block downloader * clean up API and add more docs * tracing + bug fixes * fix panic * doc changes * remove test_init * remove spammy log * fix previous merge * add a test * fix test * remove test unwrap * order imports correctly * clean up test * add a timeout * fix tests * review fixes * make `BlockDownloader` pub * make `initial_chain_search` pub * make `block_downloader` private * Apply suggestions from code review Co-authored-by: hinto-janai <hinto.janai@protonmail.com> * split some sections into separate modules * split chain requests * sort imports * check previous ID is correct * fix typos * Apply suggestions from code review Co-authored-by: hinto-janai <hinto.janai@protonmail.com> --------- Co-authored-by: hinto-janai <hinto.janai@protonmail.com> --- Cargo.lock | 52 +- Cargo.toml | 1 + p2p/cuprate-p2p/Cargo.toml | 9 +- p2p/cuprate-p2p/src/block_downloader.rs | 733 ++++++++++++++++++ .../src/block_downloader/block_queue.rs | 172 ++++ .../src/block_downloader/chain_tracker.rs | 211 +++++ .../src/block_downloader/download_batch.rs | 199 +++++ .../src/block_downloader/request_chain.rs | 238 ++++++ p2p/cuprate-p2p/src/block_downloader/tests.rs | 323 ++++++++ p2p/cuprate-p2p/src/client_pool.rs | 15 +- p2p/cuprate-p2p/src/constants.rs | 41 + p2p/cuprate-p2p/src/lib.rs | 32 +- p2p/monero-p2p/Cargo.toml | 2 +- p2p/monero-p2p/src/client.rs | 76 +- 14 files 
changed, 2033 insertions(+), 71 deletions(-) create mode 100644 p2p/cuprate-p2p/src/block_downloader.rs create mode 100644 p2p/cuprate-p2p/src/block_downloader/block_queue.rs create mode 100644 p2p/cuprate-p2p/src/block_downloader/chain_tracker.rs create mode 100644 p2p/cuprate-p2p/src/block_downloader/download_batch.rs create mode 100644 p2p/cuprate-p2p/src/block_downloader/request_chain.rs create mode 100644 p2p/cuprate-p2p/src/block_downloader/tests.rs diff --git a/Cargo.lock b/Cargo.lock index d262d17d..e83e38a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -598,6 +598,7 @@ dependencies = [ name = "cuprate-p2p" version = "0.1.0" dependencies = [ + "async-buffer", "bytes", "cuprate-helper", "cuprate-test-utils", @@ -612,16 +613,17 @@ dependencies = [ "monero-serai", "monero-wire", "pin-project", + "proptest", "rand", "rand_distr", "rayon", "thiserror", "tokio", "tokio-stream", + "tokio-test", "tokio-util", "tower", "tracing", - "tracing-subscriber", ] [[package]] @@ -1564,16 +1566,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "nu-ansi-term" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" -dependencies = [ - "overload", - "winapi", -] - [[package]] name = "num-traits" version = "0.2.19" @@ -1621,12 +1613,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - [[package]] name = "page_size" version = "0.6.0" @@ -2235,15 +2221,6 @@ dependencies = [ "keccak", ] -[[package]] -name = "sharded-slab" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" -dependencies = [ - "lazy_static", -] - [[package]] name = "signal-hook-registry" version = "1.4.2" @@ -2596,18 +2573,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", - "valuable", -] - -[[package]] -name = "tracing-log" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" -dependencies = [ - "log", - "once_cell", - "tracing-core", ] [[package]] @@ -2616,12 +2581,7 @@ version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ - "nu-ansi-term", - "sharded-slab", - "smallvec", - "thread_local", "tracing-core", - "tracing-log", ] [[package]] @@ -2680,12 +2640,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "valuable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" - [[package]] name = "version_check" version = "0.9.4" diff --git a/Cargo.toml b/Cargo.toml index 8100af72..7be28732 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -89,6 +89,7 @@ tempfile = { version = "3" } pretty_assertions = { version = "1.4.0" } proptest = { version = "1" } proptest-derive = { version = "0.4.0" } +tokio-test = { version = "0.4.4" } ## TODO: ## Potential 
dependencies. diff --git a/p2p/cuprate-p2p/Cargo.toml b/p2p/cuprate-p2p/Cargo.toml index 687493a0..ab477a83 100644 --- a/p2p/cuprate-p2p/Cargo.toml +++ b/p2p/cuprate-p2p/Cargo.toml @@ -11,7 +11,8 @@ monero-wire = { path = "../../net/monero-wire" } monero-p2p = { path = "../monero-p2p", features = ["borsh"] } monero-address-book = { path = "../address-book" } monero-pruning = { path = "../../pruning" } -cuprate-helper = { path = "../../helper", features = ["asynch"] } +cuprate-helper = { path = "../../helper", features = ["asynch"], default-features = false } +async-buffer = { path = "../async-buffer" } monero-serai = { workspace = true, features = ["std"] } @@ -26,13 +27,13 @@ dashmap = { workspace = true } thiserror = { workspace = true } bytes = { workspace = true, features = ["std"] } -indexmap = { workspace = true, features = ["std"] } rand = { workspace = true, features = ["std", "std_rng"] } rand_distr = { workspace = true, features = ["std"] } hex = { workspace = true, features = ["std"] } tracing = { workspace = true, features = ["std", "attributes"] } -tracing-subscriber = "0.3.18" - [dev-dependencies] cuprate-test-utils = { path = "../../test-utils" } +indexmap = { workspace = true } +proptest = { workspace = true } +tokio-test = { workspace = true } diff --git a/p2p/cuprate-p2p/src/block_downloader.rs b/p2p/cuprate-p2p/src/block_downloader.rs new file mode 100644 index 00000000..3f7f7e73 --- /dev/null +++ b/p2p/cuprate-p2p/src/block_downloader.rs @@ -0,0 +1,733 @@ +//! # Block Downloader +//! +//! This module contains the [`BlockDownloader`], which finds a chain to +//! download from our connected peers and downloads it. See the actual +//! `struct` documentation for implementation details. +//! +//! The block downloader is started by [`download_blocks`]. +use std::{ + cmp::{max, min, Reverse}, + collections::{BTreeMap, BinaryHeap}, + sync::Arc, + time::Duration, +}; + +use futures::TryFutureExt; +use monero_serai::{block::Block, transaction::Transaction}; +use tokio::{ + task::JoinSet, + time::{interval, timeout, MissedTickBehavior}, +}; +use tower::{Service, ServiceExt}; +use tracing::{instrument, Instrument, Span}; + +use async_buffer::{BufferAppender, BufferStream}; +use monero_p2p::{ + handles::ConnectionHandle, + services::{PeerSyncRequest, PeerSyncResponse}, + NetworkZone, PeerSyncSvc, +}; +use monero_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; + +use crate::{ + client_pool::{ClientPool, ClientPoolDropGuard}, + constants::{ + BLOCK_DOWNLOADER_REQUEST_TIMEOUT, EMPTY_CHAIN_ENTRIES_BEFORE_TOP_ASSUMED, LONG_BAN, + MAX_BLOCK_BATCH_LEN, MAX_DOWNLOAD_FAILURES, + }, +}; + +mod block_queue; +mod chain_tracker; +mod download_batch; +mod request_chain; +#[cfg(test)] +mod tests; + +use block_queue::{BlockQueue, ReadyQueueBatch}; +use chain_tracker::{BlocksToRetrieve, ChainEntry, ChainTracker}; +use download_batch::download_batch_task; +use request_chain::{initial_chain_search, request_chain_entry_from_peer}; + +/// A downloaded batch of blocks. +#[derive(Debug, Clone)] +pub struct BlockBatch { + /// The blocks. + pub blocks: Vec<(Block, Vec<Transaction>)>, + /// The size in bytes of this batch. + pub size: usize, + /// The peer that gave us this batch. + pub peer_handle: ConnectionHandle, +} + +/// The block downloader config. +#[derive(Debug, Copy, Clone, PartialOrd, PartialEq, Ord, Eq)] +pub struct BlockDownloaderConfig { + /// The size in bytes of the buffer between the block downloader and the place which + /// is consuming the downloaded blocks. 
+ pub buffer_size: usize, + /// The size of the in progress queue (in bytes) at which we stop requesting more blocks. + pub in_progress_queue_size: usize, + /// The [`Duration`] between checking the client pool for free peers. + pub check_client_pool_interval: Duration, + /// The target size of a single batch of blocks (in bytes). + pub target_batch_size: usize, + /// The initial amount of blocks to request (in number of blocks) + pub initial_batch_size: usize, +} + +/// An error that occurred in the [`BlockDownloader`]. +#[derive(Debug, thiserror::Error)] +pub enum BlockDownloadError { + #[error("A request to a peer timed out.")] + TimedOut, + #[error("The block buffer was closed.")] + BufferWasClosed, + #[error("The peers we requested data from did not have all the data.")] + PeerDidNotHaveRequestedData, + #[error("The peers response to a request was invalid.")] + PeersResponseWasInvalid, + #[error("The chain we are following is invalid.")] + ChainInvalid, + #[error("Failed to find a more advanced chain to follow")] + FailedToFindAChainToFollow, + #[error("The peer did not send any overlapping blocks, unknown start height.")] + PeerSentNoOverlappingBlocks, + #[error("Service error: {0}")] + ServiceError(#[from] tower::BoxError), +} + +/// The request type for the chain service. +pub enum ChainSvcRequest { + /// A request for the current chain history. + CompactHistory, + /// A request to find the first unknown block ID in a list of block IDs. + FindFirstUnknown(Vec<[u8; 32]>), + /// A request for our current cumulative difficulty. + CumulativeDifficulty, +} + +/// The response type for the chain service. +pub enum ChainSvcResponse { + /// The response for [`ChainSvcRequest::CompactHistory`]. + CompactHistory { + /// A list of blocks IDs in our chain, starting with the most recent block, all the way to the genesis block. + /// + /// These blocks should be in reverse chronological order, not every block is needed. + block_ids: Vec<[u8; 32]>, + /// The current cumulative difficulty of the chain. + cumulative_difficulty: u128, + }, + /// The response for [`ChainSvcRequest::FindFirstUnknown`]. + /// + /// Contains the index of the first unknown block and its expected height. + FindFirstUnknown(usize, u64), + /// The response for [`ChainSvcRequest::CumulativeDifficulty`]. + /// + /// The current cumulative difficulty of our chain. + CumulativeDifficulty(u128), +} + +/// This function starts the block downloader and returns a [`BufferStream`] that will produce +/// a sequential stream of blocks. +/// +/// The block downloader will pick the longest chain and will follow it for as long as possible, +/// the blocks given from the [`BufferStream`] will be in order. +/// +/// The block downloader may fail before the whole chain is downloaded. If this is the case you can +/// call this function again, so it can start the search again. 
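/// # Example
///
/// A minimal sketch of driving the downloader; the config values below are illustrative
/// placeholders, and `client_pool`, `peer_sync_svc` and `our_chain_svc` are assumed to be
/// constructed elsewhere:
///
/// ```rust,ignore
/// use std::time::Duration;
/// use futures::StreamExt;
///
/// let config = BlockDownloaderConfig {
///     buffer_size: 50_000_000,
///     in_progress_queue_size: 50_000_000,
///     check_client_pool_interval: Duration::from_secs(30),
///     target_batch_size: 5_000_000,
///     initial_batch_size: 1,
/// };
///
/// let mut batch_stream = download_blocks(client_pool, peer_sync_svc, our_chain_svc, config);
///
/// while let Some(batch) = batch_stream.next().await {
///     // `BlockBatch`es are yielded in sequential (chain) order.
/// }
/// ```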
+#[instrument(level = "error", skip_all, name = "block_downloader")] +pub fn download_blocks<N: NetworkZone, S, C>( + client_pool: Arc<ClientPool<N>>, + peer_sync_svc: S, + our_chain_svc: C, + config: BlockDownloaderConfig, +) -> BufferStream<BlockBatch> +where + S: PeerSyncSvc<N> + Clone, + C: Service<ChainSvcRequest, Response = ChainSvcResponse, Error = tower::BoxError> + + Send + + 'static, + C::Future: Send + 'static, +{ + let (buffer_appender, buffer_stream) = async_buffer::new_buffer(config.buffer_size); + + let block_downloader = BlockDownloader::new( + client_pool, + peer_sync_svc, + our_chain_svc, + buffer_appender, + config, + ); + + tokio::spawn( + block_downloader + .run() + .inspect_err(|e| tracing::debug!("Error downloading blocks: {e}")) + .instrument(Span::current()), + ); + + buffer_stream +} + +/// # Block Downloader +/// +/// This is the block downloader, which finds a chain to follow and attempts to follow it, adding the +/// downloaded blocks to an [`async_buffer`]. +/// +/// ## Implementation Details +/// +/// The first step to downloading blocks is to find a chain to follow, this is done by [`initial_chain_search`], +/// docs can be found on that function for details on how this is done. +/// +/// With an initial list of block IDs to follow the block downloader will then look for available peers +/// to download blocks from. +/// +/// For each peer we will then allocate a batch of blocks for them to retrieve, as these blocks come in +/// we add them to the [`BlockQueue`] for pushing into the [`async_buffer`], once we have the oldest block downloaded +/// we send it into the buffer, repeating this until the oldest current block is still being downloaded. +/// +/// When a peer has finished downloading blocks we add it to our list of ready peers, so it can be used to +/// request more data from. +/// +/// Ready peers will either: +/// - download the next batch of blocks +/// - request the next chain entry +/// - download an already requested batch of blocks (this might happen due to an error in the previous request +/// or because the queue of ready blocks is too large, so we need the oldest block to clear it). +struct BlockDownloader<N: NetworkZone, S, C> { + /// The client pool. + client_pool: Arc<ClientPool<N>>, + + /// The service that holds the peer's sync states. + peer_sync_svc: S, + /// The service that holds our current chain state. + our_chain_svc: C, + + /// The amount of blocks to request in the next batch. + amount_of_blocks_to_request: usize, + /// The height at which [`Self::amount_of_blocks_to_request`] was updated. + amount_of_blocks_to_request_updated_at: u64, + + /// The amount of consecutive empty chain entries we received. + /// + /// An empty chain entry means we reached the peer's chain tip. + amount_of_empty_chain_entries: usize, + + /// The running block download tasks. + block_download_tasks: JoinSet<BlockDownloadTaskResponse<N>>, + /// The running chain entry tasks. + /// + /// Returns a result of the chain entry or an error. + #[allow(clippy::type_complexity)] + chain_entry_task: JoinSet<Result<(ClientPoolDropGuard<N>, ChainEntry<N>), BlockDownloadError>>, + + /// The current inflight requests. + /// + /// This is a map of batch start heights to block IDs and related information of the batch. + inflight_requests: BTreeMap<u64, BlocksToRetrieve<N>>, + + /// A queue of start heights from failed batches that should be retried. + /// + /// Wrapped in [`Reverse`] so we prioritize early batches. 
+ failed_batches: BinaryHeap<Reverse<u64>>, + + block_queue: BlockQueue, + + /// The [`BlockDownloaderConfig`]. + config: BlockDownloaderConfig, +} + +impl<N: NetworkZone, S, C> BlockDownloader<N, S, C> +where + S: PeerSyncSvc<N> + Clone, + C: Service<ChainSvcRequest, Response = ChainSvcResponse, Error = tower::BoxError> + + Send + + 'static, + C::Future: Send + 'static, +{ + /// Creates a new [`BlockDownloader`] + fn new( + client_pool: Arc<ClientPool<N>>, + + peer_sync_svc: S, + our_chain_svc: C, + buffer_appender: BufferAppender<BlockBatch>, + + config: BlockDownloaderConfig, + ) -> Self { + Self { + client_pool, + peer_sync_svc, + our_chain_svc, + amount_of_blocks_to_request: config.initial_batch_size, + amount_of_blocks_to_request_updated_at: 0, + amount_of_empty_chain_entries: 0, + block_download_tasks: JoinSet::new(), + chain_entry_task: JoinSet::new(), + inflight_requests: BTreeMap::new(), + block_queue: BlockQueue::new(buffer_appender), + failed_batches: BinaryHeap::new(), + config, + } + } + + /// Checks if we can make use of any peers that are currently pending requests. + async fn check_pending_peers( + &mut self, + chain_tracker: &mut ChainTracker<N>, + pending_peers: &mut BTreeMap<PruningSeed, Vec<ClientPoolDropGuard<N>>>, + ) { + tracing::debug!("Checking if we can give any work to pending peers."); + + for (_, peers) in pending_peers.iter_mut() { + while let Some(peer) = peers.pop() { + if peer.info.handle.is_closed() { + // Peer has disconnected, drop it. + continue; + } + + if let Some(peer) = self.try_handle_free_client(chain_tracker, peer).await { + // This peer is ok however it does not have the data we currently need, this will only happen + // because of its pruning seed so just skip over all peers with this pruning seed. + peers.push(peer); + break; + } + } + } + } + + /// Attempts to send another request for an inflight batch + /// + /// This function will find the batch(es) that we are waiting on to clear our ready queue and sends another request + /// for them. + /// + /// Returns the [`ClientPoolDropGuard`] back if it doesn't have the batch according to its pruning seed. + async fn request_inflight_batch_again( + &mut self, + client: ClientPoolDropGuard<N>, + ) -> Option<ClientPoolDropGuard<N>> { + tracing::debug!( + "Requesting an inflight batch, current ready queue size: {}", + self.block_queue.size() + ); + + assert!( + !self.inflight_requests.is_empty(), + "We need requests inflight to be able to send the request again", + ); + + let oldest_ready_batch = self.block_queue.oldest_ready_batch().unwrap(); + + for (_, in_flight_batch) in self.inflight_requests.range_mut(0..oldest_ready_batch) { + if in_flight_batch.requests_sent >= 2 { + continue; + } + + if !client_has_block_in_range( + &client.info.pruning_seed, + in_flight_batch.start_height, + in_flight_batch.ids.len(), + ) { + return Some(client); + } + + self.block_download_tasks.spawn(download_batch_task( + client, + in_flight_batch.ids.clone(), + in_flight_batch.prev_id, + in_flight_batch.start_height, + in_flight_batch.requests_sent, + )); + + return None; + } + + tracing::debug!("Could not find an inflight request applicable for this peer."); + + Some(client) + } + + /// Spawns a task to request blocks from the given peer. + /// + /// The batch requested will depend on our current state, failed batches will be prioritised. + /// + /// Returns the [`ClientPoolDropGuard`] back if it doesn't have the data we currently need according + /// to its pruning seed. 
+ async fn request_block_batch( + &mut self, + chain_tracker: &mut ChainTracker<N>, + client: ClientPoolDropGuard<N>, + ) -> Option<ClientPoolDropGuard<N>> { + tracing::trace!("Using peer to request a batch of blocks."); + // First look to see if we have any failed requests. + while let Some(failed_request) = self.failed_batches.peek() { + // Check if we still have the request that failed - another peer could have completed it after + // failure. + let Some(request) = self.inflight_requests.get_mut(&failed_request.0) else { + // We don't have the request in flight so remove the failure. + self.failed_batches.pop(); + continue; + }; + // Check if this peer has the blocks according to their pruning seed. + if client_has_block_in_range( + &client.info.pruning_seed, + request.start_height, + request.ids.len(), + ) { + tracing::debug!("Using peer to request a failed batch"); + // They should have the blocks so send the re-request to this peer. + + request.requests_sent += 1; + + self.block_download_tasks.spawn(download_batch_task( + client, + request.ids.clone(), + request.prev_id, + request.start_height, + request.requests_sent, + )); + + // Remove the failure, we have just handled it. + self.failed_batches.pop(); + + return None; + } + // The peer doesn't have the batch according to its pruning seed. + break; + } + + // If our ready queue is too large send duplicate requests for the blocks we are waiting on. + if self.block_queue.size() >= self.config.in_progress_queue_size { + return self.request_inflight_batch_again(client).await; + } + + // No failed requests that we can handle, request some new blocks. + + let Some(mut block_entry_to_get) = chain_tracker + .blocks_to_get(&client.info.pruning_seed, self.amount_of_blocks_to_request) + else { + return Some(client); + }; + + tracing::debug!("Requesting a new batch of blocks"); + + block_entry_to_get.requests_sent = 1; + self.inflight_requests + .insert(block_entry_to_get.start_height, block_entry_to_get.clone()); + + self.block_download_tasks.spawn(download_batch_task( + client, + block_entry_to_get.ids.clone(), + block_entry_to_get.prev_id, + block_entry_to_get.start_height, + block_entry_to_get.requests_sent, + )); + + None + } + + /// Attempts to give work to a free client. + /// + /// This function will use our current state to decide if we should send a request for a chain entry + /// or if we should request a batch of blocks. + /// + /// Returns the [`ClientPoolDropGuard`] back if it doesn't have the data we currently need according + /// to its pruning seed. + async fn try_handle_free_client( + &mut self, + chain_tracker: &mut ChainTracker<N>, + client: ClientPoolDropGuard<N>, + ) -> Option<ClientPoolDropGuard<N>> { + // We send 2 requests, so if one of them is slow or doesn't have the next chain, we still have a backup. + if self.chain_entry_task.len() < 2 + // If we have had too many failures then assume the tip has been found so no more chain entries. + && self.amount_of_empty_chain_entries <= EMPTY_CHAIN_ENTRIES_BEFORE_TOP_ASSUMED + // Check we have a big buffer of pending block IDs to retrieve, we don't want to be waiting around + // for a chain entry. + && chain_tracker.block_requests_queued(self.amount_of_blocks_to_request) < 500 + // Make sure this peer actually has the chain. 
+ && chain_tracker.should_ask_for_next_chain_entry(&client.info.pruning_seed) + { + tracing::debug!("Requesting next chain entry"); + + let history = chain_tracker.get_simple_history(); + + self.chain_entry_task.spawn( + async move { + timeout( + BLOCK_DOWNLOADER_REQUEST_TIMEOUT, + request_chain_entry_from_peer(client, history), + ) + .await + .map_err(|_| BlockDownloadError::TimedOut)? + } + .instrument(tracing::debug_span!( + "request_chain_entry", + current_height = chain_tracker.top_height() + )), + ); + + return None; + } + + // Request a batch of blocks instead. + self.request_block_batch(chain_tracker, client).await + } + + /// Checks the [`ClientPool`] for free peers. + async fn check_for_free_clients( + &mut self, + chain_tracker: &mut ChainTracker<N>, + pending_peers: &mut BTreeMap<PruningSeed, Vec<ClientPoolDropGuard<N>>>, + ) -> Result<(), BlockDownloadError> { + tracing::debug!("Checking for free peers"); + + // This value might be slightly behind but that's ok. + let ChainSvcResponse::CumulativeDifficulty(current_cumulative_difficulty) = self + .our_chain_svc + .ready() + .await? + .call(ChainSvcRequest::CumulativeDifficulty) + .await? + else { + panic!("Chain service returned wrong response."); + }; + + let PeerSyncResponse::PeersToSyncFrom(peers) = self + .peer_sync_svc + .ready() + .await? + .call(PeerSyncRequest::PeersToSyncFrom { + current_cumulative_difficulty, + block_needed: None, + }) + .await? + else { + panic!("Peer sync service returned wrong response."); + }; + + tracing::debug!("Response received from peer sync service"); + + for client in self.client_pool.borrow_clients(&peers) { + pending_peers + .entry(client.info.pruning_seed) + .or_default() + .push(client); + } + + self.check_pending_peers(chain_tracker, pending_peers).await; + + Ok(()) + } + + /// Handles a response to a request to get blocks from a peer. + async fn handle_download_batch_res( + &mut self, + start_height: u64, + res: Result<(ClientPoolDropGuard<N>, BlockBatch), BlockDownloadError>, + chain_tracker: &mut ChainTracker<N>, + pending_peers: &mut BTreeMap<PruningSeed, Vec<ClientPoolDropGuard<N>>>, + ) -> Result<(), BlockDownloadError> { + tracing::debug!("Handling block download response"); + + match res { + Err(e) => { + if matches!(e, BlockDownloadError::ChainInvalid) { + // If the chain was invalid ban the peer who told us about it and error here to stop the + // block downloader. + self.inflight_requests.get(&start_height).inspect(|entry| { + tracing::warn!( + "Received an invalid chain from peer: {}, exiting block downloader (it should be restarted).", + entry.peer_who_told_us + ); + entry.peer_who_told_us_handle.ban_peer(LONG_BAN); + }); + + return Err(e); + } + + // Add the request to the failed list. + if let Some(batch) = self.inflight_requests.get_mut(&start_height) { + tracing::debug!("Error downloading batch: {e}"); + + batch.failures += 1; + if batch.failures > MAX_DOWNLOAD_FAILURES { + tracing::debug!( + "Too many errors downloading blocks, stopping the block downloader." + ); + return Err(BlockDownloadError::TimedOut); + } + + self.failed_batches.push(Reverse(start_height)); + } + + Ok(()) + } + Ok((client, block_batch)) => { + // Remove the batch from the inflight batches. + if self.inflight_requests.remove(&start_height).is_none() { + tracing::debug!("Already retrieved batch"); + // If it was already retrieved then there is nothing else to do. 
+ pending_peers + .entry(client.info.pruning_seed) + .or_default() + .push(client); + + self.check_pending_peers(chain_tracker, pending_peers).await; + + return Ok(()); + }; + + // If the batch is higher than the last time we updated `amount_of_blocks_to_request`, update it + // again. + if start_height > self.amount_of_blocks_to_request_updated_at { + self.amount_of_blocks_to_request = calculate_next_block_batch_size( + block_batch.size, + block_batch.blocks.len(), + self.config.target_batch_size, + ); + + tracing::debug!( + "Updating batch size of new batches, new size: {}", + self.amount_of_blocks_to_request + ); + + self.amount_of_blocks_to_request_updated_at = start_height; + } + + self.block_queue + .add_incoming_batch( + ReadyQueueBatch { + start_height, + block_batch, + }, + self.inflight_requests.first_key_value().map(|(k, _)| *k), + ) + .await?; + + pending_peers + .entry(client.info.pruning_seed) + .or_default() + .push(client); + + self.check_pending_peers(chain_tracker, pending_peers).await; + + Ok(()) + } + } + } + + /// Starts the main loop of the block downloader. + async fn run(mut self) -> Result<(), BlockDownloadError> { + let mut chain_tracker = initial_chain_search( + &self.client_pool, + self.peer_sync_svc.clone(), + &mut self.our_chain_svc, + ) + .await?; + + let mut pending_peers = BTreeMap::new(); + + tracing::info!("Attempting to download blocks from peers, this may take a while."); + + let mut check_client_pool_interval = interval(self.config.check_client_pool_interval); + check_client_pool_interval.set_missed_tick_behavior(MissedTickBehavior::Delay); + + self.check_for_free_clients(&mut chain_tracker, &mut pending_peers) + .await?; + + loop { + tokio::select! { + _ = check_client_pool_interval.tick() => { + tracing::debug!("Checking client pool for free peers, timer fired."); + self.check_for_free_clients(&mut chain_tracker, &mut pending_peers).await?; + + // If we have no inflight requests, and we have had too many empty chain entries in a row assume the top has been found. + if self.inflight_requests.is_empty() && self.amount_of_empty_chain_entries >= EMPTY_CHAIN_ENTRIES_BEFORE_TOP_ASSUMED { + tracing::debug!("Failed to find any more chain entries, probably fround the top"); + return Ok(()); + } + } + Some(res) = self.block_download_tasks.join_next() => { + let BlockDownloadTaskResponse { + start_height, + result + } = res.expect("Download batch future panicked"); + + self.handle_download_batch_res(start_height, result, &mut chain_tracker, &mut pending_peers).await?; + + // If we have no inflight requests, and we have had too many empty chain entries in a row assume the top has been found. 
+ if self.inflight_requests.is_empty() && self.amount_of_empty_chain_entries >= EMPTY_CHAIN_ENTRIES_BEFORE_TOP_ASSUMED { + tracing::debug!("Failed to find any more chain entries, probably fround the top"); + return Ok(()); + } + } + Some(Ok(res)) = self.chain_entry_task.join_next() => { + match res { + Ok((client, entry)) => { + if chain_tracker.add_entry(entry).is_ok() { + tracing::debug!("Successfully added chain entry to chain tracker."); + self.amount_of_empty_chain_entries = 0; + } else { + tracing::debug!("Failed to add incoming chain entry to chain tracker."); + self.amount_of_empty_chain_entries += 1; + } + + pending_peers + .entry(client.info.pruning_seed) + .or_default() + .push(client); + + self.check_pending_peers(&mut chain_tracker, &mut pending_peers).await; + } + Err(_) => self.amount_of_empty_chain_entries += 1 + } + } + } + } + } +} + +/// The return value from the block download tasks. +struct BlockDownloadTaskResponse<N: NetworkZone> { + /// The start height of the batch. + start_height: u64, + /// A result containing the batch or an error. + result: Result<(ClientPoolDropGuard<N>, BlockBatch), BlockDownloadError>, +} + +/// Returns if a peer has all the blocks in a range, according to its [`PruningSeed`]. +fn client_has_block_in_range(pruning_seed: &PruningSeed, start_height: u64, length: usize) -> bool { + pruning_seed.has_full_block(start_height, CRYPTONOTE_MAX_BLOCK_HEIGHT) + && pruning_seed.has_full_block( + start_height + u64::try_from(length).unwrap(), + CRYPTONOTE_MAX_BLOCK_HEIGHT, + ) +} + +/// Calculates the next amount of blocks to request in a batch. +/// +/// Parameters: +/// - `previous_batch_size` is the size, in bytes, of the last batch +/// - `previous_batch_len` is the amount of blocks in the last batch +/// - `target_batch_size` is the target size, in bytes, of a batch +fn calculate_next_block_batch_size( + previous_batch_size: usize, + previous_batch_len: usize, + target_batch_size: usize, +) -> usize { + // The average block size of the last batch of blocks, multiplied by 2 as a safety margin for + // future blocks. + let adjusted_average_block_size = max((previous_batch_size * 2) / previous_batch_len, 1); + + // Set the amount of blocks to request equal to our target batch size divided by the adjusted_average_block_size. + let next_batch_len = max(target_batch_size / adjusted_average_block_size, 1); + + // Cap the amount of growth to 1.5x the previous batch len, to prevent a small block causing us to request + // a huge amount of blocks. + let next_batch_len = min(next_batch_len, (previous_batch_len * 3).div_ceil(2)); + + // Cap the length to the maximum allowed. + min(next_batch_len, MAX_BLOCK_BATCH_LEN) +} diff --git a/p2p/cuprate-p2p/src/block_downloader/block_queue.rs b/p2p/cuprate-p2p/src/block_downloader/block_queue.rs new file mode 100644 index 00000000..ada28256 --- /dev/null +++ b/p2p/cuprate-p2p/src/block_downloader/block_queue.rs @@ -0,0 +1,172 @@ +use std::{cmp::Ordering, collections::BinaryHeap}; + +use async_buffer::BufferAppender; + +use super::{BlockBatch, BlockDownloadError}; + +/// A batch of blocks in the ready queue, waiting for previous blocks to come in, so they can +/// be passed into the buffer. +/// +/// The [`Eq`] and [`Ord`] impl on this type will only take into account the `start_height`, this +/// is because the block downloader will only download one chain at once so no 2 batches can have +/// the same `start_height`. 
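The batch-size heuristic in `calculate_next_block_batch_size` above can be traced with concrete numbers. The sketch below only restates that function's arithmetic with made-up inputs, and assumes the final `MAX_BLOCK_BATCH_LEN` cap (defined in `constants.rs`, not shown in this hunk) does not bind:

```rust
// Illustrative inputs: the previous batch was 20 blocks totalling 2 MB,
// and the target batch size is 5 MB.
let (previous_batch_size, previous_batch_len, target_batch_size) = (2_000_000_usize, 20, 5_000_000);

// Average block size of the last batch, doubled as a safety margin:
// (2_000_000 * 2) / 20 = 200_000 bytes.
let adjusted_average_block_size = ((previous_batch_size * 2) / previous_batch_len).max(1);

// Target batch size divided by the adjusted average: 5_000_000 / 200_000 = 25 blocks.
let next_batch_len = (target_batch_size / adjusted_average_block_size).max(1);

// Growth is capped at 1.5x the previous length: min(25, 30) = 25 blocks requested next.
assert_eq!(next_batch_len.min((previous_batch_len * 3).div_ceil(2)), 25);
```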
+/// +/// Also, the [`Ord`] impl is reversed so older blocks (lower height) come first in a [`BinaryHeap`]. +#[derive(Debug, Clone)] +pub struct ReadyQueueBatch { + /// The start height of the batch. + pub start_height: u64, + /// The batch of blocks. + pub block_batch: BlockBatch, +} + +impl Eq for ReadyQueueBatch {} + +impl PartialEq<Self> for ReadyQueueBatch { + fn eq(&self, other: &Self) -> bool { + self.start_height.eq(&other.start_height) + } +} + +impl PartialOrd<Self> for ReadyQueueBatch { + fn partial_cmp(&self, other: &Self) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +impl Ord for ReadyQueueBatch { + fn cmp(&self, other: &Self) -> Ordering { + // reverse the ordering so older blocks (lower height) come first in a [`BinaryHeap`] + self.start_height.cmp(&other.start_height).reverse() + } +} + +/// The block queue that holds downloaded block batches, adding them to the [`async_buffer`] when the +/// oldest batch has been downloaded. +pub struct BlockQueue { + /// A queue of ready batches. + ready_batches: BinaryHeap<ReadyQueueBatch>, + /// The size, in bytes, of all the batches in [`Self::ready_batches`]. + ready_batches_size: usize, + + /// The [`BufferAppender`] that gives blocks to Cuprate. + buffer_appender: BufferAppender<BlockBatch>, +} + +impl BlockQueue { + /// Creates a new [`BlockQueue`]. + pub fn new(buffer_appender: BufferAppender<BlockBatch>) -> BlockQueue { + BlockQueue { + ready_batches: BinaryHeap::new(), + ready_batches_size: 0, + buffer_appender, + } + } + + /// Returns the oldest batch that has not been put in the [`async_buffer`] yet. + pub fn oldest_ready_batch(&self) -> Option<u64> { + self.ready_batches.peek().map(|batch| batch.start_height) + } + + /// Returns the size of all the batches that have not been put into the [`async_buffer`] yet. + pub fn size(&self) -> usize { + self.ready_batches_size + } + + /// Adds an incoming batch to the queue and checks if we can push any batches into the [`async_buffer`]. + /// + /// `oldest_in_flight_start_height` should be the start height of the oldest batch that is still inflight, if + /// there are no batches inflight then this should be [`None`]. + pub async fn add_incoming_batch( + &mut self, + new_batch: ReadyQueueBatch, + oldest_in_flight_start_height: Option<u64>, + ) -> Result<(), BlockDownloadError> { + self.ready_batches_size += new_batch.block_batch.size; + self.ready_batches.push(new_batch); + + // The height to stop pushing batches into the buffer. + let height_to_stop_at = oldest_in_flight_start_height.unwrap_or(u64::MAX); + + while self + .ready_batches + .peek() + .is_some_and(|batch| batch.start_height <= height_to_stop_at) + { + let batch = self + .ready_batches + .pop() + .expect("We just checked we have a batch in the buffer"); + + let batch_size = batch.block_batch.size; + + self.ready_batches_size -= batch_size; + self.buffer_appender + .send(batch.block_batch, batch_size) + .await + .map_err(|_| BlockDownloadError::BufferWasClosed)?; + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use futures::StreamExt; + use std::{collections::BTreeSet, sync::Arc}; + + use proptest::{collection::vec, prelude::*}; + use tokio::sync::Semaphore; + use tokio_test::block_on; + + use monero_p2p::handles::HandleBuilder; + + use super::*; + + prop_compose! 
{ + fn ready_batch_strategy()(start_height in 0_u64..500_000_000) -> ReadyQueueBatch { + // TODO: The permit will not be needed here when + let (_, peer_handle) = HandleBuilder::new().with_permit(Arc::new(Semaphore::new(1)).try_acquire_owned().unwrap()).build(); + + ReadyQueueBatch { + start_height, + block_batch: BlockBatch { + blocks: vec![], + size: start_height as usize, + peer_handle, + }, + } + } + } + + proptest! { + #[test] + fn block_queue_returns_items_in_order(batches in vec(ready_batch_strategy(), 0..10_000)) { + block_on(async move { + let (buffer_tx, mut buffer_rx) = async_buffer::new_buffer(usize::MAX); + + let mut queue = BlockQueue::new(buffer_tx); + + let mut sorted_batches = BTreeSet::from_iter(batches.clone()); + let mut soreted_batch_2 = sorted_batches.clone(); + + for batch in batches { + if sorted_batches.remove(&batch) { + queue.add_incoming_batch(batch, sorted_batches.last().map(|batch| batch.start_height)).await.unwrap(); + } + } + + assert_eq!(queue.size(), 0); + assert!(queue.oldest_ready_batch().is_none()); + drop(queue); + + while let Some(batch) = buffer_rx.next().await { + let last_batch = soreted_batch_2.pop_last().unwrap(); + + assert_eq!(batch.size, last_batch.block_batch.size); + } + }); + } + } +} diff --git a/p2p/cuprate-p2p/src/block_downloader/chain_tracker.rs b/p2p/cuprate-p2p/src/block_downloader/chain_tracker.rs new file mode 100644 index 00000000..07bad7b6 --- /dev/null +++ b/p2p/cuprate-p2p/src/block_downloader/chain_tracker.rs @@ -0,0 +1,211 @@ +use std::{cmp::min, collections::VecDeque}; + +use fixed_bytes::ByteArrayVec; + +use monero_p2p::{client::InternalPeerID, handles::ConnectionHandle, NetworkZone}; +use monero_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; + +use crate::constants::MEDIUM_BAN; + +/// A new chain entry to add to our chain tracker. +#[derive(Debug)] +pub(crate) struct ChainEntry<N: NetworkZone> { + /// A list of block IDs. + pub ids: Vec<[u8; 32]>, + /// The peer who told us about this chain entry. + pub peer: InternalPeerID<N::Addr>, + /// The peer who told us about this chain entry's handle + pub handle: ConnectionHandle, +} + +/// A batch of blocks to retrieve. +#[derive(Clone)] +pub struct BlocksToRetrieve<N: NetworkZone> { + /// The block IDs to get. + pub ids: ByteArrayVec<32>, + /// The hash of the last block before this batch. + pub prev_id: [u8; 32], + /// The expected height of the first block in [`BlocksToRetrieve::ids`]. + pub start_height: u64, + /// The peer who told us about this batch. + pub peer_who_told_us: InternalPeerID<N::Addr>, + /// The peer who told us about this batch's handle. + pub peer_who_told_us_handle: ConnectionHandle, + /// The number of requests sent for this batch. + pub requests_sent: usize, + /// The number of times this batch has been requested from a peer and failed. + pub failures: usize, +} + +/// An error returned from the [`ChainTracker`]. +#[derive(Debug, Clone)] +pub enum ChainTrackerError { + /// The new chain entry is invalid. + NewEntryIsInvalid, + /// The new chain entry does not follow from the top of our chain tracker. + NewEntryDoesNotFollowChain, +} + +/// # Chain Tracker +/// +/// This struct allows following a single chain. It takes in [`ChainEntry`]s and +/// allows getting [`BlocksToRetrieve`]. +pub struct ChainTracker<N: NetworkZone> { + /// A list of [`ChainEntry`]s, in order. + entries: VecDeque<ChainEntry<N>>, + /// The height of the first block, in the first entry in [`Self::entries`]. + first_height: u64, + /// The hash of the last block in the last entry. 
+ top_seen_hash: [u8; 32], + /// The hash of the block one below [`Self::first_height`]. + previous_hash: [u8; 32], + /// The hash of the genesis block. + our_genesis: [u8; 32], +} + +impl<N: NetworkZone> ChainTracker<N> { + /// Creates a new chain tracker. + pub fn new( + new_entry: ChainEntry<N>, + first_height: u64, + our_genesis: [u8; 32], + previous_hash: [u8; 32], + ) -> Self { + let top_seen_hash = *new_entry.ids.last().unwrap(); + let mut entries = VecDeque::with_capacity(1); + entries.push_back(new_entry); + + Self { + top_seen_hash, + entries, + first_height, + previous_hash, + our_genesis, + } + } + + /// Returns `true` if the peer is expected to have the next block after our highest seen block + /// according to their pruning seed. + pub fn should_ask_for_next_chain_entry(&self, seed: &PruningSeed) -> bool { + seed.has_full_block(self.top_height(), CRYPTONOTE_MAX_BLOCK_HEIGHT) + } + + /// Returns the simple history, the highest seen block and the genesis block. + pub fn get_simple_history(&self) -> [[u8; 32]; 2] { + [self.top_seen_hash, self.our_genesis] + } + + /// Returns the height of the highest block we are tracking. + pub fn top_height(&self) -> u64 { + let top_block_idx = self + .entries + .iter() + .map(|entry| entry.ids.len()) + .sum::<usize>(); + + self.first_height + u64::try_from(top_block_idx).unwrap() + } + + /// Returns the total number of queued batches for a certain `batch_size`. + /// + /// # Panics + /// This function panics if `batch_size` is `0`. + pub fn block_requests_queued(&self, batch_size: usize) -> usize { + self.entries + .iter() + .map(|entry| entry.ids.len().div_ceil(batch_size)) + .sum() + } + + /// Attempts to add an incoming [`ChainEntry`] to the chain tracker. + pub fn add_entry(&mut self, mut chain_entry: ChainEntry<N>) -> Result<(), ChainTrackerError> { + if chain_entry.ids.is_empty() { + // The peer must send at lest one overlapping block. + chain_entry.handle.ban_peer(MEDIUM_BAN); + return Err(ChainTrackerError::NewEntryIsInvalid); + } + + if chain_entry.ids.len() == 1 { + return Err(ChainTrackerError::NewEntryDoesNotFollowChain); + } + + if self + .entries + .back() + .is_some_and(|last_entry| last_entry.ids.last().unwrap() != &chain_entry.ids[0]) + { + return Err(ChainTrackerError::NewEntryDoesNotFollowChain); + } + + let new_entry = ChainEntry { + // ignore the first block - we already know it. + ids: chain_entry.ids.split_off(1), + peer: chain_entry.peer, + handle: chain_entry.handle, + }; + + self.top_seen_hash = *new_entry.ids.last().unwrap(); + + self.entries.push_back(new_entry); + + Ok(()) + } + + /// Returns a batch of blocks to request. + /// + /// The returned batches length will be less than or equal to `max_blocks` + pub fn blocks_to_get( + &mut self, + pruning_seed: &PruningSeed, + max_blocks: usize, + ) -> Option<BlocksToRetrieve<N>> { + if !pruning_seed.has_full_block(self.first_height, CRYPTONOTE_MAX_BLOCK_HEIGHT) { + return None; + } + + let entry = self.entries.front_mut()?; + + // Calculate the ending index for us to get in this batch, it will be one of these: + // - smallest out of `max_blocks` + // - length of the batch + // - index of the next pruned block for this seed + let end_idx = min( + min(entry.ids.len(), max_blocks), + usize::try_from( + pruning_seed + .get_next_pruned_block(self.first_height, CRYPTONOTE_MAX_BLOCK_HEIGHT) + .expect("We use local values to calculate height which should be below the sanity limit") + // Use a big value as a fallback if the seed does no pruning. 
+ .unwrap_or(CRYPTONOTE_MAX_BLOCK_HEIGHT) + - self.first_height, + ) + .unwrap(), + ); + + if end_idx == 0 { + return None; + } + + let ids_to_get = entry.ids.drain(0..end_idx).collect::<Vec<_>>(); + + let blocks = BlocksToRetrieve { + ids: ids_to_get.into(), + prev_id: self.previous_hash, + start_height: self.first_height, + peer_who_told_us: entry.peer, + peer_who_told_us_handle: entry.handle.clone(), + requests_sent: 0, + failures: 0, + }; + + self.first_height += u64::try_from(end_idx).unwrap(); + // TODO: improve ByteArrayVec API. + self.previous_hash = blocks.ids[blocks.ids.len() - 1]; + + if entry.ids.is_empty() { + self.entries.pop_front(); + } + + Some(blocks) + } +} diff --git a/p2p/cuprate-p2p/src/block_downloader/download_batch.rs b/p2p/cuprate-p2p/src/block_downloader/download_batch.rs new file mode 100644 index 00000000..8cdde41e --- /dev/null +++ b/p2p/cuprate-p2p/src/block_downloader/download_batch.rs @@ -0,0 +1,199 @@ +use std::collections::HashSet; + +use monero_serai::{block::Block, transaction::Transaction}; +use rayon::prelude::*; +use tokio::time::timeout; +use tower::{Service, ServiceExt}; +use tracing::instrument; + +use cuprate_helper::asynch::rayon_spawn_async; +use fixed_bytes::ByteArrayVec; +use monero_p2p::{handles::ConnectionHandle, NetworkZone, PeerRequest, PeerResponse}; +use monero_wire::protocol::{GetObjectsRequest, GetObjectsResponse}; + +use crate::{ + block_downloader::{BlockBatch, BlockDownloadError, BlockDownloadTaskResponse}, + client_pool::ClientPoolDropGuard, + constants::{BLOCK_DOWNLOADER_REQUEST_TIMEOUT, MAX_TRANSACTION_BLOB_SIZE, MEDIUM_BAN}, +}; + +/// Attempts to request a batch of blocks from a peer, returning [`BlockDownloadTaskResponse`]. +#[instrument( + level = "debug", + name = "download_batch", + skip_all, + fields( + start_height = expected_start_height, + attempt = _attempt + ) +)] +pub async fn download_batch_task<N: NetworkZone>( + client: ClientPoolDropGuard<N>, + ids: ByteArrayVec<32>, + previous_id: [u8; 32], + expected_start_height: u64, + _attempt: usize, +) -> BlockDownloadTaskResponse<N> { + BlockDownloadTaskResponse { + start_height: expected_start_height, + result: request_batch_from_peer(client, ids, previous_id, expected_start_height).await, + } +} + +/// Requests a sequential batch of blocks from a peer. +/// +/// This function will validate the blocks that were downloaded were the ones asked for and that they match +/// the expected height. +async fn request_batch_from_peer<N: NetworkZone>( + mut client: ClientPoolDropGuard<N>, + ids: ByteArrayVec<32>, + previous_id: [u8; 32], + expected_start_height: u64, +) -> Result<(ClientPoolDropGuard<N>, BlockBatch), BlockDownloadError> { + // Request the blocks. + let blocks_response = timeout(BLOCK_DOWNLOADER_REQUEST_TIMEOUT, async { + let PeerResponse::GetObjects(blocks_response) = client + .ready() + .await? + .call(PeerRequest::GetObjects(GetObjectsRequest { + blocks: ids.clone(), + pruned: false, + })) + .await? 
+ else { + panic!("Connection task returned wrong response."); + }; + + Ok::<_, BlockDownloadError>(blocks_response) + }) + .await + .map_err(|_| BlockDownloadError::TimedOut)??; + + // Initial sanity checks + if blocks_response.blocks.len() > ids.len() { + client.info.handle.ban_peer(MEDIUM_BAN); + return Err(BlockDownloadError::PeersResponseWasInvalid); + } + + if blocks_response.blocks.len() != ids.len() { + return Err(BlockDownloadError::PeerDidNotHaveRequestedData); + } + let peer_handle = client.info.handle.clone(); + + let blocks = rayon_spawn_async(move || { + deserialize_batch( + blocks_response, + expected_start_height, + ids, + previous_id, + peer_handle, + ) + }) + .await; + + let batch = blocks.inspect_err(|e| { + // If the peers response was invalid, ban it. + if matches!(e, BlockDownloadError::PeersResponseWasInvalid) { + client.info.handle.ban_peer(MEDIUM_BAN); + } + })?; + + Ok((client, batch)) +} + +fn deserialize_batch( + blocks_response: GetObjectsResponse, + expected_start_height: u64, + requested_ids: ByteArrayVec<32>, + previous_id: [u8; 32], + peer_handle: ConnectionHandle, +) -> Result<BlockBatch, BlockDownloadError> { + let blocks = blocks_response + .blocks + .into_par_iter() + .enumerate() + .map(|(i, block_entry)| { + let expected_height = u64::try_from(i).unwrap() + expected_start_height; + + let mut size = block_entry.block.len(); + + let block = Block::read(&mut block_entry.block.as_ref()) + .map_err(|_| BlockDownloadError::PeersResponseWasInvalid)?; + + let block_hash = block.hash(); + + // Check the block matches the one requested and the peer sent enough transactions. + if requested_ids[i] != block_hash || block.txs.len() != block_entry.txs.len() { + return Err(BlockDownloadError::PeersResponseWasInvalid); + } + + // Check that the previous ID is correct for the first block. + // This is to protect use against banning the wrong peer. + // This must happen after the hash check. + if i == 0 && block.header.previous != previous_id { + tracing::warn!( + "Invalid chain, peer told us a block follows the chain when it doesn't." + ); + + // This peer probably did nothing wrong, it was the peer who told us this blockID which + // is misbehaving. + return Err(BlockDownloadError::ChainInvalid); + } + + // Check the height lines up as expected. + // This must happen after the hash check. + if !block + .number() + .is_some_and(|height| height == expected_height) + { + tracing::warn!( + "Invalid chain, expected height: {expected_height}, got height: {:?}", + block.number() + ); + + // This peer probably did nothing wrong, it was the peer who told us this blockID which + // is misbehaving. + return Err(BlockDownloadError::ChainInvalid); + } + + // Deserialize the transactions. + let txs = block_entry + .txs + .take_normal() + .ok_or(BlockDownloadError::PeersResponseWasInvalid)? + .into_iter() + .map(|tx_blob| { + size += tx_blob.len(); + + if tx_blob.len() > MAX_TRANSACTION_BLOB_SIZE { + return Err(BlockDownloadError::PeersResponseWasInvalid); + } + + Transaction::read(&mut tx_blob.as_ref()) + .map_err(|_| BlockDownloadError::PeersResponseWasInvalid) + }) + .collect::<Result<Vec<_>, _>>()?; + + // Make sure the transactions in the block were the ones the peer sent. 
+ let mut expected_txs = block.txs.iter().collect::<HashSet<_>>(); + + for tx in &txs { + if !expected_txs.remove(&tx.hash()) { + return Err(BlockDownloadError::PeersResponseWasInvalid); + } + } + + if !expected_txs.is_empty() { + return Err(BlockDownloadError::PeersResponseWasInvalid); + } + + Ok(((block, txs), size)) + }) + .collect::<Result<(Vec<_>, Vec<_>), _>>()?; + + Ok(BlockBatch { + blocks: blocks.0, + size: blocks.1.into_iter().sum(), + peer_handle, + }) +} diff --git a/p2p/cuprate-p2p/src/block_downloader/request_chain.rs b/p2p/cuprate-p2p/src/block_downloader/request_chain.rs new file mode 100644 index 00000000..7733aef9 --- /dev/null +++ b/p2p/cuprate-p2p/src/block_downloader/request_chain.rs @@ -0,0 +1,238 @@ +use std::{mem, sync::Arc}; + +use rand::prelude::SliceRandom; +use rand::thread_rng; +use tokio::{task::JoinSet, time::timeout}; +use tower::{Service, ServiceExt}; +use tracing::{instrument, Instrument, Span}; + +use monero_p2p::{ + client::InternalPeerID, + handles::ConnectionHandle, + services::{PeerSyncRequest, PeerSyncResponse}, + NetworkZone, PeerRequest, PeerResponse, PeerSyncSvc, +}; +use monero_wire::protocol::{ChainRequest, ChainResponse}; + +use crate::{ + block_downloader::{ + chain_tracker::{ChainEntry, ChainTracker}, + BlockDownloadError, ChainSvcRequest, ChainSvcResponse, + }, + client_pool::{ClientPool, ClientPoolDropGuard}, + constants::{ + BLOCK_DOWNLOADER_REQUEST_TIMEOUT, INITIAL_CHAIN_REQUESTS_TO_SEND, + MAX_BLOCKS_IDS_IN_CHAIN_ENTRY, MEDIUM_BAN, + }, +}; + +/// Request a chain entry from a peer. +/// +/// Because the block downloader only follows and downloads one chain we only have to send the block hash of +/// top block we have found and the genesis block, this is then called `short_history`. +pub async fn request_chain_entry_from_peer<N: NetworkZone>( + mut client: ClientPoolDropGuard<N>, + short_history: [[u8; 32]; 2], +) -> Result<(ClientPoolDropGuard<N>, ChainEntry<N>), BlockDownloadError> { + let PeerResponse::GetChain(chain_res) = client + .ready() + .await? + .call(PeerRequest::GetChain(ChainRequest { + block_ids: short_history.into(), + prune: true, + })) + .await? + else { + panic!("Connection task returned wrong response!"); + }; + + if chain_res.m_block_ids.is_empty() + || chain_res.m_block_ids.len() > MAX_BLOCKS_IDS_IN_CHAIN_ENTRY + { + client.info.handle.ban_peer(MEDIUM_BAN); + return Err(BlockDownloadError::PeersResponseWasInvalid); + } + + // We must have at least one overlapping block. + if !(chain_res.m_block_ids[0] == short_history[0] + || chain_res.m_block_ids[0] == short_history[1]) + { + client.info.handle.ban_peer(MEDIUM_BAN); + return Err(BlockDownloadError::PeersResponseWasInvalid); + } + + // If the genesis is the overlapping block then this peer does not have our top tracked block in + // its chain. + if chain_res.m_block_ids[0] == short_history[1] { + return Err(BlockDownloadError::PeerDidNotHaveRequestedData); + } + + let entry = ChainEntry { + ids: (&chain_res.m_block_ids).into(), + peer: client.info.id, + handle: client.info.handle.clone(), + }; + + Ok((client, entry)) +} + +/// Initial chain search, this function pulls [`INITIAL_CHAIN_REQUESTS_TO_SEND`] peers from the [`ClientPool`] +/// and sends chain requests to all of them. +/// +/// We then wait for their response and choose the peer who claims the highest cumulative difficulty. 
+#[instrument(level = "error", skip_all)] +pub async fn initial_chain_search<N: NetworkZone, S, C>( + client_pool: &Arc<ClientPool<N>>, + mut peer_sync_svc: S, + mut our_chain_svc: C, +) -> Result<ChainTracker<N>, BlockDownloadError> +where + S: PeerSyncSvc<N>, + C: Service<ChainSvcRequest, Response = ChainSvcResponse, Error = tower::BoxError>, +{ + tracing::debug!("Getting our chain history"); + // Get our history. + let ChainSvcResponse::CompactHistory { + block_ids, + cumulative_difficulty, + } = our_chain_svc + .ready() + .await? + .call(ChainSvcRequest::CompactHistory) + .await? + else { + panic!("chain service sent wrong response."); + }; + + let our_genesis = *block_ids.last().expect("Blockchain had no genesis block."); + + tracing::debug!("Getting a list of peers with higher cumulative difficulty"); + + let PeerSyncResponse::PeersToSyncFrom(mut peers) = peer_sync_svc + .ready() + .await? + .call(PeerSyncRequest::PeersToSyncFrom { + block_needed: None, + current_cumulative_difficulty: cumulative_difficulty, + }) + .await? + else { + panic!("peer sync service sent wrong response."); + }; + + tracing::debug!( + "{} peers claim they have a higher cumulative difficulty", + peers.len() + ); + + // Shuffle the list to remove any possibility of peers being able to prioritize getting picked. + peers.shuffle(&mut thread_rng()); + + let mut peers = client_pool.borrow_clients(&peers); + + let mut futs = JoinSet::new(); + + let req = PeerRequest::GetChain(ChainRequest { + block_ids: block_ids.into(), + prune: false, + }); + + tracing::debug!("Sending requests for chain entries."); + + // Send the requests. + while futs.len() < INITIAL_CHAIN_REQUESTS_TO_SEND { + let Some(mut next_peer) = peers.next() else { + break; + }; + + let cloned_req = req.clone(); + futs.spawn(timeout( + BLOCK_DOWNLOADER_REQUEST_TIMEOUT, + async move { + let PeerResponse::GetChain(chain_res) = + next_peer.ready().await?.call(cloned_req).await? + else { + panic!("connection task returned wrong response!"); + }; + + Ok::<_, tower::BoxError>(( + chain_res, + next_peer.info.id, + next_peer.info.handle.clone(), + )) + } + .instrument(Span::current()), + )); + } + + let mut res: Option<(ChainResponse, InternalPeerID<_>, ConnectionHandle)> = None; + + // Wait for the peers responses. + while let Some(task_res) = futs.join_next().await { + let Ok(Ok(task_res)) = task_res.unwrap() else { + continue; + }; + + match &mut res { + Some(res) => { + // res has already been set, replace it if this peer claims higher cumulative difficulty + if res.0.cumulative_difficulty() < task_res.0.cumulative_difficulty() { + let _ = mem::replace(res, task_res); + } + } + None => { + // res has not been set, set it now; + res = Some(task_res); + } + } + } + + let Some((chain_res, peer_id, peer_handle)) = res else { + return Err(BlockDownloadError::FailedToFindAChainToFollow); + }; + + let hashes: Vec<[u8; 32]> = (&chain_res.m_block_ids).into(); + // drop this to deallocate the [`Bytes`]. + drop(chain_res); + + tracing::debug!("Highest chin entry contained {} block Ids", hashes.len()); + + // Find the first unknown block in the batch. + let ChainSvcResponse::FindFirstUnknown(first_unknown, expected_height) = our_chain_svc + .ready() + .await? + .call(ChainSvcRequest::FindFirstUnknown(hashes.clone())) + .await? + else { + panic!("chain service sent wrong response."); + }; + + // The peer must send at least one block we already know. 
+ if first_unknown == 0 { + peer_handle.ban_peer(MEDIUM_BAN); + return Err(BlockDownloadError::PeerSentNoOverlappingBlocks); + } + + // We know all the blocks already + // TODO: The peer could still be on a different chain, however the chain might just be too far split. + if first_unknown == hashes.len() { + return Err(BlockDownloadError::FailedToFindAChainToFollow); + } + + let previous_id = hashes[first_unknown - 1]; + + let first_entry = ChainEntry { + ids: hashes[first_unknown..].to_vec(), + peer: peer_id, + handle: peer_handle, + }; + + tracing::debug!( + "Creating chain tracker with {} new block Ids", + first_entry.ids.len() + ); + + let tracker = ChainTracker::new(first_entry, expected_height, our_genesis, previous_id); + + Ok(tracker) +} diff --git a/p2p/cuprate-p2p/src/block_downloader/tests.rs b/p2p/cuprate-p2p/src/block_downloader/tests.rs new file mode 100644 index 00000000..24360aef --- /dev/null +++ b/p2p/cuprate-p2p/src/block_downloader/tests.rs @@ -0,0 +1,323 @@ +use std::{ + fmt::{Debug, Formatter}, + future::Future, + pin::Pin, + sync::Arc, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{FutureExt, StreamExt}; +use indexmap::IndexMap; +use monero_serai::{ + block::{Block, BlockHeader}, + ringct::{RctBase, RctPrunable, RctSignatures}, + transaction::{Input, Timelock, Transaction, TransactionPrefix}, +}; +use proptest::{collection::vec, prelude::*}; +use tokio::{sync::Semaphore, time::timeout}; +use tower::{service_fn, Service}; + +use fixed_bytes::ByteArrayVec; +use monero_p2p::{ + client::{mock_client, Client, InternalPeerID, PeerInformation}, + network_zones::ClearNet, + services::{PeerSyncRequest, PeerSyncResponse}, + ConnectionDirection, NetworkZone, PeerRequest, PeerResponse, +}; +use monero_pruning::PruningSeed; +use monero_wire::{ + common::{BlockCompleteEntry, TransactionBlobs}, + protocol::{ChainResponse, GetObjectsResponse}, +}; + +use crate::{ + block_downloader::{download_blocks, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse}, + client_pool::ClientPool, +}; + +proptest! { + #![proptest_config(ProptestConfig { + cases: 4, + max_shrink_iters: 10, + timeout: 60 * 1000, + .. ProptestConfig::default() + })] + + #[test] + fn test_block_downloader(blockchain in dummy_blockchain_stragtegy(), peers in 1_usize..128) { + let blockchain = Arc::new(blockchain); + + let tokio_pool = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap(); + + tokio_pool.block_on(async move { + timeout(Duration::from_secs(600), async move { + let client_pool = ClientPool::new(); + + let mut peer_ids = Vec::with_capacity(peers); + + for _ in 0..peers { + let client = mock_block_downloader_client(blockchain.clone()); + + peer_ids.push(client.info.id); + + client_pool.add_new_client(client); + } + + let stream = download_blocks( + client_pool, + SyncStateSvc(peer_ids) , + OurChainSvc { + genesis: *blockchain.blocks.first().unwrap().0 + }, + BlockDownloaderConfig { + buffer_size: 1_000, + in_progress_queue_size: 10_000, + check_client_pool_interval: Duration::from_secs(5), + target_batch_size: 5_000, + initial_batch_size: 1, + }); + + let blocks = stream.map(|blocks| blocks.blocks).concat().await; + + assert_eq!(blocks.len() + 1, blockchain.blocks.len()); + + for (i, block) in blocks.into_iter().enumerate() { + assert_eq!(&block, blockchain.blocks.get_index(i + 1).unwrap().1); + } + }).await + }).unwrap(); + } +} + +prop_compose! { + /// Returns a strategy to generate a [`Transaction`] that is valid for the block downloader. 
+ fn dummy_transaction_stragtegy(height: u64) + ( + extra in vec(any::<u8>(), 0..1_000), + timelock in 0_usize..50_000_000, + ) + -> Transaction { + Transaction { + prefix: TransactionPrefix { + version: 1, + timelock: Timelock::Block(timelock), + inputs: vec![Input::Gen(height)], + outputs: vec![], + extra, + }, + signatures: vec![], + rct_signatures: RctSignatures { + base: RctBase { + fee: 0, + pseudo_outs: vec![], + encrypted_amounts: vec![], + commitments: vec![], + }, + prunable: RctPrunable::Null + }, + } + } +} + +prop_compose! { + /// Returns a strategy to generate a [`Block`] that is valid for the block downloader. + fn dummy_block_stragtegy( + height: u64, + previous: [u8; 32], + ) + ( + miner_tx in dummy_transaction_stragtegy(height), + txs in vec(dummy_transaction_stragtegy(height), 0..25) + ) + -> (Block, Vec<Transaction>) { + ( + Block { + header: BlockHeader { + major_version: 0, + minor_version: 0, + timestamp: 0, + previous, + nonce: 0, + }, + miner_tx, + txs: txs.iter().map(Transaction::hash).collect(), + }, + txs + ) + } +} + +/// A mock blockchain. +struct MockBlockchain { + blocks: IndexMap<[u8; 32], (Block, Vec<Transaction>)>, +} + +impl Debug for MockBlockchain { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str("MockBlockchain") + } +} + +prop_compose! { + /// Returns a strategy to generate a [`MockBlockchain`]. + fn dummy_blockchain_stragtegy()( + blocks in vec(dummy_block_stragtegy(0, [0; 32]), 1..50_000), + ) -> MockBlockchain { + let mut blockchain = IndexMap::new(); + + for (height, mut block) in blocks.into_iter().enumerate() { + if let Some(last) = blockchain.last() { + block.0.header.previous = *last.0; + block.0.miner_tx.prefix.inputs = vec![Input::Gen(height as u64)] + } + + blockchain.insert(block.0.hash(), block); + } + + MockBlockchain { + blocks: blockchain + } + } +} + +fn mock_block_downloader_client(blockchain: Arc<MockBlockchain>) -> Client<ClearNet> { + let semaphore = Arc::new(Semaphore::new(1)); + + let (connection_guard, connection_handle) = monero_p2p::handles::HandleBuilder::new() + .with_permit(semaphore.try_acquire_owned().unwrap()) + .build(); + + let request_handler = service_fn(move |req: PeerRequest| { + let bc = blockchain.clone(); + + async move { + match req { + PeerRequest::GetChain(chain_req) => { + let mut i = 0; + while !bc.blocks.contains_key(&chain_req.block_ids[i]) { + i += 1; + + if i == chain_req.block_ids.len() { + i -= 1; + break; + } + } + + let block_index = bc.blocks.get_index_of(&chain_req.block_ids[i]).unwrap(); + + let block_ids = bc + .blocks + .get_range(block_index..) 
+ .unwrap() + .iter() + .map(|(id, _)| *id) + .take(200) + .collect::<Vec<_>>(); + + Ok(PeerResponse::GetChain(ChainResponse { + start_height: 0, + total_height: 0, + cumulative_difficulty_low64: 1, + cumulative_difficulty_top64: 0, + m_block_ids: block_ids.into(), + m_block_weights: vec![], + first_block: Default::default(), + })) + } + + PeerRequest::GetObjects(obj) => { + let mut res = Vec::with_capacity(obj.blocks.len()); + + for i in 0..obj.blocks.len() { + let block = bc.blocks.get(&obj.blocks[i]).unwrap(); + + let block_entry = BlockCompleteEntry { + pruned: false, + block: block.0.serialize().into(), + txs: TransactionBlobs::Normal( + block + .1 + .iter() + .map(Transaction::serialize) + .map(Into::into) + .collect(), + ), + block_weight: 0, + }; + + res.push(block_entry); + } + + Ok(PeerResponse::GetObjects(GetObjectsResponse { + blocks: res, + missed_ids: ByteArrayVec::from([]), + current_blockchain_height: 0, + })) + } + _ => panic!(), + } + } + .boxed() + }); + + let info = PeerInformation { + id: InternalPeerID::Unknown(rand::random()), + handle: connection_handle, + direction: ConnectionDirection::InBound, + pruning_seed: PruningSeed::NotPruned, + }; + + mock_client(info, connection_guard, request_handler) +} + +#[derive(Clone)] +struct SyncStateSvc<Z: NetworkZone>(Vec<InternalPeerID<Z::Addr>>); + +impl Service<PeerSyncRequest<ClearNet>> for SyncStateSvc<ClearNet> { + type Response = PeerSyncResponse<ClearNet>; + type Error = tower::BoxError; + type Future = + Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _: PeerSyncRequest<ClearNet>) -> Self::Future { + let peers = self.0.clone(); + + async move { Ok(PeerSyncResponse::PeersToSyncFrom(peers)) }.boxed() + } +} + +struct OurChainSvc { + genesis: [u8; 32], +} + +impl Service<ChainSvcRequest> for OurChainSvc { + type Response = ChainSvcResponse; + type Error = tower::BoxError; + type Future = + Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: ChainSvcRequest) -> Self::Future { + let genesis = self.genesis; + + async move { + Ok(match req { + ChainSvcRequest::CompactHistory => ChainSvcResponse::CompactHistory { + block_ids: vec![genesis], + cumulative_difficulty: 1, + }, + ChainSvcRequest::FindFirstUnknown(_) => ChainSvcResponse::FindFirstUnknown(1, 1), + ChainSvcRequest::CumulativeDifficulty => ChainSvcResponse::CumulativeDifficulty(1), + }) + } + .boxed() + } +} diff --git a/p2p/cuprate-p2p/src/client_pool.rs b/p2p/cuprate-p2p/src/client_pool.rs index 8b77f423..c5a83c8e 100644 --- a/p2p/cuprate-p2p/src/client_pool.rs +++ b/p2p/cuprate-p2p/src/client_pool.rs @@ -126,13 +126,16 @@ impl<N: NetworkZone> ClientPool<N> { pub fn borrow_clients<'a, 'b>( self: &'a Arc<Self>, peers: &'b [InternalPeerID<N::Addr>], - ) -> impl Iterator<Item = ClientPoolDropGuard<N>> + Captures<(&'a (), &'b ())> { + ) -> impl Iterator<Item = ClientPoolDropGuard<N>> + sealed::Captures<(&'a (), &'b ())> { peers.iter().filter_map(|peer| self.borrow_client(peer)) } } -/// TODO: Remove me when 2024 Rust -/// -/// https://rust-lang.github.io/rfcs/3498-lifetime-capture-rules-2024.html#the-captures-trick -trait Captures<U> {} -impl<T: ?Sized, U> Captures<U> for T {} +mod sealed { + /// TODO: Remove me when 2024 Rust + /// + 
/// https://rust-lang.github.io/rfcs/3498-lifetime-capture-rules-2024.html#the-captures-trick + pub trait Captures<U> {} + + impl<T: ?Sized, U> Captures<U> for T {} +} diff --git a/p2p/cuprate-p2p/src/constants.rs b/p2p/cuprate-p2p/src/constants.rs index 0c65386b..44dba917 100644 --- a/p2p/cuprate-p2p/src/constants.rs +++ b/p2p/cuprate-p2p/src/constants.rs @@ -12,6 +12,12 @@ pub(crate) const OUTBOUND_CONNECTION_ATTEMPT_TIMEOUT: Duration = Duration::from_ /// The durations of a short ban. pub(crate) const SHORT_BAN: Duration = Duration::from_secs(60 * 10); +/// The durations of a medium ban. +pub(crate) const MEDIUM_BAN: Duration = Duration::from_secs(60 * 60 * 24); + +/// The durations of a long ban. +pub(crate) const LONG_BAN: Duration = Duration::from_secs(60 * 60 * 24 * 7); + /// The default amount of time between inbound diffusion flushes. pub(crate) const DIFFUSION_FLUSH_AVERAGE_SECONDS_INBOUND: Duration = Duration::from_secs(5); @@ -34,6 +40,35 @@ pub(crate) const MAX_TXS_IN_BROADCAST_CHANNEL: usize = 50; /// TODO: it might be a good idea to make this configurable. pub(crate) const INBOUND_CONNECTION_COOL_DOWN: Duration = Duration::from_millis(500); +/// The initial amount of chain requests to send to find the best chain to sync from. +pub(crate) const INITIAL_CHAIN_REQUESTS_TO_SEND: usize = 3; + +/// The enforced maximum amount of blocks to request in a batch. +/// +/// Requesting more than this will cause the peer to disconnect and potentially lead to bans. +pub(crate) const MAX_BLOCK_BATCH_LEN: usize = 100; + +/// The timeout that the block downloader will use for requests. +pub(crate) const BLOCK_DOWNLOADER_REQUEST_TIMEOUT: Duration = Duration::from_secs(30); + +/// The maximum size of a transaction, a sanity limit that all transactions across all hard-forks must +/// be less than. +/// +/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions.html#transaction-size> +pub(crate) const MAX_TRANSACTION_BLOB_SIZE: usize = 1_000_000; + +/// The maximum amount of block IDs allowed in a chain entry response. +/// +/// ref: <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/cryptonote_config.h#L97> +// TODO: link to the protocol book when this section is added. +pub(crate) const MAX_BLOCKS_IDS_IN_CHAIN_ENTRY: usize = 25_000; + +/// The amount of failures downloading a specific batch before we stop attempting to download it. +pub(crate) const MAX_DOWNLOAD_FAILURES: usize = 5; + +/// The amount of empty chain entries to receive before we assume we have found the top of the chain. +pub(crate) const EMPTY_CHAIN_ENTRIES_BEFORE_TOP_ASSUMED: usize = 5; + #[cfg(test)] mod tests { use super::*; @@ -44,4 +79,10 @@ mod tests { fn outbound_diffusion_flush_shorter_than_inbound() { assert!(DIFFUSION_FLUSH_AVERAGE_SECONDS_OUTBOUND < DIFFUSION_FLUSH_AVERAGE_SECONDS_INBOUND); } + + /// Checks that the ban time increases from short to long. + #[test] + fn ban_times_sanity_check() { + assert!(SHORT_BAN < MEDIUM_BAN && MEDIUM_BAN < LONG_BAN); + } } diff --git a/p2p/cuprate-p2p/src/lib.rs b/p2p/cuprate-p2p/src/lib.rs index 37ea32a3..aea0cac1 100644 --- a/p2p/cuprate-p2p/src/lib.rs +++ b/p2p/cuprate-p2p/src/lib.rs @@ -4,22 +4,24 @@ //! 
a certain [`NetworkZone`] use std::sync::Arc; +use async_buffer::BufferStream; use futures::FutureExt; use tokio::{ sync::{mpsc, watch}, task::JoinSet, }; use tokio_stream::wrappers::WatchStream; -use tower::{buffer::Buffer, util::BoxCloneService, ServiceExt}; +use tower::{buffer::Buffer, util::BoxCloneService, Service, ServiceExt}; use tracing::{instrument, Instrument, Span}; use monero_p2p::{ client::Connector, client::InternalPeerID, - services::{AddressBookRequest, AddressBookResponse}, + services::{AddressBookRequest, AddressBookResponse, PeerSyncRequest}, CoreSyncSvc, NetworkZone, PeerRequestHandler, }; +mod block_downloader; mod broadcast; mod client_pool; pub mod config; @@ -28,6 +30,7 @@ mod constants; mod inbound_server; mod sync_states; +use block_downloader::{BlockBatch, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse}; pub use broadcast::{BroadcastRequest, BroadcastSvc}; use client_pool::ClientPoolDropGuard; pub use config::P2PConfig; @@ -87,7 +90,7 @@ where let inbound_handshaker = monero_p2p::client::HandShaker::new( address_book.clone(), - sync_states_svc, + sync_states_svc.clone(), core_sync_svc.clone(), peer_req_handler, inbound_mkr, @@ -136,6 +139,7 @@ where broadcast_svc, top_block_watch, make_connection_tx, + sync_states_svc, address_book: address_book.boxed_clone(), _background_tasks: Arc::new(background_tasks), }) @@ -156,6 +160,8 @@ pub struct NetworkInterface<N: NetworkZone> { make_connection_tx: mpsc::Sender<MakeConnectionRequest>, /// The address book service. address_book: BoxCloneService<AddressBookRequest<N>, AddressBookResponse<N>, tower::BoxError>, + /// The peer's sync states service. + sync_states_svc: Buffer<sync_states::PeerSyncSvc<N>, PeerSyncRequest<N>>, /// Background tasks that will be aborted when this interface is dropped. _background_tasks: Arc<JoinSet<()>>, } @@ -166,6 +172,26 @@ impl<N: NetworkZone> NetworkInterface<N> { self.broadcast_svc.clone() } + /// Starts the block downloader and returns a stream that will yield sequentially downloaded blocks. + pub fn block_downloader<C>( + &self, + our_chain_service: C, + config: BlockDownloaderConfig, + ) -> BufferStream<BlockBatch> + where + C: Service<ChainSvcRequest, Response = ChainSvcResponse, Error = tower::BoxError> + + Send + + 'static, + C::Future: Send + 'static, + { + block_downloader::download_blocks( + self.pool.clone(), + self.sync_states_svc.clone(), + our_chain_service, + config, + ) + } + /// Returns a stream which yields the highest seen sync state from a connected peer. 
pub fn top_sync_stream(&self) -> WatchStream<sync_states::NewSyncInfo> { WatchStream::from_changes(self.top_block_watch.clone()) diff --git a/p2p/monero-p2p/Cargo.toml b/p2p/monero-p2p/Cargo.toml index e416fbbb..83cfd949 100644 --- a/p2p/monero-p2p/Cargo.toml +++ b/p2p/monero-p2p/Cargo.toml @@ -10,7 +10,7 @@ default = ["borsh"] borsh = ["dep:borsh", "monero-pruning/borsh"] [dependencies] -cuprate-helper = { path = "../../helper" } +cuprate-helper = { path = "../../helper", features = ["asynch"], default-features = false } monero-wire = { path = "../../net/monero-wire", features = ["tracing"] } monero-pruning = { path = "../../pruning" } diff --git a/p2p/monero-p2p/src/client.rs b/p2p/monero-p2p/src/client.rs index 02deae51..33446819 100644 --- a/p2p/monero-p2p/src/client.rs +++ b/p2p/monero-p2p/src/client.rs @@ -10,13 +10,15 @@ use tokio::{ task::JoinHandle, }; use tokio_util::sync::PollSemaphore; -use tower::Service; +use tower::{Service, ServiceExt}; +use tracing::Instrument; use cuprate_helper::asynch::InfallibleOneshotReceiver; +use monero_pruning::PruningSeed; use crate::{ - handles::ConnectionHandle, ConnectionDirection, NetworkZone, PeerError, PeerRequest, - PeerResponse, SharedError, + handles::{ConnectionGuard, ConnectionHandle}, + ConnectionDirection, NetworkZone, PeerError, PeerRequest, PeerResponse, SharedError, }; mod connection; @@ -26,7 +28,6 @@ mod timeout_monitor; pub use connector::{ConnectRequest, Connector}; pub use handshaker::{DoHandshakeRequest, HandShaker, HandshakeError}; -use monero_pruning::PruningSeed; /// An internal identifier for a given peer, will be their address if known /// or a random u128 if not. @@ -158,11 +159,70 @@ impl<Z: NetworkZone> Service<PeerRequest> for Client<Z> { permit: Some(permit), }; - self.connection_tx - .try_send(req) - .map_err(|_| ()) - .expect("poll_ready should have been called"); + if let Err(e) = self.connection_tx.try_send(req) { + // The connection task could have closed between a call to `poll_ready` and the call to + // `call`, which means if we don't handle the error here the receiver would panic. + use mpsc::error::TrySendError; + + match e { + TrySendError::Closed(req) | TrySendError::Full(req) => { + self.set_err(PeerError::ClientChannelClosed); + + let _ = req + .response_channel + .send(Err(PeerError::ClientChannelClosed.into())); + } + } + } rx.into() } } + +/// Creates a mock [`Client`] for testing purposes. 
+/// +/// `request_handler` will be used to handle requests sent to the [`Client`] +pub fn mock_client<Z: NetworkZone, S>( + info: PeerInformation<Z::Addr>, + connection_guard: ConnectionGuard, + mut request_handler: S, +) -> Client<Z> +where + S: crate::PeerRequestHandler, +{ + let (tx, mut rx) = mpsc::channel(1); + + let task_span = tracing::error_span!("mock_connection", addr = %info.id); + + let task_handle = tokio::spawn( + async move { + let _guard = connection_guard; + loop { + let Some(req): Option<connection::ConnectionTaskRequest> = rx.recv().await else { + tracing::debug!("Channel closed, closing mock connection"); + return; + }; + + tracing::debug!("Received new request: {:?}", req.request.id()); + let res = request_handler + .ready() + .await + .unwrap() + .call(req.request) + .await + .unwrap(); + + tracing::debug!("Sending back response"); + + let _ = req.response_channel.send(Ok(res)); + } + } + .instrument(task_span), + ); + + let timeout_task = tokio::spawn(futures::future::pending()); + let semaphore = Arc::new(Semaphore::new(1)); + let error_slot = SharedError::new(); + + Client::new(info, tx, task_handle, timeout_task, semaphore, error_slot) +} From fe1d5faac974fff5fb7a2278b40a7636ccc1c830 Mon Sep 17 00:00:00 2001 From: hinto-janai <hinto.janai@protonmail.com> Date: Fri, 21 Jun 2024 20:33:29 -0400 Subject: [PATCH 05/11] contributing: expand issue/PR sections, re-format (#186) * contributing.md: expand tracking issue/pr section * add tracking issue section * re-format sections * typos * fix links * add `.github/pull_request_template.md` * add `Pull request title and description` section * wording --- .github/pull_request_template.md | 35 +++++++ CONTRIBUTING.md | 168 +++++++++++++++++++++++-------- 2 files changed, 159 insertions(+), 44 deletions(-) create mode 100644 .github/pull_request_template.md diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 00000000..43302ade --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,35 @@ +<!-- +PR titles should be: +<AREA>: <SHORT_DESCRIPTION> + +For example: +books: fix typo +--> + +<!-- +If your pull request is long and/or has sections +that need clarifying, consider leaving a review on +your own PR with comments explaining the changes. +--> + +### What +<!-- +If applicable, close a related issue with: + +Fixes #<BUG_ISSUE_NUMBER> + +...or... + +Closes #<FEATURE_ISSUE_NUMBER> +--> + +<!-- Describe the pull request in detail. --> + +### Why +<!-- If applicable, describe why this pull request exists. --> + +### Where +<!-- If applicable, describe the places this pull request affects. --> + +### How +<!-- If applicable, describe how this pull request works. --> diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b05088a4..0c9c1f03 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,37 +4,87 @@ Thank you for wanting to help out! Cuprate is in the stage where things are likely to change quickly, so it's recommended you ask questions in our public [Matrix room](https://matrix.to/#/#cuprate:monero.social). -- [1. Submitting a pull request](#1-submitting-a-pull-request) - - [1.1 Rust toolchain](#11-rust-toolchain) - - [1.2 Draft PR](#12-draft-pr) - - [1.3 Passing CI](#13-passing-ci) - - [1.4 Ready for review](#14-ready-for-review) -- [2. Crate names](#2-crate-names) -- [3. Coding guidelines](#3-coding-guidelines) -- [4. Keeping track of issues and PRs](#4-keeping-track-of-issues-and-prs) +- [1. 
Submitting an issue](#1-submitting-an-issue) + - [1.1 Discussion](#11-discussion) + - [1.2 Proposal](#12-proposal) + - [1.3 Tracking issue](#13-tracking-issue) +- [2. Submitting a pull request](#2-submitting-a-pull-request) + - [2.1 Rust toolchain](#21-rust-toolchain) + - [2.2 Draft PR](#22-draft-pr) + - [2.3 Passing CI](#23-passing-ci) + - [2.4 Ready for review](#24-ready-for-review) +- [3. Keeping track of issues and PRs](#3-keeping-track-of-issues-and-prs) + - [3.1 Labels](#31-labels) + - [3.2 Tracking issues](#32-tracking-issues) +- [4. Coding guidelines](#4-coding-guidelines) + - [4.1 General guidelines](#41-general-guidelines) + - [4.2 Crate names](#42-crate-names) + - [4.3 Pull request title and description](#43-pull-request-title-and-description) - [5. Documentation](#5-documentation) - [6. Books](#6-books) - [6.1 Architecture book](#61-architecture-book) - [6.2 Protocol book](#62-protocol-book) - [6.3 User book](#63-user-book) -## 1. Submitting a pull request -Once you have found something you would like to work on by: +## 1. Submitting an issue +Before starting work, consider opening an issue for discussion. + +If you have a plan already, you can jump straight into [submitting a pull request](#2-submitting-a-pull-request). + +Otherwise, see below for issue types and what they're used for. + +### 1.1 Discussion +These are for general discussion on topics that have questions that aren't fully answered yet. + +If you would like to discuss a topic and get some feedback, consider [opening a discussion](https://github.com/Cuprate/cuprate/issues/new/choose). + +Examples: +- https://github.com/Cuprate/cuprate/issues/40 +- https://github.com/Cuprate/cuprate/issues/53 +- https://github.com/Cuprate/cuprate/issues/163 + +### 1.2 Proposal +These are formal issues that specify changes that are _almost_ ready for implementation. + +These should answer some basic questions: +- **What** is this proposal for? +- **Why** is this proposal needed? +- **Where** will this proposal make changes to? +- **How** will this proposal be implemented? + +If you have a close to fully fleshed out idea, consider [opening a proposal](https://github.com/Cuprate/cuprate/issues/new/choose). + +Opening a PR and writing the proposal in the PR description is also viable. + +Examples: +- https://github.com/Cuprate/cuprate/pull/146 +- https://github.com/Cuprate/cuprate/issues/106 +- https://github.com/Cuprate/cuprate/issues/153 +- https://github.com/Cuprate/cuprate/issues/181 + +### 1.3 Tracking issue +These are meta-issues that track an in-progress implementation. + +See [`Tracking issues`](#32-tracking-issues) for more info. + +## 2. Submitting a pull request +Once you have found something you would like to work on after: +- Discussing an idea on an [issue](#1-submitting-an-issue) - Looking at the [open issues](https://github.com/Cuprate/cuprate/issues) - Looking at issues with the [`A-help-wanted`](https://github.com/Cuprate/cuprate/issues?q=is%3Aissue+is%3Aopen+label%3AE-help-wanted) label -- or joining Cuprate's [Matrix room](https://matrix.to/#/#cuprate:monero.social) and asking +- Joining Cuprate's [Matrix room](https://matrix.to/#/#cuprate:monero.social) and asking it is recommended to make your interest on working on that thing known so people don't duplicate work. 
Before starting, consider reading/using Cuprate's: -- [`Documentation`](#5-documentation) (practical `cargo` docs) -- [`Books`](#6-books) (Cuprate's architecture and protocol) +- [`Documentation`](#5-documentation) +- [`Books`](#6-books) These may answer some questions you have, or may confirm an issue you would like to fix. _Note: Cuprate is currently a work-in-progress; documentation will be changing/unfinished._ -### 1.1 Rust toolchain +### 2.1 Rust toolchain Cuprate is written in [Rust](https://rust-lang.org). If you are editing code, you will need Rust's toolchain and package manager, @@ -42,12 +92,12 @@ If you are editing code, you will need Rust's toolchain and package manager, Get started with Rust here: <https://www.rust-lang.org/learn/get-started>. -### 1.2 Draft PR +### 2.2 Draft PR Consider opening a draft PR until you have passed all CI. This is also the stage where you can ask for feedback from others. Keep in mind that feedback may take time especially if the change is large. -### 1.3 Passing CI +### 2.3 Passing CI Each commit pushed in a PR will trigger our [lovely, yet pedantic CI](https://github.com/Cuprate/cuprate/blob/main/.github/workflows/ci.yml). It currently: @@ -57,7 +107,7 @@ It currently: - Runs [`clippy`](https://github.com/rust-lang/rust-clippy) (and fails on warnings) - Runs all tests - Builds all targets -- Automatically add approriate [labels](#4-keeping-track-of-issues-and-prs) to your PR +- Automatically adds approriate [labels](#31-labels) to your PR Before pushing your code, please run the following at the root of the repository: @@ -79,45 +129,25 @@ After that, ensure all other CI passes by running: **Note: in order for some tests to work, you will need to place a [`monerod`](https://www.getmonero.org/downloads/) binary at the root of the repository.** -### 1.4 Ready for review +### 2.4 Ready for review Once your PR has passed all CI and is ready to go, open it for review. Others will leave their thoughts and may ask for changes to be made. Finally, if everything looks good, we will merge your code! Thank you for contributing! -## 2. Crate names -All of Cuprate's crates (libraries) are prefixed with `cuprate-`. All directories containing crates however, are not. +## 3. Keeping track of issues and PRs +The Cuprate GitHub repository has a lot of issues and PRs to keep track of. -For example: +This section documents tools used to help with this. -| Crate Directory | Crate Name | -|--------------------|--------------------| -| `storage/database` | `cuprate-database` | -| `net/levin` | `cuprate-levin` | -| `net/wire` | `cuprate-wire` | - -## 3. Coding guidelines -This is a list of rules that are not mandated by any automation, but contributors generally follow. - -You should keep these in mind when submitting code: - -- Separate and sort imports as core, std, third-party, Cuprate crates, current crate -- Follow the [Rust API Guidelines](https://rust-lang.github.io/api-guidelines) -- `// Comment like this.` and not `//like this` -- Use `TODO` instead of `FIXME` -- Avoid `unsafe` - -And the most important rule: -- Break any and all of the above rules when it makes sense - -## 4. Keeping track of issues and PRs -The Cuprate GitHub repository has a lot of issues and PRs to keep track of. Cuprate makes use of generic labels and labels grouped by a prefixes to help with this. +### 3.1 Labels +Cuprate makes use of labels grouped by prefixes. 
Some labels will be [automatically added/removed](https://github.com/Cuprate/cuprate/tree/main/.github/labeler.yml) if certain file paths have been changed in a PR. The following section explains the meaning of various labels used. This section is primarily targeted at maintainers. Most contributors aren't able to set these labels. -| Labels | Description | Example | +| Prefix | Description | Example | |--------------|-------------|---------| | [A-] | The **area** of the project an issue relates to. | `A-storage`, `A-rpc`, `A-docs` | [C-] | The **category** of an issue. | `C-cleanup`, `C-optimization` @@ -135,6 +165,56 @@ This section is primarily targeted at maintainers. Most contributors aren't able [O-]: https://github.com/Cuprate/cuprate/labels?q=O [P-]: https://github.com/Cuprate/cuprate/labels?q=P +### 3.2 Tracking issues +If you are working on a larger effort, consider opening a [tracking issue](https://github.com/Cuprate/cuprate/issues/new/choose)! + +The main purpose of these are to track efforts that may contain multiple PRs and/or are generally spread out. These don't usually contain the "why", but if they do, they are brief. These contain no implementation details or the how, as those are for the issues/PRs that are being tracked. + +Examples: +- https://github.com/Cuprate/cuprate/issues/187 +- https://github.com/Cuprate/cuprate/issues/183 + +## 4. Coding guidelines +These are some rules that are not mandated by any automation, but contributors generally follow. + +### 4.1 General guidelines +General guidelines you should keep these in mind when submitting code: + +- Separate and sort imports as `core`, `std`, `third-party`, Cuprate crates, current crate +- Follow the [Rust API Guidelines](https://rust-lang.github.io/api-guidelines) +- `// Comment like this.` and not `//like this` +- Use `TODO` instead of `FIXME` +- Avoid `unsafe` + +And the most important rule: +- Break any and all of the above rules when it makes sense + +### 4.2 Crate names +All of Cuprate's crates (libraries) are prefixed with `cuprate-`. All directories containing crates however, are not. + +For example: + +| Crate Directory | Crate Name | +|--------------------|--------------------| +| `storage/database` | `cuprate-database` | +| `net/levin` | `cuprate-levin` | +| `net/wire` | `cuprate-wire` | + +### 4.3 Pull request title and description +In general, pull request titles should follow this syntax: +``` +<AREA>: <SHORT_DESCRIPTION> +``` + +For example: +``` +books: fix typo +``` + +The description of pull requests should generally follow the template laid out in [`.github/pull_request_template.md`](.github/pull_request_template.md). + +If your pull request is long and/or has sections that need clarifying, consider leaving a review on your own PR with comments explaining the changes. + ## 5. Documentation Cuprate's crates (libraries) have inline documentation. 
From 519d871ccbde33f1da10020408f9f60df626e994 Mon Sep 17 00:00:00 2001 From: Boog900 <boog900@tutanota.com> Date: Mon, 24 Jun 2024 00:23:39 +0000 Subject: [PATCH 06/11] books: move in protocol book (#169) * add protocol bool * update url * fix typos * Cuprate's protocol book -> Monero * Apply suggestions from code review Co-authored-by: hinto-janai <hinto.janai@protonmail.com> * fix typo * POW -> PoW * revert weird line changes --------- Co-authored-by: hinto-janai <hinto.janai@protonmail.com> --- README.md | 14 +- books/README.md | 14 +- books/protocol/README.md | 4 +- books/protocol/book.toml | 22 ++ books/protocol/src/INTRO.md | 5 + books/protocol/src/SUMMARY.md | 27 ++ books/protocol/src/consensus_rules.md | 50 +++ books/protocol/src/consensus_rules/blocks.md | 111 ++++++ .../src/consensus_rules/blocks/difficulty.md | 67 ++++ .../src/consensus_rules/blocks/miner_tx.md | 92 +++++ .../src/consensus_rules/blocks/reward.md | 45 +++ .../src/consensus_rules/blocks/weights.md | 113 ++++++ .../src/consensus_rules/genesis_block.md | 92 +++++ .../protocol/src/consensus_rules/hardforks.md | 114 ++++++ .../src/consensus_rules/transactions.md | 89 +++++ .../consensus_rules/transactions/inputs.md | 160 ++++++++ .../consensus_rules/transactions/outputs.md | 64 +++ .../consensus_rules/transactions/ring_ct.md | 89 +++++ .../transactions/ring_ct/borromean.md | 51 +++ .../transactions/ring_ct/bulletproofs+.md | 52 +++ .../transactions/ring_ct/bulletproofs.md | 55 +++ .../transactions/ring_ct/clsag.md | 41 ++ .../transactions/ring_ct/mlsag.md | 123 ++++++ .../transactions/ring_signatures.md | 39 ++ .../transactions/unlock_time.md | 68 ++++ books/protocol/src/p2p_network.md | 3 + books/protocol/src/p2p_network/epee.md | 3 + books/protocol/src/p2p_network/levin.md | 68 ++++ books/protocol/src/p2p_network/messages.md | 37 ++ books/protocol/src/pruning.md | 373 ++++++++++++++++++ books/protocol/svgbob.css | 4 + 31 files changed, 2082 insertions(+), 7 deletions(-) create mode 100644 books/protocol/book.toml create mode 100644 books/protocol/src/INTRO.md create mode 100644 books/protocol/src/SUMMARY.md create mode 100644 books/protocol/src/consensus_rules.md create mode 100644 books/protocol/src/consensus_rules/blocks.md create mode 100644 books/protocol/src/consensus_rules/blocks/difficulty.md create mode 100644 books/protocol/src/consensus_rules/blocks/miner_tx.md create mode 100644 books/protocol/src/consensus_rules/blocks/reward.md create mode 100644 books/protocol/src/consensus_rules/blocks/weights.md create mode 100644 books/protocol/src/consensus_rules/genesis_block.md create mode 100644 books/protocol/src/consensus_rules/hardforks.md create mode 100644 books/protocol/src/consensus_rules/transactions.md create mode 100644 books/protocol/src/consensus_rules/transactions/inputs.md create mode 100644 books/protocol/src/consensus_rules/transactions/outputs.md create mode 100644 books/protocol/src/consensus_rules/transactions/ring_ct.md create mode 100644 books/protocol/src/consensus_rules/transactions/ring_ct/borromean.md create mode 100644 books/protocol/src/consensus_rules/transactions/ring_ct/bulletproofs+.md create mode 100644 books/protocol/src/consensus_rules/transactions/ring_ct/bulletproofs.md create mode 100644 books/protocol/src/consensus_rules/transactions/ring_ct/clsag.md create mode 100644 books/protocol/src/consensus_rules/transactions/ring_ct/mlsag.md create mode 100644 books/protocol/src/consensus_rules/transactions/ring_signatures.md create mode 100644 
books/protocol/src/consensus_rules/transactions/unlock_time.md create mode 100644 books/protocol/src/p2p_network.md create mode 100644 books/protocol/src/p2p_network/epee.md create mode 100644 books/protocol/src/p2p_network/levin.md create mode 100644 books/protocol/src/p2p_network/messages.md create mode 100644 books/protocol/src/pruning.md create mode 100644 books/protocol/svgbob.css diff --git a/README.md b/README.md index d5179730..100900d7 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,7 @@ _(work-in-progress)_ </div> ## Contents + - [About](#about) - [Documentation](#documentation) - [Contributing](#contributing) @@ -27,13 +28,17 @@ TODO: add these sections someday. --> ## About -Cuprate is an effort to create an alternative [Monero](https://getmonero.org) node implementation in [Rust](http://rust-lang.org). -It will be able to independently validate Monero consensus rules, providing a layer of security and redundancy for the Monero network. +Cuprate is an effort to create an alternative [Monero](https://getmonero.org) node implementation +in [Rust](http://rust-lang.org). + +It will be able to independently validate Monero consensus rules, providing a layer of security and redundancy for the +Monero network. <!-- TODO: add some details about what Cuprate is and is not, goals, status --> ## Documentation + _Cuprate is currently a work-in-progress; documentation will be changing/unfinished._ Cuprate maintains various documentation books: @@ -41,18 +46,21 @@ Cuprate maintains various documentation books: | Book | Description | |-----------------------------------------------------------------|------------------------------------------------------------| | [Cuprate's architecture book](https://architecture.cuprate.org) | Documents Cuprate's internal architecture & implementation | -| [Cuprate's protocol book](https://monero-book.cuprate.org) | Documents the Monero protocol | +| [Monero's protocol book](https://monero-book.cuprate.org) | Documents the Monero protocol | | [Cuprate's user book](https://user.cuprate.org) | Practical user-guide for using `cuprated` | For crate (library) documentation, see the `Documentation` section in [`CONTRIBUTING.md`](CONTRIBUTING.md). ## Contributing + See [`CONTRIBUTING.md`](CONTRIBUTING.md). ## Security + Cuprate has a responsible vulnerability disclosure policy, see [`SECURITY.md`](SECURITY.md). ## License + The `binaries/` directory is licensed under AGPL-3.0, everything else is licensed under MIT. See [`LICENSE`](LICENSE) for more details. diff --git a/books/README.md b/books/README.md index d364a22f..b107929b 100644 --- a/books/README.md +++ b/books/README.md @@ -1,20 +1,26 @@ ## Books + This directory contains the source files for Cuprate's various books. The source files are edited here, and published in other repositories, see: + - [Cuprate's architecture book](https://github.com/Cuprate/architecture-book) -- [Cuprate's protocol book](https://github.com/Cuprate/monero-book) +- [Monero's protocol book](https://github.com/Cuprate/monero-book) - [Cuprate's user book](https://github.com/Cuprate/user-book) ## Build tools -Building the book(s) requires [Rust's cargo tool](https://doc.rust-lang.org/cargo/getting-started/installation.html) and [mdBook](https://github.com/rust-lang/mdBook). + +Building the book(s) requires [Rust's cargo tool](https://doc.rust-lang.org/cargo/getting-started/installation.html) +and [mdBook](https://github.com/rust-lang/mdBook). 
After installing `cargo`, install `mdbook` with: + ```bash cargo install mdbook ``` ## Building + To build a book, go into a book's directory and build: ```bash @@ -23,7 +29,9 @@ cd user/ mdbook build ``` -The output will be in the `book` subdirectory (`user/book` for the above example). To open the book, you can open it in your web browser like so: +The output will be in the `book` subdirectory (`user/book` for the above example). To open the book, you can open it in +your web browser like so: + ```bash mdbook build --open ``` diff --git a/books/protocol/README.md b/books/protocol/README.md index 49c63b88..9b9d7f04 100644 --- a/books/protocol/README.md +++ b/books/protocol/README.md @@ -1,6 +1,8 @@ -## Cuprate's protocol book +## Monero's protocol book + This book documents the Monero protocol. See: + - <https://monero-book.cuprate.org> - <https://github.com/Cuprate/monero-book> diff --git a/books/protocol/book.toml b/books/protocol/book.toml new file mode 100644 index 00000000..e758f651 --- /dev/null +++ b/books/protocol/book.toml @@ -0,0 +1,22 @@ +[book] +authors = ["Boog900"] +language = "en" +multilingual = false +src = "src" +title = "The Monero Book" + +[build] +create-missing = false + +[output.html] +mathjax-support = true +additional-css = ["svgbob.css"] +git-repository-url = "https://github.com/Cuprate/cuprate/tree/main/books/protocol" +git-repository-icon = "fa-github" +no-section-label = true + +[output.html.fold] +enable = true +level = 2 + +[preprocessor.svgbob] diff --git a/books/protocol/src/INTRO.md b/books/protocol/src/INTRO.md new file mode 100644 index 00000000..cd509f82 --- /dev/null +++ b/books/protocol/src/INTRO.md @@ -0,0 +1,5 @@ +#  + +This book aims to document the Monero protocol. Currently, work is being done to document Monero's consensus rules. + +This being completed as a part of [Cuprate](https://github.com/Cuprate/cuprate), the Rust Monero node. 
diff --git a/books/protocol/src/SUMMARY.md b/books/protocol/src/SUMMARY.md new file mode 100644 index 00000000..1a4b1f0e --- /dev/null +++ b/books/protocol/src/SUMMARY.md @@ -0,0 +1,27 @@ +# Summary + +[Introduction](./INTRO.md) + +- [Consensus Rules](./consensus_rules.md) + - [The Genesis Block](./consensus_rules/genesis_block.md) + - [Hard Forks](./consensus_rules/hardforks.md) + - [Blocks](./consensus_rules/blocks.md) + - [Difficulty](./consensus_rules/blocks/difficulty.md) + - [Weights](./consensus_rules/blocks/weights.md) + - [Block Reward](./consensus_rules/blocks/reward.md) + - [Miner Transactions](./consensus_rules/blocks/miner_tx.md) + - [Transactions](./consensus_rules/transactions.md) + - [Inputs](./consensus_rules/transactions/inputs.md) + - [Outputs](./consensus_rules/transactions/outputs.md) + - [Unlock Time](./consensus_rules/transactions/unlock_time.md) + - [Ring Signatures](./consensus_rules/transactions/ring_signatures.md) + - [Ring CT](./consensus_rules/transactions/ring_ct.md) + - [Borromean](./consensus_rules/transactions/ring_ct/borromean.md) + - [MLSAG](./consensus_rules/transactions/ring_ct/mlsag.md) + - [Bulletproofs](./consensus_rules/transactions/ring_ct/bulletproofs.md) + - [CLSAG](./consensus_rules/transactions/ring_ct/clsag.md) + - [Bulletproofs+](./consensus_rules/transactions/ring_ct/bulletproofs+.md) +- [P2P Network](./p2p_network.md) + - [Levin Protocol](./p2p_network/levin.md) + - [P2P Messages](./p2p_network/messages.md) +- [Pruning](./pruning.md) diff --git a/books/protocol/src/consensus_rules.md b/books/protocol/src/consensus_rules.md new file mode 100644 index 00000000..b06e4a8c --- /dev/null +++ b/books/protocol/src/consensus_rules.md @@ -0,0 +1,50 @@ +# Consensus Rules + +This chapter contains all of Monero's consensus rules, from genesis to now. Some rules +are complex so have been split into their own chapter. + +Rules that are not bound to consensus (relay rules) are not included here. Also we have not documented "rules" which are enforced by +(de)serialization, for example it's impossible to have a ringCT signature in a version 1 transaction, rules that are unclear if they +can be omitted or not should _always_ be included. + +## Index + +1. [The Genesis Block](./consensus_rules/genesis_block.md) +2. [Hard Forks](./consensus_rules/hardforks.md) +3. [Blocks](./consensus_rules/blocks.md) +4. [Transactions](./consensus_rules/transactions.md) + +## Definitions + +Canonically Encoded Scalar: +an Ed25519 scalar which is fully reduced mod l, where \\(l = 2^{252} + 27742317777372353535851937790883648493 \\). + +Canonically Encoded Point: +an Ed25519 point which is not the negative identity and with y coordinate fully reduced mod p, where \\(p = 2^{255} - 19 \\). + +Prime Order Point: +a point in the prime subgroup. +PoW Hash: +the hash calculated by using the active proof of work function. + +Block Hash: +the keccak hash of the block. + +Transaction Blob: +the raw bytes of a serialized transaction. + +Block Blob: +the raw bytes of a serialized block. + +Chain Height: +the amount of blocks in the chain, this is different to the height of the top block as +blocks start counting at 0. + +Ring (transactions inputs): +the group of potential outputs of which one is the true spend. + +Decoys (transactions inputs): +the fake outputs used to hide the true spend, the length of this is equal to one minus the `Rings` length. 
+ +MixIns (transactions inputs): +another term for `Decoys` diff --git a/books/protocol/src/consensus_rules/blocks.md b/books/protocol/src/consensus_rules/blocks.md new file mode 100644 index 00000000..1137f422 --- /dev/null +++ b/books/protocol/src/consensus_rules/blocks.md @@ -0,0 +1,111 @@ +# Blocks + +## Introduction + +This chapter contains all the rules that apply to a block. Miner transactions are included in this section as the rules that apply to them +are different to normal transactions. + +## Index + +1. [Block Rules](./blocks.md#block-rules) +2. [Difficulty](./blocks/difficulty.md) +3. [Weights](./blocks/weights.md) +4. [Block Reward](./blocks/reward.md) +5. [Miner Transaction](./blocks/miner_tx.md) + +## Block Rules + +### Block Weight And Size + +The `block blob` must not be bigger than (2 * the [effective median weight](./blocks/weights.md#effective-median-weight) + 100)[^block-size-check]. + +The [block weight](./blocks/weights.md#block-weights) must not be more than 2 * +[the median weight for coinbase checks](./blocks/weights.md#median-weight-for-coinbase-checks)[^block-weight-limit]. + +### Amount Of Transactions + +The amount of transactions in a block (including the miner transaction) must be less than `0x10000000`[^max-amount-of-txs]. + +### No Duplicate Transactions + +There must be no duplicate transactions in the block or the blockchain[^no-duplicate-txs]. + +### Key Images + +There must be no duplicate key images in the block[^no-duplicate-ki], or the whole chain. + +### Previous ID + +The blocks `prev_id` must equal the `block hash` of the last block[^prev_id]. + +### PoW Function + +The proof of work function used depends on the hard-fork[^pow-func]: + +| hard-fork | PoW function | +|------------|----------------| +| 1 to 6 | CryptoNight v0 | +| 7 | CryptoNight v1 | +| 8 to 9 | CryptoNight v2 | +| 10 to 11 | CryptoNight R | +| 12 onwards | RandomX | + +> For block 202612 always return the same PoW hash, no matter the network[^202612-pow-hash]. +> +> PoW hash: `84f64766475d51837ac9efbef1926486e58563c95a19fef4aec3254f03000000` + +### Checking PoW Hash + +See [checking PoW in the difficulty chapter](./blocks/difficulty.md#checking-a-blocks-proof-of-work). + +### RandomX Seed + +The RandomX seed, which is used to set up the dataset, is a previous block hash in the blockchain. + +The seed height is 0 if the current height is below or equal to \\( 2048 + 64 \\) otherwise is got by: + +\\( seedHeight = (height - 64 - 1) \land \lnot(2048 - 1) \\) + +with \\( \land \\) being a bit-and and \\( \lnot \\) being a bit-not. + +You then get the block hash at `seedHeight` which is then the RandomX seed.[^rx-seed] + +### Version And Vote + +The block's major version must equal to the current hard-fork and the vote must be greater than or equal to the current +hard-fork[^version-vote]. + +> Vote is not always the same as the minor version, see [here](./hardforks.md#blocks-version-and-vote). + +### Timestamp + +The block's timestamp must not be more than the current UNIX time + 2 hours[^timestamp-upper-limit] and the timestamp +must not be less than +the median timestamp over the last 60 blocks[^timestamp-lower-limit], if there are less than 60 blocks in the chain then +the timestamp is always valid. 
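As a rough illustration of the [RandomX seed](#randomx-seed) rule above, here is a minimal Rust sketch of the seed height calculation. The constant and function names are illustrative and not taken from any particular implementation.

```rust
/// Blocks per RandomX seed epoch and the lag before switching seeds,
/// matching the 2048 and 64 values used in the formula above.
const SEED_EPOCH: u64 = 2048;
const SEED_LAG: u64 = 64;

/// Returns the height of the block whose hash is used as the RandomX seed.
fn randomx_seed_height(height: u64) -> u64 {
    if height <= SEED_EPOCH + SEED_LAG {
        0
    } else {
        // (height - 64 - 1) AND NOT(2048 - 1): round down to the start of
        // the last epoch that is at least 64 blocks old.
        (height - SEED_LAG - 1) & !(SEED_EPOCH - 1)
    }
}

fn main() {
    assert_eq!(randomx_seed_height(2112), 0);
    assert_eq!(randomx_seed_height(2113), 2048);
    assert_eq!(randomx_seed_height(10_000), 8192);
}
```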
+ +--- + +[^block-size-check]: <https://github.com/monero-project/monero/blob/67d190ce7c33602b6a3b804f633ee1ddb7fbb4a1/src/cryptonote_core/cryptonote_core.cpp#L1684> + +[^block-weight-limit]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L1418-L1428> && <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_basic_impl.cpp#L107> + +[^max-amount-of-txs]: <https://github.com/monero-project/monero/blob/67d190ce7c33602b6a3b804f633ee1ddb7fbb4a1/src/crypto/tree-hash.c#L55> + +[^no-duplicate-txs]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L5267> && <https://github.com/monero-project/monero/blob/67d190ce7c33602b6a3b804f633ee1ddb7fbb4a1/src/cryptonote_core/blockchain.cpp#L4319> + +[^no-duplicate-ki]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L5281> + +[^prev_id]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4150> + +[^pow-func]: <https://github.com/monero-project/monero/blob/67d190ce7c33602b6a3b804f633ee1ddb7fbb4a1/src/cryptonote_core/cryptonote_tx_utils.cpp#L689-L704> + +[^202612-pow-hash]: <https://github.com/monero-project/monero/blob/67d190ce7c33602b6a3b804f633ee1ddb7fbb4a1/src/cryptonote_core/cryptonote_tx_utils.cpp#L683> + +[^rx-seed]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/crypto/rx-slow-hash.c#L179-L186> + +[^version-vote]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/hardfork.cpp#L109> + +[^timestamp-upper-limit]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4064> + +[^timestamp-lower-limit]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4045> diff --git a/books/protocol/src/consensus_rules/blocks/difficulty.md b/books/protocol/src/consensus_rules/blocks/difficulty.md new file mode 100644 index 00000000..c6dc9c91 --- /dev/null +++ b/books/protocol/src/consensus_rules/blocks/difficulty.md @@ -0,0 +1,67 @@ +# Difficulty + +Difficulty is a measure used to keep block production at a constant rate, it is the average amount of hashes before a solution +is found. + +## Checking A Block's Proof Of Work + +To check a block's `PoW hash` you interpret the hash as a little endian integer and multiply it by the difficulty, if the result +does not overflow the hash is valid[^check-pow]: + +\\(Hash * difficulty <= MAXu256 \\) + +## Calculating Difficulty + +To calculate difficulty, Monero keeps a window of the last 735[^diff-blocks-count] timestamps and cumulative difficulties, +if there are not enough blocks, then you just use as many as possible. + +> The genesis block is skipped for these calculations[^skip-genesis] so should not be included in the timestamp / CD list but it is +> included in the cumulative difficulty of the chain. + +If the amount of blocks is less than or equal to 1 then 1 is returned as the difficulty[^amt-blocks-1]. + +The timestamps and cumulative difficulties are then shortened to 720[^diff-window] blocks so that calculations lag 15 blocks behind +the chain. + +The timestamps are then sorted, in ascending order. 
We now need to get a time span value to do this we first remove the outliers: + +If the number of timestamps is less than or equal to the amount of blocks we are accounting for (600: 720 - 2 * 60) then the lower +is set to 0 and the upper is set to the length of timestamps. Otherwise, if we have enough timestamps, the lower and upper is calculated +by[^calculating-lower-upper]: + +\\(lower = \frac{len(timestamps) - 600+1}{2} \\) + +\\(upper = lower + 600 \\) + +We then get the timestamp at position `lower` and take this away from the timestamp at position `upper -1` to get `timeSpan`. +If `timeSpan` is 0 we set it to 1[^timespan0]. + +We also get the cumulative difficulty at position `lower` and take this away from the cumulative difficulty at position `upper -1` to get `totalWork`. + +The next difficulty is then calculated by[^final-diff-cal]: + +\\(difficulty = \frac{totalWork * targetSeconds + timeSpan -1}{timeSpan} \\) + +## Target Seconds + +For hard-fork v1 the target seconds is 60, so one block a minute. For hard-fork 2 onwards block time is 120[^target-block-time]. + +--- + +[^check-pow]: <https://github.com/monero-project/monero/blob/67d190ce7c33602b6a3b804f633ee1ddb7fbb4a1/src/cryptonote_basic/difficulty.cpp#L196> + +[^diff-blocks-count]: <https://github.com/monero-project/monero/blob/67d190ce7c33602b6a3b804f633ee1ddb7fbb4a1/src/cryptonote_config.h#L84> + +[^skip-genesis]: <https://github.com/monero-project/monero/blob/67d190ce7c33602b6a3b804f633ee1ddb7fbb4a1/src/cryptonote_core/blockchain.cpp#L849C40-L849C65> + +[^amt-blocks-1]: <https://github.com/monero-project/monero/blob/67d190ce7c33602b6a3b804f633ee1ddb7fbb4a1/src/cryptonote_basic/difficulty.cpp#L214> + +[^diff-window]: <https://github.com/monero-project/monero/blob/67d190ce7c33602b6a3b804f633ee1ddb7fbb4a1/src/cryptonote_config.h#L81> && <https://github.com/monero-project/monero/blob/67d190ce7c33602b6a3b804f633ee1ddb7fbb4a1/src/cryptonote_basic/difficulty.cpp#L205> + +[^calculating-lower-upper]: <https://github.com/monero-project/monero/blob/67d190ce7c33602b6a3b804f633ee1ddb7fbb4a1/src/cryptonote_basic/difficulty.cpp#L222> + +[^timespan0]: <https://github.com/monero-project/monero/blob/67d190ce7c33602b6a3b804f633ee1ddb7fbb4a1/src/cryptonote_basic/difficulty.cpp#L231> + +[^final-diff-cal]: <https://github.com/monero-project/monero/blob/67d190ce7c33602b6a3b804f633ee1ddb7fbb4a1/src/cryptonote_basic/difficulty.cpp#L236> + +[^target-block-time]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L5512> diff --git a/books/protocol/src/consensus_rules/blocks/miner_tx.md b/books/protocol/src/consensus_rules/blocks/miner_tx.md new file mode 100644 index 00000000..74ec1f54 --- /dev/null +++ b/books/protocol/src/consensus_rules/blocks/miner_tx.md @@ -0,0 +1,92 @@ +# Miner Transaction Rules + +## Introduction + +Miner transactions are handled differently to normal transactions, see [here](../transactions.md) for the rules on normal transactions. + +## Rules + +### Version + +The transactions version must be either 1 or 2[^versions-allowed]. + +The version can be 1 or 2 up to hard-fork 12 then it must be 2[^weird-version-rules]. + +### Input + +The transaction must only have one input and it must be of type `txin_gen`[^input-type]. + +The height specified in the input must be the actual block height[^input-height]. + +### RingCT Type + +From hard-fork 12 version 2 miner transactions must have a ringCT type of `Null`[^null-ringct]. 
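As a rough sketch of the version, input and RingCT type rules above, the following Rust snippet checks a simplified miner transaction. The `MinerTx` type and its fields are hypothetical stand-ins, not types from any real codebase.

```rust
/// Hypothetical, simplified representations of a miner transaction.
enum Input {
    Gen { height: u64 },
    ToKey,
}

enum RctType {
    Null,
    Other,
}

struct MinerTx {
    version: u64,
    inputs: Vec<Input>,
    rct_type: RctType,
}

fn check_miner_tx_prefix(tx: &MinerTx, block_height: u64, hf: u8) -> Result<(), &'static str> {
    // The version must be 1 or 2, and from hard-fork 12 it must be 2.
    match tx.version {
        2 => (),
        1 if hf < 12 => (),
        _ => return Err("invalid miner tx version"),
    }
    // Exactly one input, of type txin_gen, committing to the real block height.
    match tx.inputs.as_slice() {
        [Input::Gen { height }] if *height == block_height => (),
        _ => return Err("miner tx must have one txin_gen input at the block height"),
    }
    // From hard-fork 12 the RingCT type must be Null.
    if hf >= 12 && !matches!(tx.rct_type, RctType::Null) {
        return Err("miner tx RingCT type must be Null");
    }
    Ok(())
}

fn main() {
    let tx = MinerTx {
        version: 2,
        inputs: vec![Input::Gen { height: 3_000_000 }],
        rct_type: RctType::Null,
    };
    assert!(check_miner_tx_prefix(&tx, 3_000_000, 16).is_ok());
}
```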
+ +### Unlock Time + +The unlock time must be the current height + 60[^miner-unlock-time]. + +### Output Amounts + +The output, when summed, must not overflow[^outputs-overflow]. + +For _only_ hard-fork 3 the output amount must be a valid decomposed amount[^decomposed-amount], which means the amount must be +in [this](https://github.com/monero-project/monero/blob/67d190ce7c33602b6a3b804f633ee1ddb7fbb4a1/src/cryptonote_basic/cryptonote_format_utils.cpp#L52) table. + +### Total Outputs + +The [reward from the block](./reward.md#calculating-block-reward) + the total fees must not be more than the summed output amount[^total-output-amount]. + +For hard-fork 1 and from 12 onwards the summed output amount must equal the reward + fees[^exact-output-amount] this means from 2 till 11 miners can collect +less if they want less dust. + +### Output Type + +The output type allowed depends on the hard-fork[^output-types]: + +| hard-fork | output type | +| ---------- | ------------------------------------ | +| 1 to 14 | txout_to_key | +| 15 | txout_to_key and txout_to_tagged_key | +| 16 onwards | txout_to_tagged_key | + +> For hard-fork 15 both are allowed but the transactions outputs must all be the same type. + +### Zero Amount V1 Output + +Monero does not explicitly ban zero amount V1 outputs on miner transactions but the database throws an error if a 0 amount output doesn't have a commitment +[^zero-output] meaning they are banned. + +### V2 Output Pool + +When adding version 2 miner transactions to the blockchain, put the outputs into the 0 amount pool and create dummy commitments of:[^v2-output] + +\\(commitment = G + amount * H \\) + +--- + +[^versions-allowed]: <https://github.com/monero-project/monero/blob/67d190ce7c33602b6a3b804f633ee1ddb7fbb4a1/src/cryptonote_basic/cryptonote_basic.h#L185> + +[^weird-version-rules]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L1371> + +[^input-type]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L1369-L1370> + +[^input-height]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L1379> + +[^null-ringct]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L1374> + +[^miner-unlock-time]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L1385> + +[^outputs-overflow]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L1388> + +[^decomposed-amount]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L1409> + +[^total-output-amount]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L1434> + +[^exact-output-amount]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L1440-L1447> + +[^output-types]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_format_utils.cpp#L960> + +[^zero-output]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/blockchain_db/lmdb/db_lmdb.cpp#L1069> + +[^v2-output]: 
<https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/blockchain_db/blockchain_db.cpp#L234-L241> diff --git a/books/protocol/src/consensus_rules/blocks/reward.md b/books/protocol/src/consensus_rules/blocks/reward.md new file mode 100644 index 00000000..956453af --- /dev/null +++ b/books/protocol/src/consensus_rules/blocks/reward.md @@ -0,0 +1,45 @@ +# Block Reward + +The block reward is the amount paid out to a miner for mining a block. + +## Calculating Base Block Reward + +The base block reward is the reward before factoring in the potential penalty for expanding blocks. + +To calculate the base block reward you first need the total amount of coins already generated, then define: + +[^money-supply] \\(moneySupply = 2^{64} -1 \\) + +[^emission-speed-factor] \\(emissionSpeedFactor = 20 - (targetMinutes - 1) \\) + +where `targetMinutes` is the [target block time](./difficulty.md#target-seconds) in minutes. + +The `baseReward` is then calculated by: + +[^base-reward] \\(baseReward = (moneySupply - alreadyGeneratedCoins) >> emissionSpeedFactor \\) + +If `baseReward` falls below the final subsidy (0.3 XMR / minute) them set the `baseReward` to that instead [^final-base-reward]. + +## Calculating Block Reward + +First calculate the [base block reward](#calculating-base-block-reward). + +Now we need to get the [median weight for block rewards](weights.md#median-weight-for-coinbase-checks) + +If the current block weight is not more than the median weight then the block reward is the base reward. + +Otherwise the block reward is:[^block-reward] + +\\(blockReward = baseReward * (1 - (\frac{blockWeight}{effectiveMedianWeight} -1)^2) \\) + +--- + +[^money-supply]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_config.h#L53> + +[^emission-speed-factor]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_basic_impl.cpp#L87> + +[^base-reward]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_basic_impl.cpp#L89> + +[^final-base-reward]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_basic_impl.cpp#L90-L93> + +[^block-reward]: <https://web.getmonero.org/library/Zero-to-Monero-2-0-0.pdf#subsection.7.3.3> && <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_basic_impl.cpp#L111-L127> diff --git a/books/protocol/src/consensus_rules/blocks/weights.md b/books/protocol/src/consensus_rules/blocks/weights.md new file mode 100644 index 00000000..efa4bf73 --- /dev/null +++ b/books/protocol/src/consensus_rules/blocks/weights.md @@ -0,0 +1,113 @@ +# Block Weights + +Monero's blockchain, unlike other blockchains, has dynamic block sizes which means blocks expand to handle demand. +However Monero does not allow unrestricted block growth, miners will face a penalty for expanding blocks and miners +are restricted by how much they can expand a block. + +## Index + +1. [Penalty Free Zone](weights.md#penalty-free-zone) +2. [Blocks Weight](#blocks-weight) +3. [Long Term Block Weight](#long-term-block-weight) +4. [Effective Median Weight](#effective-median-weight) +5. 
[Median Weight For Coinbase Checks](#median-weight-for-coinbase-checks) + +## Penalty Free Zone + +Monero sets a minimum max block weight so that miners don't get punished for expanding small blocks. + +For hf 1 this is 20000 bytes, for hf 2-4 this is 60000 bytes and from hf 5 onwards this is 300000 bytes[^minimum-max-weight]. + +## Blocks Weight + +A block's weight is the sum of all the transaction weights in a block, including the miner transaction. The block header +and transaction hashes are not included[^calculating-bw]. + +## Long Term Block Weight + +The block's long term weight is the block's weight adjusted with previous blocks' weights. + +### Calculating A Blocks Long Term Weight + +Up until hard-fork 10, the block's long term weight is just the block's weight[^pre-hf-10-long-weight]. + +From hard-fork 10 onwards, we first get the median long term weight over the last 100,000 blocks; if this is less than +the [penalty free zone](#penalty-free-zone) then the median long term weight is set to that instead[^ltw-median]. + +Now we need to set a short term constraint and an adjusted block weight; the way we do this differs depending on the hard-fork. + +From hard-fork 10 to 14[^hf-10-14-stc]: + +\\(adjustedBlockWeight = blockWeight\\) + +\\(shortTermConstraint = medianLongTermWeight * 1.4\\) + +From 15 onwards[^hf-15-adjustments]: + +\\(adjustedBlockWeight = max(blockWeight, \frac{medianLongTermWeight}{1.7})\\) + +\\(shortTermConstraint = medianLongTermWeight * 1.7\\) + +Now the long term weight is defined as `min(adjustedBlockWeight, shortTermConstraint)`[^long-term-weight]. + +## Effective Median Weight + +The effective median weight is used to calculate the block reward and to limit the block size. + +### Calculating Effective Median Weight + +For any hard-fork, the minimum this can be is the [penalty free zone](#penalty-free-zone)[^minimum-effective-median]. + +Up until hard-fork 10, this is done by just getting the median **block weight** over the last 100 blocks[^pre-hf-10-effective-median]; if +there are fewer than 100 blocks, just get the median over all of the blocks. + +From hf 10 onwards, we first get the median **long term weight** over the last 100,000 blocks[^hf-10+-effective-median-step-1]; if this median +is less than the hf 5 [penalty free zone](#penalty-free-zone), set the median to that. This is the long term median. + +Now get the median **block weight** over the last 100 blocks; this is the short term median. + +Now we can calculate the effective median. For hard-forks 10 to 14 this is done by[^effective-median]: + +\\(effectiveMedian = min(max(hf5PenaltyFreeZone, shortTermMedian), 50 * longTermMedian) \\) + +From 15 onwards this is done by: + +\\(effectiveMedian = min(max(longTermMedian, shortTermMedian), 50 * longTermMedian) \\) + +## Median Weight For Coinbase Checks + +When checking coinbase transactions and block weight, Monero uses yet another median weight :). + +### Calculating Median Weight For Coinbase Checks + +Before hf 12 this is the median block weight over the last 100 blocks[^median-weight-coinbase-before-v12].
+ +From hf 12 this is the [effective median weight](#effective-median-weight)[^median-weight-coinbase-from-v12] + +--- + +[^minimum-max-weight]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_basic_impl.cpp#L69> + +[^calculating-bw]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4289> and <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4408> + +[^pre-hf-10-long-weight]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4577> + +[^ltw-median]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4581> + +[^hf-10-14-stc]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4593> + +[^hf-15-adjustments]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4587> + +[^long-term-weight]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4595> + +[^minimum-effective-median]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4676> + +[^pre-hf-10-effective-median]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4611> + +[^hf-10+-effective-median-step-1]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4651> + +[^effective-median]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4659-L4671> + +[^median-weight-coinbase-before-v12]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L1425-L1427> + +[^median-weight-coinbase-from-v12]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L1421> diff --git a/books/protocol/src/consensus_rules/genesis_block.md b/books/protocol/src/consensus_rules/genesis_block.md new file mode 100644 index 00000000..2a377b3c --- /dev/null +++ b/books/protocol/src/consensus_rules/genesis_block.md @@ -0,0 +1,92 @@ +# Genesis + +Monero has a hardcoded genesis block that gets added to the blockchain on the first run of the daemon[^first-run]. The contents of this block +are different depending on the network. + +For all networks the timestamp is set to 0, the major and minor version of the block are set to `CURRENT_BLOCK_MAJOR_VERSION` and +`CURRENT_BLOCK_MINOR_VERSION`[^version-set]. These two constants are set to 1 and 0 respectively[^version-defined]. The transaction +field is empty, and the previous block hash is not set so that field is zeroed. + +## Mainnet + +The nonce is set to 10,000 and the miner transaction is set to: +`013c01ff0001ffffffffffff03029b2e4c0281c0b02e7c53291a94d1d0cbff8883f8024f5142ee494ffbbd08807121017767aafcde9be00dcfd098715ebcf7f410daebc582fda69d24a28e9d0bc890d1` +[^mainnet-params] + +The mainnet genesis block will hash to: `418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3`. 
+ +The final block: + +```json +{ + header: { + major_version: 1, + minor_version: 0, + timestamp: 0, + previous: [0; 32], + nonce: 10000 + }, + miner_tx: "013c01ff0001ffffffffffff03029b2e4c0281c0b02e7c53291a94d1d0cbff8883f8024f5142ee494ffbbd08807121017767aafcde9be00dcfd098715ebcf7f410daebc582fda69d24a28e9d0bc890d1", + txs: [], +} +``` + +## Testnet + +The nonce is set to 10,001 and the miner transaction is set to the same as mainnet[^testnet-params] + +The testnet genesis block will hash to `48ca7cd3c8de5b6a4d53d2861fbdaedca141553559f9be9520068053cda8430b`. + +The final block: + +```json +{ + header: { + major_version: 1, + minor_version: 0, + timestamp: 0, + previous: [0; 32], + nonce: 10001 + }, + miner_tx: "013c01ff0001ffffffffffff03029b2e4c0281c0b02e7c53291a94d1d0cbff8883f8024f5142ee494ffbbd08807121017767aafcde9be00dcfd098715ebcf7f410daebc582fda69d24a28e9d0bc890d1", + txs: [], +} +``` + +## Stagenet + +The nonce is set to 10,002 and the miner transaction is set to: +`013c01ff0001ffffffffffff0302df5d56da0c7d643ddd1ce61901c7bdc5fb1738bfe39fbe69c28a3a7032729c0f2101168d0c4ca86fb55a4cf6a36d31431be1c53a3bd7411bb24e8832410289fa6f3b` +[^stagenet-params]. + +The stagenet genesis block will hash to `76ee3cc98646292206cd3e86f74d88b4dcc1d937088645e9b0cbca84b7ce74eb`. + +The final block: + +```json +{ + header: { + major_version: 1, + minor_version: 0, + timestamp: 0, + previous: [0; 32], + nonce: 10002 + }, + miner_tx: "013c01ff0001ffffffffffff0302df5d56da0c7d643ddd1ce61901c7bdc5fb1738bfe39fbe69c28a3a7032729c0f2101168d0c4ca86fb55a4cf6a36d31431be1c53a3bd7411bb24e8832410289fa6f3b", + txs: [], +} +``` + +--- + +[^first-run]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L340> + +[^version-set]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/cryptonote_tx_utils.cpp#L663-L665> + +[^version-defined]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_config.h#L45-L46> + +[^mainnet-params]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_config.h#L231-L232> + +[^testnet-params]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_config.h#L272-L273> + +[^stagenet-params]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_config.h#L287-L288> diff --git a/books/protocol/src/consensus_rules/hardforks.md b/books/protocol/src/consensus_rules/hardforks.md new file mode 100644 index 00000000..f935ab84 --- /dev/null +++ b/books/protocol/src/consensus_rules/hardforks.md @@ -0,0 +1,114 @@ +# Hard Forks + +Monero makes use of hard-forks to update its protocol. Although it has never been used, Monero has a system in it's codebase to +allow voting for activation of a hard-fork[^hardfork-class]. It works by using the blocks `minor version` field as a voting field, +when enough blocks vote for a hard fork the fork is activated. + +Because Monero has never used hard fork voting, you don't _need_ to implement it but as it's included in the codebase, an explanation +is included here. + +## Blocks version and vote + +Monero uses the block's `major version` field as an indicator of hard-fork and the `minor version` field as an indicator of the blocks +vote. A minor version of 0 is treated as a vote for 1 as legacy blocks use to just set this field to 0[^minor-v-0]. 
+ +The block's vote must be greater than or equal to the version, a vote higher than the maximum known hard-fork is interpreted +as a vote for the latest hard-fork[^minor-v-too-large]. So if a block is at V2 then the vote must be V2 or higher. + +## Accepting a fork + +When a hard-fork is added to Monero's protocol it must specify a `threshold`, a number between 0 and 100, this is the proportion of +blocks in the window that must vote for this fork (or a later one) for it to activate. For all current forks the threshold is 0 meaning +that no votes are needed for the fork to activate. + +Monero keeps track of a week (10,080 blocks) worth of votes[^window-size], when a new block is added Monero works backwards through the +list of hard-forks (latest to oldest) tallying the votes and checking if the number of votes is bigger than the amount needed[^accepting-hfs], +votes for later hardforks are also votes for previous hard-forks. The amount needed is calculated: + +\\( amountNeeded = \frac{windowSize * threshold + 99}{100} \\) + +If the amount of votes is greater than or equal to the amount needed and the current blockchain height is greater than or equal to the hard-fork +height the HF is activated[^accepting-hfs]. + +## Mainnet Hard-Forks [^mainnet-hfs] {#Mainnet-Hard-Forks} + +| Version | Height | Threshold | Finalized (timestamp) | +| ------- | ----------- | --------- | ------------------------ | +| 1 | 0[^v1-at-0] | 0 | Jul 04 2012 (1341378000) | +| 2 | 1009827 | 0 | Sep 20 2015 (1442763710) | +| 3 | 1141317 | 0 | Mar 21 2016 (1458558528) | +| 4 | 1220516 | 0 | Jan 05 2017 (1483574400) | +| 5 | 1288616 | 0 | Mar 14 2017 (1489520158) | +| 6 | 1400000 | 0 | Aug 18 2017 (1503046577) | +| 7 | 1546000 | 0 | Mar 17 2018 (1521303150) | +| 8 | 1685555 | 0 | Sep 02 2018 (1535889547) | +| 9 | 1686275 | 0 | Sep 02 2018 (1535889548) | +| 10 | 1788000 | 0 | Feb 10 2019 (1549792439) | +| 11 | 1788720 | 0 | Feb 15 2019 (1550225678) | +| 12 | 1978433 | 0 | Oct 18 2019 (1571419280) | +| 13 | 2210000 | 0 | Aug 23 2020 (1598180817) | +| 14 | 2210720 | 0 | Aug 24 2020 (1598180818) | +| 15 | 2688888 | 0 | Jun 30 2022 (1656629117) | +| 16 | 2689608 | 0 | Jun 30 2022 (1656629118) | + +## Testnet Hard-Forks [^testnet-hfs] {#Testnet-Hard-Forks} + +| Version | Height | Threshold | Finalized (timestamp) | +| ------- | ----------- | --------- | ------------------------------------ | +| 1 | 0[^v1-at-0] | 0 | Jul 04 2012 (1341378000) | +| 2 | 624634 | 0 | Oct 20 2015 (1445355000) | +| 3 | 800500 | 0 | Aug 28 2016 (1472415034) | +| 4 | 801219 | 0 | Aug 28 2016 (1472415035) | +| 5 | 802660 | 0 | Aug 28 2016 (1472415036 + 86400*180) | +| 6 | 971400 | 0 | Aug 02 2017 (1501709789) | +| 7 | 1057027 | 0 | Dec 02 2017 (1512211236) | +| 8 | 1057058 | 0 | Aug 02 2018 (1533211200) | +| 9 | 1057778 | 0 | Aug 03 2018 (1533297600) | +| 10 | 1154318 | 0 | Feb 14 2019 (1550153694) | +| 11 | 1155038 | 0 | Feb 15 2019 (1550225678) | +| 12 | 1308737 | 0 | Sep 27 2019 (1569582000) | +| 13 | 1543939 | 0 | Sep 02 2020 (1599069376) | +| 14 | 1544659 | 0 | Sep 02 2020 (1599069377) | +| 15 | 1982800 | 0 | May 16 2022 (1652727000) | +| 16 | 1983520 | 0 | May 17 2022 (1652813400) | + +## Stagenet Hard-Forks [^stagenet-hfs] {#Stagenet-Hard-Forks} + +| Version | Height | Threshold | Finalized (timestamp) | +| ------- | ----------- | --------- | ------------------------ | +| 1 | 0[^v1-at-0] | 0 | Jul 04 2012 (1341378000) | +| 2 | 32000 | 0 | Mar 14 2018 (1521000000) | +| 3 | 33000 | 0 | Mar 15 2018 (1521120000) | +| 4 | 34000 | 0 | Mar 16 2018 
(1521240000) | +| 5 | 35000 | 0 | Mar 18 2018 (1521360000) | +| 6 | 36000 | 0 | Mar 19 2018 (1521480000) | +| 7 | 37000 | 0 | Mar 21 2018 (1521600000) | +| 8 | 176456 | 0 | Sep 24 2018 (1537821770) | +| 9 | 177176 | 0 | Sep 24 2018 (1537821771) | +| 10 | 269000 | 0 | Feb 14 2019 (1550153694) | +| 11 | 269720 | 0 | Feb 15 2019 (1550225678) | +| 12 | 454721 | 0 | Oct 18 2019 (1571419280) | +| 13 | 675405 | 0 | Aug 23 2020 (1598180817) | +| 14 | 676125 | 0 | Aug 23 2020 (1598180818) | +| 15 | 1151000 | 0 | Jun 30 2022 (1656629117) | +| 16 | 1151720 | 0 | Jun 30 2022 (1656629118) | + +--- + +[^hardfork-class]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/hardfork.h> + +[^minor-v-0]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/hardfork.cpp#L47> + +[^minor-v-too-large]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/hardfork.cpp#L99> + +[^window-size]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/hardfork.h#L51> + +[^accepting-hfs]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/hardfork.cpp#L311> + +[^mainnet-hfs]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/hardforks/hardforks.cpp#L34> + +[^v1-at-0]: Monero C++ sets this to 1 even though the [genesis block](genesis_block.md) has a major version of 1. + +[^testnet-hfs]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/hardforks/hardforks.cpp#L80> + +[^stagenet-hfs]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/hardforks/hardforks.cpp#L107> diff --git a/books/protocol/src/consensus_rules/transactions.md b/books/protocol/src/consensus_rules/transactions.md new file mode 100644 index 00000000..944b53fc --- /dev/null +++ b/books/protocol/src/consensus_rules/transactions.md @@ -0,0 +1,89 @@ +# Transaction Rules + +## Introduction + +This chapter does not include miner, coinbase, transactions as they are handled elsewhere, the rules for them are under [blocks](blocks.md) + +## Index + +1. [Miscellaneous Rules](#miscellaneous-rules) +2. [Input Rules](./transactions/inputs.md) +3. [Output Rules](./transactions/outputs.md) +4. [Unlock Time](./transactions/unlock_time.md) +5. [Ring Signatures](./transactions/ring_signatures.md) +6. [RingCT](./transactions/ring_ct.md) + +## Miscellaneous Rules + +### Version + +Version 0 is never allowed[^tx-v0]. + +The max transaction version is 1 up to hard fork 4 then the max is 2[^max-tx-version]. + +The minimum tx version is 1 up until version 6 then if the [number of un-mixable inputs](#minimum-decoys) +is 0 the minimum is 2 otherwise 1[^min-tx-version] so a version 1 transaction is allowed if the amount +it's spending does not have enough outputs with the same amount to mix with. + +### Transaction Size + +The size of the `transaction blob` must not be bigger than 1 million bytes[^tx-size-limit]. + +From v8, the transaction's _weight_ must not be bigger than half of the [block penalty free zone](./blocks/weights.md#penalty-free-zone) minus 600[^tx-weight_limit]. 
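As an illustration of the two size limits above, here is a small Rust sketch. The function names are illustrative, the penalty free zone values are taken from the [weights chapter](./blocks/weights.md#penalty-free-zone), and "v8" is assumed here to mean hard-fork 8.

```rust
/// The 1 million byte transaction blob limit described above.
const MAX_TX_BLOB_SIZE: usize = 1_000_000;

/// Penalty free zone (minimum max block weight) per hard-fork.
fn penalty_free_zone(hf: u8) -> u64 {
    match hf {
        1 => 20_000,
        2..=4 => 60_000,
        _ => 300_000,
    }
}

fn check_tx_size(blob_len: usize, tx_weight: u64, hf: u8) -> Result<(), &'static str> {
    if blob_len > MAX_TX_BLOB_SIZE {
        return Err("transaction blob is too large");
    }
    // From hard-fork 8, the weight is also capped at half the penalty
    // free zone minus 600 (149400 bytes with the hard-fork 5+ value).
    if hf >= 8 && tx_weight > penalty_free_zone(hf) / 2 - 600 {
        return Err("transaction weight is too large");
    }
    Ok(())
}

fn main() {
    assert!(check_tx_size(10_000, 10_000, 16).is_ok());
    assert!(check_tx_size(10_000, 150_000, 16).is_err());
}
```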
+ +#### Calculating Transaction Weight + +For all transactions that don't use bulletproofs or bulletproofs+ the weight is just the length of the transaction blob.[^weight-pre-bp] + +For bulletproofs(+) transactions we add a "clawback" onto the transaction. + +To calculate the "clawback" we fist define a `bpBase` which is the size of a 2 output proof, normalized to 1 proof by dividing by 2[^bp-base]: + +for bulletproofs: \\(fields = 9\\) + +for bulletproofs+: \\(fields = 6\\) + +\\(bpBase = \frac{(32 * (fields + 7 * 2))}{2}\\) + +Next we calculate the size of the bulletproofs(+) field by first getting the first power of 2 above or equal to the number of outputs: `firstPower2AboveNumbOuts`. + +If `firstPower2AboveNumbOuts` is <= 2 then the \\(clawback = 0\\)[^fp2-less-than-2]. + +Next define the number of L and R elements[^lr-elements]: \\(nlr = firstPower2AboveNumbOuts + 6\\) + +now the size of the bulletproofs(+) field is[^bp+-size]: + +\\(bpSize = 32 * (fields + 2 * nlr)\\) + +now the `clawback` is[^clawback]: + +\\( clawback = \frac{(bpBase * firstPower2AboveNumbOuts - bpSize) * 4}{ 5} \\) + +To get the transaction weight now you just get the length of the transaction blob and add this `clawback`[^bp-tx-weight]. + +--- + +[^tx-v0]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/tx_pool.cpp#L152> + +[^max-tx-version]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3418> + +[^min-tx-version]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3425> + +[^tx-size-limit]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/cryptonote_core.cpp#L791> +and <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_basic_impl.cpp#L78> + +[^tx-weight_limit]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/tx_pool.cpp#L117> && <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/tx_pool.cpp#L221> + +[^weight-pre-bp]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/cryptonote_basic/cryptonote_format_utils.cpp#L447-L453> + +[^bp-base]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/cryptonote_basic/cryptonote_format_utils.cpp#L110C40-L110C40> + +[^fp2-less-than-2]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/cryptonote_basic/cryptonote_format_utils.cpp#L112> + +[^lr-elements]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/cryptonote_basic/cryptonote_format_utils.cpp#L117> + +[^bp+-size]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/cryptonote_basic/cryptonote_format_utils.cpp#L118> + +[^clawback]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/cryptonote_basic/cryptonote_format_utils.cpp#L122> + +[^bp-tx-weight]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/cryptonote_basic/cryptonote_format_utils.cpp#L457> diff --git a/books/protocol/src/consensus_rules/transactions/inputs.md b/books/protocol/src/consensus_rules/transactions/inputs.md new file mode 
100644 index 00000000..d8cb79f4 --- /dev/null +++ b/books/protocol/src/consensus_rules/transactions/inputs.md @@ -0,0 +1,160 @@ +# Transaction Inputs + +## Introduction + +These rules apply to transaction inputs, excluding miner transactions. + +## Index + +1. [Necessary Functions/Definitions](#functionsdefinitions) +2. [Rules](#rules) + +## Necessary Functions/Definitions + +### Default Minimum Decoys + +This is the default number of decoys an input must at least have. + +> There are exceptions to this being the minimum decoy size for all transactions. See further down in [Rules](#rules). + +| Hard-Fork | Minimum Decoys[^min-decoys] | +| --------- | --------------------------- | +| 1 | N/A | +| 2 to 5 | 2 | +| 6 | 4 | +| 7 | 6 | +| 8 to 14 | 10 | +| 15+ | 15 | + +### Minimum And Maximum Decoys Used + +To check a transaction input's `ring` size we must first get the minimum and maximum number of `decoys` +used in the transactions inputs[^min-max-decoys]. + +So if this was our transactions: + +| Input | 1 | 2 | 3 | +| --------- | -- | - | -- | +| Ring size | 12 | 8 | 16 | + +The minimum and maximum amount of decoys would be 7 and 15 respectively. + +### Mixable And Un-Mixable Inputs + +A mixable input is one that has enough outputs on the chain with the same amount to be able to build a ring with the +minimum amount of decoys needed. + +A ringCT input, aka an output with 0 amount, is always considered mixable[^0-amt-mixable]. + +For other inputs you first get the amount of outputs on chain with that amount and check if that's less than or equal +to the [default minimum amount of decoys](#default-minimum-decoys) if it is then the input is un-mixable otherwise it is +mixable[^check-mixability]. + +## Rules + +### No Empty Inputs + +The transaction must have at least 1 input[^no-empty-ins]. + +### No Empty decoys + +All inputs must have decoys[^empty-decoys]. + +### Input Type + +All inputs must be of type `txin_to_key`[^input-types]. + +### Inputs Must Not Overflow + +The inputs when summed must not overflow a `u64` and the outputs when summed must not either[^amount-overflow]. + +### Unique Ring Members + +From hard-fork 6, all ring members in an input must be unique, this is done by checking that +no `key_offset` after the first is 0[^unique-ring]. + +### Unique Key Image + +The key image must be unique in a transaction[^key-images-in-tx] and the whole chain [^key-images-in-chain]. + +### Torsion Free Key Image + +The key image must be a canonical prime order point[^torsion-free-keyimage]. + +### Minimum Decoys + +These rules are in effect from hard fork 2. + +First you get the [minimum number of decoys used in the transaction](#minimum-and-maximum-decoys-used). + +Then you get the [amount of mixable and un-mixable inputs](#mixable-and-unmixable-inputs). + +Now get the [default minimum decoys allowed for the current hard-fork](#default-minimum-decoys). + +If the minimum amount of decoys used in the transaction is less than the default minimum decoys allowed then the transaction is only +allowed if there is at least one input which is un-mixable[^tx-without-minimum-decoys]. + +If there is an un-mixable then the transaction is not allowed to have more than 1 mixable input as well. + +Special rules[^min-decoys-special-rules]: + +- For hard-fork 15, both 10 and 15 decoys are allowed. +- From hard-fork 8 upwards, the minimum amount of decoys used in a transaction must be equal to the minimum allowed. 
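As a rough sketch of the decoy counting used above, here is a minimal Rust example; the function names are illustrative only.

```rust
/// The default minimum number of decoys per hard-fork (not applicable for hf 1),
/// mirroring the table above.
fn default_minimum_decoys(hf: u8) -> Option<usize> {
    match hf {
        1 => None,
        2..=5 => Some(2),
        6 => Some(4),
        7 => Some(6),
        8..=14 => Some(10),
        _ => Some(15),
    }
}

/// Returns the minimum and maximum number of decoys used over all inputs,
/// where each input's decoy count is its ring size minus one.
fn min_max_decoys(ring_sizes: &[usize]) -> Option<(usize, usize)> {
    let min = *ring_sizes.iter().min()?;
    let max = *ring_sizes.iter().max()?;
    Some((min.saturating_sub(1), max.saturating_sub(1)))
}

fn main() {
    // The example from the table above: ring sizes of 12, 8 and 16.
    assert_eq!(min_max_decoys(&[12, 8, 16]), Some((7, 15)));
    assert_eq!(default_minimum_decoys(15), Some(15));
}
```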
+ +### Equal Number Of Decoys + +From hard-fork 12, all inputs must have the same number of decoys[^equal-decoys]. + +### Sorted Inputs + +From hard-fork 7, the inputs must be sorted by key image, in descending lexicographic order[^sorted-kis]. + +### 10 Block Lock + +From hard-fork 12, all ring members must be at least 10 blocks old[^minimum-out-age]. + +### The Output Must Exist + +The output a transaction references must exist in the chain[^output-must-exist]. + +### The Output Must Not Be Locked + +The outputs, which are referenced in the inputs, unlock time must have passed, see the [chapter on unlock time](./unlock_time.md). + +--- + +[^min-decoys]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3345> + +[^min-max-decoys]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3369-L3373> + +[^0-amt-mixable]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3357> + +[^check-mixability]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3361-L3367> + +[^no-empty-ins]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/cryptonote_core.cpp#L1125> + +[^empty-decoys]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3473> + +[^input-types]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_format_utils.cpp#L844> + +[^amount-overflow]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_format_utils.cpp#L871> + +[^unique-ring]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/cryptonote_core.cpp#L1309> + +[^key-images-in-tx]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/cryptonote_core.cpp#L1297> + +[^key-images-in-chain]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3475> + +[^torsion-free-keyimage]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/cryptonote_core.cpp#L1324> + +[^tx-without-minimum-decoys]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3392> + +[^min-decoys-special-rules]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3406-L3410> + +[^equal-decoys]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3378> + +[^sorted-kis]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3435> + +[^minimum-out-age]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3533> + +[^output-must-exist]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3995> diff --git a/books/protocol/src/consensus_rules/transactions/outputs.md 
b/books/protocol/src/consensus_rules/transactions/outputs.md new file mode 100644 index 00000000..da67bf19 --- /dev/null +++ b/books/protocol/src/consensus_rules/transactions/outputs.md @@ -0,0 +1,64 @@ +# Transaction Outputs + +## Introduction + +These rules apply to transaction outputs, excluding miner transaction outputs. + +## Rules + +### Outputs Must Not Overflow + +The outputs, when summed, must not overflow a u64[^amount-overflow]. + +### Output Amount + +For version 1 txs, the sum of the outputs must be less than the sum of the inputs; the difference between the +inputs and the outputs is then the fee.[^more-in-than-out] The amount of each output must also not be zero.[^zero-output] + +From hard-fork 2, version 1 transaction output amounts also must be validly decomposed[^decomposed-amounts]. +A valid decomposed amount is an amount contained in [this table](https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_format_utils.cpp#L52). + +For version 2 txs, all outputs must have a zero amount.[^v2-output-amount] + +### Output Keys Canonical + +All output public keys must be `canonical points`[^output-key-canonical]. + +> This was added as a rule in hard-fork 4 but that check is redundant as it was already done before that. +> So how did invalid keys get on the chain? [miner txs](./blocks/miner_tx.md). + +### Output Type + +The output type allowed depends on the hard-fork[^output-types]: + +| hard-fork | output type | +| ---------- | ------------------------------------ | +| 1 to 14 | txout_to_key | +| 15 | txout_to_key and txout_to_tagged_key | +| 16 onwards | txout_to_tagged_key | + +> For hard-fork 15, both are allowed but the transaction's outputs must all be the same type[^same-output-type]. + +### 2 Outputs + +From hard-fork 12, version 2 transactions (RCT) must have 2 outputs[^minimum-2-outs].
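To tie the amount rules above together, here is a minimal Rust sketch covering the overflow, version 1 and version 2 checks; the decomposed-amount and output-key checks are omitted and the names are illustrative.

```rust
/// Checks the output amount rules for a simplified transaction, given its
/// version and the (clear-text) input and output amounts.
fn check_output_amounts(version: u64, inputs: &[u64], outputs: &[u64]) -> Result<(), &'static str> {
    // The summed outputs must not overflow a u64.
    let out_sum = outputs
        .iter()
        .try_fold(0u64, |acc, &o| acc.checked_add(o))
        .ok_or("output amounts overflow")?;

    match version {
        1 => {
            if outputs.iter().any(|&o| o == 0) {
                return Err("v1 output amounts must not be zero");
            }
            let in_sum = inputs
                .iter()
                .try_fold(0u64, |acc, &i| acc.checked_add(i))
                .ok_or("input amounts overflow")?;
            // The difference between the inputs and the outputs is the fee.
            if out_sum >= in_sum {
                return Err("v1 outputs must sum to less than the inputs");
            }
        }
        2 => {
            if outputs.iter().any(|&o| o != 0) {
                return Err("v2 output amounts must all be zero");
            }
        }
        _ => return Err("unknown transaction version"),
    }
    Ok(())
}

fn main() {
    assert!(check_output_amounts(1, &[100], &[70, 20]).is_ok()); // fee of 10
    assert!(check_output_amounts(2, &[0], &[0, 0]).is_ok());
}
```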
+ +--- + +[^amount-overflow]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_format_utils.cpp#L871> + +[^more-in-than-out]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/cryptonote_core.cpp#L1163> and <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/tx_pool.cpp#L190-L204> + +[^zero-output]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_format_utils.cpp#L862> + +[^decomposed-amounts]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3048> + +[^v2-output-amount]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/cryptonote_core/blockchain.cpp#L3059> + +[^output-key-canonical]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_format_utils.cpp#L865> + +[^output-types]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_format_utils.cpp#L960> + +[^same-output-type]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_format_utils.cpp#L984> + +[^minimum-2-outs]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/cryptonote_core/blockchain.cpp#L3324> diff --git a/books/protocol/src/consensus_rules/transactions/ring_ct.md b/books/protocol/src/consensus_rules/transactions/ring_ct.md new file mode 100644 index 00000000..b0471692 --- /dev/null +++ b/books/protocol/src/consensus_rules/transactions/ring_ct.md @@ -0,0 +1,89 @@ +# Ring Confidential Transactions + +## Introduction + +Ring confidential transactions are version 2 Monero transactions which keep amounts hidden. They were activated at hard-fork 4. There are multiple +types of RingCT transactions that were activated and deprecated at different hard-forks. + +## Definitions + +OutPK: +A pedersen commitment to the output amount. + +Pseudo-outs: +A pedersen commitment to the true spends amount with a different mask, such that the sum of the pseudo-outs is the same as the sum of the outPKs + fee * H. + +## Index + +1. [Rules That Apply To All Types](#rules-that-apply-to-all-types) +2. [Simple Types Rules](#simple-types-rules) +3. [Borromean Rules](./ring_ct/borromean.md) +4. [MLSAG Rules](./ring_ct/mlsag.md) +5. [Bulletproofs Rules](./ring_ct/bulletproofs.md) +6. [CLSAG Rules](./ring_ct/clsag.md) +7. 
[Bulletproofs+ Rules](./ring_ct/bulletproofs+.md) + +## Rules That Apply To All Types + +### Type + +RingCT type define the proofs used in the transaction, the ringCT types allowed depend on the hard-fork: + +| Type (Name) | Short description | Hard Fork allowed | Hard Fork disallowed | +| ---------------- | --------------------------------------------------------------------- | ---------------------------------------------------------- | --------------------------------------------------------------------- | +| 0 (NULL) | No ringCT signatures, used for coinbase transactions | 4 (only miner transactions) [^first-three-type-activation] | Still allowed | +| 1 (Full) | A single aggregate MLSAG signature with borromean range proofs | 4 [^first-three-type-activation] | 9 [^bulletproof-activated-borromean-disallowed] | +| 2 (Simple) | MLSAG signatures per input with borromean range proofs | 4 [^first-three-type-activation] | 9 [^bulletproof-activated-borromean-disallowed] | +| 3 (Bulletproof) | MLSAG signatures per input with a single bulletproof for all outputs | 8 [^bulletproof-activated-borromean-disallowed] | 11 [^bulletproof2-activated-bulletproof-disallowed] | +| 4 (Bulletproof2) | Uses the same signatures as type 3 | 10 [^bulletproof2-activated-bulletproof-disallowed] | 14 (except 2 transactions) [^clsag-activated-bulletproof2-disallowed] | +| 5 (CLSAG) | CLSAG signatures per input with a single bulletproof for all outputs | 13 [^clsag-activated-bulletproof2-disallowed] | 16 [^bulletproof+-activated-clsag-disallowed] | +| 6 (Bulletproof+) | CLSAG signatures per input with a single bulletproof+ for all outputs | 15 [^bulletproof+-activated-clsag-disallowed] | Still allowed | +| 6+ | Future type not currently allowed | Not allowed [^future-rct-types] | Not allowed | + +There are 2 type 4 RCT transactions that are allowed after hard-fork 13, this was due to a bug in which transactions added to the txpool before a fork +were not being checked for new fork rules they are: +`c5151944f0583097ba0c88cd0f43e7fabb3881278aa2f73b3b0a007c5d34e910` and `6f2f117cde6fbcf8d4a6ef8974fcac744726574ac38cf25d3322c996b21edd4c`[^grandfathered-txs]. + +### OutPKs Valid Points + +All outPKs must be canonically encoded points[^outPKs-valid-points]. + +## Simple Types Rules + +These rules apply to all RCT "simple" types, which are all except type "FULL". + +### Pseudo-outs Valid Points + +This rule applies to the pseudo-outs, from type 3 (Bulletproof) the pseudo-outs field moved to the prunable RCT section from the non-prunable section. + +The pseudo-outs must all be canonically encoded points[^pseudo-outs-valid-points]. 
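As a rough sketch of the point-validity checks above (for both the outPKs and the pseudo-outs), the snippet below uses the `curve25519-dalek` crate to test that an encoding round-trips through decompression. This is an assumption about how one could implement the check, not how Monero itself does it, and it does not cover the "not the negative identity" part of the definition.

```rust
use curve25519_dalek::edwards::CompressedEdwardsY;

/// Returns true if `bytes` is a canonical encoding of an Ed25519 point,
/// i.e. it decompresses and re-compresses to exactly the same bytes.
fn is_canonical_point(bytes: [u8; 32]) -> bool {
    match CompressedEdwardsY(bytes).decompress() {
        Some(point) => point.compress().0 == bytes,
        None => false,
    }
}

fn main() {
    // The Ed25519 base point is trivially canonical.
    let g = curve25519_dalek::constants::ED25519_BASEPOINT_POINT;
    assert!(is_canonical_point(g.compress().0));
}
```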
+ +### Pseudo-outs OutPKs Balance + +The sum of the pseudo-outs must equal the sum of the OutPKs + fee * H:[^simple-amounts-balance] + +\\(\sum PseudoOuts == \sum outPK + fee * H \\) + +--- + +[^first-three-type-activation]: There is no direct code allowing these types of RingCT, these are the original types that got activated when version 2 transactions +got activated + +[^bulletproof-activated-borromean-disallowed]: <https://github.com/monero-project/monero/blob/master/src/cryptonote_core/blockchain.cpp#L3083-L3107> + +[^bulletproof2-activated-bulletproof-disallowed]: <https://github.com/monero-project/monero/blob/master/src/cryptonote_core/blockchain.cpp#L3108-L3130> + +[^clsag-activated-bulletproof2-disallowed]: <https://github.com/monero-project/monero/blob/master/src/cryptonote_core/blockchain.cpp#L3132-L3166> + +[^bulletproof+-activated-clsag-disallowed]: <https://github.com/monero-project/monero/blob/master/src/cryptonote_core/blockchain.cpp#L3168-L3193> + +[^future-rct-types]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctTypes.h#L335> + +[^grandfathered-txs]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/cryptonote_core/blockchain.cpp#L3150> + +[^outPKs-valid-points]: For simple types: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L1444>, +For type FULL: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L829-L829> + +[^pseudo-outs-valid-points]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L1449> + +[^simple-amounts-balance]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L1453> diff --git a/books/protocol/src/consensus_rules/transactions/ring_ct/borromean.md b/books/protocol/src/consensus_rules/transactions/ring_ct/borromean.md new file mode 100644 index 00000000..a0c61c46 --- /dev/null +++ b/books/protocol/src/consensus_rules/transactions/ring_ct/borromean.md @@ -0,0 +1,51 @@ +# Borromean Rules + +## Introduction + +These rules apply to all ringCT types that use Borromean ring signatures to prove an output amount is in the correct range. + +## Rules + +### Number Of Borromean Range Proofs + +The amount of Borromean range proofs must be the same as the number of outputs.[^numb-borro] + +### Ci Valid Points + +Each Ci (bit commitment) must be canonically encoded points.[^ci-valid-points] + +### Sum Ci + +For a range proof at a certain index the sum of each Ci must equal the outPK at that index.[^sum-ci] + +### Borromean Scalar Encoding + +Monero does not check that the scalars `s0` and `s1` are reduced this leads to them, if not reduced, being interpreted as a different scalar by the `slide` function +which calculates the 5-NAF of the number. The `slide` function restricts its output to 256 bytes however if the last bit is set on the input this could lead to the +5-NAF of the scalar being 257 bytes long. 
There are scalars on the chain which have this behavior.[^scalar-report] + +The scalar `ee` must be a fully reduced scalar as it is compared against the raw bytes of an output from the `hash_to_scalar` function.[^s0-s1-ee-encoding] + +### The Borromean Ring Must Be Valid + +To verify a Borromean ring signature is valid you must first set up the public keys that the ring will be verified with, one member of the ring will be a Ci the +other will be (\\(Ci - H * 2^X \\)), where X is the index of the Ci. By setting up the ring like this the prover will only know the discreet log of a +ring member if either the Ci is a commitment to 0 or \\(2^X\\)[^public-key-setup]. + +After setting up the public keys the actual borromean rings must be valid.[^ring-valid] + +--- + +[^numb-borro]: <https://github.com/monero-project/monero/blame/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctTypes.h#L480> + +[^ci-valid-points]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L581> + +[^sum-ci]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L590> + +[^scalar-report]: <https://www.moneroinflation.com/static/data_py/report_scalars_df.pdf> + +[^s0-s1-ee-encoding]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L213-L222> + +[^public-key-setup]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L574-L577> + +[^ring-valid]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L208> diff --git a/books/protocol/src/consensus_rules/transactions/ring_ct/bulletproofs+.md b/books/protocol/src/consensus_rules/transactions/ring_ct/bulletproofs+.md new file mode 100644 index 00000000..57d037b4 --- /dev/null +++ b/books/protocol/src/consensus_rules/transactions/ring_ct/bulletproofs+.md @@ -0,0 +1,52 @@ +# Bulletproofs+ Rules + +## Introduction + +These rules apply to all ringCT types that use bulletproofs+. + +## Rules + +### L & R Length + +The Length of the L & R fields must be the same, they must both be equal to \\( 6 + log_2(firstPower2AboveNumbOuts) \\).[^L-R-Size] + +Where `firstPower2AboveNumbOuts` is the first power of 2 above or equal to the amount of outputs in the transaction, so: + +If outputs = 3, firstPower2AboveNumbOuts = 4. + +If outputs = 8, firstPower2AboveNumbOuts = 8. + +### Number Of Bulletproofs + +There must only be one bulletproof in a transaction.[^one-bulletproof+] + +### Max Outputs + +The amount of outputs in the transaction must not be more than 16 [^max-outputs] + +### Canonical Encoding + +`r1`, `s2`, `d1` must all be canonically encoded, reduced, scalars.[^scalars-reduced] All the points of `V`, `L` and `R` must be canonically encoded and `A1`, `B` and +`A` must canonically encoded points.[^canonical-points] + +### At Least One Output + +There must be at least one element of V, which is constructed from the outPKs which must have the same number of elements as the outputs.[^one-out] + +### The Bulletproof Must Be Valid + +The bulletproof must pass verification. 
[^bulletproof+-valid]
+
+[^L-R-Size]: <https://github.com/monero-project/monero/blob/master/src/ringct/rctTypes.cpp#L300-L304> && <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/bulletproofs_plus.cc#L850>
+
+[^one-bulletproof+]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/cryptonote_basic/cryptonote_format_utils.cpp#L173>
+
+[^max-outputs]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/cryptonote_core/cryptonote_core.cpp#L887>
+
+[^scalars-reduced]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/bulletproofs_plus.cc#L825-L827>
+
+[^canonical-points]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/bulletproofs_plus.cc#L931-L939> && <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctOps.cpp#L415>
+
+[^one-out]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/bulletproofs_plus.cc#L829>
+
+[^bulletproof+-valid]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/bulletproofs_plus.cc#L799>
diff --git a/books/protocol/src/consensus_rules/transactions/ring_ct/bulletproofs.md b/books/protocol/src/consensus_rules/transactions/ring_ct/bulletproofs.md
new file mode 100644
index 00000000..83faf105
--- /dev/null
+++ b/books/protocol/src/consensus_rules/transactions/ring_ct/bulletproofs.md
@@ -0,0 +1,55 @@
+# Bulletproofs Rules
+
+## Introduction
+
+These rules apply to all ringCT types that use bulletproofs.
+
+## Rules
+
+### L & R Length
+
+The length of the L & R fields must be the same, and they must both be equal to \\( 6 + log_2(firstPower2AboveNumbOuts) \\).[^L-R-Size]
+
+Where `firstPower2AboveNumbOuts` is the first power of 2 above or equal to the amount of outputs in the transaction, so:
+
+If outputs = 3, firstPower2AboveNumbOuts = 4.
+
+If outputs = 8, firstPower2AboveNumbOuts = 8.
+
+### Number Of Bulletproofs
+
+There must only be one bulletproof in a transaction.[^one-bulletproof]
+
+### Max Outputs
+
+The amount of outputs in the transaction must not be more than 16.[^max-outputs]
+
+### At Least One Output
+
+There must be at least one element of V, which is constructed from the outPKs and must have the same number of elements as the outputs.[^one-out]
+
+### Canonical Encoding
+
+`taux`, `mu`, `a`, `b`, `t` must all be fully reduced scalars.[^canonical-scalars]
+
+All the elements of `V`, `L`, `R` and `A`, `T1`, `T2` and `S` must all be valid, canonically encoded points.[^canonical-points]
+
+### The Bulletproof Must Be Valid
+
+The bulletproof must pass verification. 
[^bulletproof-valid]
+
+---
+
+[^L-R-Size]: <https://github.com/monero-project/monero/blob/master/src/ringct/rctTypes.cpp#L300-L304> && <https://github.com/monero-project/monero/blob/master/src/ringct/bulletproofs.cc#L862-L863>
+
+[^one-bulletproof]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/cryptonote_basic/cryptonote_format_utils.cpp#L197>
+
+[^max-outputs]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/cryptonote_core/cryptonote_core.cpp#L877>
+
+[^one-out]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/bulletproofs.cc#L839>
+
+[^canonical-scalars]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/bulletproofs.cc#L833-L837>
+
+[^canonical-points]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/bulletproofs.cc#L919-L930>
+
+[^bulletproof-valid]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/bulletproofs.cc#L810>
diff --git a/books/protocol/src/consensus_rules/transactions/ring_ct/clsag.md b/books/protocol/src/consensus_rules/transactions/ring_ct/clsag.md
new file mode 100644
index 00000000..df7ef40d
--- /dev/null
+++ b/books/protocol/src/consensus_rules/transactions/ring_ct/clsag.md
@@ -0,0 +1,41 @@
+# CLSAG Rules
+
+## Introduction
+
+These rules apply to all ringCT types that use CLSAG signatures.
+
+## Rules
+
+### Number Of CLSAGs
+
+There must be the same number of CLSAG signatures as there are inputs.[^numb-clsags]
+
+### `s` Size
+
+The `s` field must have as many elements as the amount of ring members.[^s-size]
+
+### Canonical Encoding
+
+All `s` scalars and the `c1` scalar must be fully reduced[^scalars-reduced], and the `D` point must be canonically encoded.[^D-canonical]
+
+### Key Images Not Identity
+
+The key image and 8 * `D`, the commitment key image, must both not be the identity point.[^kis-not-identity]
+
+### The CLSAG Signature Must Be Correctly Formed
+
+The signature must be valid.[^clsag-valid]
+
+---
+
+[^numb-clsags]: <https://github.com/monero-project/monero/blame/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctTypes.h#L496>
+
+[^s-size]: <https://github.com/monero-project/monero/blame/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L880>
+
+[^scalars-reduced]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L881-L883>
+
+[^D-canonical]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L894>
+
+[^kis-not-identity]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L895> and <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L884>
+
+[^clsag-valid]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L872>
diff --git a/books/protocol/src/consensus_rules/transactions/ring_ct/mlsag.md b/books/protocol/src/consensus_rules/transactions/ring_ct/mlsag.md
new file mode 100644
index 00000000..6ca97b65
--- /dev/null
+++ b/books/protocol/src/consensus_rules/transactions/ring_ct/mlsag.md
@@ -0,0 +1,123 @@
+# MLSAG Rules
+
+## Introduction
+
+These rules are split into 3 sections: Full, Simple and Both. 
Full is for RCT type Full and Simple are for the other RCT types +that use MLSAG signatures. + +> Simple is not just for RCT type Simple! + +## Index + +1. [Full Rules](#full-rules) +2. [Simple Rules](#simple-rules) + +## Full Rules + +### Creating The Ring Matrix (Full) + +For RCT type full the ring matrix contains every inputs ring members: [^full-matrix] + +(The signer owns a whole column) + +```bob + .-------.-------.-------.- - - -. + | I1 R1 | I1 R2 | I1 R3 | ..... | + | I2 R1 | I2 R2 | I2 R3 | ..... | + | I3 R1 | I3 R2 | I3 R3 | ..... | + ..... ..... ..... ..... + | A | A | A | ..... | <-. + '-------'-------'-------'-------' | + | +I = Input | +R = Ring member | +A = Pedersen Commitment | +``` + +The last row contains: \\(\sum CommitmentsAtIndex - \sum outPK - fee * H \\) [^full-last-row] + +Where CommitmentsAtIndex are the ring members commitments in that column. + +Which means that for the true spends column the entry in the last row will be commitment to 0. + +By structuring the matrix like this the true spend has to be a the same index in each inputs ring, +which is not good for privacy. + +### Number Of Ring Members + +There must be the same amount of ring members in each inputs ring.[^full-numb-ring-members] + +### One MLSAGs + +There must be only one MLSAG signature.[^numb-mlsags] + +## Simple Rules + +### Creating The Ring Matrix (Simple) + +For simple RCT types the ring matrix only contains the ring members of a single input: [^simple-matrix] + +```bob + .-------.-------.-------.- - - -. + | IX R1 | IX R2 | IX R3 | ..... | + | A | A | A | ..... | <-. + '-------'-------'-------'- - - -' | + | +I = Input | +R = Ring member | +A = Pedersen Commitment | +``` + +The last row contains the ring members commitment minus the pseudo-out for this input.[^simple-last-row] + +### Simple Number Of MLSAGs + +There must be the same amount of MLSAG signatures as there are inputs.[^numb-mlsags] + +## Rules That Apply To Both + +### More Than One Ring Member + +There must be more than one ring member.[^more-than-one-ring-member] + +### SS Size + +The ss field must be the same length as the key matrix[^ss-size] and each ss member lengths must be the same as the matrix's rows. 
[^ss-member-size] + +### SS, CC Canonical Encoding + +Every ss element and cc must be fully reduced scalars.[^ss-cc-reduced] + +### Key Images Not Identity + +All the key images must not be equal to the identity point.[^ki-not-identity] + +### The MLSAG Signature Must Be Correct + +The signature must be valid.[^mlsag-valid] + +--- + +[^full-matrix]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L802> + +[^full-last-row]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L827-L833> + +[^full-numb-ring-members]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L810> + +[^numb-mlsags]: <https://github.com/monero-project/monero/blame/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctTypes.h#L537-L540C28>s + +[^simple-matrix]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L841> + +[^simple-last-row]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L861-L864> + +[^more-than-one-ring-member]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L462> + +[^ss-size]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L469> + +[^ss-member-size]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L471> + +[^ss-cc-reduced]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L477-L480> + +[^ki-not-identity]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L487> + +[^mlsag-valid]: <https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/src/ringct/rctSigs.cpp#L460> diff --git a/books/protocol/src/consensus_rules/transactions/ring_signatures.md b/books/protocol/src/consensus_rules/transactions/ring_signatures.md new file mode 100644 index 00000000..e93b5f8c --- /dev/null +++ b/books/protocol/src/consensus_rules/transactions/ring_signatures.md @@ -0,0 +1,39 @@ +# Transaction Version 1 Rules + +## Introduction + +These rules apply only to version 1, pre-ringCT, transactions. + +## Rules + +### Amount Of Ring Signatures + +The amount of ring signatures must be the same as the number of inputs[^amt-of-ring-sigs]. + +### Amount Of Signatures In A Ring + +For a ring signature at a certain index, the input at that same index must have the same amount of ring members as the ring signature has signatures[^amt-of-sigs]. + +### Signatures Must Be Canonical + +Every signatures c and r value must be `canonical scalars`[^canonical-sig]. + +### Ring Members Must Be Valid Points + +All outputs used as ring members must be valid canonical points[^valid-members]. + +### The Ring Signature Must Be Valid + +The ring signature must be correctly formed[^ring-sig-correct]. 
+ +--- + +[^amt-of-ring-sigs]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3485> and <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_basic.h#L266> + +[^amt-of-sigs]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3999> and <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_basic/cryptonote_basic.h#L271-L282> + +[^canonical-sig]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/crypto/crypto.cpp#L735> + +[^valid-members]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/crypto/crypto.cpp#L738> + +[^ring-sig-correct]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/crypto/crypto.cpp#L711> diff --git a/books/protocol/src/consensus_rules/transactions/unlock_time.md b/books/protocol/src/consensus_rules/transactions/unlock_time.md new file mode 100644 index 00000000..f466d1b7 --- /dev/null +++ b/books/protocol/src/consensus_rules/transactions/unlock_time.md @@ -0,0 +1,68 @@ +# Unlock Time + +To spend an output the output's unlock time must have passed. + +## Interpreting An Unlock Time + +The unlock time is just a 64 bit unsigned number. It is interpreted as a block height if less than 500,000,000 otherwise it's a Unix timestamp[^interpreting-unlock-time]. + +## Checking The Output Is Unlocked + +### Block Height + +First you get the top blocks height and add one, we do this because we are checking if +the transaction is allowed in the next block not the last. + +We now check if this height is greater than or equal to the unlock time if it is then +accept the block[^height-accepting]. + +### Timestamp + +#### Getting The Current Time + +Before hard-fork 13, this was done by just getting the computer's time, from hf 13 onwards, we use +an average over the last blocks[^getting-time]. + +Monero uses the last 60 blocks to get an average, if the `chain height` is less than +60, just use the current time[^height-less-60]. + +First you get the median timestamp of the last 60 blocks. We then project this +timestamp to match approximately when the block being validated will appear, to do +this we do[^median-timestamp]: + +\\(adjustedMedian = median + \frac{(TimestampWindow + 1) * DifficultyTarget}{2} \\) + +where: + +\\(TimestampWindow = 60\\) + +\\(DifficultyTarget = 120\\) + +You then get the top block's timestamp and add the target seconds per block[^adjusting-top-block]. + +The timestamp we use is then the minimum out of the adjusted median and adjusted most +recent timestamp[^minimum-timestamp]. + +### Checking Timestamp Has Passed + +Now with our timestamp we add the [target seconds](../blocks/difficulty.md#target-seconds) +per block and check if this is more than or equal to the unlock +time[^checking-timestamp]. 
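+
+As a rough illustration of the checks above, here is a short Rust sketch. The chain-access values
+(`median_of_last_60`, `top_block_timestamp`, `top_height`) are hypothetical inputs, and the constants follow the values given in this chapter; this is not Cuprate's actual implementation.
+
+```rust
+/// Unlock times below this are treated as block heights, otherwise as Unix timestamps.
+const UNLOCK_TIME_HEIGHT_THRESHOLD: u64 = 500_000_000;
+const TIMESTAMP_WINDOW: u64 = 60;
+/// Target seconds per block.
+const DIFFICULTY_TARGET: u64 = 120;
+
+/// The "current time" used for timestamp locks from hard-fork 13 onwards:
+/// the minimum of the projected median and the adjusted top block timestamp.
+fn adjusted_current_time(median_of_last_60: u64, top_block_timestamp: u64) -> u64 {
+    let adjusted_median = median_of_last_60 + (TIMESTAMP_WINDOW + 1) * DIFFICULTY_TARGET / 2;
+    let adjusted_top = top_block_timestamp + DIFFICULTY_TARGET;
+    adjusted_median.min(adjusted_top)
+}
+
+/// Is an output with `unlock_time` spendable in the next block?
+fn output_unlocked(unlock_time: u64, top_height: u64, current_time: u64) -> bool {
+    if unlock_time < UNLOCK_TIME_HEIGHT_THRESHOLD {
+        // Interpreted as a block height: compare against the next block's height.
+        top_height + 1 >= unlock_time
+    } else {
+        // Interpreted as a timestamp: add the target seconds per block, then compare.
+        current_time + DIFFICULTY_TARGET >= unlock_time
+    }
+}
+```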
+ +--- + +[^interpreting-unlock-time]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3921> + +[^height-accepting]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3925> + +[^getting-time]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3933> + +[^height-less-60]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4011> + +[^median-timestamp]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4024-L4028> + +[^adjusting-top-block]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4032> + +[^minimum-timestamp]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L4036> + +[^checking-timestamp]: <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/cryptonote_core/blockchain.cpp#L3934> diff --git a/books/protocol/src/p2p_network.md b/books/protocol/src/p2p_network.md new file mode 100644 index 00000000..e0d9a799 --- /dev/null +++ b/books/protocol/src/p2p_network.md @@ -0,0 +1,3 @@ +# P2P Network + +This chapter contains descriptions of Monero's peer to peer network, including messages, flows, expected responses, etc. diff --git a/books/protocol/src/p2p_network/epee.md b/books/protocol/src/p2p_network/epee.md new file mode 100644 index 00000000..2f8161d8 --- /dev/null +++ b/books/protocol/src/p2p_network/epee.md @@ -0,0 +1,3 @@ +# Epee Binary Format + +The epee binary format is described here: TODO diff --git a/books/protocol/src/p2p_network/levin.md b/books/protocol/src/p2p_network/levin.md new file mode 100644 index 00000000..de746606 --- /dev/null +++ b/books/protocol/src/p2p_network/levin.md @@ -0,0 +1,68 @@ +# Levin Protocol + +This chapter describes the levin protocol. + +## Buckets + +A Bucket is a single piece of data that the levin protocol parser can decode, it will contain a p2p message or it will be part of a chain +of buckets that will be combined into a single message. + +### Bucket Format + +| Field | Type | Size (bytes) | +| ------ | ----------------------------- | ------------ | +| Header | [BucketHeader](#bucketheader) | 33 | +| Body | bytes | dynamic | + +### BucketHeader + +Format: + +| Field | Type | Size (bytes) | +| ---------------- | ------ | ------------ | +| Signature | LE u64 | 8 | +| Size | LE u64 | 8 | +| Expect Response | bool | 1 | +| Command | LE u32 | 4 | +| Return Code | LE i32 | 4 | +| Flags | LE u32 | 4 | +| Protocol Version | LE u32 | 4 | + +#### Signature + +The signature field is fixed for every bucket and is used to tell apart peers running different protocols. + +Its value should be `0x0101010101012101` + +#### Size + +This field represents the size of the buckets body. + +#### Expect Response + +Messages with the expect response field set must be responded to in order, other messages are still allowed in between responses. + +#### Command + +This field is an identifier for what specific message the bucket's body contains. + +#### Return Code + +This field represents the status of the response from the peer, requests and notifications should set this to `0` and successful +responses should be `1`. 
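+
+As a rough illustration, here is a Rust sketch of reading the fixed 33-byte header described in the table above.
+The struct and function names are illustrative only, not Cuprate's actual types.
+
+```rust
+const LEVIN_SIGNATURE: u64 = 0x0101_0101_0101_2101;
+
+struct BucketHeader {
+    signature: u64,
+    size: u64,
+    expect_response: bool,
+    command: u32,
+    return_code: i32,
+    flags: u32,
+    protocol_version: u32,
+}
+
+/// Parses a 33-byte levin header, rejecting buckets with the wrong signature.
+fn parse_header(bytes: &[u8; 33]) -> Option<BucketHeader> {
+    let header = BucketHeader {
+        signature: u64::from_le_bytes(bytes[0..8].try_into().unwrap()),
+        size: u64::from_le_bytes(bytes[8..16].try_into().unwrap()),
+        expect_response: bytes[16] != 0,
+        command: u32::from_le_bytes(bytes[17..21].try_into().unwrap()),
+        return_code: i32::from_le_bytes(bytes[21..25].try_into().unwrap()),
+        flags: u32::from_le_bytes(bytes[25..29].try_into().unwrap()),
+        protocol_version: u32::from_le_bytes(bytes[29..33].try_into().unwrap()),
+    };
+    // Buckets from peers running a different protocol are told apart by the signature.
+    (header.signature == LEVIN_SIGNATURE).then_some(header)
+}
+```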
+ +#### Flags + +This is a bit-flag field that determines what type of bucket this is: + +| Type | Bits set | +| -------------- | ----------- | +| Request | `0000_0001` | +| Response | `0000_0010` | +| Start Fragment | `0000_0100` | +| End Fragment | `0000_1000` | +| Dummy | `0000_1100` | + +#### Protocol Version + +This is a fixed value of 1. diff --git a/books/protocol/src/p2p_network/messages.md b/books/protocol/src/p2p_network/messages.md new file mode 100644 index 00000000..c3f18287 --- /dev/null +++ b/books/protocol/src/p2p_network/messages.md @@ -0,0 +1,37 @@ +# P2P Messages + +This chapter contains every P2P message. + +## Index + +## Types + +Types used in multiple P2P messages. + +### Support Flags + +Support flags specify any protocol extensions the peer supports, currently only the first bit is used: + +`FLUFFY_BLOCKS = 1` - for if the peer supports receiving fluffy blocks. + +### Basic Node Data + +| Fields | Type (Epee Type) | Description | +| ---------------------- | ------------------------------------- | ---------------------------------------------------------------------------------------- | +| `network_id` | A UUID (String) | A fixed constant value for a specific network (mainnet,testnet,stagenet) | +| `my_port` | u32 (u32) | The peer's inbound port, if the peer does not want inbound connections this should be `0` | +| `rpc_port` | u16 (u16) | The peer's RPC port, if the peer does not want inbound connections this should be `0` | +| `rpc_credits_per_hash` | u32 (u32) | TODO | +| `peer_id` | u64 (u64) | A fixed ID for the node, set to 1 for anonymity networks | +| `support_flags` | [support flags](#support-flags) (u32) | Specifies any protocol extensions the peer supports | + +## Messages + +### Handshake Requests + +levin command: 1001 + +| Fields | Type (Epee Type) | Description | +| ----------- | -------------------------------------------- | ----------- | +| `node_data` | [basic node data](#basic-node-data) (Object) | | +| | | | diff --git a/books/protocol/src/pruning.md b/books/protocol/src/pruning.md new file mode 100644 index 00000000..fe444a8a --- /dev/null +++ b/books/protocol/src/pruning.md @@ -0,0 +1,373 @@ +# Pruning + +Monero pruning works by having 8 possible pruning seeds, the seed chosen will decide what part of the blockchain's signing data your node will keep. Each pruned peer generates their pruning seed randomly. + +## Stripes + +This is the amount of different blockchain portions that a pruned peer could keep. For Monero this is currently 8, this means the blockchain's signing data is split into 8 portions. + +## Stripes Size + +Depending on your stripe (and therefore your seed) `monerod` will store, in a cyclic manner, a portion of blocks while discarding the ones that are out of your stripe. The stripe's size is amount of blocks before another stripe will have to store their portion of blocks, it is set at 4096. That means that in terms of a block's height, the first pruning stripe will store blocks 0 to 4095, the second stripes will store blocks 4096 to 8191, the third stripe will store blocks 8192 to 12288... etc. While a specific stripe is storing a portion of the blockchain, nodes with another stripe can just discard them. This is shown in the table below: + +| stripe | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | +| ---------------- | ------------- | ----------- | ------------ | -- | -- | -- | -- | -- | +| will have blocks | 0 - 4095 | 4096 - 8191 | 8192 - 12287 | .. | .. | .. | .. | .. | +| | 32768 - 36863 | .. | .. | .. | .. | .. | .. | .. | +| | .. | .. 
| .. | .. | .. | .. | .. | .. | + +## Tip Blocks + +Blocks within 5500 of the tip of the chain will not be pruned. + +## Generating Pruning Seeds + +The function in Monero to generate pruning seeds: + +```c++ +uint32_t make_pruning_seed(uint32_t stripe, uint32_t log_stripes) +{ + CHECK_AND_ASSERT_THROW_MES(log_stripes <= PRUNING_SEED_LOG_STRIPES_MASK, "log_stripes out of range"); + CHECK_AND_ASSERT_THROW_MES(stripe > 0 && stripe <= (1ul << log_stripes), "stripe out of range"); + return (log_stripes << PRUNING_SEED_LOG_STRIPES_SHIFT) | ((stripe - 1) << PRUNING_SEED_STRIPE_SHIFT); +} +``` + +This function takes in a stripe which is number 1 to 8 including(1 & 8) and a log_stripes which is log2 of the amount of different stripes (8) which is 3. + +The constants used in this function: + +```c++ +static constexpr uint32_t PRUNING_SEED_LOG_STRIPES_SHIFT = 7; +static constexpr uint32_t PRUNING_SEED_LOG_STRIPES_MASK = 0x7; +static constexpr uint32_t PRUNING_SEED_STRIPE_SHIFT = 0; +``` + +The possible inputs/outputs of this function (`log_stripes` is always 3) + +| input (stripe) | output (seed) | +| -------------- | ------------- | +| 1 | 384 | +| 2 | 385 | +| 3 | 386 | +| 4 | 387 | +| 5 | 388 | +| 6 | 389 | +| 7 | 390 | +| 8 | 391 | + +## Getting A Seed's Log Stripes + +Monero currently only accepts a log stripes value of 3 and will reject any peers that use a different value. The function to calculate a seed's log stripes is: + +```c++ +constexpr inline uint32_t get_pruning_log_stripes(uint32_t pruning_seed) { + return (pruning_seed >> PRUNING_SEED_LOG_STRIPES_SHIFT) & PRUNING_SEED_LOG_STRIPES_MASK; +} +``` + +This will only return 3 for all currently valid Monero seeds. + +## Getting A Seed's Pruning Stripe + +The seed's pruning stripe corresponds, as explain earlier, to the range of blocks we keep. This is the function that gets the stripe from the pruning seed: + +```c++ +inline uint32_t get_pruning_stripe(uint32_t pruning_seed) { + if (pruning_seed == 0) return 0; + return 1 + ((pruning_seed >> PRUNING_SEED_STRIPE_SHIFT) & PRUNING_SEED_STRIPE_MASK); } +``` + +A pruning seed of 0 means no pruning. This function is just the inverse of [Generating Pruning Seeds](#generating-pruning-seeds) so the inputs/outputs of this will just be the other way round. + +## Getting A Block's Pruning Stripe + +A Block's pruning stripe is the stripe that corresponds to keeping that block so for blocks 0 to 4095 this will be 1, for blocks 4096 to 8191 this will be 2. +The function in Monero to get the pruning stripe that corresponds to keeping that block is: + +```c++ +uint32_t get_pruning_stripe(uint64_t block_height, uint64_t blockchain_height, uint32_t log_stripes) +{ + if (block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height) + return 0; + return ((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) & (uint64_t)((1ul << log_stripes) - 1)) + 1; +} +``` + +[Pruning Stripe Size](#stripes-size) + +This function takes in a number (`block_height`) and outputs a number 0 to 8. Zero is a special case for if the block_height is within Tip Blocks, this means every seed should keep this block. For 1 to 8 the output will rotate every 4096 so if I input 0 the output is 1 and if I input 4096 the output is 2 and so +on... + +#### explaining what the function is doing in depth: + +As you can see, this function first checks if the block_height is within Tip Blocks and returns 0, because every seed will have this block. 
+
+`((1ul << log_stripes) - 1)` This sets the last 3 bits: `0000 0111`, so when we bitand we
+remove every other bit.
+
+`(block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE)`:
+
+- for any block 0 to 4095 dividing by 4096 will output 0 (stripe: 1)
+- for any block 4096 to 8191 dividing by 4096 will output 1 (stripe: 2)
+- for any blocks 32768 to 36863 dividing by 4096 will output 8 (stripe: 1)
+
+Here's an issue: we need the stripes to be cyclic. A result of 8 should wrap back around to stripe 1, and a result of 455 should give stripe 8.
+To do so we take the result modulo the number of stripes and add 1 (8 mod 8 = 0, so stripe 1; 455 mod 8 = 7, so stripe 8). In binary, if the divisor is a power of two, the modulo is
+equivalent to bitanding the value with the divisor - 1:
+
+This is why if we bitand this with 7 (0000 0111), this then becomes:
+
+- 0 to 4095 would be 0
+- 4096 to 8191 would be 1
+- 32768 to 36863 would be 0
+
+Now we are close; all we have to do to get the stripe is add 1.
+
+## Getting A Block's Pruning Seed
+
+The block's pruning seed is the seed that will keep that block. This is the function in Monero:
+
+```c++
+uint32_t get_pruning_seed(uint64_t block_height, uint64_t blockchain_height, uint32_t log_stripes)
+{
+  const uint32_t stripe = get_pruning_stripe(block_height, blockchain_height, log_stripes);
+  if (stripe == 0)
+    return 0;
+  return make_pruning_seed(stripe, log_stripes);
+}
+```
+
+This is simple: a call to [`get_pruning_stripe`](#getting-a-blocks-pruning-stripe), passing that stripe into [`make_pruning_seed`](#generating-pruning-seeds).
+
+## Getting The Next Un-pruned Block
+
+For a particular seed and block height we can calculate what the height of the next un-pruned block will
+be. The function to do this in Monero is:
+
+```c++
+uint64_t get_next_unpruned_block_height(uint64_t block_height, uint64_t blockchain_height, uint32_t pruning_seed)
+{
+  CHECK_AND_ASSERT_MES(block_height <= CRYPTONOTE_MAX_BLOCK_NUMBER+1, block_height, "block_height too large");
+  CHECK_AND_ASSERT_MES(blockchain_height <= CRYPTONOTE_MAX_BLOCK_NUMBER+1, block_height, "blockchain_height too large");
+  const uint32_t stripe = get_pruning_stripe(pruning_seed);
+  if (stripe == 0)
+    return block_height;
+  if (block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height)
+    return block_height;
+  const uint32_t seed_log_stripes = get_pruning_log_stripes(pruning_seed);
+  const uint64_t log_stripes = seed_log_stripes ? seed_log_stripes : CRYPTONOTE_PRUNING_LOG_STRIPES;
+  const uint64_t mask = (1ul << log_stripes) - 1;
+  const uint32_t block_pruning_stripe = ((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) & mask) + 1;
+  if (block_pruning_stripe == stripe)
+    return block_height;
+  const uint64_t cycles = ((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) >> log_stripes);
+  const uint64_t cycle_start = cycles + ((stripe > block_pruning_stripe) ? 0 : 1);
+  const uint64_t h = cycle_start * (CRYPTONOTE_PRUNING_STRIPE_SIZE << log_stripes) + (stripe - 1) * CRYPTONOTE_PRUNING_STRIPE_SIZE;
+  if (h + CRYPTONOTE_PRUNING_TIP_BLOCKS > blockchain_height)
+    return blockchain_height < CRYPTONOTE_PRUNING_TIP_BLOCKS ? 
0 : blockchain_height - CRYPTONOTE_PRUNING_TIP_BLOCKS; + CHECK_AND_ASSERT_MES(h >= block_height, block_height, "h < block_height, unexpected"); + return h; +} +``` + +As you can see this is a monstrous function + +#### explaining what the function is doing in depth: + +```c++ +const uint32_t stripe = get_pruning_stripe(pruning_seed); +if (stripe == 0) + return block_height; +if (block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height) + return block_height; +``` + +This is calculating the [stripe](#getting-a-seeds-pruning-stripe) of the inputted pruning seed, remember if the seed/stripe is `0` that means no pruning so we can return the current +height as the next un-pruned height and similarly if the block's height is within [Tip Blocks](#tip-blocks) of the blockchain's height that also means the block won't be pruned. + +```c++ +const uint32_t seed_log_stripes = get_pruning_log_stripes(pruning_seed); +const uint64_t log_stripes = seed_log_stripes ? seed_log_stripes : CRYPTONOTE_PRUNING_LOG_STRIPES; +const uint64_t mask = (1ul << log_stripes) - 1; +``` + +This is calculating the [log stripes](#getting-a-seeds-log-stripes) of the seed, although Monero currently only allows a log stripes of 3 in the future a higher number could be allowed so this function accounts for that. + +If the seed's log stripes are zero this will set it to `CRYPTONOTE_PRUNING_LOG_STRIPES` which is currently `3`. + +Then this sets the value of `mask` to one less than the amount of [stripes](#stripes), for Monero the amount of stripes is 8 so `mask` will be 7. + +```c++ +const uint32_t block_pruning_stripe = ((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) & mask) + 1; +if (block_pruning_stripe == stripe) + return block_height; +``` + +This calculates the [block's pruning stripe](#getting-a-blocks-pruning-stripe) using the same method that we saw in [this](#getting-a-blocks-pruning-stripe) function. + +This then checks if the block's stripe is the same as the seed stripe, if you remember if a seed and block have the same stripe that means the seed will keep the block, so we can just return the entered `block_height`. + +```c++ +const uint64_t cycles = ((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) >> log_stripes); +``` + +This calculates how many cycles of this table we have done: + +| stripe | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | +| -------- | ------------- | ------------ | ------------ | -- | -- | -- | -- | -- | +| cycle 0: | 0 - 4095 | 4096 - 8,191 | 8192 - 12287 | .. | .. | .. | .. | .. | +| cycle 1: | 32768 - 36863 | .. | .. | .. | .. | .. | .. | .. | +| cycle 2: | .. | .. | .. | .. | .. | .. | .. | .. | +| | .. | | | | | | | | + +If we think about what this is doing, this makes sense: + +## \\(cycles = \frac{block height}{CRYPTONOTE PRUNING STRIPE SIZE} * \frac{1}{2^{log stripes}} \\) + +for normal Monero pruning this is the same as: + +## \\(cycles = \frac{block height}{4096 * 2^{3}} = \frac{block height}{32768}\\) + +```c++ +const uint64_t cycle_start = cycles + ((stripe > block_pruning_stripe) ? 0 : 1); +``` + +This checks if we are a past our seeds stripe in a cycle and if we are past it we add +one to the number of cycles to get `cycles_start` which is the start of the cycle our +stripe will next be storing blocks in. 
+ +```c++ +const uint64_t h = cycle_start * (CRYPTONOTE_PRUNING_STRIPE_SIZE << log_stripes) + (stripe - 1) * CRYPTONOTE_PRUNING_STRIPE_SIZE; +``` + +If you remember from the table [here](#stripes-size) each stripe will keep a part of the blockchain in a cyclic manner, which replates every 32,768. + +- so stripe 1 will keep `numb_of_cycles * 32768 + 0 * 4096` +- so stripe 2 will keep `numb_of_cycles * 32768 + 1 * 4096` +- so stripe 3 will keep `numb_of_cycles * 32768 + 2 * 4096` + +Each stripe will stop keeping blocks at one less than the next stripes start. + +This can be formalized into the equation: + +`numb_of_cycles * blocks_in_a_cycle + (stripe - 1) * stripe_size` + +which also equals: + +`numb_of_cycles * (stripe_size * amt_of_stripes) + (stripe - 1) * stripe_size` + +Knowing this, let's split this into 2 parts: + +#### Part 1: + +```c++ +cycle_start * (CRYPTONOTE_PRUNING_STRIPE_SIZE << log_stripes) +``` + +This gets the block height at the start of the `cycle_start` cycle, so if `cycle_start` was: + +- `0` the height would be `0` +- `1` the height would be `32768` +- `2` the height would be `65536` + +Which is: `numb_of_cycles * blocks_in_a_cycle`.</br> +For normal Monero pruning: `numb_of_cycles * (4096 * 8)` + +#### Part 2: + +```c++ +(stripe - 1) * CRYPTONOTE_PRUNING_STRIPE_SIZE +``` + +This gets how many blocks from the start of a cycle until the seeds stripe starts. + +For example if the seed's stripe was: + +- `1` the amount of blocks would be `0` +- `2` the amount of blocks would be `4096` +- `3` the amount of blocks would be `8192` + +which is: `(stripe-1) * stripe_size` + +As you can see if we add the amount of blocks until the start of a cycle (`numb_of_cycles * blocks_in_a_cycle`) to the amount of blocks into a cycle the until the seed's stripe "kicks in" (`(stripe-1) * stripe_size`) we will get the next un-pruned height. + +```c++ +if (h + CRYPTONOTE_PRUNING_TIP_BLOCKS > blockchain_height) + return blockchain_height < CRYPTONOTE_PRUNING_TIP_BLOCKS ? 0 : blockchain_height - CRYPTONOTE_PRUNING_TIP_BLOCKS; +CHECK_AND_ASSERT_MES(h >= block_height, block_height, "h < block_height, unexpected"); +return h; +``` + +We now have to check if the height we calculated is above the [tip blocks](#tip-blocks), if it is we get the starting height of the tip blocks and return that or if it isn't over +the tip blocks we can just return the calculated height. Yay, we are done! + +## Getting The Next Pruned Block + +For a particular seed and block height we can calculate what the height of the next pruned block will +be. The function to do this in Monero is: + +```c++ +uint64_t get_next_pruned_block_height(uint64_t block_height, uint64_t blockchain_height, uint32_t pruning_seed) +{ + const uint32_t stripe = get_pruning_stripe(pruning_seed); + if (stripe == 0) + return blockchain_height; + if (block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height) + return blockchain_height; + const uint32_t seed_log_stripes = get_pruning_log_stripes(pruning_seed); + const uint64_t log_stripes = seed_log_stripes ? 
seed_log_stripes : CRYPTONOTE_PRUNING_LOG_STRIPES; + const uint64_t mask = (1ul << log_stripes) - 1; + const uint32_t block_pruning_seed = ((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) & mask) + 1; + if (block_pruning_seed != stripe) + return block_height; + const uint32_t next_stripe = 1 + (block_pruning_seed & mask); + return get_next_unpruned_block_height(block_height, blockchain_height, tools::make_pruning_seed(next_stripe, log_stripes)); +} +``` + +#### explaining what the function is doing in depth: + +```c++ +const uint32_t stripe = get_pruning_stripe(pruning_seed); +if (stripe == 0) + return blockchain_height; +if (block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height) + return blockchain_height; +``` + +This is calculating the [stripe](#getting-a-seeds-pruning-stripe) of the inputted pruning seed, remember if the seed/stripe is `0` that means no pruning so we can return the blockchain height as the next un-pruned height and similarly if the block's height is within [Tip Blocks](#tip-blocks) of the blockchain's height that also means the block won't be pruned. + +Returning the blockchain's height means the next pruned block doesn't currently exist, its bigger than or equal to blockchain_height - CRYPTONOTE_PRUNING_TIP_BLOCKS or it means it +will never exist in the case of a zero pruning seed. + +```c++ +const uint32_t seed_log_stripes = get_pruning_log_stripes(pruning_seed); +const uint64_t log_stripes = seed_log_stripes ? seed_log_stripes : CRYPTONOTE_PRUNING_LOG_STRIPES; +const uint64_t mask = (1ul << log_stripes) - 1; +``` + +This is calculating the [log stripes](#getting-a-seeds-log-stripes) of the seed, although Monero currently only allows a log stripes of 3 in the future a higher number could be allowed so this function accounts for that. + +If the seed's log stripes are zero this will set it to `CRYPTONOTE_PRUNING_LOG_STRIPES` which is currently `3`. + +Then this sets the value of `mask` to one less than the amount of [stripes](#stripes), for Monero the amount of stripes is 8 so `mask` will be 7. + +```c++ +const uint32_t block_pruning_seed = ((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) & mask) + 1; +if (block_pruning_seed != stripe) + return block_height; +``` + +> There is a typo here it should be block_pruning_stripe, think of this as foreshadowing what we are about to do + +This calculates the [blocks pruning ~~seed~~ STRIPE](#getting-a-blocks-pruning-stripe) using the same method that we saw in [this](#getting-a-blocks-pruning-stripe) function. + +This then checks if the block's stripe is NOT the same as the seed stripe, if you remember if a seed and block don't have the same stripe that means the seed will prune that block, so we can just return the entered `block_height`. + +```c++ +const uint32_t next_stripe = 1 + (block_pruning_seed & mask); +return get_next_unpruned_block_height(block_height, blockchain_height, tools::make_pruning_seed(next_stripe, log_stripes)); +``` + +Because the seed's stripe == the block's stripe we need to work out when our stripe ends (when the next stripe starts to get the next pruned block). To do this we can simply calculate the next stripe, make a [new pruning seed](#generating-pruning-seeds) and pass in that seed, which has a stripe one more than ours, into [get next un-pruned block](#getting-the-next-unpruned-block) to get the start of the next stripe's un-pruned set and therefore the start of our next pruned set. 
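+
+To tie the pieces above together, here is a small Rust sketch of the seed/stripe maths. It is illustrative only,
+not Cuprate's actual implementation, and the `0x7f` stripe mask is inferred from the seed table in [Generating Pruning Seeds](#generating-pruning-seeds).
+
+```rust
+const PRUNING_LOG_STRIPES: u32 = 3; // log2 of the 8 stripes
+const PRUNING_STRIPE_SIZE: u64 = 4096;
+const PRUNING_TIP_BLOCKS: u64 = 5500;
+
+/// Builds a seed from a stripe (1..=8), mirroring `make_pruning_seed`.
+fn make_pruning_seed(stripe: u32, log_stripes: u32) -> u32 {
+    assert!(stripe >= 1 && stripe <= (1u32 << log_stripes));
+    (log_stripes << 7) | (stripe - 1)
+}
+
+/// The stripe a seed keeps (0 = the seed does no pruning).
+fn seed_to_stripe(seed: u32) -> u32 {
+    if seed == 0 {
+        0
+    } else {
+        1 + (seed & 0x7f)
+    }
+}
+
+/// The stripe that keeps a given block (0 = a tip block, kept by everyone).
+fn block_stripe(height: u64, chain_height: u64, log_stripes: u32) -> u32 {
+    if height + PRUNING_TIP_BLOCKS >= chain_height {
+        return 0;
+    }
+    (((height / PRUNING_STRIPE_SIZE) & ((1u64 << log_stripes) - 1)) + 1) as u32
+}
+
+fn main() {
+    // Seeds for stripes 1..=8 are 384..=391, matching the table above.
+    assert_eq!(make_pruning_seed(1, PRUNING_LOG_STRIPES), 384);
+    assert_eq!(make_pruning_seed(8, PRUNING_LOG_STRIPES), 391);
+    assert_eq!(seed_to_stripe(387), 4);
+    // Blocks 0..=4095 belong to stripe 1, 4096..=8191 to stripe 2,
+    // and the pattern repeats every 8 * 4096 = 32768 blocks.
+    assert_eq!(block_stripe(0, 3_000_000, PRUNING_LOG_STRIPES), 1);
+    assert_eq!(block_stripe(4096, 3_000_000, PRUNING_LOG_STRIPES), 2);
+    assert_eq!(block_stripe(32_768, 3_000_000, PRUNING_LOG_STRIPES), 1);
+}
+```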
diff --git a/books/protocol/svgbob.css b/books/protocol/svgbob.css new file mode 100644 index 00000000..c5c1be64 --- /dev/null +++ b/books/protocol/svgbob.css @@ -0,0 +1,4 @@ +/* Ensure text is legible in all themes. */ +svg text { + fill: var(--fg); +} \ No newline at end of file From 4b93dbec4c235803342dc3da0163257a1bf22feb Mon Sep 17 00:00:00 2001 From: hinto-janai <hinto.janai@protonmail.com> Date: Sun, 23 Jun 2024 21:30:47 -0400 Subject: [PATCH 07/11] workspace: enforce crate/directory naming scheme (#164) * rename all directories and crates * fix all `use` * fix doc link * `dandelion/` -> `dandelion-tower/` * fix epee-encoding test * fix `json-rpc` * fix pruning * crate import fixes * fix leftover merge conflicts * fix `epee-encoding` --- Cargo.lock | 304 +++++++++--------- Cargo.toml | 16 +- consensus/fast-sync/Cargo.toml | 2 +- consensus/rules/Cargo.toml | 2 +- consensus/rules/src/blocks.rs | 2 +- cryptonight/Cargo.toml | 2 +- net/epee-encoding/Cargo.toml | 6 +- net/epee-encoding/src/lib.rs | 14 +- net/epee-encoding/src/macros.rs | 42 +-- net/epee-encoding/src/value.rs | 2 +- net/epee-encoding/src/varint.rs | 4 +- net/epee-encoding/tests/alt_name.rs | 2 +- net/epee-encoding/tests/duplicate_key.rs | 2 +- net/epee-encoding/tests/epee_default.rs | 2 +- net/epee-encoding/tests/flattened.rs | 2 +- net/epee-encoding/tests/options.rs | 2 +- net/epee-encoding/tests/p2p.rs | 2 +- net/epee-encoding/tests/rpc.rs | 2 +- net/epee-encoding/tests/seq.rs | 2 +- net/epee-encoding/tests/stack_overflow.rs | 2 +- net/fixed-bytes/Cargo.toml | 2 +- net/levin/Cargo.toml | 2 +- net/levin/tests/fragmented_message.rs | 2 +- net/{monero-wire => wire}/Cargo.toml | 10 +- net/{monero-wire => wire}/src/lib.rs | 6 +- .../src/network_address.rs | 4 +- .../src/network_address/epee_builder.rs | 27 +- net/{monero-wire => wire}/src/p2p.rs | 17 +- net/{monero-wire => wire}/src/p2p/admin.rs | 18 +- net/{monero-wire => wire}/src/p2p/common.rs | 20 +- net/{monero-wire => wire}/src/p2p/protocol.rs | 19 +- p2p/address-book/Cargo.toml | 8 +- p2p/address-book/src/book.rs | 4 +- p2p/address-book/src/book/tests.rs | 4 +- p2p/address-book/src/lib.rs | 6 +- p2p/address-book/src/peer_list.rs | 4 +- p2p/address-book/src/peer_list/tests.rs | 6 +- p2p/address-book/src/store.rs | 2 +- p2p/async-buffer/Cargo.toml | 2 +- p2p/async-buffer/tests/basic.rs | 2 +- p2p/{dandelion => dandelion-tower}/Cargo.toml | 2 +- .../src/config.rs | 0 p2p/{dandelion => dandelion-tower}/src/lib.rs | 0 .../src/pool.rs | 0 .../src/router.rs | 0 .../src/tests/mod.rs | 0 .../src/tests/pool.rs | 0 .../src/tests/router.rs | 0 .../src/traits.rs | 0 p2p/{monero-p2p => p2p-core}/Cargo.toml | 8 +- p2p/{monero-p2p => p2p-core}/src/client.rs | 2 +- .../src/client/connection.rs | 8 +- .../src/client/connector.rs | 0 .../src/client/handshaker.rs | 6 +- .../src/client/timeout_monitor.rs | 3 +- p2p/{monero-p2p => p2p-core}/src/constants.rs | 0 p2p/{monero-p2p => p2p-core}/src/error.rs | 2 +- p2p/{monero-p2p => p2p-core}/src/handles.rs | 0 p2p/{monero-p2p => p2p-core}/src/lib.rs | 2 +- .../src/network_zones.rs | 0 .../src/network_zones/clear.rs | 2 +- p2p/{monero-p2p => p2p-core}/src/protocol.rs | 2 +- .../src/protocol/try_from.rs | 2 +- p2p/{monero-p2p => p2p-core}/src/services.rs | 8 +- .../tests/fragmented_handshake.rs | 4 +- p2p/{monero-p2p => p2p-core}/tests/handles.rs | 2 +- .../tests/handshake.rs | 4 +- .../tests/sending_receiving.rs | 4 +- p2p/{monero-p2p => p2p-core}/tests/utils.rs | 4 +- p2p/{cuprate-p2p => p2p}/Cargo.toml | 12 +- .../src/block_downloader.rs | 8 
+- .../src/block_downloader/block_queue.rs | 6 +- .../src/block_downloader/chain_tracker.rs | 6 +- .../src/block_downloader/download_batch.rs | 6 +- .../src/block_downloader/request_chain.rs | 4 +- .../src/block_downloader/tests.rs | 10 +- p2p/{cuprate-p2p => p2p}/src/broadcast.rs | 10 +- p2p/{cuprate-p2p => p2p}/src/client_pool.rs | 2 +- .../src/client_pool/disconnect_monitor.rs | 2 +- .../src/client_pool/drop_guard_client.rs | 2 +- p2p/{cuprate-p2p => p2p}/src/config.rs | 6 +- .../src/connection_maintainer.rs | 2 +- p2p/{cuprate-p2p => p2p}/src/constants.rs | 0 .../src/inbound_server.rs | 2 +- p2p/{cuprate-p2p => p2p}/src/lib.rs | 10 +- p2p/{cuprate-p2p => p2p}/src/sync_states.rs | 14 +- pruning/Cargo.toml | 2 +- pruning/src/lib.rs | 2 +- rpc/json-rpc/Cargo.toml | 2 +- rpc/json-rpc/README.md | 10 +- rpc/json-rpc/src/error/code.rs | 12 +- rpc/json-rpc/src/error/object.rs | 16 +- rpc/json-rpc/src/id.rs | 14 +- rpc/json-rpc/src/request.rs | 6 +- rpc/json-rpc/src/response.rs | 14 +- rpc/json-rpc/src/version.rs | 4 +- .../Cargo.toml | 0 .../src/lib.rs | 0 .../Cargo.toml | 2 +- .../src/lib.rs | 0 .../Cargo.toml | 2 +- .../README.md | 0 .../src/backend/heed/database.rs | 0 .../src/backend/heed/env.rs | 0 .../src/backend/heed/error.rs | 0 .../src/backend/heed/mod.rs | 0 .../src/backend/heed/storable.rs | 0 .../src/backend/heed/transaction.rs | 0 .../src/backend/heed/types.rs | 0 .../src/backend/mod.rs | 0 .../src/backend/redb/database.rs | 0 .../src/backend/redb/env.rs | 0 .../src/backend/redb/error.rs | 0 .../src/backend/redb/mod.rs | 0 .../src/backend/redb/storable.rs | 0 .../src/backend/redb/transaction.rs | 0 .../src/backend/redb/types.rs | 0 .../src/backend/tests.rs | 0 .../src/config/backend.rs | 0 .../src/config/config.rs | 0 .../src/config/mod.rs | 0 .../src/config/reader_threads.rs | 0 .../src/config/sync_mode.rs | 0 .../src/constants.rs | 0 .../src/database.rs | 0 .../src/env.rs | 0 .../src/error.rs | 0 .../src/free.rs | 0 .../src/key.rs | 0 .../src/lib.rs | 0 .../src/ops/block.rs | 0 .../src/ops/blockchain.rs | 0 .../src/ops/key_image.rs | 0 .../src/ops/macros.rs | 0 .../src/ops/mod.rs | 0 .../src/ops/output.rs | 0 .../src/ops/property.rs | 2 +- .../src/ops/tx.rs | 0 .../src/resize.rs | 0 .../src/service/free.rs | 0 .../src/service/mod.rs | 0 .../src/service/read.rs | 0 .../src/service/tests.rs | 0 .../src/service/types.rs | 0 .../src/service/write.rs | 0 .../src/storable.rs | 0 .../src/table.rs | 0 .../src/tables.rs | 0 .../src/tests.rs | 0 .../src/transaction.rs | 0 .../src/types.rs | 0 .../src/unsafe_sendable.rs | 0 storage/database/Cargo.toml | 2 +- storage/{cuprate-txpool => txpool}/Cargo.toml | 0 storage/{cuprate-txpool => txpool}/src/lib.rs | 0 test-utils/Cargo.toml | 4 +- test-utils/src/test_netzone.rs | 4 +- 157 files changed, 434 insertions(+), 410 deletions(-) rename net/{monero-wire => wire}/Cargo.toml (67%) rename net/{monero-wire => wire}/src/lib.rs (88%) rename net/{monero-wire => wire}/src/network_address.rs (95%) rename net/{monero-wire => wire}/src/network_address/epee_builder.rs (75%) rename net/{monero-wire => wire}/src/p2p.rs (96%) rename net/{monero-wire => wire}/src/p2p/admin.rs (99%) rename net/{monero-wire => wire}/src/p2p/common.rs (92%) rename net/{monero-wire => wire}/src/p2p/protocol.rs (99%) rename p2p/{dandelion => dandelion-tower}/Cargo.toml (96%) rename p2p/{dandelion => dandelion-tower}/src/config.rs (100%) rename p2p/{dandelion => dandelion-tower}/src/lib.rs (100%) rename p2p/{dandelion => dandelion-tower}/src/pool.rs (100%) rename p2p/{dandelion => 
dandelion-tower}/src/router.rs (100%) rename p2p/{dandelion => dandelion-tower}/src/tests/mod.rs (100%) rename p2p/{dandelion => dandelion-tower}/src/tests/pool.rs (100%) rename p2p/{dandelion => dandelion-tower}/src/tests/router.rs (100%) rename p2p/{dandelion => dandelion-tower}/src/traits.rs (100%) rename p2p/{monero-p2p => p2p-core}/Cargo.toml (84%) rename p2p/{monero-p2p => p2p-core}/src/client.rs (99%) rename p2p/{monero-p2p => p2p-core}/src/client/connection.rs (97%) rename p2p/{monero-p2p => p2p-core}/src/client/connector.rs (100%) rename p2p/{monero-p2p => p2p-core}/src/client/handshaker.rs (99%) rename p2p/{monero-p2p => p2p-core}/src/client/timeout_monitor.rs (99%) rename p2p/{monero-p2p => p2p-core}/src/constants.rs (100%) rename p2p/{monero-p2p => p2p-core}/src/error.rs (96%) rename p2p/{monero-p2p => p2p-core}/src/handles.rs (100%) rename p2p/{monero-p2p => p2p-core}/src/lib.rs (99%) rename p2p/{monero-p2p => p2p-core}/src/network_zones.rs (100%) rename p2p/{monero-p2p => p2p-core}/src/network_zones/clear.rs (98%) rename p2p/{monero-p2p => p2p-core}/src/protocol.rs (99%) rename p2p/{monero-p2p => p2p-core}/src/protocol/try_from.rs (98%) rename p2p/{monero-p2p => p2p-core}/src/services.rs (95%) rename p2p/{monero-p2p => p2p-core}/tests/fragmented_handshake.rs (99%) rename p2p/{monero-p2p => p2p-core}/tests/handles.rs (97%) rename p2p/{monero-p2p => p2p-core}/tests/handshake.rs (98%) rename p2p/{monero-p2p => p2p-core}/tests/sending_receiving.rs (94%) rename p2p/{monero-p2p => p2p-core}/tests/utils.rs (97%) rename p2p/{cuprate-p2p => p2p}/Cargo.toml (77%) rename p2p/{cuprate-p2p => p2p}/src/block_downloader.rs (99%) rename p2p/{cuprate-p2p => p2p}/src/block_downloader/block_queue.rs (96%) rename p2p/{cuprate-p2p => p2p}/src/block_downloader/chain_tracker.rs (97%) rename p2p/{cuprate-p2p => p2p}/src/block_downloader/download_batch.rs (97%) rename p2p/{cuprate-p2p => p2p}/src/block_downloader/request_chain.rs (98%) rename p2p/{cuprate-p2p => p2p}/src/block_downloader/tests.rs (97%) rename p2p/{cuprate-p2p => p2p}/src/broadcast.rs (98%) rename p2p/{cuprate-p2p => p2p}/src/client_pool.rs (99%) rename p2p/{cuprate-p2p => p2p}/src/client_pool/disconnect_monitor.rs (96%) rename p2p/{cuprate-p2p => p2p}/src/client_pool/drop_guard_client.rs (95%) rename p2p/{cuprate-p2p => p2p}/src/config.rs (93%) rename p2p/{cuprate-p2p => p2p}/src/connection_maintainer.rs (99%) rename p2p/{cuprate-p2p => p2p}/src/constants.rs (100%) rename p2p/{cuprate-p2p => p2p}/src/inbound_server.rs (99%) rename p2p/{cuprate-p2p => p2p}/src/lib.rs (95%) rename p2p/{cuprate-p2p => p2p}/src/sync_states.rs (97%) rename rpc/{cuprate-rpc-interface => rpc-interface}/Cargo.toml (100%) rename rpc/{cuprate-rpc-interface => rpc-interface}/src/lib.rs (100%) rename rpc/{monero-rpc-types => rpc-types}/Cargo.toml (90%) rename rpc/{monero-rpc-types => rpc-types}/src/lib.rs (100%) rename storage/{cuprate-blockchain => blockchain}/Cargo.toml (98%) rename storage/{cuprate-blockchain => blockchain}/README.md (100%) rename storage/{cuprate-blockchain => blockchain}/src/backend/heed/database.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/backend/heed/env.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/backend/heed/error.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/backend/heed/mod.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/backend/heed/storable.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/backend/heed/transaction.rs (100%) rename 
storage/{cuprate-blockchain => blockchain}/src/backend/heed/types.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/backend/mod.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/backend/redb/database.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/backend/redb/env.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/backend/redb/error.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/backend/redb/mod.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/backend/redb/storable.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/backend/redb/transaction.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/backend/redb/types.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/backend/tests.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/config/backend.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/config/config.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/config/mod.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/config/reader_threads.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/config/sync_mode.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/constants.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/database.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/env.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/error.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/free.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/key.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/lib.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/ops/block.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/ops/blockchain.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/ops/key_image.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/ops/macros.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/ops/mod.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/ops/output.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/ops/property.rs (97%) rename storage/{cuprate-blockchain => blockchain}/src/ops/tx.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/resize.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/service/free.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/service/mod.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/service/read.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/service/tests.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/service/types.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/service/write.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/storable.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/table.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/tables.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/tests.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/transaction.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/types.rs (100%) rename storage/{cuprate-blockchain => blockchain}/src/unsafe_sendable.rs (100%) rename storage/{cuprate-txpool => txpool}/Cargo.toml (100%) rename storage/{cuprate-txpool => txpool}/src/lib.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index e83e38a1..07997288 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -56,16 +56,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" -[[package]] -name = "async-buffer" -version = "0.1.0" -dependencies = [ - "futures", - "pin-project", - "thiserror", - "tokio", -] - [[package]] name = "async-lock" version = "3.4.0" @@ -469,12 +459,32 @@ dependencies = [ ] [[package]] -name = "cryptonight-cuprate" +name = "cuprate-address-book" version = "0.1.0" dependencies = [ - "cc", - "hex", + "borsh", + "cuprate-p2p-core", + "cuprate-pruning", + "cuprate-test-utils", + "cuprate-wire", + "futures", + "indexmap 2.2.6", + "rand", "thiserror", + "tokio", + "tokio-util", + "tower", + "tracing", +] + +[[package]] +name = "cuprate-async-buffer" +version = "0.1.0" +dependencies = [ + "futures", + "pin-project", + "thiserror", + "tokio", ] [[package]] @@ -487,6 +497,7 @@ dependencies = [ "cfg-if", "crossbeam", "cuprate-helper", + "cuprate-pruning", "cuprate-test-utils", "cuprate-types", "curve25519-dalek", @@ -494,7 +505,6 @@ dependencies = [ "heed", "hex", "hex-literal", - "monero-pruning", "monero-serai", "page_size", "paste", @@ -542,7 +552,7 @@ name = "cuprate-consensus-rules" version = "0.1.0" dependencies = [ "crypto-bigint", - "cryptonight-cuprate", + "cuprate-cryptonight", "cuprate-helper", "curve25519-dalek", "dalek-ff-group", @@ -559,6 +569,46 @@ dependencies = [ "tracing", ] +[[package]] +name = "cuprate-cryptonight" +version = "0.1.0" +dependencies = [ + "cc", + "hex", + "thiserror", +] + +[[package]] +name = "cuprate-dandelion-tower" +version = "0.1.0" +dependencies = [ + "futures", + "proptest", + "rand", + "rand_distr", + "thiserror", + "tokio", + "tokio-util", + "tower", + "tracing", +] + +[[package]] +name = "cuprate-database" +version = "0.0.0" + +[[package]] +name = "cuprate-epee-encoding" +version = "0.5.0" +dependencies = [ + "bytes", + "cuprate-fixed-bytes", + "hex", + "paste", + "ref-cast", + "thiserror", +] + [[package]] name = "cuprate-fast-sync" version = "0.1.0" @@ -579,6 +629,14 @@ dependencies = [ "tower", ] +[[package]] +name = "cuprate-fixed-bytes" +version = "0.1.0" +dependencies = [ + "bytes", + "thiserror", +] + [[package]] name = "cuprate-helper" version = "0.1.0" @@ -594,24 +652,49 @@ dependencies = [ "windows", ] +[[package]] +name = "cuprate-json-rpc" +version = "0.0.0" +dependencies = [ + "pretty_assertions", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "cuprate-levin" +version = "0.1.0" +dependencies = [ + "bitflags 2.5.0", + "bytes", + "futures", + "proptest", + "rand", + "thiserror", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "cuprate-p2p" version = "0.1.0" dependencies = [ - "async-buffer", "bytes", + "cuprate-address-book", + "cuprate-async-buffer", + "cuprate-fixed-bytes", "cuprate-helper", + "cuprate-p2p-core", + "cuprate-pruning", "cuprate-test-utils", + "cuprate-wire", "dashmap", - "fixed-bytes", "futures", "hex", "indexmap 2.2.6", - "monero-address-book", - "monero-p2p", - "monero-pruning", "monero-serai", - "monero-wire", "pin-project", "proptest", "rand", @@ -626,10 +709,43 @@ dependencies = [ "tracing", ] +[[package]] +name = "cuprate-p2p-core" +version = "0.1.0" +dependencies = [ + "async-trait", + "borsh", + "cuprate-helper", + "cuprate-pruning", + "cuprate-test-utils", + "cuprate-wire", + "futures", + "hex", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util", + "tower", + "tracing", + "tracing-subscriber", +] + +[[package]] +name 
= "cuprate-pruning" +version = "0.1.0" +dependencies = [ + "borsh", + "thiserror", +] + [[package]] name = "cuprate-rpc-interface" version = "0.0.0" +[[package]] +name = "cuprate-rpc-types" +version = "0.0.0" + [[package]] name = "cuprate-test-utils" version = "0.1.0" @@ -638,13 +754,13 @@ dependencies = [ "borsh", "bytes", "cuprate-helper", + "cuprate-p2p-core", "cuprate-types", + "cuprate-wire", "futures", "hex", "hex-literal", - "monero-p2p", "monero-serai", - "monero-wire", "pretty_assertions", "serde", "serde_json", @@ -665,6 +781,19 @@ dependencies = [ "monero-serai", ] +[[package]] +name = "cuprate-wire" +version = "0.1.0" +dependencies = [ + "bitflags 2.5.0", + "bytes", + "cuprate-epee-encoding", + "cuprate-fixed-bytes", + "cuprate-levin", + "hex", + "thiserror", +] + [[package]] name = "curve25519-dalek" version = "4.1.3" @@ -710,21 +839,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "dandelion-tower" -version = "0.1.0" -dependencies = [ - "futures", - "proptest", - "rand", - "rand_distr", - "thiserror", - "tokio", - "tokio-util", - "tower", - "tracing", -] - [[package]] name = "dashmap" version = "5.5.3" @@ -738,10 +852,6 @@ dependencies = [ "parking_lot_core", ] -[[package]] -name = "database" -version = "0.0.0" - [[package]] name = "diff" version = "0.1.13" @@ -808,18 +918,6 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" -[[package]] -name = "epee-encoding" -version = "0.5.0" -dependencies = [ - "bytes", - "fixed-bytes", - "hex", - "paste", - "ref-cast", - "thiserror", -] - [[package]] name = "equivalent" version = "1.0.1" @@ -880,14 +978,6 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" -[[package]] -name = "fixed-bytes" -version = "0.1.0" -dependencies = [ - "bytes", - "thiserror", -] - [[package]] name = "flexible-transcript" version = "0.3.2" @@ -1299,16 +1389,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "json-rpc" -version = "0.0.0" -dependencies = [ - "pretty_assertions", - "serde", - "serde_json", - "thiserror", -] - [[package]] name = "keccak" version = "0.1.5" @@ -1324,21 +1404,6 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -[[package]] -name = "levin-cuprate" -version = "0.1.0" -dependencies = [ - "bitflags 2.5.0", - "bytes", - "futures", - "proptest", - "rand", - "thiserror", - "tokio", - "tokio-util", - "tracing", -] - [[package]] name = "libc" version = "0.2.155" @@ -1442,25 +1507,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "monero-address-book" -version = "0.1.0" -dependencies = [ - "borsh", - "cuprate-test-utils", - "futures", - "indexmap 2.2.6", - "monero-p2p", - "monero-pruning", - "monero-wire", - "rand", - "thiserror", - "tokio", - "tokio-util", - "tower", - "tracing", -] - [[package]] name = "monero-generators" version = "0.4.0" @@ -1474,39 +1520,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "monero-p2p" -version = "0.1.0" -dependencies = [ - "async-trait", - "borsh", - "cuprate-helper", - "cuprate-test-utils", - "futures", - "hex", - "monero-pruning", - "monero-wire", - "thiserror", - "tokio", - "tokio-stream", - "tokio-util", - "tower", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "monero-pruning" -version = "0.1.0" -dependencies = [ - 
"borsh", - "thiserror", -] - -[[package]] -name = "monero-rpc-types" -version = "0.0.0" - [[package]] name = "monero-serai" version = "0.1.4-alpha" @@ -1540,19 +1553,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "monero-wire" -version = "0.1.0" -dependencies = [ - "bitflags 2.5.0", - "bytes", - "epee-encoding", - "fixed-bytes", - "hex", - "levin-cuprate", - "thiserror", -] - [[package]] name = "multiexp" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index 7be28732..8891b83b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,21 +10,21 @@ members = [ "net/epee-encoding", "net/fixed-bytes", "net/levin", - "net/monero-wire", - "p2p/cuprate-p2p", - "p2p/dandelion", - "p2p/monero-p2p", + "net/wire", + "p2p/p2p", + "p2p/p2p-core", + "p2p/dandelion-tower", "p2p/async-buffer", "p2p/address-book", - "storage/cuprate-blockchain", - "storage/cuprate-txpool", + "storage/blockchain", + "storage/txpool", "storage/database", "pruning", "test-utils", "types", "rpc/json-rpc", - "rpc/monero-rpc-types", - "rpc/cuprate-rpc-interface", + "rpc/rpc-types", + "rpc/rpc-interface", ] [profile.release] diff --git a/consensus/fast-sync/Cargo.toml b/consensus/fast-sync/Cargo.toml index 3a0754a3..32fce11d 100644 --- a/consensus/fast-sync/Cargo.toml +++ b/consensus/fast-sync/Cargo.toml @@ -10,7 +10,7 @@ path = "src/create.rs" [dependencies] clap = { workspace = true, features = ["derive", "std"] } -cuprate-blockchain = { path = "../../storage/cuprate-blockchain" } +cuprate-blockchain = { path = "../../storage/blockchain" } cuprate-consensus = { path = ".." } cuprate-consensus-rules = { path = "../rules" } cuprate-types = { path = "../../types" } diff --git a/consensus/rules/Cargo.toml b/consensus/rules/Cargo.toml index 0b8c35db..fd86a61e 100644 --- a/consensus/rules/Cargo.toml +++ b/consensus/rules/Cargo.toml @@ -12,7 +12,7 @@ rayon = ["dep:rayon"] [dependencies] cuprate-helper = { path = "../../helper", default-features = false, features = ["std"] } -cryptonight-cuprate = {path = "../../cryptonight"} +cuprate-cryptonight = {path = "../../cryptonight"} monero-serai = { workspace = true, features = ["std"] } multiexp = { workspace = true, features = ["std", "batch"] } diff --git a/consensus/rules/src/blocks.rs b/consensus/rules/src/blocks.rs index 8e1b345e..cb0e3e45 100644 --- a/consensus/rules/src/blocks.rs +++ b/consensus/rules/src/blocks.rs @@ -3,7 +3,7 @@ use std::collections::HashSet; use crypto_bigint::{CheckedMul, U256}; use monero_serai::block::Block; -use cryptonight_cuprate::*; +use cuprate_cryptonight::*; use crate::{ current_unix_timestamp, diff --git a/cryptonight/Cargo.toml b/cryptonight/Cargo.toml index 7cd1cd41..e2701142 100644 --- a/cryptonight/Cargo.toml +++ b/cryptonight/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "cryptonight-cuprate" +name = "cuprate-cryptonight" version = "0.1.0" edition = "2021" description = "A wrapper around Monero's CryptoNight hash function." 
diff --git a/net/epee-encoding/Cargo.toml b/net/epee-encoding/Cargo.toml index 8bae8579..7feac004 100644 --- a/net/epee-encoding/Cargo.toml +++ b/net/epee-encoding/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "epee-encoding" +name = "cuprate-epee-encoding" version = "0.5.0" edition = "2021" license = "MIT" @@ -12,10 +12,10 @@ rust-version = "1.60" [features] default = ["std"] -std = ["dep:thiserror", "bytes/std", "fixed-bytes/std"] +std = ["dep:thiserror", "bytes/std", "cuprate-fixed-bytes/std"] [dependencies] -fixed-bytes = { path = "../fixed-bytes", default-features = false } +cuprate-fixed-bytes = { path = "../fixed-bytes", default-features = false } paste = "1.0.14" ref-cast = "1.0.22" diff --git a/net/epee-encoding/src/lib.rs b/net/epee-encoding/src/lib.rs index 92046d32..5b64315e 100644 --- a/net/epee-encoding/src/lib.rs +++ b/net/epee-encoding/src/lib.rs @@ -8,7 +8,7 @@ //! //! example without macro: //! ```rust -//! # use epee_encoding::{EpeeObject, EpeeObjectBuilder, read_epee_value, write_field, to_bytes, from_bytes}; +//! # use cuprate_epee_encoding::{EpeeObject, EpeeObjectBuilder, read_epee_value, write_field, to_bytes, from_bytes}; //! # use bytes::{Buf, BufMut}; //! //! pub struct Test { @@ -21,7 +21,7 @@ //! } //! //! impl EpeeObjectBuilder<Test> for __TestEpeeBuilder { -//! fn add_field<B: Buf>(&mut self, name: &str, r: &mut B) -> epee_encoding::error::Result<bool> { +//! fn add_field<B: Buf>(&mut self, name: &str, r: &mut B) -> cuprate_epee_encoding::error::Result<bool> { //! match name { //! "val" => {self.val = Some(read_epee_value(r)?);} //! _ => return Ok(false), @@ -29,10 +29,10 @@ //! Ok(true) //! } //! -//! fn finish(self) -> epee_encoding::error::Result<Test> { +//! fn finish(self) -> cuprate_epee_encoding::error::Result<Test> { //! Ok( //! Test { -//! val: self.val.ok_or_else(|| epee_encoding::error::Error::Format("Required field was not found!"))? +//! val: self.val.ok_or_else(|| cuprate_epee_encoding::error::Error::Format("Required field was not found!"))? //! } //! ) //! } @@ -45,7 +45,7 @@ //! 1 //! } //! -//! fn write_fields<B: BufMut>(self, w: &mut B) -> epee_encoding::error::Result<()> { +//! fn write_fields<B: BufMut>(self, w: &mut B) -> cuprate_epee_encoding::error::Result<()> { //! // write the fields //! write_field(self.val, "val", w) //! 
} @@ -233,7 +233,7 @@ fn write_epee_value<T: EpeeValue, B: BufMut>(val: T, w: &mut B) -> Result<()> { /// let t: [u8; 8] = [3, 0, 0, 0, 1, 0, 0, 0]; /// let mut w = vec![]; /// -/// epee_encoding::write_bytes(t, &mut w).unwrap(); +/// cuprate_epee_encoding::write_bytes(t, &mut w).unwrap(); /// /// assert_eq!(w.len(), 9); // length of bytes + bytes /// assert_eq!(w[1..], t); @@ -274,7 +274,7 @@ pub fn write_bytes<T: AsRef<[u8]>, B: BufMut>(t: T, w: &mut B) -> Result<()> { /// let mut w = vec![]; /// /// let iter: std::vec::IntoIter<u64> = vec.into_iter(); -/// epee_encoding::write_iterator(iter, &mut w).unwrap(); +/// cuprate_epee_encoding::write_iterator(iter, &mut w).unwrap(); /// /// assert_eq!(w.len(), 17); /// assert_eq!(w[1..9], [3, 0, 0, 0, 0, 0, 0, 0]); diff --git a/net/epee-encoding/src/macros.rs b/net/epee-encoding/src/macros.rs index 1ec0446e..38dcc45d 100644 --- a/net/epee-encoding/src/macros.rs +++ b/net/epee-encoding/src/macros.rs @@ -10,7 +10,7 @@ pub use paste::paste; /// // see: <https://github.com/rust-lang/rust/issues/64079> /// mod visibility { /// -/// use epee_encoding::epee_object; +/// use cuprate_epee_encoding::epee_object; /// /// struct Example { /// a: u8 @@ -30,7 +30,7 @@ pub use paste::paste; /// // see: <https://github.com/rust-lang/rust/issues/64079> /// mod visibility { /// -/// use epee_encoding::epee_object; +/// use cuprate_epee_encoding::epee_object; /// /// struct Example { /// a: u8, @@ -60,7 +60,7 @@ pub use paste::paste; /// c: u8 as u8, /// // `=> read_fn, write_fn, should_write_fn,` allows you to specify alt field encoding functions. /// // for the required args see the default functions, which are used here: -/// d: u8 => epee_encoding::read_epee_value, epee_encoding::write_field, <u8 as epee_encoding::EpeeValue>::should_write, +/// d: u8 => cuprate_epee_encoding::read_epee_value, cuprate_epee_encoding::write_field, <u8 as cuprate_epee_encoding::EpeeValue>::should_write, /// // `!flatten` can be used on fields which are epee objects, and it flattens the fields of that object into this object. /// // So for this example `e_f` will not appear in the data but e will. /// // You can't use the other options with this. @@ -126,25 +126,25 @@ macro_rules! epee_object { $(!flatten: $flat_field: ident: $flat_ty:ty ,)* ) => { - epee_encoding::macros::paste!( + cuprate_epee_encoding::macros::paste!( #[allow(non_snake_case)] mod [<__epee_builder_ $obj>] { use super::*; #[derive(Default)] pub struct [<__Builder $obj>] { - $($field: Option<epee_encoding::epee_object!(@internal_field_type $ty, $($ty_as)?)>,)* - $($flat_field: <$flat_ty as epee_encoding::EpeeObject>::Builder,)* + $($field: Option<cuprate_epee_encoding::epee_object!(@internal_field_type $ty, $($ty_as)?)>,)* + $($flat_field: <$flat_ty as cuprate_epee_encoding::EpeeObject>::Builder,)* } - impl epee_encoding::EpeeObjectBuilder<$obj> for [<__Builder $obj>] { - fn add_field<B: epee_encoding::macros::bytes::Buf>(&mut self, name: &str, b: &mut B) -> epee_encoding::error::Result<bool> { + impl cuprate_epee_encoding::EpeeObjectBuilder<$obj> for [<__Builder $obj>] { + fn add_field<B: cuprate_epee_encoding::macros::bytes::Buf>(&mut self, name: &str, b: &mut B) -> cuprate_epee_encoding::error::Result<bool> { match name { - $(epee_encoding::epee_object!(@internal_field_name $field, $($alt_name)?) => { + $(cuprate_epee_encoding::epee_object!(@internal_field_name $field, $($alt_name)?) 
=> { if core::mem::replace(&mut self.$field, Some( - epee_encoding::epee_object!(@internal_try_right_then_left epee_encoding::read_epee_value(b)?, $($read_fn(b)?)?) + cuprate_epee_encoding::epee_object!(@internal_try_right_then_left cuprate_epee_encoding::read_epee_value(b)?, $($read_fn(b)?)?) )).is_some() { - Err(epee_encoding::error::Error::Value(format!("Duplicate field in data: {}", epee_encoding::epee_object!(@internal_field_name$field, $($alt_name)?))))?; + Err(cuprate_epee_encoding::error::Error::Value(format!("Duplicate field in data: {}", cuprate_epee_encoding::epee_object!(@internal_field_name$field, $($alt_name)?))))?; } Ok(true) },)* @@ -159,12 +159,12 @@ macro_rules! epee_object { } } - fn finish(self) -> epee_encoding::error::Result<$obj> { + fn finish(self) -> cuprate_epee_encoding::error::Result<$obj> { Ok( $obj { $( $field: { - let epee_default_value = epee_encoding::epee_object!(@internal_try_right_then_left epee_encoding::EpeeValue::epee_default_value(), $({ + let epee_default_value = cuprate_epee_encoding::epee_object!(@internal_try_right_then_left cuprate_epee_encoding::EpeeValue::epee_default_value(), $({ let _ = $should_write_fn; None })?); @@ -173,7 +173,7 @@ macro_rules! epee_object { $(.or(Some($default)))? .or(epee_default_value) $(.map(<$ty_as>::into))? - .ok_or(epee_encoding::error::Error::Value(format!("Missing field in data: {}", epee_encoding::epee_object!(@internal_field_name$field, $($alt_name)?))))? + .ok_or(cuprate_epee_encoding::error::Error::Value(format!("Missing field in data: {}", cuprate_epee_encoding::epee_object!(@internal_field_name$field, $($alt_name)?))))? }, )* @@ -187,16 +187,16 @@ macro_rules! epee_object { } } - impl epee_encoding::EpeeObject for $obj { + impl cuprate_epee_encoding::EpeeObject for $obj { type Builder = [<__epee_builder_ $obj>]::[<__Builder $obj>]; fn number_of_fields(&self) -> u64 { let mut fields = 0; $( - let field = epee_encoding::epee_object!(@internal_try_right_then_left &self.$field, $(<&$ty_as>::from(&self.$field))? ); + let field = cuprate_epee_encoding::epee_object!(@internal_try_right_then_left &self.$field, $(<&$ty_as>::from(&self.$field))? ); - if $((field) != &$default &&)? epee_encoding::epee_object!(@internal_try_right_then_left epee_encoding::EpeeValue::should_write, $($should_write_fn)?)(field ) + if $((field) != &$default &&)? cuprate_epee_encoding::epee_object!(@internal_try_right_then_left cuprate_epee_encoding::EpeeValue::should_write, $($should_write_fn)?)(field ) { fields += 1; } @@ -209,13 +209,13 @@ macro_rules! epee_object { fields } - fn write_fields<B: epee_encoding::macros::bytes::BufMut>(self, w: &mut B) -> epee_encoding::error::Result<()> { + fn write_fields<B: cuprate_epee_encoding::macros::bytes::BufMut>(self, w: &mut B) -> cuprate_epee_encoding::error::Result<()> { $( - let field = epee_encoding::epee_object!(@internal_try_right_then_left self.$field, $(<$ty_as>::from(self.$field))? ); + let field = cuprate_epee_encoding::epee_object!(@internal_try_right_then_left self.$field, $(<$ty_as>::from(self.$field))? ); - if $(field != $default &&)? epee_encoding::epee_object!(@internal_try_right_then_left epee_encoding::EpeeValue::should_write, $($should_write_fn)?)(&field ) + if $(field != $default &&)? 
cuprate_epee_encoding::epee_object!(@internal_try_right_then_left cuprate_epee_encoding::EpeeValue::should_write, $($should_write_fn)?)(&field ) { - epee_encoding::epee_object!(@internal_try_right_then_left epee_encoding::write_field, $($write_fn)?)((field), epee_encoding::epee_object!(@internal_field_name$field, $($alt_name)?), w)?; + cuprate_epee_encoding::epee_object!(@internal_try_right_then_left cuprate_epee_encoding::write_field, $($write_fn)?)((field), cuprate_epee_encoding::epee_object!(@internal_field_name$field, $($alt_name)?), w)?; } )* diff --git a/net/epee-encoding/src/value.rs b/net/epee-encoding/src/value.rs index 0dcd45a8..094f0ef1 100644 --- a/net/epee-encoding/src/value.rs +++ b/net/epee-encoding/src/value.rs @@ -6,7 +6,7 @@ use core::fmt::Debug; use bytes::{Buf, BufMut, Bytes, BytesMut}; -use fixed_bytes::{ByteArray, ByteArrayVec}; +use cuprate_fixed_bytes::{ByteArray, ByteArrayVec}; use crate::{ io::{checked_read_primitive, checked_write_primitive}, diff --git a/net/epee-encoding/src/varint.rs b/net/epee-encoding/src/varint.rs index e574ba85..ae9c5697 100644 --- a/net/epee-encoding/src/varint.rs +++ b/net/epee-encoding/src/varint.rs @@ -10,7 +10,7 @@ const FITS_IN_FOUR_BYTES: u64 = 2_u64.pow(32 - SIZE_OF_SIZE_MARKER) - 1; /// Read an epee variable sized number from `r`. /// /// ```rust -/// use epee_encoding::read_varint; +/// use cuprate_epee_encoding::read_varint; /// /// assert_eq!(read_varint(&mut [252].as_slice()).unwrap(), 63); /// assert_eq!(read_varint(&mut [1, 1].as_slice()).unwrap(), 64); @@ -41,7 +41,7 @@ pub fn read_varint<B: Buf>(r: &mut B) -> Result<u64> { /// Write an epee variable sized number into `w`. /// /// ```rust -/// use epee_encoding::write_varint; +/// use cuprate_epee_encoding::write_varint; /// /// let mut buf = vec![]; /// diff --git a/net/epee-encoding/tests/alt_name.rs b/net/epee-encoding/tests/alt_name.rs index 6122c242..8a9bc6fa 100644 --- a/net/epee-encoding/tests/alt_name.rs +++ b/net/epee-encoding/tests/alt_name.rs @@ -1,4 +1,4 @@ -use epee_encoding::{epee_object, from_bytes, to_bytes}; +use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; struct AltName { val: u8, diff --git a/net/epee-encoding/tests/duplicate_key.rs b/net/epee-encoding/tests/duplicate_key.rs index 5fe32ac8..c1b3148f 100644 --- a/net/epee-encoding/tests/duplicate_key.rs +++ b/net/epee-encoding/tests/duplicate_key.rs @@ -1,4 +1,4 @@ -use epee_encoding::{epee_object, from_bytes}; +use cuprate_epee_encoding::{epee_object, from_bytes}; struct T { a: u8, diff --git a/net/epee-encoding/tests/epee_default.rs b/net/epee-encoding/tests/epee_default.rs index 016b68f4..c221b28e 100644 --- a/net/epee-encoding/tests/epee_default.rs +++ b/net/epee-encoding/tests/epee_default.rs @@ -1,4 +1,4 @@ -use epee_encoding::{epee_object, from_bytes, to_bytes}; +use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; pub struct Optional { val: u8, diff --git a/net/epee-encoding/tests/flattened.rs b/net/epee-encoding/tests/flattened.rs index ef92e5e8..a737370f 100644 --- a/net/epee-encoding/tests/flattened.rs +++ b/net/epee-encoding/tests/flattened.rs @@ -1,4 +1,4 @@ -use epee_encoding::{epee_object, from_bytes, to_bytes}; +use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; struct Child { val: u64, diff --git a/net/epee-encoding/tests/options.rs b/net/epee-encoding/tests/options.rs index f8cb4969..5bae9a96 100644 --- a/net/epee-encoding/tests/options.rs +++ b/net/epee-encoding/tests/options.rs @@ -1,4 +1,4 @@ -use epee_encoding::{epee_object, from_bytes, 
to_bytes}; +use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; use std::ops::Deref; #[derive(Clone)] diff --git a/net/epee-encoding/tests/p2p.rs b/net/epee-encoding/tests/p2p.rs index 386cc1a1..2f74ef6f 100644 --- a/net/epee-encoding/tests/p2p.rs +++ b/net/epee-encoding/tests/p2p.rs @@ -1,4 +1,4 @@ -use epee_encoding::{epee_object, from_bytes, to_bytes}; +use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; #[derive(Eq, PartialEq, Debug, Clone)] pub struct SupportFlags(u32); diff --git a/net/epee-encoding/tests/rpc.rs b/net/epee-encoding/tests/rpc.rs index 4d0848f0..973498e2 100644 --- a/net/epee-encoding/tests/rpc.rs +++ b/net/epee-encoding/tests/rpc.rs @@ -1,4 +1,4 @@ -use epee_encoding::{epee_object, from_bytes, to_bytes}; +use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; #[derive(Clone, Debug, PartialEq)] struct BaseResponse { diff --git a/net/epee-encoding/tests/seq.rs b/net/epee-encoding/tests/seq.rs index 9d3189a1..a4685d0f 100644 --- a/net/epee-encoding/tests/seq.rs +++ b/net/epee-encoding/tests/seq.rs @@ -1,4 +1,4 @@ -use epee_encoding::{epee_object, from_bytes}; +use cuprate_epee_encoding::{epee_object, from_bytes}; struct ObjSeq { seq: Vec<ObjSeq>, diff --git a/net/epee-encoding/tests/stack_overflow.rs b/net/epee-encoding/tests/stack_overflow.rs index 395b7570..c53420a6 100644 --- a/net/epee-encoding/tests/stack_overflow.rs +++ b/net/epee-encoding/tests/stack_overflow.rs @@ -1,4 +1,4 @@ -use epee_encoding::{epee_object, from_bytes}; +use cuprate_epee_encoding::{epee_object, from_bytes}; struct D { val: u8, diff --git a/net/fixed-bytes/Cargo.toml b/net/fixed-bytes/Cargo.toml index c144c78f..b592a09e 100644 --- a/net/fixed-bytes/Cargo.toml +++ b/net/fixed-bytes/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "fixed-bytes" +name = "cuprate-fixed-bytes" version = "0.1.0" edition = "2021" license = "MIT" diff --git a/net/levin/Cargo.toml b/net/levin/Cargo.toml index d7ca94b5..13deabea 100644 --- a/net/levin/Cargo.toml +++ b/net/levin/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "levin-cuprate" +name = "cuprate-levin" version = "0.1.0" edition = "2021" description = "A crate for working with the Levin protocol in Rust." 
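The renamed epee test files above (`alt_name.rs`, `duplicate_key.rs`, `epee_default.rs`, and the rest) and the macro documentation all exercise the same pattern: declare a struct, describe its fields with `epee_object!`, then round-trip it with `to_bytes` and `from_bytes` under the new `cuprate_epee_encoding` name. A minimal sketch of that round trip, assuming `cuprate-epee-encoding` is declared as a dependency as in the test crates above; the `Example` struct itself is hypothetical:

```rust
use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes};

// Hypothetical struct, mirroring the shape used in the renamed tests.
struct Example {
    val: u8,
}

// Generate the epee (de)serialization code for the struct's fields.
epee_object!(
    Example,
    val: u8,
);

fn main() {
    // Encode, then decode the buffer back, all through the renamed crate path.
    let mut encoded = to_bytes(Example { val: 3 }).unwrap();
    let decoded: Example = from_bytes(&mut encoded).unwrap();
    assert_eq!(decoded.val, 3);
}
```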
diff --git a/net/levin/tests/fragmented_message.rs b/net/levin/tests/fragmented_message.rs index 45ff3a0f..7799a719 100644 --- a/net/levin/tests/fragmented_message.rs +++ b/net/levin/tests/fragmented_message.rs @@ -8,7 +8,7 @@ use tokio::{ }; use tokio_util::codec::{FramedRead, FramedWrite}; -use levin_cuprate::{ +use cuprate_levin::{ message::make_fragmented_messages, BucketBuilder, BucketError, LevinBody, LevinCommand, LevinMessageCodec, MessageType, Protocol, }; diff --git a/net/monero-wire/Cargo.toml b/net/wire/Cargo.toml similarity index 67% rename from net/monero-wire/Cargo.toml rename to net/wire/Cargo.toml index 882b3644..c71a77b8 100644 --- a/net/monero-wire/Cargo.toml +++ b/net/wire/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "monero-wire" +name = "cuprate-wire" version = "0.1.0" edition = "2021" license = "MIT" @@ -8,12 +8,12 @@ repository = "https://github.com/SyntheticBird45/cuprate/tree/main/net/monero-wi [features] default = [] -tracing = ["levin-cuprate/tracing"] +tracing = ["cuprate-levin/tracing"] [dependencies] -levin-cuprate = {path="../levin"} -epee-encoding = { path = "../epee-encoding" } -fixed-bytes = { path = "../fixed-bytes" } +cuprate-levin = { path = "../levin" } +cuprate-epee-encoding = { path = "../epee-encoding" } +cuprate-fixed-bytes = { path = "../fixed-bytes" } bitflags = { workspace = true, features = ["std"] } bytes = { workspace = true, features = ["std"] } diff --git a/net/monero-wire/src/lib.rs b/net/wire/src/lib.rs similarity index 88% rename from net/monero-wire/src/lib.rs rename to net/wire/src/lib.rs index 27e6481d..45a2405c 100644 --- a/net/monero-wire/src/lib.rs +++ b/net/wire/src/lib.rs @@ -25,11 +25,11 @@ pub mod network_address; pub mod p2p; -pub use levin_cuprate::BucketError; +pub use cuprate_levin::BucketError; pub use network_address::{NetZone, NetworkAddress}; pub use p2p::*; // re-export. -pub use levin_cuprate as levin; +pub use cuprate_levin as levin; -pub type MoneroWireCodec = levin_cuprate::codec::LevinMessageCodec<Message>; +pub type MoneroWireCodec = cuprate_levin::codec::LevinMessageCodec<Message>; diff --git a/net/monero-wire/src/network_address.rs b/net/wire/src/network_address.rs similarity index 95% rename from net/monero-wire/src/network_address.rs rename to net/wire/src/network_address.rs index 900ae61d..632739af 100644 --- a/net/monero-wire/src/network_address.rs +++ b/net/wire/src/network_address.rs @@ -18,7 +18,7 @@ //! I2p. Currently this module only has IPv(4/6). //! 
use bytes::BufMut; -use epee_encoding::EpeeObject; +use cuprate_epee_encoding::EpeeObject; use std::{hash::Hash, net, net::SocketAddr}; mod epee_builder; @@ -45,7 +45,7 @@ impl EpeeObject for NetworkAddress { 2 } - fn write_fields<B: BufMut>(self, w: &mut B) -> epee_encoding::Result<()> { + fn write_fields<B: BufMut>(self, w: &mut B) -> cuprate_epee_encoding::Result<()> { TaggedNetworkAddress::from(self).write_fields(w) } } diff --git a/net/monero-wire/src/network_address/epee_builder.rs b/net/wire/src/network_address/epee_builder.rs similarity index 75% rename from net/monero-wire/src/network_address/epee_builder.rs rename to net/wire/src/network_address/epee_builder.rs index 0678b22e..36db8241 100644 --- a/net/monero-wire/src/network_address/epee_builder.rs +++ b/net/wire/src/network_address/epee_builder.rs @@ -1,7 +1,7 @@ use bytes::Buf; use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; -use epee_encoding::{epee_object, EpeeObjectBuilder}; +use cuprate_epee_encoding::{epee_object, EpeeObjectBuilder}; use thiserror::Error; use crate::NetworkAddress; @@ -19,19 +19,28 @@ epee_object!( ); impl EpeeObjectBuilder<NetworkAddress> for TaggedNetworkAddress { - fn add_field<B: Buf>(&mut self, name: &str, b: &mut B) -> epee_encoding::Result<bool> { + fn add_field<B: Buf>(&mut self, name: &str, b: &mut B) -> cuprate_epee_encoding::Result<bool> { match name { "type" => { - if std::mem::replace(&mut self.ty, Some(epee_encoding::read_epee_value(b)?)) - .is_some() + if std::mem::replace( + &mut self.ty, + Some(cuprate_epee_encoding::read_epee_value(b)?), + ) + .is_some() { - return Err(epee_encoding::Error::Format("Duplicate field in data.")); + return Err(cuprate_epee_encoding::Error::Format( + "Duplicate field in data.", + )); } Ok(true) } "addr" => { - if std::mem::replace(&mut self.addr, epee_encoding::read_epee_value(b)?).is_some() { - return Err(epee_encoding::Error::Format("Duplicate field in data.")); + if std::mem::replace(&mut self.addr, cuprate_epee_encoding::read_epee_value(b)?) 
+ .is_some() + { + return Err(cuprate_epee_encoding::Error::Format( + "Duplicate field in data.", + )); } Ok(true) } @@ -39,9 +48,9 @@ impl EpeeObjectBuilder<NetworkAddress> for TaggedNetworkAddress { } } - fn finish(self) -> epee_encoding::Result<NetworkAddress> { + fn finish(self) -> cuprate_epee_encoding::Result<NetworkAddress> { self.try_into() - .map_err(|_| epee_encoding::Error::Value("Invalid network address".to_string())) + .map_err(|_| cuprate_epee_encoding::Error::Value("Invalid network address".to_string())) } } diff --git a/net/monero-wire/src/p2p.rs b/net/wire/src/p2p.rs similarity index 96% rename from net/monero-wire/src/p2p.rs rename to net/wire/src/p2p.rs index 7a2b7de6..0d448e4f 100644 --- a/net/monero-wire/src/p2p.rs +++ b/net/wire/src/p2p.rs @@ -20,8 +20,8 @@ use std::fmt::Formatter; use bytes::{Buf, BytesMut}; -use epee_encoding::epee_object; -use levin_cuprate::{ +use cuprate_epee_encoding::epee_object; +use cuprate_levin::{ BucketBuilder, BucketError, LevinBody, LevinCommand as LevinCommandTrait, MessageType, }; @@ -154,22 +154,23 @@ impl From<LevinCommand> for u32 { } } -fn decode_message<B: Buf, T: epee_encoding::EpeeObject, Ret>( +fn decode_message<B: Buf, T: cuprate_epee_encoding::EpeeObject, Ret>( ret: impl FnOnce(T) -> Ret, buf: &mut B, ) -> Result<Ret, BucketError> { - let t = epee_encoding::from_bytes(buf).map_err(|e| BucketError::BodyDecodingError(e.into()))?; + let t = cuprate_epee_encoding::from_bytes(buf) + .map_err(|e| BucketError::BodyDecodingError(e.into()))?; Ok(ret(t)) } -fn build_message<T: epee_encoding::EpeeObject>( +fn build_message<T: cuprate_epee_encoding::EpeeObject>( id: LevinCommand, val: T, builder: &mut BucketBuilder<LevinCommand>, ) -> Result<(), BucketError> { builder.set_command(id); builder.set_body( - epee_encoding::to_bytes(val) + cuprate_epee_encoding::to_bytes(val) .map(BytesMut::freeze) .map_err(|e| BucketError::BodyDecodingError(e.into()))?, ); @@ -280,13 +281,13 @@ impl RequestMessage { C::Handshake => decode_message(RequestMessage::Handshake, buf)?, C::TimedSync => decode_message(RequestMessage::TimedSync, buf)?, C::Ping => { - epee_encoding::from_bytes::<EmptyMessage, _>(buf) + cuprate_epee_encoding::from_bytes::<EmptyMessage, _>(buf) .map_err(|e| BucketError::BodyDecodingError(e.into()))?; RequestMessage::Ping } C::SupportFlags => { - epee_encoding::from_bytes::<EmptyMessage, _>(buf) + cuprate_epee_encoding::from_bytes::<EmptyMessage, _>(buf) .map_err(|e| BucketError::BodyDecodingError(e.into()))?; RequestMessage::SupportFlags diff --git a/net/monero-wire/src/p2p/admin.rs b/net/wire/src/p2p/admin.rs similarity index 99% rename from net/monero-wire/src/p2p/admin.rs rename to net/wire/src/p2p/admin.rs index 95ffef2d..173c2938 100644 --- a/net/monero-wire/src/p2p/admin.rs +++ b/net/wire/src/p2p/admin.rs @@ -19,7 +19,7 @@ //! protocol messages. 
use bytes::Bytes; -use epee_encoding::epee_object; +use cuprate_epee_encoding::epee_object; use super::common::{BasicNodeData, CoreSyncData, PeerListEntryBase, PeerSupportFlags}; @@ -134,7 +134,8 @@ mod tests { 186, 15, 178, 70, 173, 170, 187, 31, 70, 50, 227, 11, 116, 111, 112, 95, 118, 101, 114, 115, 105, 111, 110, 8, 1, ]; - let handshake: HandshakeRequest = epee_encoding::from_bytes(&mut &bytes[..]).unwrap(); + let handshake: HandshakeRequest = + cuprate_epee_encoding::from_bytes(&mut &bytes[..]).unwrap(); let basic_node_data = BasicNodeData { my_port: 0, network_id: [ @@ -161,8 +162,9 @@ mod tests { assert_eq!(basic_node_data, handshake.node_data); assert_eq!(core_sync_data, handshake.payload_data); - let mut encoded_bytes = epee_encoding::to_bytes(handshake.clone()).unwrap(); - let handshake_2: HandshakeRequest = epee_encoding::from_bytes(&mut encoded_bytes).unwrap(); + let mut encoded_bytes = cuprate_epee_encoding::to_bytes(handshake.clone()).unwrap(); + let handshake_2: HandshakeRequest = + cuprate_epee_encoding::from_bytes(&mut encoded_bytes).unwrap(); assert_eq!(handshake, handshake_2); } @@ -938,7 +940,8 @@ mod tests { 181, 216, 193, 135, 23, 186, 168, 207, 119, 86, 235, 11, 116, 111, 112, 95, 118, 101, 114, 115, 105, 111, 110, 8, 16, ]; - let handshake: HandshakeResponse = epee_encoding::from_bytes(&mut &bytes[..]).unwrap(); + let handshake: HandshakeResponse = + cuprate_epee_encoding::from_bytes(&mut &bytes[..]).unwrap(); let basic_node_data = BasicNodeData { my_port: 18080, @@ -967,9 +970,10 @@ mod tests { assert_eq!(core_sync_data, handshake.payload_data); assert_eq!(250, handshake.local_peerlist_new.len()); - let mut encoded_bytes = epee_encoding::to_bytes(handshake.clone()).unwrap(); + let mut encoded_bytes = cuprate_epee_encoding::to_bytes(handshake.clone()).unwrap(); - let handshake_2: HandshakeResponse = epee_encoding::from_bytes(&mut encoded_bytes).unwrap(); + let handshake_2: HandshakeResponse = + cuprate_epee_encoding::from_bytes(&mut encoded_bytes).unwrap(); assert_eq!(handshake, handshake_2); } diff --git a/net/monero-wire/src/p2p/common.rs b/net/wire/src/p2p/common.rs similarity index 92% rename from net/monero-wire/src/p2p/common.rs rename to net/wire/src/p2p/common.rs index 74babefe..91adb908 100644 --- a/net/monero-wire/src/p2p/common.rs +++ b/net/wire/src/p2p/common.rs @@ -18,8 +18,8 @@ use bitflags::bitflags; use bytes::{Buf, BufMut, Bytes}; -use epee_encoding::{epee_object, EpeeValue, InnerMarker}; -use fixed_bytes::ByteArray; +use cuprate_epee_encoding::{epee_object, EpeeValue, InnerMarker}; +use cuprate_fixed_bytes::ByteArray; use crate::NetworkAddress; @@ -241,12 +241,12 @@ epee_object!( txs: TransactionBlobs = TransactionBlobs::None => tx_blob_read, tx_blob_write, should_write_tx_blobs, ); -fn tx_blob_read<B: Buf>(b: &mut B) -> epee_encoding::Result<TransactionBlobs> { - let marker = epee_encoding::read_marker(b)?; +fn tx_blob_read<B: Buf>(b: &mut B) -> cuprate_epee_encoding::Result<TransactionBlobs> { + let marker = cuprate_epee_encoding::read_marker(b)?; match marker.inner_marker { InnerMarker::Object => Ok(TransactionBlobs::Pruned(Vec::read(b, &marker)?)), InnerMarker::String => Ok(TransactionBlobs::Normal(Vec::read(b, &marker)?)), - _ => Err(epee_encoding::Error::Value( + _ => Err(cuprate_epee_encoding::Error::Value( "Invalid marker for tx blobs".to_string(), )), } @@ -256,11 +256,15 @@ fn tx_blob_write<B: BufMut>( val: TransactionBlobs, field_name: &str, w: &mut B, -) -> epee_encoding::Result<()> { +) -> cuprate_epee_encoding::Result<()> { if 
should_write_tx_blobs(&val) { match val { - TransactionBlobs::Normal(bytes) => epee_encoding::write_field(bytes, field_name, w)?, - TransactionBlobs::Pruned(obj) => epee_encoding::write_field(obj, field_name, w)?, + TransactionBlobs::Normal(bytes) => { + cuprate_epee_encoding::write_field(bytes, field_name, w)? + } + TransactionBlobs::Pruned(obj) => { + cuprate_epee_encoding::write_field(obj, field_name, w)? + } TransactionBlobs::None => (), } } diff --git a/net/monero-wire/src/p2p/protocol.rs b/net/wire/src/p2p/protocol.rs similarity index 99% rename from net/monero-wire/src/p2p/protocol.rs rename to net/wire/src/p2p/protocol.rs index 4dc9a928..5e95a4f8 100644 --- a/net/monero-wire/src/p2p/protocol.rs +++ b/net/wire/src/p2p/protocol.rs @@ -20,8 +20,8 @@ use bytes::Bytes; -use epee_encoding::{container_as_blob::ContainerAsBlob, epee_object}; -use fixed_bytes::{ByteArray, ByteArrayVec}; +use cuprate_epee_encoding::{container_as_blob::ContainerAsBlob, epee_object}; +use cuprate_fixed_bytes::{ByteArray, ByteArrayVec}; use super::common::BlockCompleteEntry; @@ -705,13 +705,14 @@ mod tests { 248, 248, 91, 110, 107, 144, 12, 175, 253, 21, 121, 28, ]; - let new_transactions: NewTransactions = epee_encoding::from_bytes(&mut &bytes[..]).unwrap(); + let new_transactions: NewTransactions = + cuprate_epee_encoding::from_bytes(&mut &bytes[..]).unwrap(); assert_eq!(4, new_transactions.txs.len()); - let mut encoded_bytes = epee_encoding::to_bytes(new_transactions.clone()).unwrap(); + let mut encoded_bytes = cuprate_epee_encoding::to_bytes(new_transactions.clone()).unwrap(); let new_transactions_2: NewTransactions = - epee_encoding::from_bytes(&mut encoded_bytes).unwrap(); + cuprate_epee_encoding::from_bytes(&mut encoded_bytes).unwrap(); assert_eq!(new_transactions, new_transactions_2); } @@ -1057,10 +1058,12 @@ mod tests { 101, 110, 116, 95, 98, 108, 111, 99, 107, 99, 104, 97, 105, 110, 95, 104, 101, 105, 103, 104, 116, 5, 209, 45, 42, 0, 0, 0, 0, 0, ]; - let fluffy_block: NewFluffyBlock = epee_encoding::from_bytes(&mut &bytes[..]).unwrap(); + let fluffy_block: NewFluffyBlock = + cuprate_epee_encoding::from_bytes(&mut &bytes[..]).unwrap(); - let mut encoded_bytes = epee_encoding::to_bytes(fluffy_block.clone()).unwrap(); - let fluffy_block_2: NewFluffyBlock = epee_encoding::from_bytes(&mut encoded_bytes).unwrap(); + let mut encoded_bytes = cuprate_epee_encoding::to_bytes(fluffy_block.clone()).unwrap(); + let fluffy_block_2: NewFluffyBlock = + cuprate_epee_encoding::from_bytes(&mut encoded_bytes).unwrap(); assert_eq!(fluffy_block, fluffy_block_2); } diff --git a/p2p/address-book/Cargo.toml b/p2p/address-book/Cargo.toml index 9b24c022..9cff78a8 100644 --- a/p2p/address-book/Cargo.toml +++ b/p2p/address-book/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "monero-address-book" +name = "cuprate-address-book" version = "0.1.0" edition = "2021" license = "MIT" @@ -7,9 +7,9 @@ authors = ["Boog900"] [dependencies] -monero-pruning = { path = "../../pruning" } -monero-wire = { path= "../../net/monero-wire" } -monero-p2p = { path = "../monero-p2p" } +cuprate-pruning = { path = "../../pruning" } +cuprate-wire = { path= "../../net/wire" } +cuprate-p2p-core = { path = "../p2p-core" } tower = { workspace = true, features = ["util"] } tokio = { workspace = true, features = ["time", "fs", "rt"]} diff --git a/p2p/address-book/src/book.rs b/p2p/address-book/src/book.rs index 2f0617e9..b6ab07ad 100644 --- a/p2p/address-book/src/book.rs +++ b/p2p/address-book/src/book.rs @@ -19,13 +19,13 @@ use tokio::{ use 
tokio_util::time::DelayQueue; use tower::Service; -use monero_p2p::{ +use cuprate_p2p_core::{ client::InternalPeerID, handles::ConnectionHandle, services::{AddressBookRequest, AddressBookResponse, ZoneSpecificPeerListEntryBase}, NetZoneAddress, NetworkZone, }; -use monero_pruning::PruningSeed; +use cuprate_pruning::PruningSeed; use crate::{peer_list::PeerList, store::save_peers_to_disk, AddressBookConfig, AddressBookError}; diff --git a/p2p/address-book/src/book/tests.rs b/p2p/address-book/src/book/tests.rs index 1cb0fc85..11f31868 100644 --- a/p2p/address-book/src/book/tests.rs +++ b/p2p/address-book/src/book/tests.rs @@ -3,8 +3,8 @@ use std::{path::PathBuf, sync::Arc, time::Duration}; use futures::StreamExt; use tokio::{sync::Semaphore, time::interval}; -use monero_p2p::handles::HandleBuilder; -use monero_pruning::PruningSeed; +use cuprate_p2p_core::handles::HandleBuilder; +use cuprate_pruning::PruningSeed; use super::{AddressBook, ConnectionPeerEntry, InternalPeerID}; use crate::{peer_list::tests::make_fake_peer_list, AddressBookConfig, AddressBookError}; diff --git a/p2p/address-book/src/lib.rs b/p2p/address-book/src/lib.rs index 51f83ddc..1ce659f1 100644 --- a/p2p/address-book/src/lib.rs +++ b/p2p/address-book/src/lib.rs @@ -2,8 +2,8 @@ //! //! This module holds the logic for persistent peer storage. //! Cuprates address book is modeled as a [`tower::Service`] -//! The request is [`AddressBookRequest`](monero_p2p::services::AddressBookRequest) and the response is -//! [`AddressBookResponse`](monero_p2p::services::AddressBookResponse). +//! The request is [`AddressBookRequest`](cuprate_p2p_core::services::AddressBookRequest) and the response is +//! [`AddressBookResponse`](cuprate_p2p_core::services::AddressBookResponse). //! //! Cuprate, like monerod, actually has multiple address books, one //! for each [`NetworkZone`]. This is to reduce the possibility of @@ -13,7 +13,7 @@ //! 
use std::{io::ErrorKind, path::PathBuf, time::Duration}; -use monero_p2p::NetworkZone; +use cuprate_p2p_core::NetworkZone; mod book; mod peer_list; diff --git a/p2p/address-book/src/peer_list.rs b/p2p/address-book/src/peer_list.rs index 2aaf432a..e2a15d8a 100644 --- a/p2p/address-book/src/peer_list.rs +++ b/p2p/address-book/src/peer_list.rs @@ -3,8 +3,8 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use indexmap::IndexMap; use rand::prelude::*; -use monero_p2p::{services::ZoneSpecificPeerListEntryBase, NetZoneAddress, NetworkZone}; -use monero_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; +use cuprate_p2p_core::{services::ZoneSpecificPeerListEntryBase, NetZoneAddress, NetworkZone}; +use cuprate_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; #[cfg(test)] pub mod tests; diff --git a/p2p/address-book/src/peer_list/tests.rs b/p2p/address-book/src/peer_list/tests.rs index 7aba0a20..8d2d2200 100644 --- a/p2p/address-book/src/peer_list/tests.rs +++ b/p2p/address-book/src/peer_list/tests.rs @@ -2,11 +2,9 @@ use std::collections::HashSet; use rand::Rng; -use monero_p2p::services::ZoneSpecificPeerListEntryBase; -use monero_pruning::PruningSeed; - +use cuprate_p2p_core::{services::ZoneSpecificPeerListEntryBase, NetZoneAddress}; +use cuprate_pruning::PruningSeed; use cuprate_test_utils::test_netzone::{TestNetZone, TestNetZoneAddr}; -use monero_p2p::NetZoneAddress; use super::PeerList; diff --git a/p2p/address-book/src/store.rs b/p2p/address-book/src/store.rs index c15e0a77..94b0ec24 100644 --- a/p2p/address-book/src/store.rs +++ b/p2p/address-book/src/store.rs @@ -3,7 +3,7 @@ use std::fs; use borsh::{from_slice, to_vec, BorshDeserialize, BorshSerialize}; use tokio::task::{spawn_blocking, JoinHandle}; -use monero_p2p::{services::ZoneSpecificPeerListEntryBase, NetZoneAddress, NetworkZone}; +use cuprate_p2p_core::{services::ZoneSpecificPeerListEntryBase, NetZoneAddress, NetworkZone}; use crate::{peer_list::PeerList, AddressBookConfig}; diff --git a/p2p/async-buffer/Cargo.toml b/p2p/async-buffer/Cargo.toml index 59f04301..e39acc32 100644 --- a/p2p/async-buffer/Cargo.toml +++ b/p2p/async-buffer/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "async-buffer" +name = "cuprate-async-buffer" version = "0.1.0" edition = "2021" license = "MIT" diff --git a/p2p/async-buffer/tests/basic.rs b/p2p/async-buffer/tests/basic.rs index 93717300..38340a11 100644 --- a/p2p/async-buffer/tests/basic.rs +++ b/p2p/async-buffer/tests/basic.rs @@ -1,6 +1,6 @@ use futures::{FutureExt, StreamExt}; -use async_buffer::new_buffer; +use cuprate_async_buffer::new_buffer; #[tokio::test] async fn async_buffer_send_rec() { diff --git a/p2p/dandelion/Cargo.toml b/p2p/dandelion-tower/Cargo.toml similarity index 96% rename from p2p/dandelion/Cargo.toml rename to p2p/dandelion-tower/Cargo.toml index e5d7e340..5e2fec53 100644 --- a/p2p/dandelion/Cargo.toml +++ b/p2p/dandelion-tower/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "dandelion-tower" +name = "cuprate-dandelion-tower" version = "0.1.0" edition = "2021" license = "MIT" diff --git a/p2p/dandelion/src/config.rs b/p2p/dandelion-tower/src/config.rs similarity index 100% rename from p2p/dandelion/src/config.rs rename to p2p/dandelion-tower/src/config.rs diff --git a/p2p/dandelion/src/lib.rs b/p2p/dandelion-tower/src/lib.rs similarity index 100% rename from p2p/dandelion/src/lib.rs rename to p2p/dandelion-tower/src/lib.rs diff --git a/p2p/dandelion/src/pool.rs b/p2p/dandelion-tower/src/pool.rs similarity index 100% rename from p2p/dandelion/src/pool.rs rename to 
p2p/dandelion-tower/src/pool.rs diff --git a/p2p/dandelion/src/router.rs b/p2p/dandelion-tower/src/router.rs similarity index 100% rename from p2p/dandelion/src/router.rs rename to p2p/dandelion-tower/src/router.rs diff --git a/p2p/dandelion/src/tests/mod.rs b/p2p/dandelion-tower/src/tests/mod.rs similarity index 100% rename from p2p/dandelion/src/tests/mod.rs rename to p2p/dandelion-tower/src/tests/mod.rs diff --git a/p2p/dandelion/src/tests/pool.rs b/p2p/dandelion-tower/src/tests/pool.rs similarity index 100% rename from p2p/dandelion/src/tests/pool.rs rename to p2p/dandelion-tower/src/tests/pool.rs diff --git a/p2p/dandelion/src/tests/router.rs b/p2p/dandelion-tower/src/tests/router.rs similarity index 100% rename from p2p/dandelion/src/tests/router.rs rename to p2p/dandelion-tower/src/tests/router.rs diff --git a/p2p/dandelion/src/traits.rs b/p2p/dandelion-tower/src/traits.rs similarity index 100% rename from p2p/dandelion/src/traits.rs rename to p2p/dandelion-tower/src/traits.rs diff --git a/p2p/monero-p2p/Cargo.toml b/p2p/p2p-core/Cargo.toml similarity index 84% rename from p2p/monero-p2p/Cargo.toml rename to p2p/p2p-core/Cargo.toml index 83cfd949..f434d51a 100644 --- a/p2p/monero-p2p/Cargo.toml +++ b/p2p/p2p-core/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "monero-p2p" +name = "cuprate-p2p-core" version = "0.1.0" edition = "2021" license = "MIT" @@ -7,12 +7,12 @@ authors = ["Boog900"] [features] default = ["borsh"] -borsh = ["dep:borsh", "monero-pruning/borsh"] +borsh = ["dep:borsh", "cuprate-pruning/borsh"] [dependencies] cuprate-helper = { path = "../../helper", features = ["asynch"], default-features = false } -monero-wire = { path = "../../net/monero-wire", features = ["tracing"] } -monero-pruning = { path = "../../pruning" } +cuprate-wire = { path = "../../net/wire", features = ["tracing"] } +cuprate-pruning = { path = "../../pruning" } tokio = { workspace = true, features = ["net", "sync", "macros", "time"]} tokio-util = { workspace = true, features = ["codec"] } diff --git a/p2p/monero-p2p/src/client.rs b/p2p/p2p-core/src/client.rs similarity index 99% rename from p2p/monero-p2p/src/client.rs rename to p2p/p2p-core/src/client.rs index 33446819..0e81d964 100644 --- a/p2p/monero-p2p/src/client.rs +++ b/p2p/p2p-core/src/client.rs @@ -14,7 +14,7 @@ use tower::{Service, ServiceExt}; use tracing::Instrument; use cuprate_helper::asynch::InfallibleOneshotReceiver; -use monero_pruning::PruningSeed; +use cuprate_pruning::PruningSeed; use crate::{ handles::{ConnectionGuard, ConnectionHandle}, diff --git a/p2p/monero-p2p/src/client/connection.rs b/p2p/p2p-core/src/client/connection.rs similarity index 97% rename from p2p/monero-p2p/src/client/connection.rs rename to p2p/p2p-core/src/client/connection.rs index 266dcf7f..341d8c09 100644 --- a/p2p/monero-p2p/src/client/connection.rs +++ b/p2p/p2p-core/src/client/connection.rs @@ -17,7 +17,7 @@ use tokio::{ use tokio_stream::wrappers::ReceiverStream; use tower::ServiceExt; -use monero_wire::{LevinCommand, Message, ProtocolMessage}; +use cuprate_wire::{LevinCommand, Message, ProtocolMessage}; use crate::{ constants::{REQUEST_TIMEOUT, SENDING_TIMEOUT}, @@ -241,7 +241,7 @@ where /// The main-loop for when we are in [`State::WaitingForRequest`]. 
async fn state_waiting_for_request<Str>(&mut self, stream: &mut Str) -> Result<(), PeerError> where - Str: FusedStream<Item = Result<Message, monero_wire::BucketError>> + Unpin, + Str: FusedStream<Item = Result<Message, cuprate_wire::BucketError>> + Unpin, { tracing::debug!("waiting for peer/client request."); @@ -274,7 +274,7 @@ where /// The main-loop for when we are in [`State::WaitingForResponse`]. async fn state_waiting_for_response<Str>(&mut self, stream: &mut Str) -> Result<(), PeerError> where - Str: FusedStream<Item = Result<Message, monero_wire::BucketError>> + Unpin, + Str: FusedStream<Item = Result<Message, cuprate_wire::BucketError>> + Unpin, { tracing::debug!("waiting for peer response."); @@ -306,7 +306,7 @@ where /// `eager_protocol_messages` are protocol messages that we received during a handshake. pub async fn run<Str>(mut self, mut stream: Str, eager_protocol_messages: Vec<ProtocolMessage>) where - Str: FusedStream<Item = Result<Message, monero_wire::BucketError>> + Unpin, + Str: FusedStream<Item = Result<Message, cuprate_wire::BucketError>> + Unpin, { tracing::debug!( "Handling eager messages len: {}", diff --git a/p2p/monero-p2p/src/client/connector.rs b/p2p/p2p-core/src/client/connector.rs similarity index 100% rename from p2p/monero-p2p/src/client/connector.rs rename to p2p/p2p-core/src/client/connector.rs diff --git a/p2p/monero-p2p/src/client/handshaker.rs b/p2p/p2p-core/src/client/handshaker.rs similarity index 99% rename from p2p/monero-p2p/src/client/handshaker.rs rename to p2p/p2p-core/src/client/handshaker.rs index 03f3f563..1071b339 100644 --- a/p2p/monero-p2p/src/client/handshaker.rs +++ b/p2p/p2p-core/src/client/handshaker.rs @@ -20,8 +20,8 @@ use tokio::{ use tower::{Service, ServiceExt}; use tracing::{info_span, Instrument}; -use monero_pruning::{PruningError, PruningSeed}; -use monero_wire::{ +use cuprate_pruning::{PruningError, PruningSeed}; +use cuprate_wire::{ admin::{ HandshakeRequest, HandshakeResponse, PingResponse, SupportFlagsResponse, PING_OK_RESPONSE_STATUS_TEXT, @@ -586,7 +586,7 @@ async fn wait_for_message<Z: NetworkZone>( peer_sink: &mut Z::Sink, peer_stream: &mut Z::Stream, - eager_protocol_messages: &mut Vec<monero_wire::ProtocolMessage>, + eager_protocol_messages: &mut Vec<cuprate_wire::ProtocolMessage>, our_basic_node_data: &BasicNodeData, ) -> Result<Message, HandshakeError> { diff --git a/p2p/monero-p2p/src/client/timeout_monitor.rs b/p2p/p2p-core/src/client/timeout_monitor.rs similarity index 99% rename from p2p/monero-p2p/src/client/timeout_monitor.rs rename to p2p/p2p-core/src/client/timeout_monitor.rs index dcdf85d7..db261b4d 100644 --- a/p2p/monero-p2p/src/client/timeout_monitor.rs +++ b/p2p/p2p-core/src/client/timeout_monitor.rs @@ -5,7 +5,6 @@ use std::sync::Arc; use futures::channel::oneshot; -use monero_wire::admin::TimedSyncRequest; use tokio::{ sync::{mpsc, Semaphore}, time::{interval, MissedTickBehavior}, @@ -13,6 +12,8 @@ use tokio::{ use tower::ServiceExt; use tracing::instrument; +use cuprate_wire::admin::TimedSyncRequest; + use crate::{ client::{connection::ConnectionTaskRequest, InternalPeerID}, constants::{MAX_PEERS_IN_PEER_LIST_MESSAGE, TIMEOUT_INTERVAL}, diff --git a/p2p/monero-p2p/src/constants.rs b/p2p/p2p-core/src/constants.rs similarity index 100% rename from p2p/monero-p2p/src/constants.rs rename to p2p/p2p-core/src/constants.rs diff --git a/p2p/monero-p2p/src/error.rs b/p2p/p2p-core/src/error.rs similarity index 96% rename from p2p/monero-p2p/src/error.rs rename to p2p/p2p-core/src/error.rs index 
e74a2bb6..65303adc 100644 --- a/p2p/monero-p2p/src/error.rs +++ b/p2p/p2p-core/src/error.rs @@ -45,7 +45,7 @@ pub enum PeerError { #[error("inner service error: {0}")] ServiceError(#[from] tower::BoxError), #[error("bucket error: {0}")] - BucketError(#[from] monero_wire::BucketError), + BucketError(#[from] cuprate_wire::BucketError), #[error("handshake error: {0}")] Handshake(#[from] crate::client::HandshakeError), #[error("i/o error: {0}")] diff --git a/p2p/monero-p2p/src/handles.rs b/p2p/p2p-core/src/handles.rs similarity index 100% rename from p2p/monero-p2p/src/handles.rs rename to p2p/p2p-core/src/handles.rs diff --git a/p2p/monero-p2p/src/lib.rs b/p2p/p2p-core/src/lib.rs similarity index 99% rename from p2p/monero-p2p/src/lib.rs rename to p2p/p2p-core/src/lib.rs index 13ecf4aa..8703d59e 100644 --- a/p2p/monero-p2p/src/lib.rs +++ b/p2p/p2p-core/src/lib.rs @@ -16,7 +16,7 @@ use std::{fmt::Debug, future::Future, hash::Hash, pin::Pin}; use futures::{Sink, Stream}; -use monero_wire::{ +use cuprate_wire::{ levin::LevinMessage, network_address::NetworkAddressIncorrectZone, BucketError, Message, NetworkAddress, }; diff --git a/p2p/monero-p2p/src/network_zones.rs b/p2p/p2p-core/src/network_zones.rs similarity index 100% rename from p2p/monero-p2p/src/network_zones.rs rename to p2p/p2p-core/src/network_zones.rs diff --git a/p2p/monero-p2p/src/network_zones/clear.rs b/p2p/p2p-core/src/network_zones/clear.rs similarity index 98% rename from p2p/monero-p2p/src/network_zones/clear.rs rename to p2p/p2p-core/src/network_zones/clear.rs index c77f1333..192e3637 100644 --- a/p2p/monero-p2p/src/network_zones/clear.rs +++ b/p2p/p2p-core/src/network_zones/clear.rs @@ -11,7 +11,7 @@ use tokio::net::{ }; use tokio_util::codec::{FramedRead, FramedWrite}; -use monero_wire::MoneroWireCodec; +use cuprate_wire::MoneroWireCodec; use crate::{NetZoneAddress, NetworkZone}; diff --git a/p2p/monero-p2p/src/protocol.rs b/p2p/p2p-core/src/protocol.rs similarity index 99% rename from p2p/monero-p2p/src/protocol.rs rename to p2p/p2p-core/src/protocol.rs index 10157ae6..172038f8 100644 --- a/p2p/monero-p2p/src/protocol.rs +++ b/p2p/p2p-core/src/protocol.rs @@ -22,7 +22,7 @@ //! Request: NewFluffyBlock, Response: None, //! Request: NewTransactions, Response: None //! -use monero_wire::{ +use cuprate_wire::{ admin::{ HandshakeRequest, HandshakeResponse, PingResponse, SupportFlagsResponse, TimedSyncRequest, TimedSyncResponse, diff --git a/p2p/monero-p2p/src/protocol/try_from.rs b/p2p/p2p-core/src/protocol/try_from.rs similarity index 98% rename from p2p/monero-p2p/src/protocol/try_from.rs rename to p2p/p2p-core/src/protocol/try_from.rs index 02a5233e..8e3d026a 100644 --- a/p2p/monero-p2p/src/protocol/try_from.rs +++ b/p2p/p2p-core/src/protocol/try_from.rs @@ -1,7 +1,7 @@ //! This module contains the implementations of [`TryFrom`] and [`From`] to convert between //! [`Message`], [`PeerRequest`] and [`PeerResponse`]. 
-use monero_wire::{Message, ProtocolMessage, RequestMessage, ResponseMessage}; +use cuprate_wire::{Message, ProtocolMessage, RequestMessage, ResponseMessage}; use super::{PeerRequest, PeerResponse}; diff --git a/p2p/monero-p2p/src/services.rs b/p2p/p2p-core/src/services.rs similarity index 95% rename from p2p/monero-p2p/src/services.rs rename to p2p/p2p-core/src/services.rs index a0ea2e7a..6fd6c15b 100644 --- a/p2p/monero-p2p/src/services.rs +++ b/p2p/p2p-core/src/services.rs @@ -1,5 +1,5 @@ -use monero_pruning::{PruningError, PruningSeed}; -use monero_wire::{CoreSyncData, PeerListEntryBase}; +use cuprate_pruning::{PruningError, PruningSeed}; +use cuprate_wire::{CoreSyncData, PeerListEntryBase}; use crate::{ client::InternalPeerID, handles::ConnectionHandle, NetZoneAddress, NetworkAddressIncorrectZone, @@ -44,7 +44,7 @@ pub struct ZoneSpecificPeerListEntryBase<A: NetZoneAddress> { pub rpc_credits_per_hash: u32, } -impl<A: NetZoneAddress> From<ZoneSpecificPeerListEntryBase<A>> for monero_wire::PeerListEntryBase { +impl<A: NetZoneAddress> From<ZoneSpecificPeerListEntryBase<A>> for cuprate_wire::PeerListEntryBase { fn from(value: ZoneSpecificPeerListEntryBase<A>) -> Self { Self { adr: value.adr.into(), @@ -65,7 +65,7 @@ pub enum PeerListConversionError { PruningSeed(#[from] PruningError), } -impl<A: NetZoneAddress> TryFrom<monero_wire::PeerListEntryBase> +impl<A: NetZoneAddress> TryFrom<cuprate_wire::PeerListEntryBase> for ZoneSpecificPeerListEntryBase<A> { type Error = PeerListConversionError; diff --git a/p2p/monero-p2p/tests/fragmented_handshake.rs b/p2p/p2p-core/tests/fragmented_handshake.rs similarity index 99% rename from p2p/monero-p2p/tests/fragmented_handshake.rs rename to p2p/p2p-core/tests/fragmented_handshake.rs index e9833cf3..2e96574c 100644 --- a/p2p/monero-p2p/tests/fragmented_handshake.rs +++ b/p2p/p2p-core/tests/fragmented_handshake.rs @@ -23,12 +23,12 @@ use tokio_util::{ use tower::{Service, ServiceExt}; use cuprate_helper::network::Network; -use monero_p2p::{ +use cuprate_p2p_core::{ client::{ConnectRequest, Connector, DoHandshakeRequest, HandShaker, InternalPeerID}, network_zones::ClearNetServerCfg, ConnectionDirection, NetworkZone, }; -use monero_wire::{ +use cuprate_wire::{ common::PeerSupportFlags, levin::{message::make_fragmented_messages, LevinMessage, Protocol}, BasicNodeData, Message, MoneroWireCodec, diff --git a/p2p/monero-p2p/tests/handles.rs b/p2p/p2p-core/tests/handles.rs similarity index 97% rename from p2p/monero-p2p/tests/handles.rs rename to p2p/p2p-core/tests/handles.rs index 6766f78f..e98cd2d4 100644 --- a/p2p/monero-p2p/tests/handles.rs +++ b/p2p/p2p-core/tests/handles.rs @@ -2,7 +2,7 @@ use std::{sync::Arc, time::Duration}; use tokio::sync::Semaphore; -use monero_p2p::handles::HandleBuilder; +use cuprate_p2p_core::handles::HandleBuilder; #[test] fn send_ban_signal() { diff --git a/p2p/monero-p2p/tests/handshake.rs b/p2p/p2p-core/tests/handshake.rs similarity index 98% rename from p2p/monero-p2p/tests/handshake.rs rename to p2p/p2p-core/tests/handshake.rs index b63a221b..f9792488 100644 --- a/p2p/monero-p2p/tests/handshake.rs +++ b/p2p/p2p-core/tests/handshake.rs @@ -10,9 +10,9 @@ use tokio_util::codec::{FramedRead, FramedWrite}; use tower::{Service, ServiceExt}; use cuprate_helper::network::Network; -use monero_wire::{common::PeerSupportFlags, BasicNodeData, MoneroWireCodec}; +use cuprate_wire::{common::PeerSupportFlags, BasicNodeData, MoneroWireCodec}; -use monero_p2p::{ +use cuprate_p2p_core::{ client::{ConnectRequest, Connector, DoHandshakeRequest, 
HandShaker, InternalPeerID}, network_zones::{ClearNet, ClearNetServerCfg}, ConnectionDirection, NetworkZone, diff --git a/p2p/monero-p2p/tests/sending_receiving.rs b/p2p/p2p-core/tests/sending_receiving.rs similarity index 94% rename from p2p/monero-p2p/tests/sending_receiving.rs rename to p2p/p2p-core/tests/sending_receiving.rs index fc5c369b..b4c42e2c 100644 --- a/p2p/monero-p2p/tests/sending_receiving.rs +++ b/p2p/p2p-core/tests/sending_receiving.rs @@ -4,9 +4,9 @@ use tokio::sync::Semaphore; use tower::{Service, ServiceExt}; use cuprate_helper::network::Network; -use monero_wire::{common::PeerSupportFlags, protocol::GetObjectsRequest, BasicNodeData}; +use cuprate_wire::{common::PeerSupportFlags, protocol::GetObjectsRequest, BasicNodeData}; -use monero_p2p::{ +use cuprate_p2p_core::{ client::{ConnectRequest, Connector, HandShaker}, network_zones::ClearNet, protocol::{PeerRequest, PeerResponse}, diff --git a/p2p/monero-p2p/tests/utils.rs b/p2p/p2p-core/tests/utils.rs similarity index 97% rename from p2p/monero-p2p/tests/utils.rs rename to p2p/p2p-core/tests/utils.rs index 9836cbfa..9587bb58 100644 --- a/p2p/monero-p2p/tests/utils.rs +++ b/p2p/p2p-core/tests/utils.rs @@ -7,7 +7,7 @@ use std::{ use futures::FutureExt; use tower::Service; -use monero_p2p::{ +use cuprate_p2p_core::{ services::{ AddressBookRequest, AddressBookResponse, CoreSyncDataRequest, CoreSyncDataResponse, PeerSyncRequest, PeerSyncResponse, @@ -54,7 +54,7 @@ impl Service<CoreSyncDataRequest> for DummyCoreSyncSvc { fn call(&mut self, _: CoreSyncDataRequest) -> Self::Future { async move { - Ok(CoreSyncDataResponse(monero_wire::CoreSyncData { + Ok(CoreSyncDataResponse(cuprate_wire::CoreSyncData { cumulative_difficulty: 1, cumulative_difficulty_top64: 0, current_height: 1, diff --git a/p2p/cuprate-p2p/Cargo.toml b/p2p/p2p/Cargo.toml similarity index 77% rename from p2p/cuprate-p2p/Cargo.toml rename to p2p/p2p/Cargo.toml index ab477a83..507d3621 100644 --- a/p2p/cuprate-p2p/Cargo.toml +++ b/p2p/p2p/Cargo.toml @@ -6,13 +6,13 @@ license = "MIT" authors = ["Boog900"] [dependencies] -fixed-bytes = { path = "../../net/fixed-bytes" } -monero-wire = { path = "../../net/monero-wire" } -monero-p2p = { path = "../monero-p2p", features = ["borsh"] } -monero-address-book = { path = "../address-book" } -monero-pruning = { path = "../../pruning" } +cuprate-fixed-bytes = { path = "../../net/fixed-bytes" } +cuprate-wire = { path = "../../net/wire" } +cuprate-p2p-core = { path = "../p2p-core", features = ["borsh"] } +cuprate-address-book = { path = "../address-book" } +cuprate-pruning = { path = "../../pruning" } cuprate-helper = { path = "../../helper", features = ["asynch"], default-features = false } -async-buffer = { path = "../async-buffer" } +cuprate-async-buffer = { path = "../async-buffer" } monero-serai = { workspace = true, features = ["std"] } diff --git a/p2p/cuprate-p2p/src/block_downloader.rs b/p2p/p2p/src/block_downloader.rs similarity index 99% rename from p2p/cuprate-p2p/src/block_downloader.rs rename to p2p/p2p/src/block_downloader.rs index 3f7f7e73..7d0ab7e2 100644 --- a/p2p/cuprate-p2p/src/block_downloader.rs +++ b/p2p/p2p/src/block_downloader.rs @@ -21,13 +21,13 @@ use tokio::{ use tower::{Service, ServiceExt}; use tracing::{instrument, Instrument, Span}; -use async_buffer::{BufferAppender, BufferStream}; -use monero_p2p::{ +use cuprate_async_buffer::{BufferAppender, BufferStream}; +use cuprate_p2p_core::{ handles::ConnectionHandle, services::{PeerSyncRequest, PeerSyncResponse}, NetworkZone, PeerSyncSvc, }; -use 
monero_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; +use cuprate_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; use crate::{ client_pool::{ClientPool, ClientPoolDropGuard}, @@ -150,7 +150,7 @@ where + 'static, C::Future: Send + 'static, { - let (buffer_appender, buffer_stream) = async_buffer::new_buffer(config.buffer_size); + let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_size); let block_downloader = BlockDownloader::new( client_pool, diff --git a/p2p/cuprate-p2p/src/block_downloader/block_queue.rs b/p2p/p2p/src/block_downloader/block_queue.rs similarity index 96% rename from p2p/cuprate-p2p/src/block_downloader/block_queue.rs rename to p2p/p2p/src/block_downloader/block_queue.rs index ada28256..b03d847d 100644 --- a/p2p/cuprate-p2p/src/block_downloader/block_queue.rs +++ b/p2p/p2p/src/block_downloader/block_queue.rs @@ -1,6 +1,6 @@ use std::{cmp::Ordering, collections::BinaryHeap}; -use async_buffer::BufferAppender; +use cuprate_async_buffer::BufferAppender; use super::{BlockBatch, BlockDownloadError}; @@ -120,7 +120,7 @@ mod tests { use tokio::sync::Semaphore; use tokio_test::block_on; - use monero_p2p::handles::HandleBuilder; + use cuprate_p2p_core::handles::HandleBuilder; use super::*; @@ -144,7 +144,7 @@ mod tests { #[test] fn block_queue_returns_items_in_order(batches in vec(ready_batch_strategy(), 0..10_000)) { block_on(async move { - let (buffer_tx, mut buffer_rx) = async_buffer::new_buffer(usize::MAX); + let (buffer_tx, mut buffer_rx) = cuprate_async_buffer::new_buffer(usize::MAX); let mut queue = BlockQueue::new(buffer_tx); diff --git a/p2p/cuprate-p2p/src/block_downloader/chain_tracker.rs b/p2p/p2p/src/block_downloader/chain_tracker.rs similarity index 97% rename from p2p/cuprate-p2p/src/block_downloader/chain_tracker.rs rename to p2p/p2p/src/block_downloader/chain_tracker.rs index 07bad7b6..786a0deb 100644 --- a/p2p/cuprate-p2p/src/block_downloader/chain_tracker.rs +++ b/p2p/p2p/src/block_downloader/chain_tracker.rs @@ -1,9 +1,9 @@ use std::{cmp::min, collections::VecDeque}; -use fixed_bytes::ByteArrayVec; +use cuprate_fixed_bytes::ByteArrayVec; -use monero_p2p::{client::InternalPeerID, handles::ConnectionHandle, NetworkZone}; -use monero_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; +use cuprate_p2p_core::{client::InternalPeerID, handles::ConnectionHandle, NetworkZone}; +use cuprate_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; use crate::constants::MEDIUM_BAN; diff --git a/p2p/cuprate-p2p/src/block_downloader/download_batch.rs b/p2p/p2p/src/block_downloader/download_batch.rs similarity index 97% rename from p2p/cuprate-p2p/src/block_downloader/download_batch.rs rename to p2p/p2p/src/block_downloader/download_batch.rs index 8cdde41e..e9dfcb45 100644 --- a/p2p/cuprate-p2p/src/block_downloader/download_batch.rs +++ b/p2p/p2p/src/block_downloader/download_batch.rs @@ -6,10 +6,10 @@ use tokio::time::timeout; use tower::{Service, ServiceExt}; use tracing::instrument; +use cuprate_fixed_bytes::ByteArrayVec; use cuprate_helper::asynch::rayon_spawn_async; -use fixed_bytes::ByteArrayVec; -use monero_p2p::{handles::ConnectionHandle, NetworkZone, PeerRequest, PeerResponse}; -use monero_wire::protocol::{GetObjectsRequest, GetObjectsResponse}; +use cuprate_p2p_core::{handles::ConnectionHandle, NetworkZone, PeerRequest, PeerResponse}; +use cuprate_wire::protocol::{GetObjectsRequest, GetObjectsResponse}; use crate::{ block_downloader::{BlockBatch, BlockDownloadError, BlockDownloadTaskResponse}, diff --git 
a/p2p/cuprate-p2p/src/block_downloader/request_chain.rs b/p2p/p2p/src/block_downloader/request_chain.rs similarity index 98% rename from p2p/cuprate-p2p/src/block_downloader/request_chain.rs rename to p2p/p2p/src/block_downloader/request_chain.rs index 7733aef9..f8b53194 100644 --- a/p2p/cuprate-p2p/src/block_downloader/request_chain.rs +++ b/p2p/p2p/src/block_downloader/request_chain.rs @@ -6,13 +6,13 @@ use tokio::{task::JoinSet, time::timeout}; use tower::{Service, ServiceExt}; use tracing::{instrument, Instrument, Span}; -use monero_p2p::{ +use cuprate_p2p_core::{ client::InternalPeerID, handles::ConnectionHandle, services::{PeerSyncRequest, PeerSyncResponse}, NetworkZone, PeerRequest, PeerResponse, PeerSyncSvc, }; -use monero_wire::protocol::{ChainRequest, ChainResponse}; +use cuprate_wire::protocol::{ChainRequest, ChainResponse}; use crate::{ block_downloader::{ diff --git a/p2p/cuprate-p2p/src/block_downloader/tests.rs b/p2p/p2p/src/block_downloader/tests.rs similarity index 97% rename from p2p/cuprate-p2p/src/block_downloader/tests.rs rename to p2p/p2p/src/block_downloader/tests.rs index 24360aef..07c30d5d 100644 --- a/p2p/cuprate-p2p/src/block_downloader/tests.rs +++ b/p2p/p2p/src/block_downloader/tests.rs @@ -18,15 +18,15 @@ use proptest::{collection::vec, prelude::*}; use tokio::{sync::Semaphore, time::timeout}; use tower::{service_fn, Service}; -use fixed_bytes::ByteArrayVec; -use monero_p2p::{ +use cuprate_fixed_bytes::ByteArrayVec; +use cuprate_p2p_core::{ client::{mock_client, Client, InternalPeerID, PeerInformation}, network_zones::ClearNet, services::{PeerSyncRequest, PeerSyncResponse}, ConnectionDirection, NetworkZone, PeerRequest, PeerResponse, }; -use monero_pruning::PruningSeed; -use monero_wire::{ +use cuprate_pruning::PruningSeed; +use cuprate_wire::{ common::{BlockCompleteEntry, TransactionBlobs}, protocol::{ChainResponse, GetObjectsResponse}, }; @@ -184,7 +184,7 @@ prop_compose! { fn mock_block_downloader_client(blockchain: Arc<MockBlockchain>) -> Client<ClearNet> { let semaphore = Arc::new(Semaphore::new(1)); - let (connection_guard, connection_handle) = monero_p2p::handles::HandleBuilder::new() + let (connection_guard, connection_handle) = cuprate_p2p_core::handles::HandleBuilder::new() .with_permit(semaphore.try_acquire_owned().unwrap()) .build(); diff --git a/p2p/cuprate-p2p/src/broadcast.rs b/p2p/p2p/src/broadcast.rs similarity index 98% rename from p2p/cuprate-p2p/src/broadcast.rs rename to p2p/p2p/src/broadcast.rs index b6e5e807..db7a41ee 100644 --- a/p2p/cuprate-p2p/src/broadcast.rs +++ b/p2p/p2p/src/broadcast.rs @@ -22,8 +22,10 @@ use tokio::{ use tokio_stream::wrappers::WatchStream; use tower::Service; -use monero_p2p::{client::InternalPeerID, BroadcastMessage, ConnectionDirection, NetworkZone}; -use monero_wire::{ +use cuprate_p2p_core::{ + client::InternalPeerID, BroadcastMessage, ConnectionDirection, NetworkZone, +}; +use cuprate_wire::{ common::{BlockCompleteEntry, TransactionBlobs}, protocol::{NewFluffyBlock, NewTransactions}, }; @@ -128,7 +130,7 @@ pub fn init_broadcast_channels<N: NetworkZone>( /// Only certain P2P messages are supported here: [`NewFluffyBlock`] and [`NewTransactions`]. These are the only /// P2P messages that make sense to broadcast to multiple peers. 
/// -/// [`NewBlock`](monero_wire::protocol::NewBlock) has been excluded as monerod has had fluffy blocks for a while and +/// [`NewBlock`](cuprate_wire::protocol::NewBlock) has been excluded as monerod has had fluffy blocks for a while and /// Cuprate sets fluffy blocks as a requirement during handshakes. pub enum BroadcastRequest<N: NetworkZone> { /// Broadcast a block to the network. The block will be broadcast as a fluffy block to all peers. @@ -400,8 +402,8 @@ mod tests { use tokio::time::timeout; use tower::{Service, ServiceExt}; + use cuprate_p2p_core::{client::InternalPeerID, BroadcastMessage, ConnectionDirection}; use cuprate_test_utils::test_netzone::TestNetZone; - use monero_p2p::{client::InternalPeerID, BroadcastMessage, ConnectionDirection}; use super::{init_broadcast_channels, BroadcastConfig, BroadcastRequest}; diff --git a/p2p/cuprate-p2p/src/client_pool.rs b/p2p/p2p/src/client_pool.rs similarity index 99% rename from p2p/cuprate-p2p/src/client_pool.rs rename to p2p/p2p/src/client_pool.rs index c5a83c8e..711491d0 100644 --- a/p2p/cuprate-p2p/src/client_pool.rs +++ b/p2p/p2p/src/client_pool.rs @@ -16,7 +16,7 @@ use dashmap::DashMap; use tokio::sync::mpsc; use tracing::{Instrument, Span}; -use monero_p2p::{ +use cuprate_p2p_core::{ client::{Client, InternalPeerID}, handles::ConnectionHandle, NetworkZone, diff --git a/p2p/cuprate-p2p/src/client_pool/disconnect_monitor.rs b/p2p/p2p/src/client_pool/disconnect_monitor.rs similarity index 96% rename from p2p/cuprate-p2p/src/client_pool/disconnect_monitor.rs rename to p2p/p2p/src/client_pool/disconnect_monitor.rs index e83fa325..f45d5e38 100644 --- a/p2p/cuprate-p2p/src/client_pool/disconnect_monitor.rs +++ b/p2p/p2p/src/client_pool/disconnect_monitor.rs @@ -14,7 +14,7 @@ use tokio::sync::mpsc; use tokio_util::sync::WaitForCancellationFutureOwned; use tracing::instrument; -use monero_p2p::{client::InternalPeerID, handles::ConnectionHandle, NetworkZone}; +use cuprate_p2p_core::{client::InternalPeerID, handles::ConnectionHandle, NetworkZone}; use super::ClientPool; diff --git a/p2p/cuprate-p2p/src/client_pool/drop_guard_client.rs b/p2p/p2p/src/client_pool/drop_guard_client.rs similarity index 95% rename from p2p/cuprate-p2p/src/client_pool/drop_guard_client.rs rename to p2p/p2p/src/client_pool/drop_guard_client.rs index d8c20c6e..b10c4e9c 100644 --- a/p2p/cuprate-p2p/src/client_pool/drop_guard_client.rs +++ b/p2p/p2p/src/client_pool/drop_guard_client.rs @@ -3,7 +3,7 @@ use std::{ sync::Arc, }; -use monero_p2p::{client::Client, NetworkZone}; +use cuprate_p2p_core::{client::Client, NetworkZone}; use crate::client_pool::ClientPool; diff --git a/p2p/cuprate-p2p/src/config.rs b/p2p/p2p/src/config.rs similarity index 93% rename from p2p/cuprate-p2p/src/config.rs rename to p2p/p2p/src/config.rs index a92ad9a2..90d7f8ff 100644 --- a/p2p/cuprate-p2p/src/config.rs +++ b/p2p/p2p/src/config.rs @@ -1,7 +1,7 @@ +use cuprate_address_book::AddressBookConfig; use cuprate_helper::network::Network; -use monero_address_book::AddressBookConfig; -use monero_p2p::NetworkZone; -use monero_wire::{common::PeerSupportFlags, BasicNodeData}; +use cuprate_p2p_core::NetworkZone; +use cuprate_wire::{common::PeerSupportFlags, BasicNodeData}; /// P2P config. 
#[derive(Clone, Debug)] diff --git a/p2p/cuprate-p2p/src/connection_maintainer.rs b/p2p/p2p/src/connection_maintainer.rs similarity index 99% rename from p2p/cuprate-p2p/src/connection_maintainer.rs rename to p2p/p2p/src/connection_maintainer.rs index 4ec66950..8e5c9bc3 100644 --- a/p2p/cuprate-p2p/src/connection_maintainer.rs +++ b/p2p/p2p/src/connection_maintainer.rs @@ -14,7 +14,7 @@ use tokio::{ use tower::{Service, ServiceExt}; use tracing::{instrument, Instrument, Span}; -use monero_p2p::{ +use cuprate_p2p_core::{ client::{Client, ConnectRequest, HandshakeError}, services::{AddressBookRequest, AddressBookResponse}, AddressBook, NetworkZone, diff --git a/p2p/cuprate-p2p/src/constants.rs b/p2p/p2p/src/constants.rs similarity index 100% rename from p2p/cuprate-p2p/src/constants.rs rename to p2p/p2p/src/constants.rs diff --git a/p2p/cuprate-p2p/src/inbound_server.rs b/p2p/p2p/src/inbound_server.rs similarity index 99% rename from p2p/cuprate-p2p/src/inbound_server.rs rename to p2p/p2p/src/inbound_server.rs index d8389e79..6bc1e6d8 100644 --- a/p2p/cuprate-p2p/src/inbound_server.rs +++ b/p2p/p2p/src/inbound_server.rs @@ -12,7 +12,7 @@ use tokio::{ use tower::{Service, ServiceExt}; use tracing::{instrument, Instrument, Span}; -use monero_p2p::{ +use cuprate_p2p_core::{ client::{Client, DoHandshakeRequest, HandshakeError, InternalPeerID}, services::{AddressBookRequest, AddressBookResponse}, AddressBook, ConnectionDirection, NetworkZone, diff --git a/p2p/cuprate-p2p/src/lib.rs b/p2p/p2p/src/lib.rs similarity index 95% rename from p2p/cuprate-p2p/src/lib.rs rename to p2p/p2p/src/lib.rs index aea0cac1..95154ec7 100644 --- a/p2p/cuprate-p2p/src/lib.rs +++ b/p2p/p2p/src/lib.rs @@ -4,7 +4,7 @@ //! a certain [`NetworkZone`] use std::sync::Arc; -use async_buffer::BufferStream; +use cuprate_async_buffer::BufferStream; use futures::FutureExt; use tokio::{ sync::{mpsc, watch}, @@ -14,7 +14,7 @@ use tokio_stream::wrappers::WatchStream; use tower::{buffer::Buffer, util::BoxCloneService, Service, ServiceExt}; use tracing::{instrument, Instrument, Span}; -use monero_p2p::{ +use cuprate_p2p_core::{ client::Connector, client::InternalPeerID, services::{AddressBookRequest, AddressBookResponse, PeerSyncRequest}, @@ -56,7 +56,7 @@ where CS: CoreSyncSvc + Clone, { let address_book = - monero_address_book::init_address_book(config.address_book_config.clone()).await?; + cuprate_address_book::init_address_book(config.address_book_config.clone()).await?; let address_book = Buffer::new( address_book, config.max_inbound_connections + config.outbound_connections, @@ -79,7 +79,7 @@ where basic_node_data.peer_id = 1; } - let outbound_handshaker = monero_p2p::client::HandShaker::new( + let outbound_handshaker = cuprate_p2p_core::client::HandShaker::new( address_book.clone(), sync_states_svc.clone(), core_sync_svc.clone(), @@ -88,7 +88,7 @@ where basic_node_data.clone(), ); - let inbound_handshaker = monero_p2p::client::HandShaker::new( + let inbound_handshaker = cuprate_p2p_core::client::HandShaker::new( address_book.clone(), sync_states_svc.clone(), core_sync_svc.clone(), diff --git a/p2p/cuprate-p2p/src/sync_states.rs b/p2p/p2p/src/sync_states.rs similarity index 97% rename from p2p/cuprate-p2p/src/sync_states.rs rename to p2p/p2p/src/sync_states.rs index 127b8d7e..1b4e81ae 100644 --- a/p2p/cuprate-p2p/src/sync_states.rs +++ b/p2p/p2p/src/sync_states.rs @@ -13,14 +13,14 @@ use futures::{stream::FuturesUnordered, StreamExt}; use tokio::sync::watch; use tower::Service; -use monero_p2p::{ +use cuprate_p2p_core::{ 
client::InternalPeerID, handles::ConnectionHandle, services::{PeerSyncRequest, PeerSyncResponse}, NetworkZone, }; -use monero_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; -use monero_wire::CoreSyncData; +use cuprate_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; +use cuprate_wire::CoreSyncData; use crate::{client_pool::disconnect_monitor::PeerDisconnectFut, constants::SHORT_BAN}; @@ -243,11 +243,13 @@ mod tests { use tokio::sync::Semaphore; use tower::{Service, ServiceExt}; - use monero_p2p::{client::InternalPeerID, handles::HandleBuilder, services::PeerSyncRequest}; - use monero_wire::CoreSyncData; + use cuprate_p2p_core::{ + client::InternalPeerID, handles::HandleBuilder, services::PeerSyncRequest, + }; + use cuprate_wire::CoreSyncData; + use cuprate_p2p_core::services::PeerSyncResponse; use cuprate_test_utils::test_netzone::TestNetZone; - use monero_p2p::services::PeerSyncResponse; use super::PeerSyncSvc; diff --git a/pruning/Cargo.toml b/pruning/Cargo.toml index bd609975..3f5bd271 100644 --- a/pruning/Cargo.toml +++ b/pruning/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "monero-pruning" +name = "cuprate-pruning" version = "0.1.0" edition = "2021" license = "MIT" diff --git a/pruning/src/lib.rs b/pruning/src/lib.rs index af6017a1..96c3609f 100644 --- a/pruning/src/lib.rs +++ b/pruning/src/lib.rs @@ -8,7 +8,7 @@ //! split into 8 parts): //! //! ```rust -//! use monero_pruning::PruningSeed; +//! use cuprate_pruning::PruningSeed; //! //! let seed: u32 = 386; // the seed you want to check is valid //! match PruningSeed::decompress_p2p_rules(seed) { diff --git a/rpc/json-rpc/Cargo.toml b/rpc/json-rpc/Cargo.toml index 12021eb5..777f3264 100644 --- a/rpc/json-rpc/Cargo.toml +++ b/rpc/json-rpc/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "json-rpc" +name = "cuprate-json-rpc" version = "0.0.0" edition = "2021" description = "JSON-RPC 2.0 implementation" diff --git a/rpc/json-rpc/README.md b/rpc/json-rpc/README.md index 00aa9e3d..9164fa08 100644 --- a/rpc/json-rpc/README.md +++ b/rpc/json-rpc/README.md @@ -30,7 +30,7 @@ and assumes the type within that `body` field is tagged properly, for example: ```rust # use pretty_assertions::assert_eq; use serde::{Deserialize, Serialize}; -use json_rpc::{Id, Request}; +use cuprate_json_rpc::{Id, Request}; // Parameter type. #[derive(Deserialize, Serialize)] @@ -83,7 +83,7 @@ This crate's serialized field order slightly differs compared to `monerod`. 
With that said, parsing should be not affected at all since a key-value map is used: ```rust # use pretty_assertions::assert_eq; -use json_rpc::{Id, Response}; +use cuprate_json_rpc::{Id, Response}; let response = Response::ok(Id::Num(123), "OK"); let response_json = serde_json::to_string_pretty(&response).unwrap(); @@ -147,7 +147,7 @@ A quick table showing some small differences between this crate and other JSON-R Allows any case for key fields excluding `method/params`: ```rust -# use json_rpc::Response; +# use cuprate_json_rpc::Response; # use serde_json::from_str; # use pretty_assertions::assert_eq; let json = r#"{"jsonrpc":"2.0","id":123,"result":"OK"}"#; @@ -161,7 +161,7 @@ assert_eq!(format!("{err}"), "missing field `jsonrpc` at line 1 column 40"); Allows unknown fields in main `{}`, and response/request objects: ```rust -# use json_rpc::Response; +# use cuprate_json_rpc::Response; # use serde_json::from_str; // unknown fields are allowed in main `{}` // v @@ -176,7 +176,7 @@ from_str::<Response<String>>(&json).unwrap(); Allows overwriting previous values upon duplicate fields (except [`Response`]'s `result/error` field) ```rust -# use json_rpc::{Id, Response}; +# use cuprate_json_rpc::{Id, Response}; # use serde_json::from_str; # use pretty_assertions::assert_eq; // duplicate fields will get overwritten by the latest one diff --git a/rpc/json-rpc/src/error/code.rs b/rpc/json-rpc/src/error/code.rs index 7874ac56..10000b81 100644 --- a/rpc/json-rpc/src/error/code.rs +++ b/rpc/json-rpc/src/error/code.rs @@ -26,7 +26,7 @@ use crate::error::constants::{ /// /// # Display /// ```rust -/// use json_rpc::error::ErrorCode; +/// use cuprate_json_rpc::error::ErrorCode; /// use serde_json::{to_value, from_value, Value}; /// /// for e in [ @@ -46,7 +46,7 @@ use crate::error::constants::{ /// # (De)serialization /// This type gets (de)serialized as the associated `i32`, for example: /// ```rust -/// use json_rpc::error::ErrorCode; +/// use cuprate_json_rpc::error::ErrorCode; /// use serde_json::{to_value, from_value, Value}; /// /// for e in [ @@ -69,7 +69,7 @@ use crate::error::constants::{ /// ``` /// /// ```rust,should_panic -/// # use json_rpc::error::ErrorCode; +/// # use cuprate_json_rpc::error::ErrorCode; /// # use serde_json::from_value; /// // A JSON string that contains an integer won't work. /// from_value::<ErrorCode>("-32700".into()).unwrap(); @@ -109,7 +109,7 @@ impl ErrorCode { /// [`From<i32>`] is the same as this function. /// /// ```rust - /// use json_rpc::error::{ + /// use cuprate_json_rpc::error::{ /// ErrorCode, /// INTERNAL_ERROR, INVALID_PARAMS, INVALID_REQUEST, METHOD_NOT_FOUND, PARSE_ERROR, /// }; @@ -147,7 +147,7 @@ impl ErrorCode { /// Returns `self`'s [`i32`] code representation. /// /// ```rust - /// use json_rpc::error::{ + /// use cuprate_json_rpc::error::{ /// ErrorCode, /// INTERNAL_ERROR, INVALID_PARAMS, INVALID_REQUEST, METHOD_NOT_FOUND, PARSE_ERROR, /// }; @@ -174,7 +174,7 @@ impl ErrorCode { /// Returns `self`'s human readable [`str`] message. 
/// /// ```rust - /// use json_rpc::error::{ + /// use cuprate_json_rpc::error::{ /// ErrorCode, /// INTERNAL_ERROR, INVALID_PARAMS, INVALID_REQUEST, METHOD_NOT_FOUND, PARSE_ERROR, SERVER_ERROR, /// }; diff --git a/rpc/json-rpc/src/error/object.rs b/rpc/json-rpc/src/error/object.rs index 6ad96a7e..dbaaab0b 100644 --- a/rpc/json-rpc/src/error/object.rs +++ b/rpc/json-rpc/src/error/object.rs @@ -22,7 +22,7 @@ use crate::error::{ /// /// # Display /// ```rust -/// use json_rpc::error::ErrorObject; +/// use cuprate_json_rpc::error::ErrorObject; /// /// // The format is `$CODE: $MESSAGE`. /// // If a message was not passed during construction, @@ -69,7 +69,7 @@ impl ErrorObject { /// /// ```rust /// use std::borrow::Cow; - /// use json_rpc::error::{ErrorCode, ErrorObject}; + /// use cuprate_json_rpc::error::{ErrorCode, ErrorObject}; /// /// for code in [ /// ErrorCode::ParseError, @@ -100,7 +100,7 @@ impl ErrorObject { /// /// ```rust /// use std::borrow::Cow; - /// use json_rpc::error::{ErrorCode, ErrorObject}; + /// use cuprate_json_rpc::error::{ErrorCode, ErrorObject}; /// /// let code = ErrorCode::ParseError; /// let object = ErrorObject::parse_error(); @@ -122,7 +122,7 @@ impl ErrorObject { /// /// ```rust /// use std::borrow::Cow; - /// use json_rpc::error::{ErrorCode, ErrorObject}; + /// use cuprate_json_rpc::error::{ErrorCode, ErrorObject}; /// /// let code = ErrorCode::InvalidRequest; /// let object = ErrorObject::invalid_request(); @@ -144,7 +144,7 @@ impl ErrorObject { /// /// ```rust /// use std::borrow::Cow; - /// use json_rpc::error::{ErrorCode, ErrorObject}; + /// use cuprate_json_rpc::error::{ErrorCode, ErrorObject}; /// /// let code = ErrorCode::MethodNotFound; /// let object = ErrorObject::method_not_found(); @@ -166,7 +166,7 @@ impl ErrorObject { /// /// ```rust /// use std::borrow::Cow; - /// use json_rpc::error::{ErrorCode, ErrorObject}; + /// use cuprate_json_rpc::error::{ErrorCode, ErrorObject}; /// /// let code = ErrorCode::InvalidParams; /// let object = ErrorObject::invalid_params(); @@ -189,7 +189,7 @@ impl ErrorObject { /// /// ```rust /// use std::borrow::Cow; - /// use json_rpc::error::{ErrorCode, ErrorObject}; + /// use cuprate_json_rpc::error::{ErrorCode, ErrorObject}; /// /// let code = ErrorCode::InternalError; /// let object = ErrorObject::internal_error(); @@ -213,7 +213,7 @@ impl ErrorObject { /// /// ```rust /// use std::borrow::Cow; - /// use json_rpc::error::{ErrorCode, ErrorObject}; + /// use cuprate_json_rpc::error::{ErrorCode, ErrorObject}; /// /// let code = ErrorCode::ServerError(0); /// let object = ErrorObject::server_error(0); diff --git a/rpc/json-rpc/src/id.rs b/rpc/json-rpc/src/id.rs index 7ee710d5..db894adc 100644 --- a/rpc/json-rpc/src/id.rs +++ b/rpc/json-rpc/src/id.rs @@ -26,7 +26,7 @@ use std::borrow::Cow; /// (or just manually create the `Cow`) for a non-allocating `Id`. /// /// ```rust -/// use json_rpc::Id; +/// use cuprate_json_rpc::Id; /// /// assert_eq!(Id::from(String::new()), Id::Str("".into())); /// assert_eq!(Id::from(Some(String::new())), Id::Str("".into())); @@ -39,7 +39,7 @@ pub enum Id { /// A JSON `null` value. /// /// ```rust - /// use json_rpc::Id; + /// use cuprate_json_rpc::Id; /// use serde_json::{from_value,to_value,json,Value}; /// /// assert_eq!(from_value::<Id>(json!(null)).unwrap(), Id::Null); @@ -61,7 +61,7 @@ pub enum Id { /// /// ```rust /// use std::borrow::Cow; - /// use json_rpc::Id; + /// use cuprate_json_rpc::Id; /// /// /// A program's static ID. 
/// const ID: &'static str = "my_id"; @@ -79,7 +79,7 @@ impl Id { /// This returns `Some(u64)` if [`Id`] is a number. /// /// ```rust - /// use json_rpc::Id; + /// use cuprate_json_rpc::Id; /// /// assert_eq!(Id::Num(0).as_u64(), Some(0)); /// assert_eq!(Id::Str("0".into()).as_u64(), None); @@ -95,7 +95,7 @@ impl Id { /// This returns `Some(&str)` if [`Id`] is a string. /// /// ```rust - /// use json_rpc::Id; + /// use cuprate_json_rpc::Id; /// /// assert_eq!(Id::Str("0".into()).as_str(), Some("0")); /// assert_eq!(Id::Num(0).as_str(), None); @@ -111,7 +111,7 @@ impl Id { /// Returns `true` if `self` is [`Id::Null`]. /// /// ```rust - /// use json_rpc::Id; + /// use cuprate_json_rpc::Id; /// /// assert!(Id::Null.is_null()); /// assert!(!Id::Num(0).is_null()); @@ -124,7 +124,7 @@ impl Id { /// Create a new [`Id::Str`] from a static string. /// /// ```rust - /// use json_rpc::Id; + /// use cuprate_json_rpc::Id; /// /// assert_eq!(Id::from_static_str("hi"), Id::Str("hi".into())); /// ``` diff --git a/rpc/json-rpc/src/request.rs b/rpc/json-rpc/src/request.rs index d760047a..d1f1e1e4 100644 --- a/rpc/json-rpc/src/request.rs +++ b/rpc/json-rpc/src/request.rs @@ -47,7 +47,7 @@ impl<T> Request<T> { /// Create a new [`Self`] with no [`Id`]. /// /// ```rust - /// use json_rpc::Request; + /// use cuprate_json_rpc::Request; /// /// assert_eq!(Request::new("").id, None); /// ``` @@ -62,7 +62,7 @@ impl<T> Request<T> { /// Create a new [`Self`] with an [`Id`]. /// /// ```rust - /// use json_rpc::{Id, Request}; + /// use cuprate_json_rpc::{Id, Request}; /// /// assert_eq!(Request::new_with_id(Id::Num(0), "").id, Some(Id::Num(0))); /// ``` @@ -79,7 +79,7 @@ impl<T> Request<T> { /// In other words, if `id` is [`None`], this returns `true`. /// /// ```rust - /// use json_rpc::{Id, Request}; + /// use cuprate_json_rpc::{Id, Request}; /// /// assert!(Request::new("").is_notification()); /// assert!(!Request::new_with_id(Id::Null, "").is_notification()); diff --git a/rpc/json-rpc/src/response.rs b/rpc/json-rpc/src/response.rs index b1c9483e..efd768b5 100644 --- a/rpc/json-rpc/src/response.rs +++ b/rpc/json-rpc/src/response.rs @@ -37,7 +37,7 @@ impl<T> Response<T> { /// Creates a successful response. /// /// ```rust - /// use json_rpc::{Id, Response}; + /// use cuprate_json_rpc::{Id, Response}; /// /// let ok = Response::ok(Id::Num(123), "OK"); /// let json = serde_json::to_string(&ok).unwrap(); @@ -54,7 +54,7 @@ impl<T> Response<T> { /// Creates an error response. /// /// ```rust - /// use json_rpc::{Id, Response, error::{ErrorObject, ErrorCode}}; + /// use cuprate_json_rpc::{Id, Response, error::{ErrorObject, ErrorCode}}; /// /// let err = ErrorObject { /// code: 0.into(), @@ -77,7 +77,7 @@ impl<T> Response<T> { /// Creates an error response using [`ErrorObject::parse_error`]. /// /// ```rust - /// use json_rpc::{Id, Response, error::{ErrorObject, ErrorCode}}; + /// use cuprate_json_rpc::{Id, Response, error::{ErrorObject, ErrorCode}}; /// /// let ok = Response::<()>::parse_error(Id::Num(0)); /// let json = serde_json::to_string(&ok).unwrap(); @@ -94,7 +94,7 @@ impl<T> Response<T> { /// Creates an error response using [`ErrorObject::invalid_request`]. 
/// /// ```rust - /// use json_rpc::{Id, Response, error::{ErrorObject, ErrorCode}}; + /// use cuprate_json_rpc::{Id, Response, error::{ErrorObject, ErrorCode}}; /// /// let ok = Response::<()>::invalid_request(Id::Num(0)); /// let json = serde_json::to_string(&ok).unwrap(); @@ -111,7 +111,7 @@ impl<T> Response<T> { /// Creates an error response using [`ErrorObject::method_not_found`]. /// /// ```rust - /// use json_rpc::{Id, Response, error::{ErrorObject, ErrorCode}}; + /// use cuprate_json_rpc::{Id, Response, error::{ErrorObject, ErrorCode}}; /// /// let ok = Response::<()>::method_not_found(Id::Num(0)); /// let json = serde_json::to_string(&ok).unwrap(); @@ -128,7 +128,7 @@ impl<T> Response<T> { /// Creates an error response using [`ErrorObject::invalid_params`]. /// /// ```rust - /// use json_rpc::{Id, Response, error::{ErrorObject, ErrorCode}}; + /// use cuprate_json_rpc::{Id, Response, error::{ErrorObject, ErrorCode}}; /// /// let ok = Response::<()>::invalid_params(Id::Num(0)); /// let json = serde_json::to_string(&ok).unwrap(); @@ -145,7 +145,7 @@ impl<T> Response<T> { /// Creates an error response using [`ErrorObject::internal_error`]. /// /// ```rust - /// use json_rpc::{Id, Response, error::{ErrorObject, ErrorCode}}; + /// use cuprate_json_rpc::{Id, Response, error::{ErrorObject, ErrorCode}}; /// /// let ok = Response::<()>::internal_error(Id::Num(0)); /// let json = serde_json::to_string(&ok).unwrap(); diff --git a/rpc/json-rpc/src/version.rs b/rpc/json-rpc/src/version.rs index 958d2150..30e507a4 100644 --- a/rpc/json-rpc/src/version.rs +++ b/rpc/json-rpc/src/version.rs @@ -26,7 +26,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// /// # Example /// ```rust -/// use json_rpc::Version; +/// use cuprate_json_rpc::Version; /// use serde_json::{to_string, to_string_pretty, from_str}; /// /// assert_eq!(Version::TWO, "2.0"); @@ -64,7 +64,7 @@ impl Version { /// Note that this does not have extra quotes to mark /// that it's a JSON string and not a float. 
/// ```rust - /// use json_rpc::Version; + /// use cuprate_json_rpc::Version; /// /// let string = format!("{}", Version); /// assert_eq!(string, "2.0"); diff --git a/rpc/cuprate-rpc-interface/Cargo.toml b/rpc/rpc-interface/Cargo.toml similarity index 100% rename from rpc/cuprate-rpc-interface/Cargo.toml rename to rpc/rpc-interface/Cargo.toml diff --git a/rpc/cuprate-rpc-interface/src/lib.rs b/rpc/rpc-interface/src/lib.rs similarity index 100% rename from rpc/cuprate-rpc-interface/src/lib.rs rename to rpc/rpc-interface/src/lib.rs diff --git a/rpc/monero-rpc-types/Cargo.toml b/rpc/rpc-types/Cargo.toml similarity index 90% rename from rpc/monero-rpc-types/Cargo.toml rename to rpc/rpc-types/Cargo.toml index a32eedb6..e299becc 100644 --- a/rpc/monero-rpc-types/Cargo.toml +++ b/rpc/rpc-types/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "monero-rpc-types" +name = "cuprate-rpc-types" version = "0.0.0" edition = "2021" description = "Monero RPC types" diff --git a/rpc/monero-rpc-types/src/lib.rs b/rpc/rpc-types/src/lib.rs similarity index 100% rename from rpc/monero-rpc-types/src/lib.rs rename to rpc/rpc-types/src/lib.rs diff --git a/storage/cuprate-blockchain/Cargo.toml b/storage/blockchain/Cargo.toml similarity index 98% rename from storage/cuprate-blockchain/Cargo.toml rename to storage/blockchain/Cargo.toml index e5b6bf67..eb5a27ea 100644 --- a/storage/cuprate-blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -28,7 +28,7 @@ cfg-if = { workspace = true } cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] } cuprate-types = { path = "../../types", features = ["blockchain"] } curve25519-dalek = { workspace = true } -monero-pruning = { path = "../../pruning" } +cuprate-pruning = { path = "../../pruning" } monero-serai = { workspace = true, features = ["std"] } paste = { workspace = true } page_size = { version = "0.6.0" } # Needed for database resizes, they must be a multiple of the OS page size. 
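The `page_size` dependency retained in the Cargo.toml hunk above exists because memory-mapped database resizes must land on a multiple of the OS page size. A minimal sketch of that constraint, assuming the `page_size` crate's `get()` function; the helper below is purely illustrative and is not the crate's actual resize code (which lives in `resize.rs`, renamed further down):

```rust
/// Round a requested database size up to the next multiple of the OS page size.
///
/// Hypothetical helper for illustration only; `cuprate-blockchain`'s real
/// resize logic may differ.
fn round_to_page_size(requested_bytes: usize) -> usize {
    // `page_size::get()` returns the OS page size, e.g. 4096 on most x86-64 systems.
    let page = page_size::get();
    requested_bytes.div_ceil(page) * page
}

fn main() {
    let new_size = round_to_page_size(10_000_000);
    // Whatever was requested, the resulting size is page-aligned.
    assert_eq!(new_size % page_size::get(), 0);
    assert!(new_size >= 10_000_000);
}
```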
diff --git a/storage/cuprate-blockchain/README.md b/storage/blockchain/README.md similarity index 100% rename from storage/cuprate-blockchain/README.md rename to storage/blockchain/README.md diff --git a/storage/cuprate-blockchain/src/backend/heed/database.rs b/storage/blockchain/src/backend/heed/database.rs similarity index 100% rename from storage/cuprate-blockchain/src/backend/heed/database.rs rename to storage/blockchain/src/backend/heed/database.rs diff --git a/storage/cuprate-blockchain/src/backend/heed/env.rs b/storage/blockchain/src/backend/heed/env.rs similarity index 100% rename from storage/cuprate-blockchain/src/backend/heed/env.rs rename to storage/blockchain/src/backend/heed/env.rs diff --git a/storage/cuprate-blockchain/src/backend/heed/error.rs b/storage/blockchain/src/backend/heed/error.rs similarity index 100% rename from storage/cuprate-blockchain/src/backend/heed/error.rs rename to storage/blockchain/src/backend/heed/error.rs diff --git a/storage/cuprate-blockchain/src/backend/heed/mod.rs b/storage/blockchain/src/backend/heed/mod.rs similarity index 100% rename from storage/cuprate-blockchain/src/backend/heed/mod.rs rename to storage/blockchain/src/backend/heed/mod.rs diff --git a/storage/cuprate-blockchain/src/backend/heed/storable.rs b/storage/blockchain/src/backend/heed/storable.rs similarity index 100% rename from storage/cuprate-blockchain/src/backend/heed/storable.rs rename to storage/blockchain/src/backend/heed/storable.rs diff --git a/storage/cuprate-blockchain/src/backend/heed/transaction.rs b/storage/blockchain/src/backend/heed/transaction.rs similarity index 100% rename from storage/cuprate-blockchain/src/backend/heed/transaction.rs rename to storage/blockchain/src/backend/heed/transaction.rs diff --git a/storage/cuprate-blockchain/src/backend/heed/types.rs b/storage/blockchain/src/backend/heed/types.rs similarity index 100% rename from storage/cuprate-blockchain/src/backend/heed/types.rs rename to storage/blockchain/src/backend/heed/types.rs diff --git a/storage/cuprate-blockchain/src/backend/mod.rs b/storage/blockchain/src/backend/mod.rs similarity index 100% rename from storage/cuprate-blockchain/src/backend/mod.rs rename to storage/blockchain/src/backend/mod.rs diff --git a/storage/cuprate-blockchain/src/backend/redb/database.rs b/storage/blockchain/src/backend/redb/database.rs similarity index 100% rename from storage/cuprate-blockchain/src/backend/redb/database.rs rename to storage/blockchain/src/backend/redb/database.rs diff --git a/storage/cuprate-blockchain/src/backend/redb/env.rs b/storage/blockchain/src/backend/redb/env.rs similarity index 100% rename from storage/cuprate-blockchain/src/backend/redb/env.rs rename to storage/blockchain/src/backend/redb/env.rs diff --git a/storage/cuprate-blockchain/src/backend/redb/error.rs b/storage/blockchain/src/backend/redb/error.rs similarity index 100% rename from storage/cuprate-blockchain/src/backend/redb/error.rs rename to storage/blockchain/src/backend/redb/error.rs diff --git a/storage/cuprate-blockchain/src/backend/redb/mod.rs b/storage/blockchain/src/backend/redb/mod.rs similarity index 100% rename from storage/cuprate-blockchain/src/backend/redb/mod.rs rename to storage/blockchain/src/backend/redb/mod.rs diff --git a/storage/cuprate-blockchain/src/backend/redb/storable.rs b/storage/blockchain/src/backend/redb/storable.rs similarity index 100% rename from storage/cuprate-blockchain/src/backend/redb/storable.rs rename to storage/blockchain/src/backend/redb/storable.rs diff --git 
a/storage/cuprate-blockchain/src/backend/redb/transaction.rs b/storage/blockchain/src/backend/redb/transaction.rs similarity index 100% rename from storage/cuprate-blockchain/src/backend/redb/transaction.rs rename to storage/blockchain/src/backend/redb/transaction.rs diff --git a/storage/cuprate-blockchain/src/backend/redb/types.rs b/storage/blockchain/src/backend/redb/types.rs similarity index 100% rename from storage/cuprate-blockchain/src/backend/redb/types.rs rename to storage/blockchain/src/backend/redb/types.rs diff --git a/storage/cuprate-blockchain/src/backend/tests.rs b/storage/blockchain/src/backend/tests.rs similarity index 100% rename from storage/cuprate-blockchain/src/backend/tests.rs rename to storage/blockchain/src/backend/tests.rs diff --git a/storage/cuprate-blockchain/src/config/backend.rs b/storage/blockchain/src/config/backend.rs similarity index 100% rename from storage/cuprate-blockchain/src/config/backend.rs rename to storage/blockchain/src/config/backend.rs diff --git a/storage/cuprate-blockchain/src/config/config.rs b/storage/blockchain/src/config/config.rs similarity index 100% rename from storage/cuprate-blockchain/src/config/config.rs rename to storage/blockchain/src/config/config.rs diff --git a/storage/cuprate-blockchain/src/config/mod.rs b/storage/blockchain/src/config/mod.rs similarity index 100% rename from storage/cuprate-blockchain/src/config/mod.rs rename to storage/blockchain/src/config/mod.rs diff --git a/storage/cuprate-blockchain/src/config/reader_threads.rs b/storage/blockchain/src/config/reader_threads.rs similarity index 100% rename from storage/cuprate-blockchain/src/config/reader_threads.rs rename to storage/blockchain/src/config/reader_threads.rs diff --git a/storage/cuprate-blockchain/src/config/sync_mode.rs b/storage/blockchain/src/config/sync_mode.rs similarity index 100% rename from storage/cuprate-blockchain/src/config/sync_mode.rs rename to storage/blockchain/src/config/sync_mode.rs diff --git a/storage/cuprate-blockchain/src/constants.rs b/storage/blockchain/src/constants.rs similarity index 100% rename from storage/cuprate-blockchain/src/constants.rs rename to storage/blockchain/src/constants.rs diff --git a/storage/cuprate-blockchain/src/database.rs b/storage/blockchain/src/database.rs similarity index 100% rename from storage/cuprate-blockchain/src/database.rs rename to storage/blockchain/src/database.rs diff --git a/storage/cuprate-blockchain/src/env.rs b/storage/blockchain/src/env.rs similarity index 100% rename from storage/cuprate-blockchain/src/env.rs rename to storage/blockchain/src/env.rs diff --git a/storage/cuprate-blockchain/src/error.rs b/storage/blockchain/src/error.rs similarity index 100% rename from storage/cuprate-blockchain/src/error.rs rename to storage/blockchain/src/error.rs diff --git a/storage/cuprate-blockchain/src/free.rs b/storage/blockchain/src/free.rs similarity index 100% rename from storage/cuprate-blockchain/src/free.rs rename to storage/blockchain/src/free.rs diff --git a/storage/cuprate-blockchain/src/key.rs b/storage/blockchain/src/key.rs similarity index 100% rename from storage/cuprate-blockchain/src/key.rs rename to storage/blockchain/src/key.rs diff --git a/storage/cuprate-blockchain/src/lib.rs b/storage/blockchain/src/lib.rs similarity index 100% rename from storage/cuprate-blockchain/src/lib.rs rename to storage/blockchain/src/lib.rs diff --git a/storage/cuprate-blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs similarity index 100% rename from 
storage/cuprate-blockchain/src/ops/block.rs rename to storage/blockchain/src/ops/block.rs diff --git a/storage/cuprate-blockchain/src/ops/blockchain.rs b/storage/blockchain/src/ops/blockchain.rs similarity index 100% rename from storage/cuprate-blockchain/src/ops/blockchain.rs rename to storage/blockchain/src/ops/blockchain.rs diff --git a/storage/cuprate-blockchain/src/ops/key_image.rs b/storage/blockchain/src/ops/key_image.rs similarity index 100% rename from storage/cuprate-blockchain/src/ops/key_image.rs rename to storage/blockchain/src/ops/key_image.rs diff --git a/storage/cuprate-blockchain/src/ops/macros.rs b/storage/blockchain/src/ops/macros.rs similarity index 100% rename from storage/cuprate-blockchain/src/ops/macros.rs rename to storage/blockchain/src/ops/macros.rs diff --git a/storage/cuprate-blockchain/src/ops/mod.rs b/storage/blockchain/src/ops/mod.rs similarity index 100% rename from storage/cuprate-blockchain/src/ops/mod.rs rename to storage/blockchain/src/ops/mod.rs diff --git a/storage/cuprate-blockchain/src/ops/output.rs b/storage/blockchain/src/ops/output.rs similarity index 100% rename from storage/cuprate-blockchain/src/ops/output.rs rename to storage/blockchain/src/ops/output.rs diff --git a/storage/cuprate-blockchain/src/ops/property.rs b/storage/blockchain/src/ops/property.rs similarity index 97% rename from storage/cuprate-blockchain/src/ops/property.rs rename to storage/blockchain/src/ops/property.rs index 2e584d87..15b5f878 100644 --- a/storage/cuprate-blockchain/src/ops/property.rs +++ b/storage/blockchain/src/ops/property.rs @@ -3,7 +3,7 @@ //! SOMEDAY: the database `properties` table is not yet implemented. //---------------------------------------------------------------------------------------------------- Import -use monero_pruning::PruningSeed; +use cuprate_pruning::PruningSeed; use crate::{error::RuntimeError, ops::macros::doc_error}; //---------------------------------------------------------------------------------------------------- Free Functions diff --git a/storage/cuprate-blockchain/src/ops/tx.rs b/storage/blockchain/src/ops/tx.rs similarity index 100% rename from storage/cuprate-blockchain/src/ops/tx.rs rename to storage/blockchain/src/ops/tx.rs diff --git a/storage/cuprate-blockchain/src/resize.rs b/storage/blockchain/src/resize.rs similarity index 100% rename from storage/cuprate-blockchain/src/resize.rs rename to storage/blockchain/src/resize.rs diff --git a/storage/cuprate-blockchain/src/service/free.rs b/storage/blockchain/src/service/free.rs similarity index 100% rename from storage/cuprate-blockchain/src/service/free.rs rename to storage/blockchain/src/service/free.rs diff --git a/storage/cuprate-blockchain/src/service/mod.rs b/storage/blockchain/src/service/mod.rs similarity index 100% rename from storage/cuprate-blockchain/src/service/mod.rs rename to storage/blockchain/src/service/mod.rs diff --git a/storage/cuprate-blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs similarity index 100% rename from storage/cuprate-blockchain/src/service/read.rs rename to storage/blockchain/src/service/read.rs diff --git a/storage/cuprate-blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs similarity index 100% rename from storage/cuprate-blockchain/src/service/tests.rs rename to storage/blockchain/src/service/tests.rs diff --git a/storage/cuprate-blockchain/src/service/types.rs b/storage/blockchain/src/service/types.rs similarity index 100% rename from storage/cuprate-blockchain/src/service/types.rs rename to 
storage/blockchain/src/service/types.rs diff --git a/storage/cuprate-blockchain/src/service/write.rs b/storage/blockchain/src/service/write.rs similarity index 100% rename from storage/cuprate-blockchain/src/service/write.rs rename to storage/blockchain/src/service/write.rs diff --git a/storage/cuprate-blockchain/src/storable.rs b/storage/blockchain/src/storable.rs similarity index 100% rename from storage/cuprate-blockchain/src/storable.rs rename to storage/blockchain/src/storable.rs diff --git a/storage/cuprate-blockchain/src/table.rs b/storage/blockchain/src/table.rs similarity index 100% rename from storage/cuprate-blockchain/src/table.rs rename to storage/blockchain/src/table.rs diff --git a/storage/cuprate-blockchain/src/tables.rs b/storage/blockchain/src/tables.rs similarity index 100% rename from storage/cuprate-blockchain/src/tables.rs rename to storage/blockchain/src/tables.rs diff --git a/storage/cuprate-blockchain/src/tests.rs b/storage/blockchain/src/tests.rs similarity index 100% rename from storage/cuprate-blockchain/src/tests.rs rename to storage/blockchain/src/tests.rs diff --git a/storage/cuprate-blockchain/src/transaction.rs b/storage/blockchain/src/transaction.rs similarity index 100% rename from storage/cuprate-blockchain/src/transaction.rs rename to storage/blockchain/src/transaction.rs diff --git a/storage/cuprate-blockchain/src/types.rs b/storage/blockchain/src/types.rs similarity index 100% rename from storage/cuprate-blockchain/src/types.rs rename to storage/blockchain/src/types.rs diff --git a/storage/cuprate-blockchain/src/unsafe_sendable.rs b/storage/blockchain/src/unsafe_sendable.rs similarity index 100% rename from storage/cuprate-blockchain/src/unsafe_sendable.rs rename to storage/blockchain/src/unsafe_sendable.rs diff --git a/storage/database/Cargo.toml b/storage/database/Cargo.toml index 50bf0f7e..a0a46384 100644 --- a/storage/database/Cargo.toml +++ b/storage/database/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "database" +name = "cuprate-database" version = "0.0.0" edition = "2021" description = "Cuprate's database abstraction" diff --git a/storage/cuprate-txpool/Cargo.toml b/storage/txpool/Cargo.toml similarity index 100% rename from storage/cuprate-txpool/Cargo.toml rename to storage/txpool/Cargo.toml diff --git a/storage/cuprate-txpool/src/lib.rs b/storage/txpool/src/lib.rs similarity index 100% rename from storage/cuprate-txpool/src/lib.rs rename to storage/txpool/src/lib.rs diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index 82f7e523..f9a5c6d9 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -8,8 +8,8 @@ authors = ["Boog900", "hinto-janai"] [dependencies] cuprate-types = { path = "../types" } cuprate-helper = { path = "../helper", features = ["map"] } -monero-wire = { path = "../net/monero-wire" } -monero-p2p = { path = "../p2p/monero-p2p", features = ["borsh"] } +cuprate-wire = { path = "../net/wire" } +cuprate-p2p-core = { path = "../p2p/p2p-core", features = ["borsh"] } hex = { workspace = true } hex-literal = { workspace = true } diff --git a/test-utils/src/test_netzone.rs b/test-utils/src/test_netzone.rs index e82e5532..f1f75826 100644 --- a/test-utils/src/test_netzone.rs +++ b/test-utils/src/test_netzone.rs @@ -15,12 +15,12 @@ use futures::Stream; use tokio::io::{DuplexStream, ReadHalf, WriteHalf}; use tokio_util::codec::{FramedRead, FramedWrite}; -use monero_wire::{ +use cuprate_wire::{ network_address::{NetworkAddress, NetworkAddressIncorrectZone}, MoneroWireCodec, }; -use monero_p2p::{NetZoneAddress, 
NetworkZone}; +use cuprate_p2p_core::{NetZoneAddress, NetworkZone}; /// An address on the test network #[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, BorshSerialize, BorshDeserialize)] From 5c08d1a0e2691abe16b362b23dd4bc69cb9ad393 Mon Sep 17 00:00:00 2001 From: Boog900 <boog900@tutanota.com> Date: Tue, 25 Jun 2024 00:55:04 +0000 Subject: [PATCH 08/11] Consensus: fix Rx VM initialization (#190) * fix Rx VM initialization * fix imports * Apply suggestions from code review Co-authored-by: hinto-janai <hinto.janai@protonmail.com> * use checked_sub --------- Co-authored-by: hinto-janai <hinto.janai@protonmail.com> --- consensus/src/block.rs | 53 +++++++++++++-- consensus/src/context.rs | 24 ++----- consensus/src/context/rx_vms.rs | 95 ++++++++++++++------------- consensus/src/context/task.rs | 14 ++-- consensus/src/lib.rs | 3 + consensus/src/tests/context/rx_vms.rs | 4 +- 6 files changed, 113 insertions(+), 80 deletions(-) diff --git a/consensus/src/block.rs b/consensus/src/block.rs index 9fb46b95..d3d06722 100644 --- a/consensus/src/block.rs +++ b/consensus/src/block.rs @@ -21,6 +21,7 @@ use cuprate_consensus_rules::{ calculate_pow_hash, check_block, check_block_pow, is_randomx_seed_height, randomx_seed_height, BlockError, RandomX, }, + hard_forks::HardForkError, miner_tx::MinerTxError, ConsensusError, HardFork, }; @@ -327,6 +328,14 @@ where }) .await?; + let Some(last_block) = blocks.last() else { + return Err(ExtendedConsensusError::NoBlocksToVerify); + }; + + // hard-forks cannot be reversed, so the last block will contain the highest hard fork (provided the + // batch is valid). + let top_hf_in_batch = last_block.hf_version; + // A Vec of (timestamp, HF) for each block to calculate the expected difficulty for each block. let mut timestamps_hfs = Vec::with_capacity(blocks.len()); let mut new_rx_vm = None; @@ -338,6 +347,13 @@ where let block_0 = &window[0]; let block_1 = &window[1]; + // Make sure no blocks in the batch have a higher hard fork than the last block. + if block_0.hf_version > top_hf_in_batch { + Err(ConsensusError::Block(BlockError::HardForkError( + HardForkError::VersionIncorrect, + )))?; + } + if block_0.block_hash != block_1.block.header.previous || block_0.height != block_1.height - 1 { @@ -346,7 +362,7 @@ where } // Cache any potential RX VM seeds as we may need them for future blocks in the batch. - if is_randomx_seed_height(block_0.height) { + if is_randomx_seed_height(block_0.height) && top_hf_in_batch >= HardFork::V12 { new_rx_vm = Some((block_0.height, block_0.block_hash)); } @@ -395,7 +411,20 @@ where Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?; } - let mut rx_vms = context.rx_vms; + let mut rx_vms = if top_hf_in_batch < HardFork::V12 { + HashMap::new() + } else { + let BlockChainContextResponse::RxVms(rx_vms) = context_svc + .ready() + .await? + .call(BlockChainContextRequest::GetCurrentRxVm) + .await? + else { + panic!("Blockchain context service returned wrong response!"); + }; + + rx_vms + }; // If we have a RX seed in the batch calculate it. if let Some((new_vm_height, new_vm_seed)) = new_rx_vm { @@ -407,9 +436,7 @@ where .await; context_svc - .ready() - .await? - .call(BlockChainContextRequest::NewRXVM(( + .oneshot(BlockChainContextRequest::NewRXVM(( new_vm_seed, new_vm.clone(), ))) @@ -501,7 +528,21 @@ where // Set up the block and just pass it to [`verify_prepped_main_chain_block`] - let rx_vms = context.rx_vms.clone(); + // We just use the raw `major_version` here, no need to turn it into a `HardFork`. 
+ let rx_vms = if block.header.major_version < 12 { + HashMap::new() + } else { + let BlockChainContextResponse::RxVms(rx_vms) = context_svc + .ready() + .await? + .call(BlockChainContextRequest::GetCurrentRxVm) + .await? + else { + panic!("Blockchain context service returned wrong response!"); + }; + + rx_vms + }; let height = context.chain_height; let prepped_block = rayon_spawn_async(move || { diff --git a/consensus/src/context.rs b/consensus/src/context.rs index 70db6337..0752b8bf 100644 --- a/consensus/src/context.rs +++ b/consensus/src/context.rs @@ -85,20 +85,7 @@ impl ContextConfig { pub async fn initialize_blockchain_context<D>( cfg: ContextConfig, database: D, -) -> Result< - impl Service< - BlockChainContextRequest, - Response = BlockChainContextResponse, - Error = tower::BoxError, - Future = impl Future<Output = Result<BlockChainContextResponse, tower::BoxError>> - + Send - + 'static, - > + Clone - + Send - + Sync - + 'static, - ExtendedConsensusError, -> +) -> Result<BlockChainContextService, ExtendedConsensusError> where D: Database + Clone + Send + Sync + 'static, D::Future: Send + 'static, @@ -121,9 +108,6 @@ where pub struct RawBlockChainContext { /// The current cumulative difficulty. pub cumulative_difficulty: u128, - /// RandomX VMs, this maps seeds height to VM. Will definitely contain the VM required to calculate the current blocks - /// POW hash (if a RX VM is required), may contain more. - pub rx_vms: HashMap<u64, Arc<RandomXVM>>, /// Context to verify a block, as needed by [`cuprate-consensus-rules`] pub context_to_verify_block: ContextToVerifyBlock, /// The median long term block weight. @@ -162,7 +146,7 @@ impl RawBlockChainContext { } } - /// Returns the next blocks long term weight from it's block weight. + /// Returns the next blocks long term weight from its block weight. pub fn next_block_long_term_weight(&self, block_weight: usize) -> usize { weight::calculate_block_long_term_weight( &self.current_hf, @@ -232,6 +216,8 @@ pub struct NewBlockData { pub enum BlockChainContextRequest { /// Get the current blockchain context. GetContext, + /// Gets the current RandomX VM. + GetCurrentRxVm, /// Get the next difficulties for these blocks. /// /// Inputs: a list of block timestamps and hfs @@ -252,6 +238,8 @@ pub enum BlockChainContextRequest { pub enum BlockChainContextResponse { /// Blockchain context response. Context(BlockChainContext), + /// A map of seed height to RandomX VMs. + RxVms(HashMap<u64, Arc<RandomXVM>>), /// A list of difficulties. BatchDifficulties(Vec<u128>), /// Ok response. diff --git a/consensus/src/context/rx_vms.rs b/consensus/src/context/rx_vms.rs index 87e1de6f..08ecb957 100644 --- a/consensus/src/context/rx_vms.rs +++ b/consensus/src/context/rx_vms.rs @@ -125,64 +125,69 @@ impl RandomXVMCache { } /// Get the RandomX VMs. - pub fn get_vms(&self) -> HashMap<u64, Arc<RandomXVM>> { + pub async fn get_vms(&mut self) -> HashMap<u64, Arc<RandomXVM>> { + match self.seeds.len().checked_sub(self.vms.len()) { + // No difference in the amount of seeds to VMs. + Some(0) => (), + // One more seed than VM. + Some(1) => { + let (seed_height, next_seed_hash) = *self.seeds.front().unwrap(); + + let new_vm = 'new_vm_block: { + tracing::debug!( + "Initializing RandomX VM for seed: {}", + hex::encode(next_seed_hash) + ); + + // Check if we have been given the RX VM from another part of Cuprate. 
+ if let Some((cached_hash, cached_vm)) = self.cached_vm.take() { + if cached_hash == next_seed_hash { + tracing::debug!("VM was already created."); + break 'new_vm_block cached_vm; + } + }; + + rayon_spawn_async(move || Arc::new(RandomXVM::new(&next_seed_hash).unwrap())) + .await + }; + + self.vms.insert(seed_height, new_vm); + } + // More than one more seed than VM. + _ => { + // this will only happen when syncing and rx activates. + tracing::debug!("RandomX has activated, initialising VMs"); + + let seeds_clone = self.seeds.clone(); + self.vms = rayon_spawn_async(move || { + seeds_clone + .par_iter() + .map(|(height, seed)| { + let vm = RandomXVM::new(seed).expect("Failed to create RandomX VM!"); + let vm = Arc::new(vm); + (*height, vm) + }) + .collect() + }) + .await + } + } + self.vms.clone() } /// Add a new block to the VM cache. /// /// hash is the block hash not the blocks PoW hash. - pub async fn new_block(&mut self, height: u64, hash: &[u8; 32], hf: &HardFork) { - let should_make_vms = hf >= &HardFork::V12; - if should_make_vms && self.vms.len() != self.seeds.len() { - // this will only happen when syncing and rx activates. - tracing::debug!("RandomX has activated, initialising VMs"); - - let seeds_clone = self.seeds.clone(); - self.vms = rayon_spawn_async(move || { - seeds_clone - .par_iter() - .map(|(height, seed)| { - ( - *height, - Arc::new(RandomXVM::new(seed).expect("Failed to create RandomX VM!")), - ) - }) - .collect() - }) - .await - } - + pub fn new_block(&mut self, height: u64, hash: &[u8; 32]) { if is_randomx_seed_height(height) { tracing::debug!("Block {height} is a randomX seed height, adding it to the cache.",); self.seeds.push_front((height, *hash)); - if should_make_vms { - let new_vm = 'new_vm_block: { - tracing::debug!( - "Past hard-fork 12 initializing VM for seed: {}", - hex::encode(hash) - ); - - // Check if we have been given the RX VM from another part of Cuprate. - if let Some((cached_hash, cached_vm)) = self.cached_vm.take() { - if &cached_hash == hash { - tracing::debug!("VM was already created."); - break 'new_vm_block cached_vm; - } - }; - - let hash_clone = *hash; - rayon_spawn_async(move || Arc::new(RandomXVM::new(&hash_clone).unwrap())).await - }; - - self.vms.insert(height, new_vm); - } - if self.seeds.len() > RX_SEEDS_CACHED { self.seeds.pop_back(); - // TODO: This is really not efficient but the amount of VMs cached is not a lot. + // HACK: This is really inefficient but the amount of VMs cached is not a lot. 
self.vms.retain(|height, _| { self.seeds .iter() diff --git a/consensus/src/context/task.rs b/consensus/src/context/task.rs index 90e1de69..108922d7 100644 --- a/consensus/src/context/task.rs +++ b/consensus/src/context/task.rs @@ -158,13 +158,15 @@ impl ContextTask { next_difficulty: self.difficulty_cache.next_difficulty(¤t_hf), already_generated_coins: self.already_generated_coins, }, - rx_vms: self.rx_vm_cache.get_vms(), cumulative_difficulty: self.difficulty_cache.cumulative_difficulty(), median_long_term_weight: self.weight_cache.median_long_term_weight(), top_block_timestamp: self.difficulty_cache.top_block_timestamp(), }, }) } + BlockChainContextRequest::GetCurrentRxVm => { + BlockChainContextResponse::RxVms(self.rx_vm_cache.get_vms().await) + } BlockChainContextRequest::BatchGetDifficulties(blocks) => { tracing::debug!("Getting batch difficulties len: {}", blocks.len() + 1); @@ -199,15 +201,7 @@ impl ContextTask { self.hardfork_state.new_block(new.vote, new.height); - self.rx_vm_cache - .new_block( - new.height, - &new.block_hash, - // We use the current hf and not the hf of the top block as when syncing we need to generate VMs - // on the switch to RX not after it. - &self.hardfork_state.current_hardfork(), - ) - .await; + self.rx_vm_cache.new_block(new.height, &new.block_hash); self.chain_height = new.height + 1; self.top_block_hash = new.block_hash; diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index 5b38983a..1edafdce 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -44,6 +44,9 @@ pub enum ExtendedConsensusError { /// One or more statements in the batch verifier was invalid. #[error("One or more statements in the batch verifier was invalid.")] OneOrMoreBatchVerificationStatementsInvalid, + /// A request to verify a batch of blocks had no blocks in the batch. + #[error("A request to verify a batch of blocks had no blocks in the batch.")] + NoBlocksToVerify, } /// Initialize the 2 verifier [`tower::Service`]s (block and transaction). 
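The hunks above drop the eagerly built `rx_vms` map from `RawBlockChainContext` and instead expose the VMs behind a new `BlockChainContextRequest::GetCurrentRxVm` request, answered with `BlockChainContextResponse::RxVms`, so VMs are only initialized when a caller actually needs them (hard fork 12 and later). A minimal sketch of the resulting call pattern, mirroring the `block.rs` hunks; the free function, its bounds, and the `use` paths are assumptions for illustration, not part of the patch:

```rust
use std::{collections::HashMap, sync::Arc};

use tower::{Service, ServiceExt};

// Assumed re-export paths; the real items live under `consensus/src/context/`.
use cuprate_consensus::context::{
    BlockChainContextRequest, BlockChainContextResponse, RandomXVM,
};

/// Fetch the currently cached RandomX VMs (seed height -> VM) on demand.
async fn current_rx_vms<C>(
    context_svc: &mut C,
) -> Result<HashMap<u64, Arc<RandomXVM>>, tower::BoxError>
where
    C: Service<
        BlockChainContextRequest,
        Response = BlockChainContextResponse,
        Error = tower::BoxError,
    >,
{
    let BlockChainContextResponse::RxVms(rx_vms) = context_svc
        .ready()
        .await?
        .call(BlockChainContextRequest::GetCurrentRxVm)
        .await?
    else {
        // Any other variant means the context service broke its contract.
        return Err("blockchain context service returned wrong response".into());
    };

    Ok(rx_vms)
}
```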
diff --git a/consensus/src/tests/context/rx_vms.rs b/consensus/src/tests/context/rx_vms.rs index 1d064049..f18a9b59 100644 --- a/consensus/src/tests/context/rx_vms.rs +++ b/consensus/src/tests/context/rx_vms.rs @@ -47,7 +47,9 @@ async fn rx_vm_created_on_hf_12() { .unwrap(); assert!(cache.vms.is_empty()); - cache.new_block(11, &[30; 32], &HardFork::V12).await; + cache.new_block(11, &[30; 32]); + cache.get_vms().await; + assert!(!cache.vms.is_empty()); } From e405786a73bd9557a43208af016e03e3172cbc07 Mon Sep 17 00:00:00 2001 From: hinto-janai <hinto.janai@protonmail.com> Date: Wed, 26 Jun 2024 17:24:05 -0400 Subject: [PATCH 09/11] rpc: start `cuprate-rpc-types` (#147) * rpc: add `monero-rpc-types` * lib.rs: add lints * add base files, deps * fix macro generation, doc test * add `strum`, add `misc` module * document struct generation macro * add `GetHeight` * lib.rs: create re-export macro * macro changes, add few more types * docs * `monero-rpc-types` -> `cuprate-rpc-types` * fix modules * specify commit in macro, add () type aliases * macro docs, fixes * add `Status::Other(String)` * add TODO for `strum` * Update rpc/types/Cargo.toml Co-authored-by: Boog900 <boog900@tutanota.com> * add `BinaryString` * add `ResponseBase` * add `CORE_RPC_*` constants * fix status; use `CORE_RPC_*` constants * cargo.toml: add `epee_encoding` * rpc: add epee_encoding impl for `Status` * macro: add epee_encoding for every type * remove `strum` * add response bases * add `CORE_RPC_STATUS_UNKNOWN` * add response/request bases for epee * create `base` module * use different type for macro example * move base / root types around * docs, status serde test * status: use `Status::Unknown` for `epee_default_value` * json: add missing fields to `GetBlockTemplateRequest` not sure I missed these https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h#L947-L950 --------- Co-authored-by: Boog900 <boog900@tutanota.com> --- Cargo.lock | 7 + Cargo.toml | 4 +- rpc/{rpc-interface => interface}/Cargo.toml | 0 rpc/{rpc-interface => interface}/src/lib.rs | 0 rpc/rpc-types/src/lib.rs | 1 - rpc/{rpc-types => types}/Cargo.toml | 11 +- rpc/types/README.md | 62 +++++ rpc/types/src/base.rs | 125 +++++++++ rpc/types/src/bin.rs | 11 + rpc/types/src/binary_string.rs | 29 ++ rpc/types/src/constants.rs | 65 +++++ rpc/types/src/json.rs | 129 +++++++++ rpc/types/src/lib.rs | 116 ++++++++ rpc/types/src/macros.rs | 277 ++++++++++++++++++++ rpc/types/src/other.rs | 21 ++ rpc/types/src/status.rs | 182 +++++++++++++ 16 files changed, 1035 insertions(+), 5 deletions(-) rename rpc/{rpc-interface => interface}/Cargo.toml (100%) rename rpc/{rpc-interface => interface}/src/lib.rs (100%) delete mode 100644 rpc/rpc-types/src/lib.rs rename rpc/{rpc-types => types}/Cargo.toml (50%) create mode 100644 rpc/types/README.md create mode 100644 rpc/types/src/base.rs create mode 100644 rpc/types/src/bin.rs create mode 100644 rpc/types/src/binary_string.rs create mode 100644 rpc/types/src/constants.rs create mode 100644 rpc/types/src/json.rs create mode 100644 rpc/types/src/lib.rs create mode 100644 rpc/types/src/macros.rs create mode 100644 rpc/types/src/other.rs create mode 100644 rpc/types/src/status.rs diff --git a/Cargo.lock b/Cargo.lock index 07997288..9380e21d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -745,6 +745,13 @@ version = "0.0.0" [[package]] name = "cuprate-rpc-types" version = "0.0.0" +dependencies = [ + "cuprate-epee-encoding", + "monero-serai", + "paste", + "serde", + 
"serde_json", +] [[package]] name = "cuprate-test-utils" diff --git a/Cargo.toml b/Cargo.toml index 8891b83b..35aabc5b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,8 +23,8 @@ members = [ "test-utils", "types", "rpc/json-rpc", - "rpc/rpc-types", - "rpc/rpc-interface", + "rpc/types", + "rpc/interface", ] [profile.release] diff --git a/rpc/rpc-interface/Cargo.toml b/rpc/interface/Cargo.toml similarity index 100% rename from rpc/rpc-interface/Cargo.toml rename to rpc/interface/Cargo.toml diff --git a/rpc/rpc-interface/src/lib.rs b/rpc/interface/src/lib.rs similarity index 100% rename from rpc/rpc-interface/src/lib.rs rename to rpc/interface/src/lib.rs diff --git a/rpc/rpc-types/src/lib.rs b/rpc/rpc-types/src/lib.rs deleted file mode 100644 index 8b137891..00000000 --- a/rpc/rpc-types/src/lib.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/rpc/rpc-types/Cargo.toml b/rpc/types/Cargo.toml similarity index 50% rename from rpc/rpc-types/Cargo.toml rename to rpc/types/Cargo.toml index e299becc..30e4aa95 100644 --- a/rpc/rpc-types/Cargo.toml +++ b/rpc/types/Cargo.toml @@ -5,11 +5,18 @@ edition = "2021" description = "Monero RPC types" license = "MIT" authors = ["hinto-janai"] -repository = "https://github.com/Cuprate/cuprate/tree/main/rpc/monero-rpc-types" -keywords = ["monero", "rpc", "types"] +repository = "https://github.com/Cuprate/cuprate/tree/main/rpc/types" +keywords = ["cuprate", "rpc", "types", "monero"] [features] +default = [] [dependencies] +cuprate-epee-encoding = { path = "../../net/epee-encoding" } + +monero-serai = { workspace = true } +paste = { workspace = true } +serde = { workspace = true } [dev-dependencies] +serde_json = { workspace = true } diff --git a/rpc/types/README.md b/rpc/types/README.md new file mode 100644 index 00000000..65b6d907 --- /dev/null +++ b/rpc/types/README.md @@ -0,0 +1,62 @@ +Monero RPC types. + +# What +This crate ports the types used in Monero's RPC interface, including: +- JSON types +- Binary (epee) types +- Mixed types +- Other commonly used RPC types + +# Modules +This crate's types are split in the following manner: + +This crate has 4 modules: +- The root module; `cuprate_rpc_types` +- [`json`] module; JSON types from the `/json_rpc` endpoint +- [`bin`] module; Binary types from the binary endpoints +- [`other`] module; Misc JSON types from other endpoints + +Miscellaneous types are found in the root module, e.g. [`crate::Status`]. + +Each type in `{json,bin,other}` come in pairs and have identical names, but are suffixed with either `Request` or `Response`. e.g. [`GetBlockCountRequest`](crate::json::GetBlockCountRequest) & [`GetBlockCountResponse`](crate::json::GetBlockCountResponse). + +# Documentation +The documentation for types within `{json,bin,other}` are omitted, as they can be found in [Monero's RPC documentation](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html). 
+ +However, each type will document: +- **Definition**: the exact type definition location in `monerod` +- **Documentation**: the Monero RPC documentation link +- **Request/response**: the other side of this type, either the request or response + +# Naming +The naming for types within `{json,bin,other}` follow the following scheme: +- Convert the endpoint or method name into `UpperCamelCase` +- Remove any suffix extension + +For example: + +| Endpoint/method | Crate location and name | +|-----------------|-------------------------| +| [`get_block_count`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_block_count) | [`json::GetBlockCountRequest`] & [`json::GetBlockCountResponse`] +| [`/get_blocks.bin`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_blockbin) | `bin::GetBlocksRequest` & `bin::GetBlocksResponse` +| [`/get_height`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_height) | `other::GetHeightRequest` & `other::GetHeightResponse` + +TODO: fix doc links when types are ready. + +# Mixed types +Note that some types within [`other`] mix JSON & binary together, i.e., +the message overall is JSON, however some fields contain binary +values inside JSON strings, for example: + +```json +{ + "string": "", + "float": 30.0, + "integer": 30, + "binary": "<serialized binary>" +} +``` + +`binary` here is (de)serialized as a normal [`String`]. In order to be clear on which fields contain binary data, the struct fields that have them will use [`crate::BinaryString`] instead of [`String`]. + +TODO: list the specific types. \ No newline at end of file diff --git a/rpc/types/src/base.rs b/rpc/types/src/base.rs new file mode 100644 index 00000000..6a293678 --- /dev/null +++ b/rpc/types/src/base.rs @@ -0,0 +1,125 @@ +//! The base data that appear in many RPC request/responses. +//! +//! These are the common "headers" or "base" types that are +//! [`flattened`](https://serde.rs/field-attrs.html#flatten) +//! into many of Monero's RPC types. +//! +//! The `Access*` structs (e.g. [`AccessResponseBase`] +//! are pseudo-deprecated structs for the RPC payment system, see: +//! +//! - <https://github.com/monero-project/monero/commit/2899379791b7542e4eb920b5d9d58cf232806937> +//! - <https://github.com/monero-project/monero/issues/8722> +//! - <https://github.com/monero-project/monero/pull/8843> + +//---------------------------------------------------------------------------------------------------- Import +use serde::{Deserialize, Serialize}; + +use cuprate_epee_encoding::epee_object; + +use crate::Status; + +//---------------------------------------------------------------------------------------------------- Macro +/// Link the original `monerod` definition for RPC base types. +macro_rules! monero_rpc_base_link { + ($start:literal..=$end:literal) => { + concat!( + "[Definition](https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h#L", + stringify!($start), + "-L", + stringify!($end), + ")." + ) + }; +} + +//---------------------------------------------------------------------------------------------------- Requests +/// The most common base for responses (nothing). +/// +#[doc = monero_rpc_base_link!(95..=99)] +#[derive( + Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, +)] +pub struct EmptyRequestBase; + +cuprate_epee_encoding::epee_object! 
{ + EmptyRequestBase, +} + +/// A base for RPC request types that support RPC payment. +/// +#[doc = monero_rpc_base_link!(114..=122)] +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub struct AccessRequestBase { + /// The RPC payment client. + pub client: String, +} + +cuprate_epee_encoding::epee_object! { + AccessRequestBase, + client: String, +} + +//---------------------------------------------------------------------------------------------------- Responses +/// An empty response base. +/// +/// This is for response types that do not contain +/// any extra fields, e.g. TODO. +// [`CalcPowResponse`](crate::json::CalcPowResponse). +#[derive( + Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, +)] +pub struct EmptyResponseBase; + +cuprate_epee_encoding::epee_object! { + EmptyResponseBase, +} + +/// The most common base for responses. +/// +#[doc = monero_rpc_base_link!(101..=112)] +#[derive( + Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, +)] +pub struct ResponseBase { + /// General RPC error code. [`Status::Ok`] means everything looks good. + pub status: Status, + /// States if the result is obtained using the bootstrap mode, + /// and is therefore not trusted (`true`), or when the daemon + /// is fully synced and thus handles the RPC locally (`false`). + pub untrusted: bool, +} + +epee_object! { + ResponseBase, + status: Status, + untrusted: bool, +} + +/// A base for RPC response types that support RPC payment. +/// +#[doc = monero_rpc_base_link!(124..=136)] +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub struct AccessResponseBase { + /// A flattened [`ResponseBase`]. + #[serde(flatten)] + pub response_base: ResponseBase, + /// If payment for RPC is enabled, the number of credits + /// available to the requesting client. Otherwise, `0`. + pub credits: u64, + /// If payment for RPC is enabled, the hash of the + /// highest block in the chain. Otherwise, empty. + pub top_hash: String, +} + +epee_object! { + AccessResponseBase, + credits: u64, + top_hash: String, + !flatten: response_base: ResponseBase, +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} diff --git a/rpc/types/src/bin.rs b/rpc/types/src/bin.rs new file mode 100644 index 00000000..f327847f --- /dev/null +++ b/rpc/types/src/bin.rs @@ -0,0 +1,11 @@ +//! Binary types from [binary](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_blocksbin) endpoints. + +//---------------------------------------------------------------------------------------------------- Import + +//---------------------------------------------------------------------------------------------------- TODO + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} diff --git a/rpc/types/src/binary_string.rs b/rpc/types/src/binary_string.rs new file mode 100644 index 00000000..b644ad32 --- /dev/null +++ b/rpc/types/src/binary_string.rs @@ -0,0 +1,29 @@ +//! 
TODO + +//---------------------------------------------------------------------------------------------------- Import + +//---------------------------------------------------------------------------------------------------- BinaryString +/// TODO +/// +/// ```rust +/// use serde::Deserialize; +/// use serde_json::from_str; +/// use cuprate_rpc_types::BinaryString; +/// +/// #[derive(Deserialize)] +/// struct Key { +/// key: BinaryString, +/// } +/// +/// let binary = r"�\b����������"; +/// let json = format!("{{\"key\":\"{binary}\"}}"); +/// let key = from_str::<Key>(&json).unwrap(); +/// let binary: BinaryString = key.key; +/// ``` +pub type BinaryString = String; + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} diff --git a/rpc/types/src/constants.rs b/rpc/types/src/constants.rs new file mode 100644 index 00000000..2d5266fd --- /dev/null +++ b/rpc/types/src/constants.rs @@ -0,0 +1,65 @@ +//! TODO + +// From: <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h#L83-L89> +// +// ``` +// When making *any* change here, bump minor +// If the change is incompatible, then bump major and set minor to 0 +// This ensures CORE_RPC_VERSION always increases, that every change +// has its own version, and that clients can just test major to see +// whether they can talk to a given daemon without having to know in +// advance which version they will stop working with +// Don't go over 32767 for any of these +// ``` +// +// What this means for Cuprate: just follow `monerod`. + +//---------------------------------------------------------------------------------------------------- Import + +//---------------------------------------------------------------------------------------------------- Status +// Common RPC status strings: +// <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h#L78-L81>. +// +// Note that these are _distinct_ from the ones in ZMQ: +// <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/message.cpp#L40-L44>. + +/// <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h#L78> +pub const CORE_RPC_STATUS_OK: &str = "OK"; + +/// <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h#L79> +pub const CORE_RPC_STATUS_BUSY: &str = "BUSY"; + +/// <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h#L80> +pub const CORE_RPC_STATUS_NOT_MINING: &str = "NOT MINING"; + +/// <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h#L81> +pub const CORE_RPC_STATUS_PAYMENT_REQUIRED: &str = "PAYMENT REQUIRED"; + +/// Custom `CORE_RPC_STATUS` for usage in Cuprate. +pub const CORE_RPC_STATUS_UNKNOWN: &str = "UNKNOWN"; + +//---------------------------------------------------------------------------------------------------- Versions +/// RPC major version. +/// +/// See: <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h#L90>. +pub const CORE_RPC_VERSION_MAJOR: u32 = 3; + +/// RPC miror version. 
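/// Combined with [`CORE_RPC_VERSION_MAJOR`] above into [`CORE_RPC_VERSION`] below as `(major << 16) | minor`.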
+/// +/// See: <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h#L91>. +pub const CORE_RPC_VERSION_MINOR: u32 = 14; + +/// RPC version. +/// +/// See: <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h#L92-L93>. +/// +/// ```rust +/// assert_eq!(cuprate_rpc_types::CORE_RPC_VERSION, 196_622); +/// ``` +pub const CORE_RPC_VERSION: u32 = (CORE_RPC_VERSION_MAJOR << 16) | CORE_RPC_VERSION_MINOR; + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs new file mode 100644 index 00000000..5f5f8ff7 --- /dev/null +++ b/rpc/types/src/json.rs @@ -0,0 +1,129 @@ +//! JSON types from the [`/json_rpc`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#json-rpc-methods) endpoint. +//! +//! <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/daemon_messages.h>. + +//---------------------------------------------------------------------------------------------------- Import +use crate::{ + base::{EmptyRequestBase, EmptyResponseBase, ResponseBase}, + macros::define_request_and_response, +}; + +//---------------------------------------------------------------------------------------------------- Struct definitions +// This generates 2 structs: +// +// - `GetBlockTemplateRequest` +// - `GetBlockTemplateResponse` +// +// with some interconnected documentation. +define_request_and_response! { + // The markdown tag for Monero RPC documentation. Not necessarily the endpoint. + get_block_template, + + // The commit hash and `$file.$extension` in which this type is defined in + // the Monero codebase in the `rpc/` directory, followed by the specific lines. + cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 943..=994, + + // The base type name. + GetBlockTemplate, + + // The base request type. + // + // This must be a type found in [`crate::base`]. + // It acts as a "base" that gets flattened into + // the actually request type. + // + // "Flatten" means the field(s) of a struct gets inlined + // directly into the struct during (de)serialization, see: + // <https://serde.rs/field-attrs.html#flatten>. + // + // For example here, we're using [`crate::base::EmptyRequestBase`], + // which means that there is no extra fields flattened. + // + // If a request is not specified here, it will create a `type alias YOUR_REQUEST_TYPE = ()` + // instead of a `struct`, see below in other macro definitions for an example. + EmptyRequestBase { + reserve_size: u64, + wallet_address: String, + prev_block: String, + extra_nonce: String, + }, + + // The base response type. + // + // This is the same as the request base type, + // it must be a type found in [`crate::base`]. 
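    // As an illustrative aside (a sketch, not literal output): with
    // `ResponseBase` flattened, a serialized `GetBlockTemplateResponse`
    // looks roughly like
    // `{"status":"OK","untrusted":false,"difficulty":...,"height":...,...}`
    // rather than nesting those fields under a `base` key.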
+ // + // If there are any additional attributes (`/// docs` or `#[derive]`s) + // for the struct, they go here, e.g.: + // #[derive(Copy)] + ResponseBase { + // This is using `crate::base::ResponseBase`, + // so the type we generate will contain this field: + // ``` + // base: crate::base::ResponseBase, + // ``` + // + // This is flattened with serde and epee, so during + // (de)serialization, it will act as if there are 2 extra fields here: + // ``` + // status: crate::Status, + // untrusted: bool, + // ``` + + // Within the `{}` is an infinite matching pattern of: + // ``` + // $ATTRIBUTES + // $FIELD_NAME: $FIELD_TYPE, + // ``` + // The struct generated and all fields are `pub`. + difficulty: u64, + wide_difficulty: String, + difficulty_top64: u64, + height: u64, + reserved_offset: u64, + expected_reward: u64, + prev_hash: String, + seed_height: u64, + seed_hash: String, + next_seed_hash: String, + blocktemplate_blob: String, + blockhashing_blob: String, + } +} + +define_request_and_response! { + get_block_count, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 919..=933, + GetBlockCount, + + // There is no request type specified, + // this will cause the macro to generate a + // type alias to `()` instead of a `struct`. + + ResponseBase { + count: u64, + } +} + +define_request_and_response! { + on_get_block_hash, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 935..=939, + OnGetBlockHash, + #[derive(Copy)] + EmptyRequestBase { + #[serde(flatten)] + block_height: u64, + }, + EmptyResponseBase { + #[serde(flatten)] + block_hash: String, + } +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} diff --git a/rpc/types/src/lib.rs b/rpc/types/src/lib.rs new file mode 100644 index 00000000..780208bd --- /dev/null +++ b/rpc/types/src/lib.rs @@ -0,0 +1,116 @@ +#![doc = include_str!("../README.md")] +//---------------------------------------------------------------------------------------------------- Lints +// Forbid lints. +// Our code, and code generated (e.g macros) cannot overrule these. +#![forbid( + // `unsafe` is allowed but it _must_ be + // commented with `SAFETY: reason`. + clippy::undocumented_unsafe_blocks, + + // Never. + unused_unsafe, + redundant_semicolons, + unused_allocation, + coherence_leak_check, + while_true, + clippy::missing_docs_in_private_items, + + // Maybe can be put into `#[deny]`. + unconditional_recursion, + for_loops_over_fallibles, + unused_braces, + unused_labels, + keyword_idents, + non_ascii_idents, + variant_size_differences, + single_use_lifetimes, + + // Probably can be put into `#[deny]`. + future_incompatible, + let_underscore, + break_with_label_and_loop, + duplicate_macro_attributes, + exported_private_dependencies, + large_assignments, + overlapping_range_endpoints, + semicolon_in_expressions_from_macros, + noop_method_call, +)] +// Deny lints. +// Some of these are `#[allow]`'ed on a per-case basis. +#![deny( + clippy::all, + clippy::correctness, + clippy::suspicious, + clippy::style, + clippy::complexity, + clippy::perf, + clippy::pedantic, + clippy::nursery, + clippy::cargo, + unused_doc_comments, + unused_mut, + missing_docs, + deprecated, + unused_comparisons, + nonstandard_style, + unreachable_pub +)] +#![allow( + // FIXME: this lint affects crates outside of + // `database/` for some reason, allow for now. 
+ clippy::cargo_common_metadata, + + // FIXME: adding `#[must_use]` onto everything + // might just be more annoying than useful... + // although it is sometimes nice. + clippy::must_use_candidate, + + // FIXME: good lint but too many false positives + // with our `Env` + `RwLock` setup. + clippy::significant_drop_tightening, + + // FIXME: good lint but is less clear in most cases. + clippy::items_after_statements, + + // TODO + rustdoc::bare_urls, + + clippy::module_name_repetitions, + clippy::module_inception, + clippy::redundant_pub_crate, + clippy::option_if_let_else, +)] +// Allow some lints when running in debug mode. +#![cfg_attr(debug_assertions, allow(clippy::todo, clippy::multiple_crate_versions))] +// Allow some lints in tests. +#![cfg_attr( + test, + allow( + clippy::cognitive_complexity, + clippy::needless_pass_by_value, + clippy::cast_possible_truncation, + clippy::too_many_lines + ) +)] +// TODO: remove me after finishing impl +#![allow(dead_code)] + +//---------------------------------------------------------------------------------------------------- Use +mod binary_string; +mod constants; +mod macros; +mod status; + +pub use binary_string::BinaryString; +pub use constants::{ + CORE_RPC_STATUS_BUSY, CORE_RPC_STATUS_NOT_MINING, CORE_RPC_STATUS_OK, + CORE_RPC_STATUS_PAYMENT_REQUIRED, CORE_RPC_STATUS_UNKNOWN, CORE_RPC_VERSION, + CORE_RPC_VERSION_MAJOR, CORE_RPC_VERSION_MINOR, +}; +pub use status::Status; + +pub mod base; +pub mod bin; +pub mod json; +pub mod other; diff --git a/rpc/types/src/macros.rs b/rpc/types/src/macros.rs new file mode 100644 index 00000000..27288004 --- /dev/null +++ b/rpc/types/src/macros.rs @@ -0,0 +1,277 @@ +//! Macros. + +//---------------------------------------------------------------------------------------------------- Struct definition +/// A template for generating 2 `struct`s with a bunch of information filled out. +/// +/// These are the RPC request and response `struct`s. +/// +/// These `struct`s automatically implement: +/// - `Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash` +/// - `serde::{Serialize, Deserialize}` +/// - `epee_encoding::EpeeObject` +/// +/// It's best to see the output of this macro via the documentation +/// of the generated structs via `cargo doc`s to see which parts +/// generate which docs. +/// +/// See the [`crate::json`] module for example usage. +/// +/// # Macro internals +/// This macro has 2 branches with almost the same output: +/// 1. An empty `Request` type +/// 2. An `Request` type with fields +/// +/// The first branch is the same as the second with the exception +/// that if the caller of this macro provides no fields, it will +/// generate: +/// ``` +/// pub type Request = (); +/// ``` +/// instead of: +/// ``` +/// pub struct Request {/* fields */} +/// ``` +/// +/// This is because having a bunch of types that are all empty structs +/// means they are not compatible and it makes it cumbersome for end-users. +/// Really, they semantically are empty types, so `()` is used. +/// +/// Again, other than this, the 2 branches do (should) not differ. +/// +/// FIXME: there's probably a less painful way to branch here on input +/// without having to duplicate 80% of the macro. Sub-macros were attempted +/// but they ended up unreadable. So for now, make sure to fix the other +/// branch as well when making changes. The only de-duplicated part is +/// the doc generation with [`define_request_and_response_doc`]. +macro_rules! 
define_request_and_response { + //------------------------------------------------------------------------------ + // This version of the macro expects a `Request` type with no fields, i.e. `Request {}`. + ( + // The markdown tag for Monero RPC documentation. Not necessarily the endpoint. + $monero_daemon_rpc_doc_link:ident, + + // The commit hash and `$file.$extension` in which this type is defined in + // the Monero codebase in the `rpc/` directory, followed by the specific lines. + $monero_code_commit:ident => + $monero_code_filename:ident. + $monero_code_filename_extension:ident => + $monero_code_line_start:literal..= + $monero_code_line_end:literal, + + // The base `struct` name. + $type_name:ident, + + // The response type (and any doc comments, derives, etc). + $( #[$response_type_attr:meta] )* + $response_base_type:ty { + // And any fields. + $( + $( #[$response_field_attr:meta] )* + $response_field:ident: $response_field_type:ty, + )* + } + ) => { paste::paste! { + #[doc = $crate::macros::define_request_and_response_doc!( + "response", + $monero_daemon_rpc_doc_link, + $monero_code_commit, + $monero_code_filename, + $monero_code_filename_extension, + $monero_code_line_start, + $monero_code_line_end, + [<$type_name Request>], + )] + /// + /// This request has no inputs. + pub type [<$type_name Request>] = (); + + #[allow(dead_code)] + #[allow(missing_docs)] + #[derive(serde::Serialize, serde::Deserialize)] + #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] + $( #[$response_type_attr] )* + #[doc = $crate::macros::define_request_and_response_doc!( + "request", + $monero_daemon_rpc_doc_link, + $monero_code_commit, + $monero_code_filename, + $monero_code_filename_extension, + $monero_code_line_start, + $monero_code_line_end, + [<$type_name Response>], + )] + pub struct [<$type_name Response>] { + #[serde(flatten)] + pub base: $response_base_type, + + $( + $( #[$response_field_attr] )* + pub $response_field: $response_field_type, + )* + } + + ::cuprate_epee_encoding::epee_object! { + [<$type_name Response>], + $( + $response_field: $response_field_type, + )* + !flatten: base: $response_base_type, + } + }}; + + //------------------------------------------------------------------------------ + // This version of the macro expects a `Request` type with fields. + ( + // The markdown tag for Monero RPC documentation. Not necessarily the endpoint. + $monero_daemon_rpc_doc_link:ident, + + // The commit hash and `$file.$extension` in which this type is defined in + // the Monero codebase in the `rpc/` directory, followed by the specific lines. + $monero_code_commit:ident => + $monero_code_filename:ident. + $monero_code_filename_extension:ident => + $monero_code_line_start:literal..= + $monero_code_line_end:literal, + + // The base `struct` name. + $type_name:ident, + + // The request type (and any doc comments, derives, etc). + $( #[$request_type_attr:meta] )* + $request_base_type:ty { + // And any fields. + $( + $( #[$request_field_attr:meta] )* + $request_field:ident: $request_field_type:ty, + )* + }, + + // The response type (and any doc comments, derives, etc). + $( #[$response_type_attr:meta] )* + $response_base_type:ty { + // And any fields. + $( + $( #[$response_field_attr:meta] )* + $response_field:ident: $response_field_type:ty, + )* + } + ) => { paste::paste! 
{ + #[allow(dead_code)] + #[allow(missing_docs)] + #[derive(serde::Serialize, serde::Deserialize)] + #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] + $( #[$request_type_attr] )* + #[doc = $crate::macros::define_request_and_response_doc!( + "response", + $monero_daemon_rpc_doc_link, + $monero_code_commit, + $monero_code_filename, + $monero_code_filename_extension, + $monero_code_line_start, + $monero_code_line_end, + [<$type_name Request>], + )] + pub struct [<$type_name Request>] { + #[serde(flatten)] + pub base: $request_base_type, + + $( + $( #[$request_field_attr] )* + pub $request_field: $request_field_type, + )* + } + + ::cuprate_epee_encoding::epee_object! { + [<$type_name Request>], + $( + $request_field: $request_field_type, + )* + !flatten: base: $request_base_type, + } + + #[allow(dead_code)] + #[allow(missing_docs)] + #[derive(serde::Serialize, serde::Deserialize)] + #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] + $( #[$response_type_attr] )* + #[doc = $crate::macros::define_request_and_response_doc!( + "request", + $monero_daemon_rpc_doc_link, + $monero_code_commit, + $monero_code_filename, + $monero_code_filename_extension, + $monero_code_line_start, + $monero_code_line_end, + [<$type_name Response>], + )] + pub struct [<$type_name Response>] { + #[serde(flatten)] + pub base: $response_base_type, + + $( + $( #[$response_field_attr] )* + pub $response_field: $response_field_type, + )* + } + + ::cuprate_epee_encoding::epee_object! { + [<$type_name Response>], + $( + $response_field: $response_field_type, + )* + !flatten: base: $response_base_type, + } + }}; +} +pub(crate) use define_request_and_response; + +/// Generate documentation for the types generated +/// by the [`define_request_and_response`] macro. +/// +/// See it for more info on inputs. +macro_rules! define_request_and_response_doc { + ( + // This labels the last `[request]` or `[response]` + // hyperlink in documentation. Input is either: + // - "request" + // - "response" + // + // Remember this is linking to the _other_ type, + // so if defining a `Request` type, input should + // be "response". + $request_or_response:literal, + + $monero_daemon_rpc_doc_link:ident, + $monero_code_commit:ident, + $monero_code_filename:ident, + $monero_code_filename_extension:ident, + $monero_code_line_start:literal, + $monero_code_line_end:literal, + $type_name:ident, + ) => { + concat!( + "", + "[Definition](", + "https://github.com/monero-project/monero/blob/", + stringify!($monero_code_commit), + "/src/rpc/", + stringify!($monero_code_filename), + ".", + stringify!($monero_code_filename_extension), + "#L", + stringify!($monero_code_line_start), + "-L", + stringify!($monero_code_line_end), + "), [documentation](", + "https://www.getmonero.org/resources/developer-guides/daemon-rpc.html", + "#", + stringify!($monero_daemon_rpc_doc_link), + "), [", + $request_or_response, + "](", + stringify!($type_name), + ")." + ) + }; +} +pub(crate) use define_request_and_response_doc; diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs new file mode 100644 index 00000000..22547edd --- /dev/null +++ b/rpc/types/src/other.rs @@ -0,0 +1,21 @@ +//! JSON types from the [`other`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#other-daemon-rpc-calls) endpoints. +//! +//! <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/daemon_messages.h>. 
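Before the `other` endpoint definitions below, for orientation: a rough sketch of what the `define_request_and_response!` macro defined above expands to, using the `get_block_count` invocation from `json.rs` (an editor's illustration, not literal macro output; the generated `#[doc]`/`#[allow]` attributes are trimmed):

```rust
/// This request has no inputs.
pub type GetBlockCountRequest = ();

#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct GetBlockCountResponse {
    // Flattened base; (de)serializes as top-level `status` + `untrusted`.
    #[serde(flatten)]
    pub base: ResponseBase,
    pub count: u64,
}

::cuprate_epee_encoding::epee_object! {
    GetBlockCountResponse,
    count: u64,
    !flatten: base: ResponseBase,
}
```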
+ +//---------------------------------------------------------------------------------------------------- Import +use crate::{base::ResponseBase, macros::define_request_and_response}; + +//---------------------------------------------------------------------------------------------------- TODO +define_request_and_response! { + save_bc, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 898..=916, + SaveBc, + ResponseBase {} +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} diff --git a/rpc/types/src/status.rs b/rpc/types/src/status.rs new file mode 100644 index 00000000..e8ac6ce9 --- /dev/null +++ b/rpc/types/src/status.rs @@ -0,0 +1,182 @@ +//! RPC response status type. + +//---------------------------------------------------------------------------------------------------- Import +use std::fmt::Display; + +use serde::{Deserialize, Serialize}; + +use cuprate_epee_encoding::{ + macros::bytes::{Buf, BufMut}, + EpeeValue, Marker, +}; + +use crate::constants::{ + CORE_RPC_STATUS_BUSY, CORE_RPC_STATUS_NOT_MINING, CORE_RPC_STATUS_OK, + CORE_RPC_STATUS_PAYMENT_REQUIRED, CORE_RPC_STATUS_UNKNOWN, +}; + +//---------------------------------------------------------------------------------------------------- Status +/// RPC response status. +/// +/// This type represents `monerod`'s frequently appearing string field, `status`. +/// +/// This field appears within RPC [JSON response](crate::json) types. +/// +/// Reference: <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h#L78-L81>. +/// +/// ## Serialization and string formatting +/// ```rust +/// use cuprate_rpc_types::{ +/// Status, +/// CORE_RPC_STATUS_BUSY, CORE_RPC_STATUS_NOT_MINING, CORE_RPC_STATUS_OK, +/// CORE_RPC_STATUS_PAYMENT_REQUIRED, CORE_RPC_STATUS_UNKNOWN +/// }; +/// use serde_json::to_string; +/// +/// let unknown = Status::Unknown; +/// +/// assert_eq!(to_string(&Status::Ok).unwrap(), r#""OK""#); +/// assert_eq!(to_string(&Status::Busy).unwrap(), r#""BUSY""#); +/// assert_eq!(to_string(&Status::NotMining).unwrap(), r#""NOT MINING""#); +/// assert_eq!(to_string(&Status::PaymentRequired).unwrap(), r#""PAYMENT REQUIRED""#); +/// assert_eq!(to_string(&unknown).unwrap(), r#""UNKNOWN""#); +/// +/// assert_eq!(Status::Ok.as_ref(), CORE_RPC_STATUS_OK); +/// assert_eq!(Status::Busy.as_ref(), CORE_RPC_STATUS_BUSY); +/// assert_eq!(Status::NotMining.as_ref(), CORE_RPC_STATUS_NOT_MINING); +/// assert_eq!(Status::PaymentRequired.as_ref(), CORE_RPC_STATUS_PAYMENT_REQUIRED); +/// assert_eq!(unknown.as_ref(), CORE_RPC_STATUS_UNKNOWN); +/// +/// assert_eq!(format!("{}", Status::Ok), CORE_RPC_STATUS_OK); +/// assert_eq!(format!("{}", Status::Busy), CORE_RPC_STATUS_BUSY); +/// assert_eq!(format!("{}", Status::NotMining), CORE_RPC_STATUS_NOT_MINING); +/// assert_eq!(format!("{}", Status::PaymentRequired), CORE_RPC_STATUS_PAYMENT_REQUIRED); +/// assert_eq!(format!("{}", unknown), CORE_RPC_STATUS_UNKNOWN); +/// +/// assert_eq!(format!("{:?}", Status::Ok), "Ok"); +/// assert_eq!(format!("{:?}", Status::Busy), "Busy"); +/// assert_eq!(format!("{:?}", Status::NotMining), "NotMining"); +/// assert_eq!(format!("{:?}", Status::PaymentRequired), "PaymentRequired"); +/// assert_eq!(format!("{:?}", unknown), "Unknown"); +/// ``` +#[derive( + Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, +)] +pub enum 
Status { + // FIXME: + // `#[serde(rename = "")]` only takes raw string literals? + // We have to re-type the constants here... + /// Successful RPC response, everything is OK; [`CORE_RPC_STATUS_OK`]. + #[serde(rename = "OK")] + #[default] + Ok, + + /// The daemon is busy, try later; [`CORE_RPC_STATUS_BUSY`]. + #[serde(rename = "BUSY")] + Busy, + + /// The daemon is not mining; [`CORE_RPC_STATUS_NOT_MINING`]. + #[serde(rename = "NOT MINING")] + NotMining, + + /// Payment is required for RPC; [`CORE_RPC_STATUS_PAYMENT_REQUIRED`]. + #[serde(rename = "PAYMENT REQUIRED")] + PaymentRequired, + + /// Some unknown other string; [`CORE_RPC_STATUS_UNKNOWN`]. + /// + /// This exists to act as a catch-all if `monerod` adds + /// a string and a Cuprate node hasn't updated yet. + /// + /// The reason this isn't `Unknown(String)` is because that + /// disallows [`Status`] to be [`Copy`], and thus other types + /// that contain it. + #[serde(other)] + #[serde(rename = "UNKNOWN")] + Unknown, +} + +impl From<String> for Status { + fn from(s: String) -> Self { + match s.as_str() { + CORE_RPC_STATUS_OK => Self::Ok, + CORE_RPC_STATUS_BUSY => Self::Busy, + CORE_RPC_STATUS_NOT_MINING => Self::NotMining, + CORE_RPC_STATUS_PAYMENT_REQUIRED => Self::PaymentRequired, + _ => Self::Unknown, + } + } +} + +impl AsRef<str> for Status { + fn as_ref(&self) -> &str { + match self { + Self::Ok => CORE_RPC_STATUS_OK, + Self::Busy => CORE_RPC_STATUS_BUSY, + Self::NotMining => CORE_RPC_STATUS_NOT_MINING, + Self::PaymentRequired => CORE_RPC_STATUS_PAYMENT_REQUIRED, + Self::Unknown => CORE_RPC_STATUS_UNKNOWN, + } + } +} + +impl Display for Status { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(self.as_ref()) + } +} + +// [`Status`] is essentially a [`String`] when it comes to +// (de)serialization, except when writing we usually have +// access to a `&'static str` and don't need to allocate. +// +// See below for more impl info: +// <https://github.com/Cuprate/cuprate/blob/bef2a2cbd4e1194991751d1fbc96603cba8c7a51/net/epee-encoding/src/value.rs#L366-L392>. +impl EpeeValue for Status { + const MARKER: Marker = <String as EpeeValue>::MARKER; + + fn read<B: Buf>(r: &mut B, marker: &Marker) -> cuprate_epee_encoding::Result<Self> { + let string = <String as EpeeValue>::read(r, marker)?; + Ok(Self::from(string)) + } + + fn should_write(&self) -> bool { + true + } + + fn epee_default_value() -> Option<Self> { + // <https://github.com/Cuprate/cuprate/pull/147#discussion_r1654992559> + Some(Self::Unknown) + } + + fn write<B: BufMut>(self, w: &mut B) -> cuprate_epee_encoding::Result<()> { + cuprate_epee_encoding::write_bytes(self.as_ref(), w) + } +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + use super::*; + + // Test epee (de)serialization works. 
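    // Note: `write` emits the status string via `write_bytes(self.as_ref(), ..)`
    // and `read` parses it back through `From<String>`, so this round-trip also
    // exercises the `Status::Unknown` catch-all.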
+ #[test] + fn epee() { + for status in [ + Status::Ok, + Status::Busy, + Status::NotMining, + Status::PaymentRequired, + Status::Unknown, + ] { + let mut buf = vec![]; + + <Status as EpeeValue>::write(status, &mut buf).unwrap(); + let status2 = + <Status as EpeeValue>::read(&mut buf.as_slice(), &<Status as EpeeValue>::MARKER) + .unwrap(); + + assert_eq!(status, status2); + } + } +} From a438279aa8282a9edb6a7bae7acfe03efbb5403c Mon Sep 17 00:00:00 2001 From: hinto-janai <hinto.janai@protonmail.com> Date: Wed, 26 Jun 2024 17:51:06 -0400 Subject: [PATCH 10/11] storage: split `cuprate-blockchain` <-> `cuprate-database` (#160) * storage: port some code `cuprate-blockchain` -> `database` * database: remove `Tables` references * database: remove old `cuprate-blockchain` type references * find/replace `cuprate_blockchain` -> `database`, add `create_db()` * database: fix redb * database: use readme for docs, link in `lib.rs` * database: fix `open_db_ro`, `open_db_rw`, `create_db` behavior * database: add open table tests * database: fix tests, remove blockchain specific references * database: remove `ReaderThreads`, make `db_directory` mandatory * initial `cuprate-blockchain` split * fix doc links * rename, fix database config * blockchain: create `crate::open()`, `OpenTables::create_tables()` * more compat fixes * fix imports * fix conflicts * align cargo.toml * docs * fixes * add `unused_crate_dependencies` lint, fix * blockchain: add open table tests --- Cargo.lock | 337 +++++++-- consensus/fast-sync/src/create.rs | 4 +- storage/blockchain/Cargo.toml | 39 +- storage/blockchain/DESIGN.md | 600 ++++++++++++++++ storage/blockchain/README.md | 661 +++--------------- storage/blockchain/src/backend/tests.rs | 550 --------------- storage/blockchain/src/config/config.rs | 178 ++--- storage/blockchain/src/config/mod.rs | 26 +- storage/blockchain/src/constants.rs | 52 -- storage/blockchain/src/free.rs | 65 ++ storage/blockchain/src/lib.rs | 190 +---- storage/blockchain/src/open_tables.rs | 188 +++++ storage/blockchain/src/ops/block.rs | 11 +- storage/blockchain/src/ops/blockchain.rs | 9 +- storage/blockchain/src/ops/key_image.rs | 10 +- storage/blockchain/src/ops/mod.rs | 19 +- storage/blockchain/src/ops/output.rs | 14 +- storage/blockchain/src/ops/property.rs | 5 +- storage/blockchain/src/ops/tx.rs | 16 +- storage/blockchain/src/service/free.rs | 8 +- storage/blockchain/src/service/mod.rs | 14 +- storage/blockchain/src/service/read.rs | 6 +- storage/blockchain/src/service/tests.rs | 6 +- storage/blockchain/src/service/types.rs | 3 +- storage/blockchain/src/service/write.rs | 6 +- storage/blockchain/src/tables.rs | 66 +- storage/blockchain/src/tests.rs | 10 +- storage/blockchain/src/types.rs | 12 +- storage/database/Cargo.toml | 21 +- storage/database/README.md | 143 ++++ .../src/backend/heed/database.rs | 0 .../src/backend/heed/env.rs | 51 +- .../src/backend/heed/error.rs | 8 +- .../src/backend/heed/mod.rs | 0 .../src/backend/heed/storable.rs | 4 +- .../src/backend/heed/transaction.rs | 0 .../src/backend/heed/types.rs | 0 .../src/backend/mod.rs | 0 .../src/backend/redb/database.rs | 0 .../src/backend/redb/env.rs | 37 +- .../src/backend/redb/error.rs | 5 +- .../src/backend/redb/mod.rs | 0 .../src/backend/redb/storable.rs | 4 +- .../src/backend/redb/transaction.rs | 0 .../src/backend/redb/types.rs | 0 storage/database/src/backend/tests.rs | 374 ++++++++++ storage/database/src/config/backend.rs | 31 + storage/database/src/config/config.rs | 210 ++++++ storage/database/src/config/mod.rs | 40 ++ 
storage/database/src/config/sync_mode.rs | 135 ++++ storage/database/src/constants.rs | 74 ++ .../{blockchain => database}/src/database.rs | 0 storage/{blockchain => database}/src/env.rs | 65 +- storage/{blockchain => database}/src/error.rs | 6 +- storage/{blockchain => database}/src/key.rs | 2 +- storage/database/src/lib.rs | 151 ++++ .../{blockchain => database}/src/resize.rs | 16 +- .../{blockchain => database}/src/storable.rs | 14 +- storage/{blockchain => database}/src/table.rs | 7 +- storage/database/src/tests.rs | 35 + .../src/transaction.rs | 0 61 files changed, 2743 insertions(+), 1795 deletions(-) create mode 100644 storage/blockchain/DESIGN.md delete mode 100644 storage/blockchain/src/backend/tests.rs create mode 100644 storage/blockchain/src/open_tables.rs create mode 100644 storage/database/README.md rename storage/{blockchain => database}/src/backend/heed/database.rs (100%) rename storage/{blockchain => database}/src/backend/heed/env.rs (88%) rename storage/{blockchain => database}/src/backend/heed/error.rs (96%) rename storage/{blockchain => database}/src/backend/heed/mod.rs (100%) rename storage/{blockchain => database}/src/backend/heed/storable.rs (96%) rename storage/{blockchain => database}/src/backend/heed/transaction.rs (100%) rename storage/{blockchain => database}/src/backend/heed/types.rs (100%) rename storage/{blockchain => database}/src/backend/mod.rs (100%) rename storage/{blockchain => database}/src/backend/redb/database.rs (100%) rename storage/{blockchain => database}/src/backend/redb/env.rs (86%) rename storage/{blockchain => database}/src/backend/redb/error.rs (98%) rename storage/{blockchain => database}/src/backend/redb/mod.rs (100%) rename storage/{blockchain => database}/src/backend/redb/storable.rs (98%) rename storage/{blockchain => database}/src/backend/redb/transaction.rs (100%) rename storage/{blockchain => database}/src/backend/redb/types.rs (100%) create mode 100644 storage/database/src/backend/tests.rs create mode 100644 storage/database/src/config/backend.rs create mode 100644 storage/database/src/config/config.rs create mode 100644 storage/database/src/config/mod.rs create mode 100644 storage/database/src/config/sync_mode.rs create mode 100644 storage/database/src/constants.rs rename storage/{blockchain => database}/src/database.rs (100%) rename storage/{blockchain => database}/src/env.rs (84%) rename storage/{blockchain => database}/src/error.rs (94%) rename storage/{blockchain => database}/src/key.rs (97%) rename storage/{blockchain => database}/src/resize.rs (96%) rename storage/{blockchain => database}/src/storable.rs (96%) rename storage/{blockchain => database}/src/table.rs (70%) create mode 100644 storage/database/src/tests.rs rename storage/{blockchain => database}/src/transaction.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 9380e21d..8e6734d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -493,27 +493,21 @@ version = "0.0.0" dependencies = [ "bitflags 2.5.0", "bytemuck", - "bytes", - "cfg-if", "crossbeam", + "cuprate-database", "cuprate-helper", "cuprate-pruning", "cuprate-test-utils", "cuprate-types", "curve25519-dalek", "futures", - "heed", "hex", "hex-literal", "monero-serai", - "page_size", "paste", "pretty_assertions", "rayon", - "redb", - "serde", "tempfile", - "thiserror", "thread_local", "tokio", "tokio-util", @@ -595,7 +589,18 @@ dependencies = [ [[package]] name = "cuprate-database" -version = "0.0.0" +version = "0.0.1" +dependencies = [ + "bytemuck", + "bytes", + "cfg-if", + "heed", + "page_size", + "redb", + "serde", + 
"tempfile", + "thiserror", +] [[package]] name = "cuprate-epee-encoding" @@ -803,9 +808,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.3" +version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" dependencies = [ "cfg-if", "cpufeatures", @@ -813,6 +818,7 @@ dependencies = [ "digest", "fiat-crypto", "group", + "platforms", "rand_core", "rustc_version", "subtle", @@ -910,6 +916,17 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "displaydoc" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", +] + [[package]] name = "doxygen-rs" version = "0.4.2" @@ -1267,9 +1284,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "d0e7a4dd27b9476dc40cb050d3632d3bba3a70ddbff012285f7f8559a1e7e545" [[package]] name = "hyper" @@ -1352,13 +1369,133 @@ dependencies = [ ] [[package]] -name = "idna" -version = "0.5.0" +name = "icu_collections" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f8ac670d7422d7f76b32e17a5db556510825b29ec9154f235977c9caba61036" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + 
"icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", +] + +[[package]] +name = "idna" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4716a3a0933a1d01c2f72450e89596eb51dd34ef3c211ccd875acdf1f8fe47ed" +dependencies = [ + "icu_normalizer", + "icu_properties", + "smallvec", + "utf8_iter", ] [[package]] @@ -1439,6 +1576,12 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "litemap" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" + [[package]] name = "lmdb-master-sys" version = "0.2.1" @@ -1478,9 +1621,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "merlin" @@ -1496,9 +1639,9 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.7.4" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" dependencies = [ "adler", ] @@ -1768,6 +1911,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "platforms" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -1967,9 +2116,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" dependencies = [ "bitflags 2.5.0", ] @@ -2287,6 +2436,12 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] 
name = "std-shims" version = "0.1.1" @@ -2345,6 +2500,17 @@ dependencies = [ "crossbeam-queue", ] +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", +] + [[package]] name = "tap" version = "1.0.1" @@ -2403,20 +2569,15 @@ dependencies = [ ] [[package]] -name = "tinyvec" -version = "1.6.0" +name = "tinystr" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" dependencies = [ - "tinyvec_macros", + "displaydoc", + "zerovec", ] -[[package]] -name = "tinyvec_macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" - [[package]] name = "tokio" version = "1.38.0" @@ -2609,27 +2770,12 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" -[[package]] -name = "unicode-bidi" -version = "0.3.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" - [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" -[[package]] -name = "unicode-normalization" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" -dependencies = [ - "tinyvec", -] - [[package]] name = "untrusted" version = "0.9.0" @@ -2638,15 +2784,27 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.2" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "f7c25da092f0a868cdf09e8674cd3b7ef3a7d92a24253e663a2fb85e2496de56" dependencies = [ "form_urlencoded", "idna", "percent-encoding", ] +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "version_check" version = "0.9.4" @@ -2963,6 +3121,18 @@ dependencies = [ "memchr", ] +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "wyz" version = "0.5.1" @@ -2978,6 +3148,30 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +[[package]] 
+name = "yoke" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", + "synstructure", +] + [[package]] name = "zerocopy" version = "0.7.34" @@ -2998,6 +3192,27 @@ dependencies = [ "syn 2.0.66", ] +[[package]] +name = "zerofrom" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", + "synstructure", +] + [[package]] name = "zeroize" version = "1.8.1" @@ -3017,3 +3232,25 @@ dependencies = [ "quote", "syn 2.0.66", ] + +[[package]] +name = "zerovec" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb2cc8827d6c0994478a15c53f374f46fbd41bea663d809b14744bc42e6b109c" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97cf56601ee5052b4417d90c8755c6683473c926039908196cf35d99f893ebe7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", +] diff --git a/consensus/fast-sync/src/create.rs b/consensus/fast-sync/src/create.rs index 2e2b0477..dc2311fe 100644 --- a/consensus/fast-sync/src/create.rs +++ b/consensus/fast-sync/src/create.rs @@ -3,7 +3,9 @@ use std::{fmt::Write, fs::write}; use clap::Parser; use tower::{Service, ServiceExt}; -use cuprate_blockchain::{config::ConfigBuilder, service::DatabaseReadHandle, RuntimeError}; +use cuprate_blockchain::{ + config::ConfigBuilder, cuprate_database::RuntimeError, service::DatabaseReadHandle, +}; use cuprate_types::blockchain::{BCReadRequest, BCResponse}; use cuprate_fast_sync::{hash_of_hashes, BlockId, HashOfHashes}; diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index eb5a27ea..bab582d6 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -9,30 +9,28 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/storage/cuprate-bloc keywords = ["cuprate", "blockchain", "database"] [features] -default = ["heed", "redb", "service"] -# default = ["redb", "service"] -# default = ["redb-memory", "service"] -heed = ["dep:heed"] -redb = ["dep:redb"] -redb-memory = ["redb"] +default = ["heed", "service"] +# default = ["redb", "service"] +# default = ["redb-memory", "service"] +heed = ["cuprate-database/heed"] +redb = ["cuprate-database/redb"] +redb-memory = ["cuprate-database/redb-memory"] service = ["dep:crossbeam", "dep:futures", "dep:tokio", "dep:tokio-util", "dep:tower", "dep:rayon"] [dependencies] -bitflags = { workspace = true, features = ["serde", "bytemuck"] } -bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } -bytes = { workspace = true } -cfg-if = { workspace = true } 
# FIXME: # We only need the `thread` feature if `service` is enabled. # Figure out how to enable features of an already pulled in dependency conditionally. +cuprate-database = { path = "../database" } cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] } cuprate-types = { path = "../../types", features = ["blockchain"] } + +bitflags = { workspace = true, features = ["serde", "bytemuck"] } +bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } curve25519-dalek = { workspace = true } cuprate-pruning = { path = "../../pruning" } monero-serai = { workspace = true, features = ["std"] } paste = { workspace = true } -page_size = { version = "0.6.0" } # Needed for database resizes, they must be a multiple of the OS page size. -thiserror = { workspace = true } # `service` feature. crossbeam = { workspace = true, features = ["std"], optional = true } @@ -43,17 +41,12 @@ tower = { workspace = true, features = ["full"], optional = true } thread_local = { workspace = true } rayon = { workspace = true, optional = true } -# Optional features. -heed = { version = "0.20.0", features = ["read-txn-no-tls"], optional = true } -redb = { version = "2.1.0", optional = true } -serde = { workspace = true, optional = true } - [dev-dependencies] -bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } -cuprate-helper = { path = "../../helper", features = ["thread"] } +cuprate-helper = { path = "../../helper", features = ["thread"] } cuprate-test-utils = { path = "../../test-utils" } -page_size = { version = "0.6.0" } -tempfile = { version = "3.10.0" } + +bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } +tempfile = { version = "3.10.0" } pretty_assertions = { workspace = true } -hex = { workspace = true } -hex-literal = { workspace = true } +hex = { workspace = true } +hex-literal = { workspace = true } diff --git a/storage/blockchain/DESIGN.md b/storage/blockchain/DESIGN.md new file mode 100644 index 00000000..22f729f0 --- /dev/null +++ b/storage/blockchain/DESIGN.md @@ -0,0 +1,600 @@ +# Database +FIXME: This documentation must be updated and moved to the architecture book. + +Cuprate's blockchain implementation. + +- [1. Documentation](#1-documentation) +- [2. File structure](#2-file-structure) + - [2.1 `src/`](#21-src) + - [2.2 `src/backend/`](#22-srcbackend) + - [2.3 `src/config/`](#23-srcconfig) + - [2.4 `src/ops/`](#24-srcops) + - [2.5 `src/service/`](#25-srcservice) +- [3. Backends](#3-backends) + - [3.1 heed](#31-heed) + - [3.2 redb](#32-redb) + - [3.3 redb-memory](#33-redb-memory) + - [3.4 sanakirja](#34-sanakirja) + - [3.5 MDBX](#35-mdbx) +- [4. Layers](#4-layers) + - [4.1 Backend](#41-backend) + - [4.2 Trait](#42-trait) + - [4.3 ConcreteEnv](#43-concreteenv) + - [4.4 ops](#44-ops) + - [4.5 service](#45-service) +- [5. The service](#5-the-service) + - [5.1 Initialization](#51-initialization) + - [5.2 Requests](#53-requests) + - [5.3 Responses](#54-responses) + - [5.4 Thread model](#52-thread-model) + - [5.5 Shutdown](#55-shutdown) +- [6. Syncing](#6-Syncing) +- [7. Resizing](#7-resizing) +- [8. (De)serialization](#8-deserialization) +- [9. Schema](#9-schema) + - [9.1 Tables](#91-tables) + - [9.2 Multimap tables](#92-multimap-tables) +- [10. 
Known issues and tradeoffs](#10-known-issues-and-tradeoffs) + - [10.1 Traits abstracting backends](#101-traits-abstracting-backends) + - [10.2 Hot-swappable backends](#102-hot-swappable-backends) + - [10.3 Copying unaligned bytes](#103-copying-unaligned-bytes) + - [10.4 Endianness](#104-endianness) + - [10.5 Extra table data](#105-extra-table-data) + +--- + +## 1. Documentation +Documentation for `database/` is split into 3 locations: + +| Documentation location | Purpose | +|---------------------------|---------| +| `database/README.md` | High level design of `cuprate-database` +| `cuprate-database` | Practical usage documentation/warnings/notes/etc +| Source file `// comments` | Implementation-specific details (e.g, how many reader threads to spawn?) + +This README serves as the implementation design document. + +For actual practical usage, `cuprate-database`'s types and general usage are documented via standard Rust tooling. + +Run: +```bash +cargo doc --package cuprate-database --open +``` +at the root of the repo to open/read the documentation. + +If this documentation is too abstract, refer to any of the source files, they are heavily commented. There are many `// Regular comments` that explain more implementation specific details that aren't present here or in the docs. Use the file reference below to find what you're looking for. + +The code within `src/` is also littered with some `grep`-able comments containing some keywords: + +| Word | Meaning | +|-------------|---------| +| `INVARIANT` | This code makes an _assumption_ that must be upheld for correctness +| `SAFETY` | This `unsafe` code is okay, for `x,y,z` reasons +| `FIXME` | This code works but isn't ideal +| `HACK` | This code is a brittle workaround +| `PERF` | This code is weird for performance reasons +| `TODO` | This must be implemented; There should be 0 of these in production code +| `SOMEDAY` | This should be implemented... someday + +## 2. File structure +A quick reference of the structure of the folders & files in `cuprate-database`. + +Note that `lib.rs/mod.rs` files are purely for re-exporting/visibility/lints, and contain no code. Each sub-directory has a corresponding `mod.rs`. + +### 2.1 `src/` +The top-level `src/` files. + +| File | Purpose | +|------------------------|---------| +| `constants.rs` | General constants used throughout `cuprate-database` +| `database.rs` | Abstracted database; `trait DatabaseR{o,w}` +| `env.rs` | Abstracted database environment; `trait Env` +| `error.rs` | Database error types +| `free.rs` | General free functions (related to the database) +| `key.rs` | Abstracted database keys; `trait Key` +| `resize.rs` | Database resizing algorithms +| `storable.rs` | Data (de)serialization; `trait Storable` +| `table.rs` | Database table abstraction; `trait Table` +| `tables.rs` | All the table definitions used by `cuprate-database` +| `tests.rs` | Utilities for `cuprate_database` testing +| `transaction.rs` | Database transaction abstraction; `trait TxR{o,w}` +| `types.rs` | Database-specific types +| `unsafe_unsendable.rs` | Marker type to impl `Send` for objects not `Send` + +### 2.2 `src/backend/` +This folder contains the implementation for actual databases used as the backend for `cuprate-database`. + +Each backend has its own folder. 
+
+| Folder/File | Purpose |
+|-------------|---------|
+| `heed/`     | Backend using [`heed`](https://github.com/meilisearch/heed) (LMDB)
+| `redb/`     | Backend using [`redb`](https://github.com/cberner/redb)
+| `tests.rs`  | Backend-agnostic tests
+
+All backends follow the same file structure:
+
+| File             | Purpose |
+|------------------|---------|
+| `database.rs`    | Implementation of `trait DatabaseR{o,w}`
+| `env.rs`         | Implementation of `trait Env`
+| `error.rs`       | Conversion of the backend's errors to `cuprate_database`'s error types
+| `storable.rs`    | Compatibility layer between `cuprate_database::Storable` and backend-specific (de)serialization
+| `transaction.rs` | Implementation of `trait TxR{o,w}`
+| `types.rs`       | Type aliases for long backend-specific types
+
+### 2.3 `src/config/`
+This folder contains the `cuprate_database::config` module; configuration options for the database.
+
+| File                | Purpose |
+|---------------------|---------|
+| `config.rs`         | Main database `Config` struct
+| `reader_threads.rs` | Reader thread configuration for `service` thread-pool
+| `sync_mode.rs`      | Disk sync configuration for backends
+
+### 2.4 `src/ops/`
+This folder contains the `cuprate_database::ops` module.
+
+These are higher-level, Monero-related functions abstracted over the database.
+
+| File            | Purpose |
+|-----------------|---------|
+| `block.rs`      | Block related (main functions)
+| `blockchain.rs` | Blockchain related (height, cumulative values, etc)
+| `key_image.rs`  | Key image related
+| `macros.rs`     | Macros specific to `ops/`
+| `output.rs`     | Output related
+| `property.rs`   | Database properties (pruned, version, etc)
+| `tx.rs`         | Transaction related
+
+### 2.5 `src/service/`
+This folder contains the `cuprate_database::service` module.
+
+The `async`hronous request/response API other Cuprate crates use instead of managing the database directly themselves.
+
+| File           | Purpose |
+|----------------|---------|
+| `free.rs`      | General free functions used (related to `cuprate_database::service`)
+| `read.rs`      | Read thread-pool definitions and logic
+| `tests.rs`     | Thread-pool tests and test helper functions
+| `types.rs`     | `cuprate_database::service`-related type aliases
+| `write.rs`     | Writer thread definitions and logic
+
+## 3. Backends
+`cuprate-database`'s `trait`s allow abstracting over the actual database, such that any particular backend could be used.
+
+Each backend's implementation of those `trait`s is located in its respective folder in `src/backend/${DATABASE_NAME}/`.
+
+### 3.1 heed
+The default database used is [`heed`](https://github.com/meilisearch/heed) (LMDB). The upstream versions from [`crates.io`](https://crates.io/crates/heed) are used. `LMDB` should not need to be installed as `heed` has a build script that pulls it in automatically.
+
+`heed`'s filenames inside Cuprate's database folder (`~/.local/share/cuprate/database/`) are:
+
+| Filename   | Purpose |
+|------------|---------|
+| `data.mdb` | Main data file
+| `lock.mdb` | Database lock file
+
+`heed`-specific notes:
+- [There is a maximum reader limit](https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1372). Other potential processes (e.g.
`xmrblocks`) that are also reading the `data.mdb` file need to be accounted for +- [LMDB does not work on remote filesystem](https://github.com/LMDB/lmdb/blob/b8e54b4c31378932b69f1298972de54a565185b1/libraries/liblmdb/lmdb.h#L129) + +### 3.2 redb +The 2nd database backend is the 100% Rust [`redb`](https://github.com/cberner/redb). + +The upstream versions from [`crates.io`](https://crates.io/crates/redb) are used. + +`redb`'s filenames inside Cuprate's database folder (`~/.local/share/cuprate/database/`) are: + +| Filename | Purpose | +|-------------|---------| +| `data.redb` | Main data file + +<!-- TODO: document DB on remote filesystem (does redb allow this?) --> + +### 3.3 redb-memory +This backend is 100% the same as `redb`, although, it uses `redb::backend::InMemoryBackend` which is a database that completely resides in memory instead of a file. + +All other details about this should be the same as the normal `redb` backend. + +### 3.4 sanakirja +[`sanakirja`](https://docs.rs/sanakirja) was a candidate as a backend, however there were problems with maximum value sizes. + +The default maximum value size is [1012 bytes](https://docs.rs/sanakirja/1.4.1/sanakirja/trait.Storable.html) which was too small for our requirements. Using [`sanakirja::Slice`](https://docs.rs/sanakirja/1.4.1/sanakirja/union.Slice.html) and [sanakirja::UnsizedStorage](https://docs.rs/sanakirja/1.4.1/sanakirja/trait.UnsizedStorable.html) was attempted, but there were bugs found when inserting a value in-between `512..=4096` bytes. + +As such, it is not implemented. + +### 3.5 MDBX +[`MDBX`](https://erthink.github.io/libmdbx) was a candidate as a backend, however MDBX deprecated the custom key/value comparison functions, this makes it a bit trickier to implement [`9.2 Multimap tables`](#92-multimap-tables). It is also quite similar to the main backend LMDB (of which it was originally a fork of). + +As such, it is not implemented (yet). + +## 4. Layers +`cuprate_database` is logically abstracted into 5 layers, with each layer being built upon the last. + +Starting from the lowest: +1. Backend +2. Trait +3. ConcreteEnv +4. `ops` +5. `service` + +<!-- TODO: insert image here after database/ split --> + +### 4.1 Backend +This is the actual database backend implementation (or a Rust shim over one). + +Examples: +- `heed` (LMDB) +- `redb` + +`cuprate_database` itself just uses a backend, it does not implement one. + +All backends have the following attributes: +- [Embedded](https://en.wikipedia.org/wiki/Embedded_database) +- [Multiversion concurrency control](https://en.wikipedia.org/wiki/Multiversion_concurrency_control) +- [ACID](https://en.wikipedia.org/wiki/ACID) +- Are `(key, value)` oriented and have the expected API (`get()`, `insert()`, `delete()`) +- Are table oriented (`"table_name" -> (key, value)`) +- Allows concurrent readers + +### 4.2 Trait +`cuprate_database` provides a set of `trait`s that abstract over the various database backends. + +This allows the function signatures and behavior to stay the same but allows for swapping out databases in an easier fashion. + +All common behavior of the backend's are encapsulated here and used instead of using the backend directly. 
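+
+To make the abstraction concrete before listing the traits, here is a rough sketch of what trait-based access looks like. The `open_db_ro` call and the exact signatures are assumptions for illustration only, not copied from `cuprate_database`; see the trait links just below for the real definitions:
+```rust,ignore
+// Sketch only: names/signatures are assumed, not taken from the crate.
+fn block_height(env: &ConcreteEnv, block_hash: &[u8; 32]) -> Result<u64, RuntimeError> {
+    let env_inner = env.env_inner(); // `trait Env`
+    let tx_ro = env_inner.tx_ro()?;  // `trait TxRo`
+    // Open a table generically (e.g. the `BlockHeights` table from section 9)...
+    let table = env_inner.open_db_ro::<BlockHeights>(&tx_ro)?;
+    // ...and read from it through `trait DatabaseRo`, regardless of the backend.
+    table.get(block_hash)
+}
+```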
+ +Examples: +- [`trait Env`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/env.rs) +- [`trait {TxRo, TxRw}`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/transaction.rs) +- [`trait {DatabaseRo, DatabaseRw}`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/database.rs) + +For example, instead of calling `LMDB` or `redb`'s `get()` function directly, `DatabaseRo::get()` is called. + +### 4.3 ConcreteEnv +This is the non-generic, concrete `struct` provided by `cuprate_database` that contains all the data necessary to operate the database. The actual database backend `ConcreteEnv` will use internally depends on which backend feature is used. + +`ConcreteEnv` implements `trait Env`, which opens the door to all the other traits. + +The equivalent objects in the backends themselves are: +- [`heed::Env`](https://docs.rs/heed/0.20.0/heed/struct.Env.html) +- [`redb::Database`](https://docs.rs/redb/2.1.0/redb/struct.Database.html) + +This is the main object used when handling the database directly, although that is not strictly necessary as a user if the [`4.5 service`](#45-service) layer is used. + +### 4.4 ops +These are Monero-specific functions that use the abstracted `trait` forms of the database. + +Instead of dealing with the database directly: +- `get()` +- `delete()` + +the `ops` layer provides more abstract functions that deal with commonly used Monero operations: +- `add_block()` +- `pop_block()` + +### 4.5 service +The final layer abstracts the database completely into a [Monero-specific `async` request/response API](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/types/src/service.rs#L18-L78) using [`tower::Service`](https://docs.rs/tower/latest/tower/trait.Service.html). + +For more information on this layer, see the next section: [`5. The service`](#5-the-service). + +## 5. The service +The main API `cuprate_database` exposes for other crates to use is the `cuprate_database::service` module. + +This module exposes an `async` request/response API with `tower::Service`, backed by a threadpool, that allows reading/writing Monero-related data from/to the database. + +`cuprate_database::service` itself manages the database using a separate writer thread & reader thread-pool, and uses the previously mentioned [`4.4 ops`](#44-ops) functions when responding to requests. + +### 5.1 Initialization +The service is started simply by calling: [`cuprate_database::service::init()`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/service/free.rs#L23). + +This function initializes the database, spawns threads, and returns a: +- Read handle to the database (cloneable) +- Write handle to the database (not cloneable) + +These "handles" implement the `tower::Service` trait, which allows sending requests and receiving responses `async`hronously. + +### 5.2 Requests +Along with the 2 handles, there are 2 types of requests: +- [`ReadRequest`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/types/src/service.rs#L23-L90) +- [`WriteRequest`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/types/src/service.rs#L93-L105) + +`ReadRequest` is for retrieving various types of information from the database. + +`WriteRequest` currently only has 1 variant: to write a block to the database. 
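+
+Putting [`5.1 Initialization`](#51-initialization) and the request types together, a minimal sketch looks like the following. The exact `init()` signature and handle types are assumptions here; only the behavior (cloneable read handle, single write handle) is as described above:
+```rust,ignore
+// Sketch only; exact signatures may differ.
+// `init()` spawns the writer thread + reader thread-pool and returns the two handles.
+let (read_handle, write_handle) = cuprate_database::service::init(config)?;
+
+// The read handle is cloneable, so it can be shared across tasks...
+let another_read_handle = read_handle.clone();
+
+// ...while the single (non-cloneable) write handle is kept wherever blocks are written from.
+```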
+
+### 5.3 Responses
+After sending one of the above requests using the read/write handle, the value returned is _not_ the response, but an `async`hronous channel that will eventually return the response:
+```rust,ignore
+// Send a request.
+//                              tower::Service::call()
+//                                          v
+let response_channel: Channel = read_handle.call(ReadRequest::ChainHeight)?;
+
+// Await the response.
+let response: Response = response_channel.await?;
+
+// Assert the response is what we expected.
+assert!(matches!(response, Response::ChainHeight(_)));
+```
+
+After `await`ing the returned channel, a `Response` will eventually be returned when the `service` threadpool has fetched the value from the database and sent it off.
+
+Both read/write request variants match in name with `Response` variants, i.e.
+- `ReadRequest::ChainHeight` leads to `Response::ChainHeight`
+- `WriteRequest::WriteBlock` leads to `Response::WriteBlockOk`
+
+### 5.4 Thread model
+As mentioned in the [`4. Layers`](#4-layers) section, the base database abstractions themselves are not concerned with parallelism; they are mostly functions meant to be called from a single thread.
+
+However, the `cuprate_database::service` API _does_ have a thread model backing it.
+
+When [`cuprate_database::service`'s initialization function](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/free.rs#L33-L44) is called, threads will be spawned and maintained until the user drops (disconnects) the returned handles.
+
+The current behavior for thread count is:
+- [1 writer thread](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/write.rs#L52-L66)
+- [As many reader threads as there are system threads](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/read.rs#L104-L126)
+
+For example, on a system with 32 threads, `cuprate_database` will spawn:
+- 1 writer thread
+- 32 reader threads
+
+whose sole responsibility is to listen for database requests, access the database (potentially in parallel), and return a response.
+
+Note that the `1 system thread = 1 reader thread` model is only the default setting; the reader thread count can be configured by the user to be any number between `1 .. amount_of_system_threads`.
+
+The reader threads are managed by [`rayon`](https://docs.rs/rayon).
+
+For an example of where multiple reader threads are used: given a request that asks if any key-image within a set already exists, `cuprate_database` will [split that work between the threads with `rayon`](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/read.rs#L490-L503).
+
+### 5.5 Shutdown
+Once the read/write handles are `Drop`ed, the backing thread(pool) will gracefully exit, automatically.
+
+Note that the writer thread and reader threadpool aren't connected whatsoever; dropping the write handle will make the writer thread exit, but the read handle is free to be held onto and can continue to be read from - and vice-versa for the write handle.
+
+## 6. Syncing
+`cuprate_database`'s database has 5 disk syncing modes.
+
+1. FastThenSafe
+1. Safe
+1. Async
+1. Threshold
+1. Fast
+
+The default mode is `Safe`.
+
+This means that upon each transaction commit, all the data that was written will be fully synced to disk. This is the slowest, but safest mode of operation.
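+
+For illustration, the five modes roughly correspond to an enum shaped like the one below. This is a conceptual sketch only; the real definition (and the exact behavior of each mode) lives in `config/sync_mode.rs` and is linked just below:
+```rust,ignore
+// Conceptual sketch, not the real `cuprate_database` definition.
+pub enum SyncMode {
+    FastThenSafe,
+    /// The default: fully sync written data to disk on every transaction commit.
+    Safe,
+    Async,
+    Threshold,
+    Fast,
+}
+```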
+ +Note that upon any database `Drop`, whether via `service` or dropping the database directly, the current implementation will sync to disk regardless of any configuration. + +For more information on the other modes, read the documentation [here](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/config/sync_mode.rs#L63-L144). + +## 7. Resizing +Database backends that require manually resizing will, by default, use a similar algorithm as `monerod`'s. + +Note that this only relates to the `service` module, where the database is handled by `cuprate_database` itself, not the user. In the case of a user directly using `cuprate_database`, it is up to them on how to resize. + +Within `service`, the resizing logic defined [here](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/service/write.rs#L139-L201) does the following: + +- If there's not enough space to fit a write request's data, start a resize +- Each resize adds around [`1_073_745_920`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L104-L160) bytes to the current map size +- A resize will be attempted `3` times before failing + +There are other [resizing algorithms](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L38-L47) that define how the database's memory map grows, although currently the behavior of [`monerod`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L104-L160) is closely followed. + +## 8. (De)serialization +All types stored inside the database are either bytes already, or are perfectly bitcast-able. + +As such, they do not incur heavy (de)serialization costs when storing/fetching them from the database. The main (de)serialization used is [`bytemuck`](https://docs.rs/bytemuck)'s traits and casting functions. + +The size & layout of types is stable across compiler versions, as they are set and determined with [`#[repr(C)]`](https://doc.rust-lang.org/nomicon/other-reprs.html#reprc) and `bytemuck`'s derive macros such as [`bytemuck::Pod`](https://docs.rs/bytemuck/latest/bytemuck/derive.Pod.html). + +Note that the data stored in the tables are still type-safe; we still refer to the key and values within our tables by the type. + +The main deserialization `trait` for database storage is: [`cuprate_database::Storable`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L16-L115). + +- Before storage, the type is [simply cast into bytes](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L125) +- When fetching, the bytes are [simply cast into the type](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L130) + +When a type is casted into bytes, [the reference is casted](https://docs.rs/bytemuck/latest/bytemuck/fn.bytes_of.html), i.e. this is zero-cost serialization. + +However, it is worth noting that when bytes are casted into the type, [it is copied](https://docs.rs/bytemuck/latest/bytemuck/fn.pod_read_unaligned.html). 
This is due to byte alignment guarantee issues with both backends, see:
+- https://github.com/AltSysrq/lmdb-zero/issues/8
+- https://github.com/cberner/redb/issues/360
+
+Without this, `bytemuck` will panic with [`TargetAlignmentGreaterAndInputNotAligned`](https://docs.rs/bytemuck/latest/bytemuck/enum.PodCastError.html#variant.TargetAlignmentGreaterAndInputNotAligned) when casting.
+
+Copying the bytes fixes this problem, although it is more costly than necessary. However, in the main use-case for `cuprate_database` (the `service` module) the bytes would need to be owned regardless, as the `Request/Response` API uses owned data types (`T`, `Vec<T>`, `HashMap<K, V>`, etc).
+
+Practically speaking, this means lower-level database functions that normally look like this:
+```rust
+fn get(key: &Key) -> &Value;
+```
+end up looking like this in `cuprate_database`:
+```rust
+fn get(key: &Key) -> Value;
+```
+
+Since each backend has its own (de)serialization methods, our types are wrapped in compatibility types that map our `Storable` functions into whatever is required for the backend, e.g.:
+- [`StorableHeed<T>`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/backend/heed/storable.rs#L11-L45)
+- [`StorableRedb<T>`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/backend/redb/storable.rs#L11-L30)
+
+Compatibility structs also exist for any `Storable` containers:
+- [`StorableVec<T>`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L135-L191)
+- [`StorableBytes`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L208-L241)
+
+Again, it's unfortunate that these must be owned, although in `service`'s use-case, they would have to be owned anyway.
+
+## 9. Schema
+The following section contains Cuprate's database schema; it may change throughout the development of Cuprate, so nothing here is final.
+
+### 9.1 Tables
+The `CamelCase` names of the table headers documented here (e.g. `TxIds`) are the actual type names of the tables within `cuprate_database`.
+
+Note that words written within `code blocks` are real types defined and usable within `cuprate_database`; other standard types like u64 and type aliases (TxId) are written normally.
+
+Within `cuprate_database::tables`, the below table is essentially defined as-is with [a macro](https://github.com/Cuprate/cuprate/blob/31ce89412aa174fc33754f22c9a6d9ef5ddeda28/database/src/tables.rs#L369-L470).
+
+Many of the stored data types are the same underlying type but differ semantically; as such, a map of the aliases used and their real data types is also provided below.
+ +| Alias | Real Type | +|----------------------------------------------------|-----------| +| BlockHeight, Amount, AmountIndex, TxId, UnlockTime | u64 +| BlockHash, KeyImage, TxHash, PrunableHash | [u8; 32] + +| Table | Key | Value | Description | +|-------------------|----------------------|--------------------|-------------| +| `BlockBlobs` | BlockHeight | `StorableVec<u8>` | Maps a block's height to a serialized byte form of a block +| `BlockHeights` | BlockHash | BlockHeight | Maps a block's hash to its height +| `BlockInfos` | BlockHeight | `BlockInfo` | Contains metadata of all blocks +| `KeyImages` | KeyImage | () | This table is a set with no value, it stores transaction key images +| `NumOutputs` | Amount | u64 | Maps an output's amount to the number of outputs with that amount +| `Outputs` | `PreRctOutputId` | `Output` | This table contains legacy CryptoNote outputs which have clear amounts. This table will not contain an output with 0 amount. +| `PrunedTxBlobs` | TxId | `StorableVec<u8>` | Contains pruned transaction blobs (even if the database is not pruned) +| `PrunableTxBlobs` | TxId | `StorableVec<u8>` | Contains the prunable part of a transaction +| `PrunableHashes` | TxId | PrunableHash | Contains the hash of the prunable part of a transaction +| `RctOutputs` | AmountIndex | `RctOutput` | Contains RingCT outputs mapped from their global RCT index +| `TxBlobs` | TxId | `StorableVec<u8>` | Serialized transaction blobs (bytes) +| `TxIds` | TxHash | TxId | Maps a transaction's hash to its index/ID +| `TxHeights` | TxId | BlockHeight | Maps a transaction's ID to the height of the block it comes from +| `TxOutputs` | TxId | `StorableVec<u64>` | Gives the amount indices of a transaction's outputs +| `TxUnlockTime` | TxId | UnlockTime | Stores the unlock time of a transaction (only if it has a non-zero lock time) + +The definitions for aliases and types (e.g. `RctOutput`) are within the [`cuprate_database::types`](https://github.com/Cuprate/cuprate/blob/31ce89412aa174fc33754f22c9a6d9ef5ddeda28/database/src/types.rs#L51) module. + +<!-- TODO(Boog900): We could split this table again into `RingCT (non-miner) Outputs` and `RingCT (miner) Outputs` as for miner outputs we can store the amount instead of commitment saving 24 bytes per miner output. --> + +### 9.2 Multimap tables +When referencing outputs, Monero will [use the amount and the amount index](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/blockchain_db/lmdb/db_lmdb.cpp#L3447-L3449). This means 2 keys are needed to reach an output. + +With LMDB you can set the `DUP_SORT` flag on a table and then set the key/value to: +```rust +Key = KEY_PART_1 +``` +```rust +Value = { + KEY_PART_2, + VALUE // The actual value we are storing. +} +``` + +Then you can set a custom value sorting function that only takes `KEY_PART_2` into account; this is how `monerod` does it. + +This requires that the underlying database supports: +- multimap tables +- custom sort functions on values +- setting a cursor on a specific key/value + +--- + +Another way to implement this is as follows: +```rust +Key = { KEY_PART_1, KEY_PART_2 } +``` +```rust +Value = VALUE +``` + +Then the key type is simply used to look up the value; this is how `cuprate_database` does it. + +For example, the key/value pair for outputs is: +```rust +PreRctOutputId => Output +``` +where `PreRctOutputId` looks like this: +```rust +struct PreRctOutputId { + amount: u64, + amount_index: u64, +} +``` + +## 10. 
Known issues and tradeoffs
+`cuprate_database` takes many tradeoffs, whether due to:
+- Prioritizing certain values over others
+- Not having a better solution
+- Being "good enough"
+
+This is a list of the larger ones, along with issues that don't have answers yet.
+
+### 10.1 Traits abstracting backends
+Although all database backends used are very similar, they have some crucial differences in small implementation details that must be worked around when conforming them to `cuprate_database`'s traits.
+
+Put simply: using `cuprate_database`'s traits is less efficient and more awkward than using the backend directly.
+
+For example:
+- [Data types must be wrapped in compatibility layers when they otherwise wouldn't be](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/backend/heed/env.rs#L101-L116)
+- [There are types that only apply to a specific backend, but are visible to all](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/error.rs#L86-L89)
+- [There are extra layers of abstraction to smoothen the differences between all backends](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/env.rs#L62-L68)
+- [Existing functionality of backends must be taken away, as it isn't supported in the others](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/database.rs#L27-L34)
+
+This is a _tradeoff_ that `cuprate_database` takes, as:
+- The backend itself is usually not the source of bottlenecks in the greater system; as such, small inefficiencies are OK
+- None of the lost functionality is crucial for operation
+- The ability to use, test, and swap between multiple database backends is [worth it](https://github.com/Cuprate/cuprate/pull/35#issuecomment-1952804393)
+
+### 10.2 Hot-swappable backends
+Using a different backend is really as simple as re-building `cuprate_database` with a different feature flag:
+```bash
+# Use LMDB.
+cargo build --package cuprate-database --features heed
+
+# Use redb.
+cargo build --package cuprate-database --features redb
+```
+
+This is "good enough" for now; ideally, however, this hot-swapping of backends would be possible at _runtime_.
+
+As it is now, `cuprate_database` cannot compile both backends and swap based on user input at runtime; it must be compiled with a certain backend, which will produce a binary with only that backend.
+
+This also means things like [CI testing multiple backends is awkward](https://github.com/Cuprate/cuprate/blob/main/.github/workflows/ci.yml#L132-L136), as we must re-compile with different feature flags instead.
+
+### 10.3 Copying unaligned bytes
+As mentioned in [`8. (De)serialization`](#8-deserialization), bytes are _copied_ when they are turned into a type `T` due to unaligned bytes being returned from database backends.
+
+Using a regular reference cast results in an improperly aligned type `T`; [such a type even existing causes undefined behavior](https://doc.rust-lang.org/reference/behavior-considered-undefined.html). In our case, `bytemuck` saves us by panicking before this occurs.
+
+Thus, when using `cuprate_database`'s database traits, an _owned_ `T` is returned.
+
+This is doubly unfortunate for `&[u8]`, as it does not even need deserialization.
+ +For example, `StorableVec` could have been this: +```rust +enum StorableBytes<'a, T: Storable> { + Owned(T), + Ref(&'a T), +} +``` +but this would require supporting types that must be copied regardless with the occasional `&[u8]` that can be returned without casting. This was hard to do so in a generic way, thus all `[u8]`'s are copied and returned as owned `StorableVec`s. + +This is a _tradeoff_ `cuprate_database` takes as: +- `bytemuck::pod_read_unaligned` is cheap enough +- The main API, `service`, needs to return owned value anyway +- Having no references removes a lot of lifetime complexity + +The alternative is either: +- Using proper (de)serialization instead of casting (which comes with its own costs) +- Somehow fixing the alignment issues in the backends mentioned previously + +### 10.4 Endianness +`cuprate_database`'s (de)serialization and storage of bytes are native-endian, as in, byte storage order will depend on the machine it is running on. + +As Cuprate's build-targets are all little-endian ([big-endian by default machines barely exist](https://en.wikipedia.org/wiki/Endianness#Hardware)), this doesn't matter much and the byte ordering can be seen as a constant. + +Practically, this means `cuprated`'s database files can be transferred across computers, as can `monerod`'s. + +### 10.5 Extra table data +Some of `cuprate_database`'s tables differ from `monerod`'s tables, for example, the way [`9.2 Multimap tables`](#92-multimap-tables) tables are done requires that the primary key is stored _for all_ entries, compared to `monerod` only needing to store it once. + +For example: +```rust +// `monerod` only stores `amount: 1` once, +// `cuprated` stores it each time it appears. +struct PreRctOutputId { amount: 1, amount_index: 0 } +struct PreRctOutputId { amount: 1, amount_index: 1 } +``` + +This means `cuprated`'s database will be slightly larger than `monerod`'s. + +The current method `cuprate_database` uses will be "good enough" until usage shows that it must be optimized as multimap tables are tricky to implement across all backends. diff --git a/storage/blockchain/README.md b/storage/blockchain/README.md index 22f729f0..8a2162c1 100644 --- a/storage/blockchain/README.md +++ b/storage/blockchain/README.md @@ -1,600 +1,105 @@ -# Database -FIXME: This documentation must be updated and moved to the architecture book. +Cuprate's blockchain database. -Cuprate's blockchain implementation. +This documentation is mostly for practical usage of `cuprate_blockchain`. -- [1. Documentation](#1-documentation) -- [2. File structure](#2-file-structure) - - [2.1 `src/`](#21-src) - - [2.2 `src/backend/`](#22-srcbackend) - - [2.3 `src/config/`](#23-srcconfig) - - [2.4 `src/ops/`](#24-srcops) - - [2.5 `src/service/`](#25-srcservice) -- [3. Backends](#3-backends) - - [3.1 heed](#31-heed) - - [3.2 redb](#32-redb) - - [3.3 redb-memory](#33-redb-memory) - - [3.4 sanakirja](#34-sanakirja) - - [3.5 MDBX](#35-mdbx) -- [4. Layers](#4-layers) - - [4.1 Backend](#41-backend) - - [4.2 Trait](#42-trait) - - [4.3 ConcreteEnv](#43-concreteenv) - - [4.4 ops](#44-ops) - - [4.5 service](#45-service) -- [5. The service](#5-the-service) - - [5.1 Initialization](#51-initialization) - - [5.2 Requests](#53-requests) - - [5.3 Responses](#54-responses) - - [5.4 Thread model](#52-thread-model) - - [5.5 Shutdown](#55-shutdown) -- [6. Syncing](#6-Syncing) -- [7. Resizing](#7-resizing) -- [8. (De)serialization](#8-deserialization) -- [9. 
Schema](#9-schema) - - [9.1 Tables](#91-tables) - - [9.2 Multimap tables](#92-multimap-tables) -- [10. Known issues and tradeoffs](#10-known-issues-and-tradeoffs) - - [10.1 Traits abstracting backends](#101-traits-abstracting-backends) - - [10.2 Hot-swappable backends](#102-hot-swappable-backends) - - [10.3 Copying unaligned bytes](#103-copying-unaligned-bytes) - - [10.4 Endianness](#104-endianness) - - [10.5 Extra table data](#105-extra-table-data) +For a high-level overview, see the database section in +[Cuprate's architecture book](https://architecture.cuprate.org). ---- +# Purpose +This crate does 3 things: +1. Uses [`cuprate_database`] as a base database layer +1. Implements various `Monero` related [operations](ops), [tables], and [types] +1. Exposes a [`tower::Service`] backed by a thread-pool -## 1. Documentation -Documentation for `database/` is split into 3 locations: +Each layer builds on-top of the previous. -| Documentation location | Purpose | -|---------------------------|---------| -| `database/README.md` | High level design of `cuprate-database` -| `cuprate-database` | Practical usage documentation/warnings/notes/etc -| Source file `// comments` | Implementation-specific details (e.g, how many reader threads to spawn?) +As a user of `cuprate_blockchain`, consider using the higher-level [`service`] module, +or at the very least the [`ops`] module instead of interacting with the `cuprate_database` traits directly. -This README serves as the implementation design document. +# `cuprate_database` +Consider reading `cuprate_database`'s crate documentation before this crate, as it is the first layer. -For actual practical usage, `cuprate-database`'s types and general usage are documented via standard Rust tooling. - -Run: -```bash -cargo doc --package cuprate-database --open +If/when this crate needs is used, be sure to use the version that this crate re-exports, e.g.: +```rust +use cuprate_blockchain::{ + cuprate_database::RuntimeError, +}; ``` -at the root of the repo to open/read the documentation. +This ensures the types/traits used from `cuprate_database` are the same ones used by `cuprate_blockchain` internally. -If this documentation is too abstract, refer to any of the source files, they are heavily commented. There are many `// Regular comments` that explain more implementation specific details that aren't present here or in the docs. Use the file reference below to find what you're looking for. +# Feature flags +The `service` module requires the `service` feature to be enabled. +See the module for more documentation. -The code within `src/` is also littered with some `grep`-able comments containing some keywords: - -| Word | Meaning | -|-------------|---------| -| `INVARIANT` | This code makes an _assumption_ that must be upheld for correctness -| `SAFETY` | This `unsafe` code is okay, for `x,y,z` reasons -| `FIXME` | This code works but isn't ideal -| `HACK` | This code is a brittle workaround -| `PERF` | This code is weird for performance reasons -| `TODO` | This must be implemented; There should be 0 of these in production code -| `SOMEDAY` | This should be implemented... someday - -## 2. File structure -A quick reference of the structure of the folders & files in `cuprate-database`. - -Note that `lib.rs/mod.rs` files are purely for re-exporting/visibility/lints, and contain no code. Each sub-directory has a corresponding `mod.rs`. - -### 2.1 `src/` -The top-level `src/` files. 
- -| File | Purpose | -|------------------------|---------| -| `constants.rs` | General constants used throughout `cuprate-database` -| `database.rs` | Abstracted database; `trait DatabaseR{o,w}` -| `env.rs` | Abstracted database environment; `trait Env` -| `error.rs` | Database error types -| `free.rs` | General free functions (related to the database) -| `key.rs` | Abstracted database keys; `trait Key` -| `resize.rs` | Database resizing algorithms -| `storable.rs` | Data (de)serialization; `trait Storable` -| `table.rs` | Database table abstraction; `trait Table` -| `tables.rs` | All the table definitions used by `cuprate-database` -| `tests.rs` | Utilities for `cuprate_database` testing -| `transaction.rs` | Database transaction abstraction; `trait TxR{o,w}` -| `types.rs` | Database-specific types -| `unsafe_unsendable.rs` | Marker type to impl `Send` for objects not `Send` - -### 2.2 `src/backend/` -This folder contains the implementation for actual databases used as the backend for `cuprate-database`. - -Each backend has its own folder. - -| Folder/File | Purpose | -|-------------|---------| -| `heed/` | Backend using using [`heed`](https://github.com/meilisearch/heed) (LMDB) -| `redb/` | Backend using [`redb`](https://github.com/cberner/redb) -| `tests.rs` | Backend-agnostic tests - -All backends follow the same file structure: - -| File | Purpose | -|------------------|---------| -| `database.rs` | Implementation of `trait DatabaseR{o,w}` -| `env.rs` | Implementation of `trait Env` -| `error.rs` | Implementation of backend's errors to `cuprate_database`'s error types -| `storable.rs` | Compatibility layer between `cuprate_database::Storable` and backend-specific (de)serialization -| `transaction.rs` | Implementation of `trait TxR{o,w}` -| `types.rs` | Type aliases for long backend-specific types - -### 2.3 `src/config/` -This folder contains the `cupate_database::config` module; configuration options for the database. - -| File | Purpose | -|---------------------|---------| -| `config.rs` | Main database `Config` struct -| `reader_threads.rs` | Reader thread configuration for `service` thread-pool -| `sync_mode.rs` | Disk sync configuration for backends - -### 2.4 `src/ops/` -This folder contains the `cupate_database::ops` module. - -These are higher-level functions abstracted over the database, that are Monero-related. - -| File | Purpose | -|-----------------|---------| -| `block.rs` | Block related (main functions) -| `blockchain.rs` | Blockchain related (height, cumulative values, etc) -| `key_image.rs` | Key image related -| `macros.rs` | Macros specific to `ops/` -| `output.rs` | Output related -| `property.rs` | Database properties (pruned, version, etc) -| `tx.rs` | Transaction related - -### 2.5 `src/service/` -This folder contains the `cupate_database::service` module. - -The `async`hronous request/response API other Cuprate crates use instead of managing the database directly themselves. - -| File | Purpose | -|----------------|---------| -| `free.rs` | General free functions used (related to `cuprate_database::service`) -| `read.rs` | Read thread-pool definitions and logic -| `tests.rs` | Thread-pool tests and test helper functions -| `types.rs` | `cuprate_database::service`-related type aliases -| `write.rs` | Writer thread definitions and logic - -## 3. Backends -`cuprate-database`'s `trait`s allow abstracting over the actual database, such that any backend in particular could be used. 
- -Each database's implementation for those `trait`'s are located in its respective folder in `src/backend/${DATABASE_NAME}/`. - -### 3.1 heed -The default database used is [`heed`](https://github.com/meilisearch/heed) (LMDB). The upstream versions from [`crates.io`](https://crates.io/crates/heed) are used. `LMDB` should not need to be installed as `heed` has a build script that pulls it in automatically. - -`heed`'s filenames inside Cuprate's database folder (`~/.local/share/cuprate/database/`) are: - -| Filename | Purpose | -|------------|---------| -| `data.mdb` | Main data file -| `lock.mdb` | Database lock file - -`heed`-specific notes: -- [There is a maximum reader limit](https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1372). Other potential processes (e.g. `xmrblocks`) that are also reading the `data.mdb` file need to be accounted for -- [LMDB does not work on remote filesystem](https://github.com/LMDB/lmdb/blob/b8e54b4c31378932b69f1298972de54a565185b1/libraries/liblmdb/lmdb.h#L129) - -### 3.2 redb -The 2nd database backend is the 100% Rust [`redb`](https://github.com/cberner/redb). - -The upstream versions from [`crates.io`](https://crates.io/crates/redb) are used. - -`redb`'s filenames inside Cuprate's database folder (`~/.local/share/cuprate/database/`) are: - -| Filename | Purpose | -|-------------|---------| -| `data.redb` | Main data file - -<!-- TODO: document DB on remote filesystem (does redb allow this?) --> - -### 3.3 redb-memory -This backend is 100% the same as `redb`, although, it uses `redb::backend::InMemoryBackend` which is a database that completely resides in memory instead of a file. - -All other details about this should be the same as the normal `redb` backend. - -### 3.4 sanakirja -[`sanakirja`](https://docs.rs/sanakirja) was a candidate as a backend, however there were problems with maximum value sizes. - -The default maximum value size is [1012 bytes](https://docs.rs/sanakirja/1.4.1/sanakirja/trait.Storable.html) which was too small for our requirements. Using [`sanakirja::Slice`](https://docs.rs/sanakirja/1.4.1/sanakirja/union.Slice.html) and [sanakirja::UnsizedStorage](https://docs.rs/sanakirja/1.4.1/sanakirja/trait.UnsizedStorable.html) was attempted, but there were bugs found when inserting a value in-between `512..=4096` bytes. - -As such, it is not implemented. - -### 3.5 MDBX -[`MDBX`](https://erthink.github.io/libmdbx) was a candidate as a backend, however MDBX deprecated the custom key/value comparison functions, this makes it a bit trickier to implement [`9.2 Multimap tables`](#92-multimap-tables). It is also quite similar to the main backend LMDB (of which it was originally a fork of). - -As such, it is not implemented (yet). - -## 4. Layers -`cuprate_database` is logically abstracted into 5 layers, with each layer being built upon the last. - -Starting from the lowest: -1. Backend -2. Trait -3. ConcreteEnv -4. `ops` -5. `service` - -<!-- TODO: insert image here after database/ split --> - -### 4.1 Backend -This is the actual database backend implementation (or a Rust shim over one). - -Examples: +Different database backends are enabled by the feature flags: - `heed` (LMDB) - `redb` -`cuprate_database` itself just uses a backend, it does not implement one. +The default is `heed`. 
-All backends have the following attributes: -- [Embedded](https://en.wikipedia.org/wiki/Embedded_database) -- [Multiversion concurrency control](https://en.wikipedia.org/wiki/Multiversion_concurrency_control) -- [ACID](https://en.wikipedia.org/wiki/ACID) -- Are `(key, value)` oriented and have the expected API (`get()`, `insert()`, `delete()`) -- Are table oriented (`"table_name" -> (key, value)`) -- Allows concurrent readers +`tracing` is always enabled and cannot be disabled via feature-flag. +<!-- FIXME: tracing should be behind a feature flag --> -### 4.2 Trait -`cuprate_database` provides a set of `trait`s that abstract over the various database backends. +# Invariants when not using `service` +`cuprate_blockchain` can be used without the `service` feature enabled but +there are some things that must be kept in mind when doing so. -This allows the function signatures and behavior to stay the same but allows for swapping out databases in an easier fashion. +Failing to uphold these invariants may cause panics. -All common behavior of the backend's are encapsulated here and used instead of using the backend directly. +1. `LMDB` requires the user to resize the memory map resizing (see [`cuprate_database::RuntimeError::ResizeNeeded`] +1. `LMDB` has a maximum reader transaction count, currently it is set to `128` +1. `LMDB` has [maximum key/value byte size](http://www.lmdb.tech/doc/group__internal.html#gac929399f5d93cef85f874b9e9b1d09e0) which must not be exceeded -Examples: -- [`trait Env`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/env.rs) -- [`trait {TxRo, TxRw}`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/transaction.rs) -- [`trait {DatabaseRo, DatabaseRw}`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/database.rs) +# Examples +The below is an example of using `cuprate_blockchain` +lowest API, i.e. using a mix of this crate and `cuprate_database`'s traits directly - +**this is NOT recommended.** -For example, instead of calling `LMDB` or `redb`'s `get()` function directly, `DatabaseRo::get()` is called. +For examples of the higher-level APIs, see: +- [`ops`] +- [`service`] -### 4.3 ConcreteEnv -This is the non-generic, concrete `struct` provided by `cuprate_database` that contains all the data necessary to operate the database. The actual database backend `ConcreteEnv` will use internally depends on which backend feature is used. - -`ConcreteEnv` implements `trait Env`, which opens the door to all the other traits. - -The equivalent objects in the backends themselves are: -- [`heed::Env`](https://docs.rs/heed/0.20.0/heed/struct.Env.html) -- [`redb::Database`](https://docs.rs/redb/2.1.0/redb/struct.Database.html) - -This is the main object used when handling the database directly, although that is not strictly necessary as a user if the [`4.5 service`](#45-service) layer is used. - -### 4.4 ops -These are Monero-specific functions that use the abstracted `trait` forms of the database. 
- -Instead of dealing with the database directly: -- `get()` -- `delete()` - -the `ops` layer provides more abstract functions that deal with commonly used Monero operations: -- `add_block()` -- `pop_block()` - -### 4.5 service -The final layer abstracts the database completely into a [Monero-specific `async` request/response API](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/types/src/service.rs#L18-L78) using [`tower::Service`](https://docs.rs/tower/latest/tower/trait.Service.html). - -For more information on this layer, see the next section: [`5. The service`](#5-the-service). - -## 5. The service -The main API `cuprate_database` exposes for other crates to use is the `cuprate_database::service` module. - -This module exposes an `async` request/response API with `tower::Service`, backed by a threadpool, that allows reading/writing Monero-related data from/to the database. - -`cuprate_database::service` itself manages the database using a separate writer thread & reader thread-pool, and uses the previously mentioned [`4.4 ops`](#44-ops) functions when responding to requests. - -### 5.1 Initialization -The service is started simply by calling: [`cuprate_database::service::init()`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/service/free.rs#L23). - -This function initializes the database, spawns threads, and returns a: -- Read handle to the database (cloneable) -- Write handle to the database (not cloneable) - -These "handles" implement the `tower::Service` trait, which allows sending requests and receiving responses `async`hronously. - -### 5.2 Requests -Along with the 2 handles, there are 2 types of requests: -- [`ReadRequest`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/types/src/service.rs#L23-L90) -- [`WriteRequest`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/types/src/service.rs#L93-L105) - -`ReadRequest` is for retrieving various types of information from the database. - -`WriteRequest` currently only has 1 variant: to write a block to the database. - -### 5.3 Responses -After sending one of the above requests using the read/write handle, the value returned is _not_ the response, yet an `async`hronous channel that will eventually return the response: -```rust,ignore -// Send a request. -// tower::Service::call() -// V -let response_channel: Channel = read_handle.call(ReadResponse::ChainHeight)?; - -// Await the response. -let response: ReadResponse = response_channel.await?; - -// Assert the response is what we expected. -assert_eq!(matches!(response), Response::ChainHeight(_)); -``` - -After `await`ing the returned channel, a `Response` will eventually be returned when the `service` threadpool has fetched the value from the database and sent it off. - -Both read/write requests variants match in name with `Response` variants, i.e. -- `ReadRequest::ChainHeight` leads to `Response::ChainHeight` -- `WriteRequest::WriteBlock` leads to `Response::WriteBlockOk` - -### 5.4 Thread model -As mentioned in the [`4. Layers`](#4-layers) section, the base database abstractions themselves are not concerned with parallelism, they are mostly functions to be called from a single-thread. - -However, the `cuprate_database::service` API, _does_ have a thread model backing it. 
- -When [`cuprate_database::service`'s initialization function](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/free.rs#L33-L44) is called, threads will be spawned and maintained until the user drops (disconnects) the returned handles. - -The current behavior for thread count is: -- [1 writer thread](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/write.rs#L52-L66) -- [As many reader threads as there are system threads](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/read.rs#L104-L126) - -For example, on a system with 32-threads, `cuprate_database` will spawn: -- 1 writer thread -- 32 reader threads - -whose sole responsibility is to listen for database requests, access the database (potentially in parallel), and return a response. - -Note that the `1 system thread = 1 reader thread` model is only the default setting, the reader thread count can be configured by the user to be any number between `1 .. amount_of_system_threads`. - -The reader threads are managed by [`rayon`](https://docs.rs/rayon). - -For an example of where multiple reader threads are used: given a request that asks if any key-image within a set already exists, `cuprate_database` will [split that work between the threads with `rayon`](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/read.rs#L490-L503). - -### 5.5 Shutdown -Once the read/write handles are `Drop`ed, the backing thread(pool) will gracefully exit, automatically. - -Note the writer thread and reader threadpool aren't connected whatsoever; dropping the write handle will make the writer thread exit, however, the reader handle is free to be held onto and can be continued to be read from - and vice-versa for the write handle. - -## 6. Syncing -`cuprate_database`'s database has 5 disk syncing modes. - -1. FastThenSafe -1. Safe -1. Async -1. Threshold -1. Fast - -The default mode is `Safe`. - -This means that upon each transaction commit, all the data that was written will be fully synced to disk. This is the slowest, but safest mode of operation. - -Note that upon any database `Drop`, whether via `service` or dropping the database directly, the current implementation will sync to disk regardless of any configuration. - -For more information on the other modes, read the documentation [here](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/config/sync_mode.rs#L63-L144). - -## 7. Resizing -Database backends that require manually resizing will, by default, use a similar algorithm as `monerod`'s. - -Note that this only relates to the `service` module, where the database is handled by `cuprate_database` itself, not the user. In the case of a user directly using `cuprate_database`, it is up to them on how to resize. 
- -Within `service`, the resizing logic defined [here](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/service/write.rs#L139-L201) does the following: - -- If there's not enough space to fit a write request's data, start a resize -- Each resize adds around [`1_073_745_920`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L104-L160) bytes to the current map size -- A resize will be attempted `3` times before failing - -There are other [resizing algorithms](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L38-L47) that define how the database's memory map grows, although currently the behavior of [`monerod`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L104-L160) is closely followed. - -## 8. (De)serialization -All types stored inside the database are either bytes already, or are perfectly bitcast-able. - -As such, they do not incur heavy (de)serialization costs when storing/fetching them from the database. The main (de)serialization used is [`bytemuck`](https://docs.rs/bytemuck)'s traits and casting functions. - -The size & layout of types is stable across compiler versions, as they are set and determined with [`#[repr(C)]`](https://doc.rust-lang.org/nomicon/other-reprs.html#reprc) and `bytemuck`'s derive macros such as [`bytemuck::Pod`](https://docs.rs/bytemuck/latest/bytemuck/derive.Pod.html). - -Note that the data stored in the tables are still type-safe; we still refer to the key and values within our tables by the type. - -The main deserialization `trait` for database storage is: [`cuprate_database::Storable`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L16-L115). - -- Before storage, the type is [simply cast into bytes](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L125) -- When fetching, the bytes are [simply cast into the type](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L130) - -When a type is casted into bytes, [the reference is casted](https://docs.rs/bytemuck/latest/bytemuck/fn.bytes_of.html), i.e. this is zero-cost serialization. - -However, it is worth noting that when bytes are casted into the type, [it is copied](https://docs.rs/bytemuck/latest/bytemuck/fn.pod_read_unaligned.html). This is due to byte alignment guarantee issues with both backends, see: -- https://github.com/AltSysrq/lmdb-zero/issues/8 -- https://github.com/cberner/redb/issues/360 - -Without this, `bytemuck` will panic with [`TargetAlignmentGreaterAndInputNotAligned`](https://docs.rs/bytemuck/latest/bytemuck/enum.PodCastError.html#variant.TargetAlignmentGreaterAndInputNotAligned) when casting. - -Copying the bytes fixes this problem, although it is more costly than necessary. However, in the main use-case for `cuprate_database` (the `service` module) the bytes would need to be owned regardless as the `Request/Response` API uses owned data types (`T`, `Vec<T>`, `HashMap<K, V>`, etc). 
- -Practically speaking, this means lower-level database functions that normally look like such: ```rust -fn get(key: &Key) -> &Value; +use cuprate_blockchain::{ + cuprate_database::{ + ConcreteEnv, + Env, EnvInner, + DatabaseRo, DatabaseRw, TxRo, TxRw, + }, + config::ConfigBuilder, + tables::{Tables, TablesMut}, + OpenTables, +}; + +# fn main() -> Result<(), Box<dyn std::error::Error>> { +// Create a configuration for the database environment. +let tmp_dir = tempfile::tempdir()?; +let db_dir = tmp_dir.path().to_owned(); +let config = ConfigBuilder::new() + .db_directory(db_dir.into()) + .build(); + +// Initialize the database environment. +let env = cuprate_blockchain::open(config)?; + +// Open up a transaction + tables for writing. +let env_inner = env.env_inner(); +let tx_rw = env_inner.tx_rw()?; +let mut tables = env_inner.open_tables_mut(&tx_rw)?; + +// ⚠️ Write data to the tables directly. +// (not recommended, use `ops` or `service`). +const KEY_IMAGE: [u8; 32] = [88; 32]; +tables.key_images_mut().put(&KEY_IMAGE, &())?; + +// Commit the data written. +drop(tables); +TxRw::commit(tx_rw)?; + +// Read the data, assert it is correct. +let tx_ro = env_inner.tx_ro()?; +let tables = env_inner.open_tables(&tx_ro)?; +let (key_image, _) = tables.key_images().first()?; +assert_eq!(key_image, KEY_IMAGE); +# Ok(()) } ``` -end up looking like this in `cuprate_database`: -```rust -fn get(key: &Key) -> Value; -``` - -Since each backend has its own (de)serialization methods, our types are wrapped in compatibility types that map our `Storable` functions into whatever is required for the backend, e.g: -- [`StorableHeed<T>`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/backend/heed/storable.rs#L11-L45) -- [`StorableRedb<T>`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/backend/redb/storable.rs#L11-L30) - -Compatibility structs also exist for any `Storable` containers: -- [`StorableVec<T>`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L135-L191) -- [`StorableBytes`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L208-L241) - -Again, it's unfortunate that these must be owned, although in `service`'s use-case, they would have to be owned anyway. - -## 9. Schema -This following section contains Cuprate's database schema, it may change throughout the development of Cuprate, as such, nothing here is final. - -### 9.1 Tables -The `CamelCase` names of the table headers documented here (e.g. `TxIds`) are the actual type name of the table within `cuprate_database`. - -Note that words written within `code blocks` mean that it is a real type defined and usable within `cuprate_database`. Other standard types like u64 and type aliases (TxId) are written normally. - -Within `cuprate_database::tables`, the below table is essentially defined as-is with [a macro](https://github.com/Cuprate/cuprate/blob/31ce89412aa174fc33754f22c9a6d9ef5ddeda28/database/src/tables.rs#L369-L470). - -Many of the data types stored are the same data types, although are different semantically, as such, a map of aliases used and their real data types is also provided below. 
- -| Alias | Real Type | -|----------------------------------------------------|-----------| -| BlockHeight, Amount, AmountIndex, TxId, UnlockTime | u64 -| BlockHash, KeyImage, TxHash, PrunableHash | [u8; 32] - -| Table | Key | Value | Description | -|-------------------|----------------------|--------------------|-------------| -| `BlockBlobs` | BlockHeight | `StorableVec<u8>` | Maps a block's height to a serialized byte form of a block -| `BlockHeights` | BlockHash | BlockHeight | Maps a block's hash to its height -| `BlockInfos` | BlockHeight | `BlockInfo` | Contains metadata of all blocks -| `KeyImages` | KeyImage | () | This table is a set with no value, it stores transaction key images -| `NumOutputs` | Amount | u64 | Maps an output's amount to the number of outputs with that amount -| `Outputs` | `PreRctOutputId` | `Output` | This table contains legacy CryptoNote outputs which have clear amounts. This table will not contain an output with 0 amount. -| `PrunedTxBlobs` | TxId | `StorableVec<u8>` | Contains pruned transaction blobs (even if the database is not pruned) -| `PrunableTxBlobs` | TxId | `StorableVec<u8>` | Contains the prunable part of a transaction -| `PrunableHashes` | TxId | PrunableHash | Contains the hash of the prunable part of a transaction -| `RctOutputs` | AmountIndex | `RctOutput` | Contains RingCT outputs mapped from their global RCT index -| `TxBlobs` | TxId | `StorableVec<u8>` | Serialized transaction blobs (bytes) -| `TxIds` | TxHash | TxId | Maps a transaction's hash to its index/ID -| `TxHeights` | TxId | BlockHeight | Maps a transaction's ID to the height of the block it comes from -| `TxOutputs` | TxId | `StorableVec<u64>` | Gives the amount indices of a transaction's outputs -| `TxUnlockTime` | TxId | UnlockTime | Stores the unlock time of a transaction (only if it has a non-zero lock time) - -The definitions for aliases and types (e.g. `RctOutput`) are within the [`cuprate_database::types`](https://github.com/Cuprate/cuprate/blob/31ce89412aa174fc33754f22c9a6d9ef5ddeda28/database/src/types.rs#L51) module. - -<!-- TODO(Boog900): We could split this table again into `RingCT (non-miner) Outputs` and `RingCT (miner) Outputs` as for miner outputs we can store the amount instead of commitment saving 24 bytes per miner output. --> - -### 9.2 Multimap tables -When referencing outputs, Monero will [use the amount and the amount index](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/blockchain_db/lmdb/db_lmdb.cpp#L3447-L3449). This means 2 keys are needed to reach an output. - -With LMDB you can set the `DUP_SORT` flag on a table and then set the key/value to: -```rust -Key = KEY_PART_1 -``` -```rust -Value = { - KEY_PART_2, - VALUE // The actual value we are storing. -} -``` - -Then you can set a custom value sorting function that only takes `KEY_PART_2` into account; this is how `monerod` does it. - -This requires that the underlying database supports: -- multimap tables -- custom sort functions on values -- setting a cursor on a specific key/value - ---- - -Another way to implement this is as follows: -```rust -Key = { KEY_PART_1, KEY_PART_2 } -``` -```rust -Value = VALUE -``` - -Then the key type is simply used to look up the value; this is how `cuprate_database` does it. - -For example, the key/value pair for outputs is: -```rust -PreRctOutputId => Output -``` -where `PreRctOutputId` looks like this: -```rust -struct PreRctOutputId { - amount: u64, - amount_index: u64, -} -``` - -## 10. 
Known issues and tradeoffs -`cuprate_database` takes many tradeoffs, whether due to: -- Prioritizing certain values over others -- Not having a better solution -- Being "good enough" - -This is a list of the larger ones, along with issues that don't have answers yet. - -### 10.1 Traits abstracting backends -Although all database backends used are very similar, they have some crucial differences in small implementation details that must be worked around when conforming them to `cuprate_database`'s traits. - -Put simply: using `cuprate_database`'s traits is less efficient and more awkward than using the backend directly. - -For example: -- [Data types must be wrapped in compatibility layers when they otherwise wouldn't be](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/backend/heed/env.rs#L101-L116) -- [There are types that only apply to a specific backend, but are visible to all](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/error.rs#L86-L89) -- [There are extra layers of abstraction to smoothen the differences between all backends](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/env.rs#L62-L68) -- [Existing functionality of backends must be taken away, as it isn't supported in the others](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/database.rs#L27-L34) - -This is a _tradeoff_ that `cuprate_database` takes, as: -- The backend itself is usually not the source of bottlenecks in the greater system, as such, small inefficiencies are OK -- None of the lost functionality is crucial for operation -- The ability to use, test, and swap between multiple database backends is [worth it](https://github.com/Cuprate/cuprate/pull/35#issuecomment-1952804393) - -### 10.2 Hot-swappable backends -Using a different backend is really as simple as re-building `cuprate_database` with a different feature flag: -```bash -# Use LMDB. -cargo build --package cuprate-database --features heed - -# Use redb. -cargo build --package cuprate-database --features redb -``` - -This is "good enough" for now; ideally, however, this hot-swapping of backends would be possible at _runtime_. - -As it is now, `cuprate_database` cannot compile both backends and swap based on user input at runtime; it must be compiled with a certain backend, which will produce a binary with only that backend. - -This also means things like [CI testing multiple backends is awkward](https://github.com/Cuprate/cuprate/blob/main/.github/workflows/ci.yml#L132-L136), as we must re-compile with different feature flags instead. - -### 10.3 Copying unaligned bytes -As mentioned in [`8. (De)serialization`](#8-deserialization), bytes are _copied_ when they are turned into a type `T` due to unaligned bytes being returned from database backends. - -Using a regular reference cast results in an improperly aligned type `T`; [such a type even existing causes undefined behavior](https://doc.rust-lang.org/reference/behavior-considered-undefined.html). In our case, `bytemuck` saves us by panicking before this occurs. - -Thus, when using `cuprate_database`'s database traits, an _owned_ `T` is returned. - -This is doubly unfortunate for `&[u8]`, as it does not even need deserialization.
- -For example, `StorableVec` could have been this: -```rust -enum StorableBytes<'a, T: Storable> { - Owned(T), - Ref(&'a T), -} -``` -but this would require supporting types that must be copied regardless, with the occasional `&[u8]` that can be returned without casting. This was hard to do in a generic way, thus all `[u8]`'s are copied and returned as owned `StorableVec`s. - -This is a _tradeoff_ `cuprate_database` takes as: -- `bytemuck::pod_read_unaligned` is cheap enough -- The main API, `service`, needs to return owned values anyway -- Having no references removes a lot of lifetime complexity - -The alternative is either: -- Using proper (de)serialization instead of casting (which comes with its own costs) -- Somehow fixing the alignment issues in the backends mentioned previously - -### 10.4 Endianness -`cuprate_database`'s (de)serialization and storage of bytes are native-endian, as in, byte storage order will depend on the machine it is running on. - -As Cuprate's build-targets are all little-endian ([big-endian by default machines barely exist](https://en.wikipedia.org/wiki/Endianness#Hardware)), this doesn't matter much and the byte ordering can be seen as a constant. - -Practically, this means `cuprated`'s database files can be transferred across computers, as can `monerod`'s. - -### 10.5 Extra table data -Some of `cuprate_database`'s tables differ from `monerod`'s tables; for example, the way [`9.2 Multimap tables`](#92-multimap-tables) are implemented requires that the primary key is stored _for all_ entries, whereas `monerod` only needs to store it once. - -For example: -```rust -// `monerod` only stores `amount: 1` once, -// `cuprated` stores it each time it appears. -struct PreRctOutputId { amount: 1, amount_index: 0 } -struct PreRctOutputId { amount: 1, amount_index: 1 } -``` - -This means `cuprated`'s database will be slightly larger than `monerod`'s. - -The current method `cuprate_database` uses will be "good enough" until usage shows that it must be optimized, as multimap tables are tricky to implement across all backends. diff --git a/storage/blockchain/src/backend/tests.rs b/storage/blockchain/src/backend/tests.rs deleted file mode 100644 index 3daec669..00000000 --- a/storage/blockchain/src/backend/tests.rs +++ /dev/null @@ -1,550 +0,0 @@ -//! Tests for `cuprate_blockchain`'s backends. -//! -//! These tests are fully trait-based, meaning there -//! is no reference to `backend/`-specific types. -//! -//! As such, which backend is tested is -//! dependent on the feature flags used. -//! -//! | Feature flag | Tested backend | -//! |---------------|----------------| -//! | Only `redb` | `redb` -//! | Anything else | `heed` -//! -//! To test `redb`, it (and only it) must be enabled.
- -//---------------------------------------------------------------------------------------------------- Import - -use crate::{ - database::{DatabaseIter, DatabaseRo, DatabaseRw}, - env::{Env, EnvInner}, - error::RuntimeError, - resize::ResizeAlgorithm, - storable::StorableVec, - tables::{ - BlockBlobs, BlockHeights, BlockInfos, KeyImages, NumOutputs, Outputs, PrunableHashes, - PrunableTxBlobs, PrunedTxBlobs, RctOutputs, TxBlobs, TxHeights, TxIds, TxOutputs, - TxUnlockTime, - }, - tables::{TablesIter, TablesMut}, - tests::tmp_concrete_env, - transaction::{TxRo, TxRw}, - types::{ - Amount, AmountIndex, AmountIndices, BlockBlob, BlockHash, BlockHeight, BlockInfo, KeyImage, - Output, OutputFlags, PreRctOutputId, PrunableBlob, PrunableHash, PrunedBlob, RctOutput, - TxBlob, TxHash, TxId, UnlockTime, - }, - ConcreteEnv, -}; - -//---------------------------------------------------------------------------------------------------- Tests -/// Simply call [`Env::open`]. If this fails, something is really wrong. -#[test] -fn open() { - tmp_concrete_env(); -} - -/// Create database transactions, but don't write any data. -#[test] -fn tx() { - let (env, _tempdir) = tmp_concrete_env(); - let env_inner = env.env_inner(); - - TxRo::commit(env_inner.tx_ro().unwrap()).unwrap(); - TxRw::commit(env_inner.tx_rw().unwrap()).unwrap(); - TxRw::abort(env_inner.tx_rw().unwrap()).unwrap(); -} - -/// Open (and verify) that all database tables -/// exist already after calling [`Env::open`]. -#[test] -fn open_db() { - let (env, _tempdir) = tmp_concrete_env(); - let env_inner = env.env_inner(); - let tx_ro = env_inner.tx_ro().unwrap(); - let tx_rw = env_inner.tx_rw().unwrap(); - - // Open all tables in read-only mode. - // This should be updated when tables are modified. - env_inner.open_db_ro::<BlockBlobs>(&tx_ro).unwrap(); - env_inner.open_db_ro::<BlockHeights>(&tx_ro).unwrap(); - env_inner.open_db_ro::<BlockInfos>(&tx_ro).unwrap(); - env_inner.open_db_ro::<KeyImages>(&tx_ro).unwrap(); - env_inner.open_db_ro::<NumOutputs>(&tx_ro).unwrap(); - env_inner.open_db_ro::<Outputs>(&tx_ro).unwrap(); - env_inner.open_db_ro::<PrunableHashes>(&tx_ro).unwrap(); - env_inner.open_db_ro::<PrunableTxBlobs>(&tx_ro).unwrap(); - env_inner.open_db_ro::<PrunedTxBlobs>(&tx_ro).unwrap(); - env_inner.open_db_ro::<RctOutputs>(&tx_ro).unwrap(); - env_inner.open_db_ro::<TxBlobs>(&tx_ro).unwrap(); - env_inner.open_db_ro::<TxHeights>(&tx_ro).unwrap(); - env_inner.open_db_ro::<TxIds>(&tx_ro).unwrap(); - env_inner.open_db_ro::<TxOutputs>(&tx_ro).unwrap(); - env_inner.open_db_ro::<TxUnlockTime>(&tx_ro).unwrap(); - TxRo::commit(tx_ro).unwrap(); - - // Open all tables in read/write mode. 
- env_inner.open_db_rw::<BlockBlobs>(&tx_rw).unwrap(); - env_inner.open_db_rw::<BlockHeights>(&tx_rw).unwrap(); - env_inner.open_db_rw::<BlockInfos>(&tx_rw).unwrap(); - env_inner.open_db_rw::<KeyImages>(&tx_rw).unwrap(); - env_inner.open_db_rw::<NumOutputs>(&tx_rw).unwrap(); - env_inner.open_db_rw::<Outputs>(&tx_rw).unwrap(); - env_inner.open_db_rw::<PrunableHashes>(&tx_rw).unwrap(); - env_inner.open_db_rw::<PrunableTxBlobs>(&tx_rw).unwrap(); - env_inner.open_db_rw::<PrunedTxBlobs>(&tx_rw).unwrap(); - env_inner.open_db_rw::<RctOutputs>(&tx_rw).unwrap(); - env_inner.open_db_rw::<TxBlobs>(&tx_rw).unwrap(); - env_inner.open_db_rw::<TxHeights>(&tx_rw).unwrap(); - env_inner.open_db_rw::<TxIds>(&tx_rw).unwrap(); - env_inner.open_db_rw::<TxOutputs>(&tx_rw).unwrap(); - env_inner.open_db_rw::<TxUnlockTime>(&tx_rw).unwrap(); - TxRw::commit(tx_rw).unwrap(); -} - -/// Test `Env` resizes. -#[test] -fn resize() { - // This test is only valid for `Env`'s that need to resize manually. - if !ConcreteEnv::MANUAL_RESIZE { - return; - } - - let (env, _tempdir) = tmp_concrete_env(); - - // Resize by the OS page size. - let page_size = crate::resize::page_size(); - let old_size = env.current_map_size(); - env.resize_map(Some(ResizeAlgorithm::FixedBytes(page_size))); - - // Assert it resized exactly by the OS page size. - let new_size = env.current_map_size(); - assert_eq!(new_size, old_size + page_size.get()); -} - -/// Test that `Env`'s that don't manually resize. -#[test] -#[should_panic = "unreachable"] -fn non_manual_resize_1() { - if ConcreteEnv::MANUAL_RESIZE { - unreachable!(); - } else { - let (env, _tempdir) = tmp_concrete_env(); - env.resize_map(None); - } -} - -#[test] -#[should_panic = "unreachable"] -fn non_manual_resize_2() { - if ConcreteEnv::MANUAL_RESIZE { - unreachable!(); - } else { - let (env, _tempdir) = tmp_concrete_env(); - env.current_map_size(); - } -} - -/// Test all `DatabaseR{o,w}` operations. -#[test] -fn db_read_write() { - let (env, _tempdir) = tmp_concrete_env(); - let env_inner = env.env_inner(); - let tx_rw = env_inner.tx_rw().unwrap(); - let mut table = env_inner.open_db_rw::<Outputs>(&tx_rw).unwrap(); - - /// The (1st) key. - const KEY: PreRctOutputId = PreRctOutputId { - amount: 1, - amount_index: 123, - }; - /// The expected value. - const VALUE: Output = Output { - key: [35; 32], - height: 45_761_798, - output_flags: OutputFlags::empty(), - tx_idx: 2_353_487, - }; - /// How many `(key, value)` pairs will be inserted. - const N: u64 = 100; - - /// Assert 2 `Output`'s are equal, and that accessing - /// their fields don't result in an unaligned panic. - fn assert_same(output: Output) { - assert_eq!(output, VALUE); - assert_eq!(output.key, VALUE.key); - assert_eq!(output.height, VALUE.height); - assert_eq!(output.output_flags, VALUE.output_flags); - assert_eq!(output.tx_idx, VALUE.tx_idx); - } - - assert!(table.is_empty().unwrap()); - - // Insert keys. - let mut key = KEY; - for _ in 0..N { - table.put(&key, &VALUE).unwrap(); - key.amount += 1; - } - - assert_eq!(table.len().unwrap(), N); - - // Assert the first/last `(key, value)`s are there. - { - assert!(table.contains(&KEY).unwrap()); - let get: Output = table.get(&KEY).unwrap(); - assert_same(get); - - let first: Output = table.first().unwrap().1; - assert_same(first); - - let last: Output = table.last().unwrap().1; - assert_same(last); - } - - // Commit transactions, create new ones. 
- drop(table); - TxRw::commit(tx_rw).unwrap(); - let tx_ro = env_inner.tx_ro().unwrap(); - let table_ro = env_inner.open_db_ro::<Outputs>(&tx_ro).unwrap(); - let tx_rw = env_inner.tx_rw().unwrap(); - let mut table = env_inner.open_db_rw::<Outputs>(&tx_rw).unwrap(); - - // Assert the whole range is there. - { - let range = table_ro.get_range(..).unwrap(); - let mut i = 0; - for result in range { - let value: Output = result.unwrap(); - assert_same(value); - - i += 1; - } - assert_eq!(i, N); - } - - // `get_range()` tests. - let mut key = KEY; - key.amount += N; - let range = KEY..key; - - // Assert count is correct. - assert_eq!( - N as usize, - table_ro.get_range(range.clone()).unwrap().count() - ); - - // Assert each returned value from the iterator is owned. - { - let mut iter = table_ro.get_range(range.clone()).unwrap(); - let value: Output = iter.next().unwrap().unwrap(); // 1. take value out - drop(iter); // 2. drop the `impl Iterator + 'a` - assert_same(value); // 3. assert even without the iterator, the value is alive - } - - // Assert each value is the same. - { - let mut iter = table_ro.get_range(range).unwrap(); - for _ in 0..N { - let value: Output = iter.next().unwrap().unwrap(); - assert_same(value); - } - } - - // Assert `update()` works. - { - const HEIGHT: u32 = 999; - - assert_ne!(table.get(&KEY).unwrap().height, HEIGHT); - - table - .update(&KEY, |mut value| { - value.height = HEIGHT; - Some(value) - }) - .unwrap(); - - assert_eq!(table.get(&KEY).unwrap().height, HEIGHT); - } - - // Assert deleting works. - { - table.delete(&KEY).unwrap(); - let value = table.get(&KEY); - assert!(!table.contains(&KEY).unwrap()); - assert!(matches!(value, Err(RuntimeError::KeyNotFound))); - // Assert the other `(key, value)` pairs are still there. - let mut key = KEY; - key.amount += N - 1; // we used inclusive `0..N` - let value = table.get(&key).unwrap(); - assert_same(value); - } - - // Assert `take()` works. - { - let mut key = KEY; - key.amount += 1; - let value = table.take(&key).unwrap(); - assert_eq!(value, VALUE); - - let get = table.get(&KEY); - assert!(!table.contains(&key).unwrap()); - assert!(matches!(get, Err(RuntimeError::KeyNotFound))); - - // Assert the other `(key, value)` pairs are still there. - key.amount += 1; - let value = table.get(&key).unwrap(); - assert_same(value); - } - - drop(table); - TxRw::commit(tx_rw).unwrap(); - - // Assert `clear_db()` works. - { - let mut tx_rw = env_inner.tx_rw().unwrap(); - env_inner.clear_db::<Outputs>(&mut tx_rw).unwrap(); - let table = env_inner.open_db_rw::<Outputs>(&tx_rw).unwrap(); - assert!(table.is_empty().unwrap()); - for n in 0..N { - let mut key = KEY; - key.amount += n; - let value = table.get(&key); - assert!(matches!(value, Err(RuntimeError::KeyNotFound))); - assert!(!table.contains(&key).unwrap()); - } - - // Reader still sees old value. - assert!(!table_ro.is_empty().unwrap()); - - // Writer sees updated value (nothing). - assert!(table.is_empty().unwrap()); - } -} - -/// Assert that `key`'s in database tables are sorted in -/// an ordered B-Tree fashion, i.e. `min_value -> max_value`. -#[test] -fn tables_are_sorted() { - let (env, _tmp) = tmp_concrete_env(); - let env_inner = env.env_inner(); - let tx_rw = env_inner.tx_rw().unwrap(); - let mut tables_mut = env_inner.open_tables_mut(&tx_rw).unwrap(); - - // Insert `{5, 4, 3, 2, 1, 0}`, assert each new - // number inserted is the minimum `first()` value. 
- for key in (0..6).rev() { - tables_mut.num_outputs_mut().put(&key, &123).unwrap(); - let (first, _) = tables_mut.num_outputs_mut().first().unwrap(); - assert_eq!(first, key); - } - - drop(tables_mut); - TxRw::commit(tx_rw).unwrap(); - let tx_rw = env_inner.tx_rw().unwrap(); - - // Assert iterators are ordered. - { - let tx_ro = env_inner.tx_ro().unwrap(); - let tables = env_inner.open_tables(&tx_ro).unwrap(); - let t = tables.num_outputs_iter(); - let iter = t.iter().unwrap(); - let keys = t.keys().unwrap(); - for ((i, iter), key) in (0..6).zip(iter).zip(keys) { - let (iter, _) = iter.unwrap(); - let key = key.unwrap(); - assert_eq!(i, iter); - assert_eq!(iter, key); - } - } - - let mut tables_mut = env_inner.open_tables_mut(&tx_rw).unwrap(); - let t = tables_mut.num_outputs_mut(); - - // Assert the `first()` values are the minimum, i.e. `{0, 1, 2}` - for key in 0..3 { - let (first, _) = t.first().unwrap(); - assert_eq!(first, key); - t.delete(&key).unwrap(); - } - - // Assert the `last()` values are the maximum, i.e. `{5, 4, 3}` - for key in (3..6).rev() { - let (last, _) = tables_mut.num_outputs_mut().last().unwrap(); - assert_eq!(last, key); - tables_mut.num_outputs_mut().delete(&key).unwrap(); - } -} - -//---------------------------------------------------------------------------------------------------- Table Tests -/// Test multiple tables and their key + values. -/// -/// Each one of these tests: -/// - Opens a specific table -/// - Essentially does the `db_read_write` test -macro_rules! test_tables { - ($( - $table:ident, // Table type - $key_type:ty => // Key (type) - $value_type:ty, // Value (type) - $key:expr => // Key (the value) - $value:expr, // Value (the value) - )* $(,)?) => { paste::paste! { $( - // Test function's name is the table type in `snake_case`. - #[test] - fn [<$table:snake>]() { - // Open the database env and table. - let (env, _tempdir) = tmp_concrete_env(); - let env_inner = env.env_inner(); - let mut tx_rw = env_inner.tx_rw().unwrap(); - let mut table = env_inner.open_db_rw::<$table>(&mut tx_rw).unwrap(); - - /// The expected key. - const KEY: $key_type = $key; - // The expected value. - let value: $value_type = $value; - - // Assert a passed value is equal to the const value. - let assert_eq = |v: &$value_type| { - assert_eq!(v, &value); - }; - - // Insert the key. - table.put(&KEY, &value).unwrap(); - // Assert key is there. - { - let value: $value_type = table.get(&KEY).unwrap(); - assert_eq(&value); - } - - assert!(table.contains(&KEY).unwrap()); - assert_eq!(table.len().unwrap(), 1); - - // Commit transactions, create new ones. - drop(table); - TxRw::commit(tx_rw).unwrap(); - let mut tx_rw = env_inner.tx_rw().unwrap(); - let tx_ro = env_inner.tx_ro().unwrap(); - let mut table = env_inner.open_db_rw::<$table>(&tx_rw).unwrap(); - let table_ro = env_inner.open_db_ro::<$table>(&tx_ro).unwrap(); - - // Assert `get_range()` works. - { - let range = KEY..; - assert_eq!(1, table_ro.get_range(range.clone()).unwrap().count()); - let mut iter = table_ro.get_range(range).unwrap(); - let value = iter.next().unwrap().unwrap(); - assert_eq(&value); - } - - // Assert deleting works. - { - table.delete(&KEY).unwrap(); - let value = table.get(&KEY); - assert!(matches!(value, Err(RuntimeError::KeyNotFound))); - assert!(!table.contains(&KEY).unwrap()); - assert_eq!(table.len().unwrap(), 0); - } - - table.put(&KEY, &value).unwrap(); - - // Assert `clear_db()` works. 
- { - drop(table); - env_inner.clear_db::<$table>(&mut tx_rw).unwrap(); - let table = env_inner.open_db_rw::<$table>(&mut tx_rw).unwrap(); - let value = table.get(&KEY); - assert!(matches!(value, Err(RuntimeError::KeyNotFound))); - assert!(!table.contains(&KEY).unwrap()); - assert_eq!(table.len().unwrap(), 0); - } - } - )*}}; -} - -// Notes: -// - Keep this sorted A-Z (by table name) -test_tables! { - BlockBlobs, // Table type - BlockHeight => BlockBlob, // Key type => Value type - 123 => StorableVec(vec![1,2,3,4,5,6,7,8]), // Actual key => Actual value - - BlockHeights, - BlockHash => BlockHeight, - [32; 32] => 123, - - BlockInfos, - BlockHeight => BlockInfo, - 123 => BlockInfo { - timestamp: 1, - cumulative_generated_coins: 123, - weight: 321, - cumulative_difficulty_low: 111, - cumulative_difficulty_high: 111, - block_hash: [54; 32], - cumulative_rct_outs: 2389, - long_term_weight: 2389, - }, - - KeyImages, - KeyImage => (), - [32; 32] => (), - - NumOutputs, - Amount => AmountIndex, - 123 => 123, - - TxBlobs, - TxId => TxBlob, - 123 => StorableVec(vec![1,2,3,4,5,6,7,8]), - - TxIds, - TxHash => TxId, - [32; 32] => 123, - - TxHeights, - TxId => BlockHeight, - 123 => 123, - - TxOutputs, - TxId => AmountIndices, - 123 => StorableVec(vec![1,2,3,4,5,6,7,8]), - - TxUnlockTime, - TxId => UnlockTime, - 123 => 123, - - Outputs, - PreRctOutputId => Output, - PreRctOutputId { - amount: 1, - amount_index: 2, - } => Output { - key: [1; 32], - height: 1, - output_flags: OutputFlags::empty(), - tx_idx: 3, - }, - - PrunedTxBlobs, - TxId => PrunedBlob, - 123 => StorableVec(vec![1,2,3,4,5,6,7,8]), - - PrunableTxBlobs, - TxId => PrunableBlob, - 123 => StorableVec(vec![1,2,3,4,5,6,7,8]), - - PrunableHashes, - TxId => PrunableHash, - 123 => [32; 32], - - RctOutputs, - AmountIndex => RctOutput, - 123 => RctOutput { - key: [1; 32], - height: 1, - output_flags: OutputFlags::empty(), - tx_idx: 3, - commitment: [3; 32], - }, -} diff --git a/storage/blockchain/src/config/config.rs b/storage/blockchain/src/config/config.rs index 9d932ab1..c58e292a 100644 --- a/storage/blockchain/src/config/config.rs +++ b/storage/blockchain/src/config/config.rs @@ -1,21 +1,15 @@ //! The main [`Config`] struct, holding all configurable values. //---------------------------------------------------------------------------------------------------- Import -use std::{ - borrow::Cow, - path::{Path, PathBuf}, -}; +use std::{borrow::Cow, path::Path}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +use cuprate_database::{config::SyncMode, resize::ResizeAlgorithm}; use cuprate_helper::fs::cuprate_blockchain_dir; -use crate::{ - config::{ReaderThreads, SyncMode}, - constants::DATABASE_DATA_FILENAME, - resize::ResizeAlgorithm, -}; +use crate::config::ReaderThreads; //---------------------------------------------------------------------------------------------------- ConfigBuilder /// Builder for [`Config`]. @@ -27,14 +21,11 @@ pub struct ConfigBuilder { /// [`Config::db_directory`]. db_directory: Option<Cow<'static, Path>>, - /// [`Config::sync_mode`]. - sync_mode: Option<SyncMode>, + /// [`Config::cuprate_database_config`]. + db_config: cuprate_database::config::ConfigBuilder, /// [`Config::reader_threads`]. reader_threads: Option<ReaderThreads>, - - /// [`Config::resize_algorithm`]. - resize_algorithm: Option<ResizeAlgorithm>, } impl ConfigBuilder { @@ -42,12 +33,13 @@ impl ConfigBuilder { /// /// [`ConfigBuilder::build`] can be called immediately /// after this function to use default values. 
- pub const fn new() -> Self { + pub fn new() -> Self { Self { db_directory: None, - sync_mode: None, + db_config: cuprate_database::config::ConfigBuilder::new(Cow::Borrowed( + cuprate_blockchain_dir(), + )), reader_threads: None, - resize_algorithm: None, } } @@ -65,57 +57,37 @@ impl ConfigBuilder { .db_directory .unwrap_or_else(|| Cow::Borrowed(cuprate_blockchain_dir())); - // Add the database filename to the directory. - let db_file = { - let mut db_file = db_directory.to_path_buf(); - db_file.push(DATABASE_DATA_FILENAME); - Cow::Owned(db_file) - }; + let reader_threads = self.reader_threads.unwrap_or_default(); + let db_config = self + .db_config + .db_directory(db_directory) + .reader_threads(reader_threads.as_threads()) + .build(); Config { - db_directory, - db_file, - sync_mode: self.sync_mode.unwrap_or_default(), - reader_threads: self.reader_threads.unwrap_or_default(), - resize_algorithm: self.resize_algorithm.unwrap_or_default(), + db_config, + reader_threads, } } /// Set a custom database directory (and file) [`Path`]. #[must_use] - pub fn db_directory(mut self, db_directory: PathBuf) -> Self { - self.db_directory = Some(Cow::Owned(db_directory)); + pub fn db_directory(mut self, db_directory: Cow<'static, Path>) -> Self { + self.db_directory = Some(db_directory); self } - /// Tune the [`ConfigBuilder`] for the highest performing, - /// but also most resource-intensive & maybe risky settings. - /// - /// Good default for testing, and resource-available machines. + /// Calls [`cuprate_database::config::ConfigBuilder::sync_mode`]. #[must_use] - pub fn fast(mut self) -> Self { - self.sync_mode = Some(SyncMode::Fast); - self.reader_threads = Some(ReaderThreads::OnePerThread); - self.resize_algorithm = Some(ResizeAlgorithm::default()); + pub fn sync_mode(mut self, sync_mode: SyncMode) -> Self { + self.db_config = self.db_config.sync_mode(sync_mode); self } - /// Tune the [`ConfigBuilder`] for the lowest performing, - /// but also least resource-intensive settings. - /// - /// Good default for resource-limited machines, e.g. a cheap VPS. + /// Calls [`cuprate_database::config::ConfigBuilder::resize_algorithm`]. #[must_use] - pub fn low_power(mut self) -> Self { - self.sync_mode = Some(SyncMode::default()); - self.reader_threads = Some(ReaderThreads::One); - self.resize_algorithm = Some(ResizeAlgorithm::default()); - self - } - - /// Set a custom [`SyncMode`]. - #[must_use] - pub const fn sync_mode(mut self, sync_mode: SyncMode) -> Self { - self.sync_mode = Some(sync_mode); + pub fn resize_algorithm(mut self, resize_algorithm: ResizeAlgorithm) -> Self { + self.db_config = self.db_config.resize_algorithm(resize_algorithm); self } @@ -126,102 +98,96 @@ impl ConfigBuilder { self } - /// Set a custom [`ResizeAlgorithm`]. + /// Tune the [`ConfigBuilder`] for the highest performing, + /// but also most resource-intensive & maybe risky settings. + /// + /// Good default for testing, and resource-available machines. #[must_use] - pub const fn resize_algorithm(mut self, resize_algorithm: ResizeAlgorithm) -> Self { - self.resize_algorithm = Some(resize_algorithm); + pub fn fast(mut self) -> Self { + self.db_config = + cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(cuprate_blockchain_dir())) + .fast(); + + self.reader_threads = Some(ReaderThreads::OnePerThread); + self + } + + /// Tune the [`ConfigBuilder`] for the lowest performing, + /// but also least resource-intensive settings. + /// + /// Good default for resource-limited machines, e.g. a cheap VPS. 
+ #[must_use] + pub fn low_power(mut self) -> Self { + self.db_config = + cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(cuprate_blockchain_dir())) + .low_power(); + + self.reader_threads = Some(ReaderThreads::One); self } } impl Default for ConfigBuilder { fn default() -> Self { + let db_directory = Cow::Borrowed(cuprate_blockchain_dir()); Self { - db_directory: Some(Cow::Borrowed(cuprate_blockchain_dir())), - sync_mode: Some(SyncMode::default()), + db_directory: Some(db_directory.clone()), + db_config: cuprate_database::config::ConfigBuilder::new(db_directory), reader_threads: Some(ReaderThreads::default()), - resize_algorithm: Some(ResizeAlgorithm::default()), } } } //---------------------------------------------------------------------------------------------------- Config -/// Database [`Env`](crate::Env) configuration. +/// `cuprate_blockchain` configuration. /// -/// This is the struct passed to [`Env::open`](crate::Env::open) that -/// allows the database to be configured in various ways. +/// This is a configuration built on-top of [`cuprate_database::config::Config`]. +/// +/// It contains configuration specific to this crate, plus the database config. /// /// For construction, either use [`ConfigBuilder`] or [`Config::default`]. -/// -// SOMEDAY: there's are many more options to add in the future. #[derive(Debug, Clone, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct Config { - //------------------------ Database PATHs - // These are private since we don't want - // users messing with them after construction. - /// The directory used to store all database files. - /// - /// By default, if no value is provided in the [`Config`] - /// constructor functions, this will be [`cuprate_blockchain_dir`]. - /// - // SOMEDAY: we should also support `/etc/cuprated.conf`. - // This could be represented with an `enum DbPath { Default, Custom, Etc, }` - pub(crate) db_directory: Cow<'static, Path>, - /// The actual database data file. - /// - /// This is private, and created from the above `db_directory`. - pub(crate) db_file: Cow<'static, Path>, - - /// Disk synchronization mode. - pub sync_mode: SyncMode, + /// The database configuration. + pub db_config: cuprate_database::config::Config, /// Database reader thread count. pub reader_threads: ReaderThreads, - - /// Database memory map resizing algorithm. - /// - /// This is used as the default fallback, but - /// custom algorithms can be used as well with - /// [`Env::resize_map`](crate::Env::resize_map). - pub resize_algorithm: ResizeAlgorithm, } impl Config { /// Create a new [`Config`] with sane default settings. /// - /// The [`Config::db_directory`] will be [`cuprate_blockchain_dir`]. + /// The [`cuprate_database::config::Config::db_directory`] + /// will be set to [`cuprate_blockchain_dir`]. /// /// All other values will be [`Default::default`]. /// /// Same as [`Config::default`]. 
/// /// ```rust - /// use cuprate_blockchain::{config::*, resize::*, DATABASE_DATA_FILENAME}; + /// use cuprate_database::{ + /// config::SyncMode, + /// resize::ResizeAlgorithm, + /// DATABASE_DATA_FILENAME, + /// }; /// use cuprate_helper::fs::*; /// + /// use cuprate_blockchain::config::*; + /// /// let config = Config::new(); /// - /// assert_eq!(config.db_directory(), cuprate_blockchain_dir()); - /// assert!(config.db_file().starts_with(cuprate_blockchain_dir())); - /// assert!(config.db_file().ends_with(DATABASE_DATA_FILENAME)); - /// assert_eq!(config.sync_mode, SyncMode::default()); + /// assert_eq!(config.db_config.db_directory(), cuprate_blockchain_dir()); + /// assert!(config.db_config.db_file().starts_with(cuprate_blockchain_dir())); + /// assert!(config.db_config.db_file().ends_with(DATABASE_DATA_FILENAME)); + /// assert_eq!(config.db_config.sync_mode, SyncMode::default()); + /// assert_eq!(config.db_config.resize_algorithm, ResizeAlgorithm::default()); /// assert_eq!(config.reader_threads, ReaderThreads::default()); - /// assert_eq!(config.resize_algorithm, ResizeAlgorithm::default()); /// ``` pub fn new() -> Self { ConfigBuilder::default().build() } - - /// Return the absolute [`Path`] to the database directory. - pub const fn db_directory(&self) -> &Cow<'_, Path> { - &self.db_directory - } - - /// Return the absolute [`Path`] to the database data file. - pub const fn db_file(&self) -> &Cow<'_, Path> { - &self.db_file - } } impl Default for Config { diff --git a/storage/blockchain/src/config/mod.rs b/storage/blockchain/src/config/mod.rs index 141790b4..7ecc14c4 100644 --- a/storage/blockchain/src/config/mod.rs +++ b/storage/blockchain/src/config/mod.rs @@ -1,28 +1,31 @@ -//! Database [`Env`](crate::Env) configuration. +//! Database configuration. //! //! This module contains the main [`Config`]uration struct -//! for the database [`Env`](crate::Env)ironment, and types -//! related to configuration settings. +//! for the database [`Env`](cuprate_database::Env)ironment, +//! and blockchain-specific configuration. +//! +//! It also contains types related to configuration settings. //! //! The main constructor is the [`ConfigBuilder`]. //! //! These configurations are processed at runtime, meaning -//! the `Env` can/will dynamically adjust its behavior -//! based on these values. +//! the `Env` can/will dynamically adjust its behavior based +//! on these values. //! //! # Example //! ```rust //! use cuprate_blockchain::{ -//! Env, -//! config::{ConfigBuilder, ReaderThreads, SyncMode} +//! cuprate_database::{Env, config::SyncMode}, +//! config::{ConfigBuilder, ReaderThreads}, //! }; //! //! # fn main() -> Result<(), Box<dyn std::error::Error>> { -//! let db_dir = tempfile::tempdir()?; +//! let tmp_dir = tempfile::tempdir()?; +//! let db_dir = tmp_dir.path().to_owned(); //! //! let config = ConfigBuilder::new() //! // Use a custom database directory. -//! .db_directory(db_dir.path().to_path_buf()) +//! .db_directory(db_dir.into()) //! // Use as many reader threads as possible (when using `service`). //! .reader_threads(ReaderThreads::OnePerThread) //! // Use the fastest sync mode. @@ -33,7 +36,7 @@ //! // Start a database `service` using this configuration. //! let (reader_handle, _) = cuprate_blockchain::service::init(config.clone())?; //! // It's using the config we provided. -//! assert_eq!(reader_handle.env().config(), &config); +//! assert_eq!(reader_handle.env().config(), &config.db_config); //! # Ok(()) } //! 
``` @@ -42,6 +45,3 @@ pub use config::{Config, ConfigBuilder}; mod reader_threads; pub use reader_threads::ReaderThreads; - -mod sync_mode; -pub use sync_mode::SyncMode; diff --git a/storage/blockchain/src/constants.rs b/storage/blockchain/src/constants.rs index 25837c23..7f00d4cd 100644 --- a/storage/blockchain/src/constants.rs +++ b/storage/blockchain/src/constants.rs @@ -1,7 +1,6 @@ //! General constants used throughout `cuprate-blockchain`. //---------------------------------------------------------------------------------------------------- Import -use cfg_if::cfg_if; //---------------------------------------------------------------------------------------------------- Version /// Current major version of the database. @@ -30,57 +29,6 @@ TODO: instructions on: 3. General advice for preventing corruption 4. etc"; -//---------------------------------------------------------------------------------------------------- Misc -/// Static string of the `crate` being used as the database backend. -/// -/// | Backend | Value | -/// |---------|-------| -/// | `heed` | `"heed"` -/// | `redb` | `"redb"` -pub const DATABASE_BACKEND: &str = { - cfg_if! { - if #[cfg(all(feature = "redb", not(feature = "heed")))] { - "redb" - } else { - "heed" - } - } -}; - -/// Cuprate's database filename. -/// -/// Used in [`Config::db_file`](crate::config::Config::db_file). -/// -/// | Backend | Value | -/// |---------|-------| -/// | `heed` | `"data.mdb"` -/// | `redb` | `"data.redb"` -pub const DATABASE_DATA_FILENAME: &str = { - cfg_if! { - if #[cfg(all(feature = "redb", not(feature = "heed")))] { - "data.redb" - } else { - "data.mdb" - } - } -}; - -/// Cuprate's database lock filename. -/// -/// | Backend | Value | -/// |---------|-------| -/// | `heed` | `Some("lock.mdb")` -/// | `redb` | `None` (redb doesn't use a file lock) -pub const DATABASE_LOCK_FILENAME: Option<&str> = { - cfg_if! { - if #[cfg(all(feature = "redb", not(feature = "heed")))] { - None - } else { - Some("lock.mdb") - } - } -}; - //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test {} diff --git a/storage/blockchain/src/free.rs b/storage/blockchain/src/free.rs index 7e145a28..255860aa 100644 --- a/storage/blockchain/src/free.rs +++ b/storage/blockchain/src/free.rs @@ -1,8 +1,73 @@ //! General free functions (related to the database). //---------------------------------------------------------------------------------------------------- Import +use cuprate_database::{ConcreteEnv, Env, EnvInner, InitError, RuntimeError, TxRw}; + +use crate::{config::Config, open_tables::OpenTables}; //---------------------------------------------------------------------------------------------------- Free functions +/// Open the blockchain database, using the passed [`Config`]. +/// +/// This calls [`cuprate_database::Env::open`] and prepares the +/// database to be ready for blockchain-related usage, e.g. +/// table creation, table sort order, etc. +/// +/// All tables found in [`crate::tables`] will be +/// ready for usage in the returned [`ConcreteEnv`]. +/// +/// # Errors +/// This will error if: +/// - The database file could not be opened +/// - A write transaction could not be opened +/// - A table could not be created/opened +#[cold] +#[inline(never)] // only called once +pub fn open(config: Config) -> Result<ConcreteEnv, InitError> { + // Attempt to open the database environment. 
+ let env = <ConcreteEnv as Env>::open(config.db_config)?; + + /// Convert runtime errors to init errors. + /// + /// INVARIANT: + /// `cuprate_database`'s functions mostly return the former + /// so we must convert them. We have knowledge of which errors + /// makes sense in this functions context so we panic on + /// unexpected ones. + fn runtime_to_init_error(runtime: RuntimeError) -> InitError { + match runtime { + RuntimeError::Io(io_error) => io_error.into(), + + // These errors shouldn't be happening here. + RuntimeError::KeyExists + | RuntimeError::KeyNotFound + | RuntimeError::ResizeNeeded + | RuntimeError::TableNotFound => unreachable!(), + } + } + + // INVARIANT: We must ensure that all tables are created, + // `cuprate_database` has no way of knowing _which_ tables + // we want since it is agnostic, so we are responsible for this. + { + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw(); + let tx_rw = match tx_rw { + Ok(tx_rw) => tx_rw, + Err(e) => return Err(runtime_to_init_error(e)), + }; + + // Create all tables. + if let Err(e) = OpenTables::create_tables(&env_inner, &tx_rw) { + return Err(runtime_to_init_error(e)); + }; + + if let Err(e) = tx_rw.commit() { + return Err(runtime_to_init_error(e)); + } + } + + Ok(env) +} //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] diff --git a/storage/blockchain/src/lib.rs b/storage/blockchain/src/lib.rs index 7e7970fe..ad33e2af 100644 --- a/storage/blockchain/src/lib.rs +++ b/storage/blockchain/src/lib.rs @@ -1,145 +1,4 @@ -//! Cuprate's database abstraction. -//! -//! This documentation is mostly for practical usage of `cuprate_blockchain`. -//! -//! For a high-level overview, -//! see [`database/README.md`](https://github.com/Cuprate/cuprate/blob/main/database/README.md). -//! -//! # Purpose -//! This crate does 3 things: -//! 1. Abstracts various database backends with traits -//! 2. Implements various `Monero` related [operations](ops), [tables], and [types] -//! 3. Exposes a [`tower::Service`] backed by a thread-pool -//! -//! Each layer builds on-top of the previous. -//! -//! As a user of `cuprate_blockchain`, consider using the higher-level [`service`] module, -//! or at the very least the [`ops`] module instead of interacting with the database traits directly. -//! -//! With that said, many database traits and internals (like [`DatabaseRo::get`]) are exposed. -//! -//! # Terminology -//! To be more clear on some terms used in this crate: -//! -//! | Term | Meaning | -//! |------------------|--------------------------------------| -//! | `Env` | The 1 database environment, the "whole" thing -//! | `DatabaseR{o,w}` | A _actively open_ readable/writable `key/value` store -//! | `Table` | Solely the metadata of a `Database` (the `key` and `value` types, and the name) -//! | `TxR{o,w}` | A read/write transaction -//! | `Storable` | A data that type can be stored in the database -//! -//! The dataflow is `Env` -> `Tx` -> `Database` -//! -//! Which reads as: -//! 1. You have a database `Environment` -//! 1. You open up a `Transaction` -//! 1. You open a particular `Table` from that `Environment`, getting a `Database` -//! 1. You can now read/write data from/to that `Database` -//! -//! # `ConcreteEnv` -//! This crate exposes [`ConcreteEnv`], which is a non-generic/non-dynamic, -//! concrete object representing a database [`Env`]ironment. -//! -//! The actual backend for this type is determined via feature flags. -//! -//! 
This object existing means `E: Env` doesn't need to be spread all through the codebase, -//! however, it also means some small invariants should be kept in mind. -//! -//! As `ConcreteEnv` is just a re-exposed type which has varying inner types, -//! it means some properties will change depending on the backend used. -//! -//! For example: -//! - [`std::mem::size_of::<ConcreteEnv>`] -//! - [`std::mem::align_of::<ConcreteEnv>`] -//! -//! Things like these functions are affected by the backend and inner data, -//! and should not be relied upon. This extends to any `struct/enum` that contains `ConcreteEnv`. -//! -//! `ConcreteEnv` invariants you can rely on: -//! - It implements [`Env`] -//! - Upon [`Drop::drop`], all database data will sync to disk -//! -//! Note that `ConcreteEnv` itself is not a clonable type, -//! it should be wrapped in [`std::sync::Arc`]. -//! -//! <!-- SOMEDAY: replace `ConcreteEnv` with `fn Env::open() -> impl Env`/ -//! and use `<E: Env>` everywhere it is stored instead. This would allow -//! generic-backed dynamic runtime selection of the database backend, i.e. -//! the user can select which database backend they use. --> -//! -//! # Feature flags -//! The `service` module requires the `service` feature to be enabled. -//! See the module for more documentation. -//! -//! Different database backends are enabled by the feature flags: -//! - `heed` (LMDB) -//! - `redb` -//! -//! The default is `heed`. -//! -//! `tracing` is always enabled and cannot be disabled via feature-flag. -//! <!-- FIXME: tracing should be behind a feature flag --> -//! -//! # Invariants when not using `service` -//! `cuprate_blockchain` can be used without the `service` feature enabled but -//! there are some things that must be kept in mind when doing so. -//! -//! Failing to uphold these invariants may cause panics. -//! -//! 1. `LMDB` requires the user to resize the memory map resizing (see [`RuntimeError::ResizeNeeded`] -//! 1. `LMDB` has a maximum reader transaction count, currently it is set to `128` -//! 1. `LMDB` has [maximum key/value byte size](http://www.lmdb.tech/doc/group__internal.html#gac929399f5d93cef85f874b9e9b1d09e0) which must not be exceeded -//! -//! # Examples -//! The below is an example of using `cuprate_blockchain`'s -//! lowest API, i.e. using the database directly. -//! -//! For examples of the higher-level APIs, see: -//! - [`ops`] -//! - [`service`] -//! -//! ```rust -//! use cuprate_blockchain::{ -//! ConcreteEnv, -//! config::ConfigBuilder, -//! Env, EnvInner, -//! tables::{Tables, TablesMut}, -//! DatabaseRo, DatabaseRw, TxRo, TxRw, -//! }; -//! -//! # fn main() -> Result<(), Box<dyn std::error::Error>> { -//! // Create a configuration for the database environment. -//! let db_dir = tempfile::tempdir()?; -//! let config = ConfigBuilder::new() -//! .db_directory(db_dir.path().to_path_buf()) -//! .build(); -//! -//! // Initialize the database environment. -//! let env = ConcreteEnv::open(config)?; -//! -//! // Open up a transaction + tables for writing. -//! let env_inner = env.env_inner(); -//! let tx_rw = env_inner.tx_rw()?; -//! let mut tables = env_inner.open_tables_mut(&tx_rw)?; -//! -//! // ⚠️ Write data to the tables directly. -//! // (not recommended, use `ops` or `service`). -//! const KEY_IMAGE: [u8; 32] = [88; 32]; -//! tables.key_images_mut().put(&KEY_IMAGE, &())?; -//! -//! // Commit the data written. -//! drop(tables); -//! TxRw::commit(tx_rw)?; -//! -//! // Read the data, assert it is correct. -//! let tx_ro = env_inner.tx_ro()?; -//! 
let tables = env_inner.open_tables(&tx_ro)?; -//! let (key_image, _) = tables.key_images().first()?; -//! assert_eq!(key_image, KEY_IMAGE); -//! # Ok(()) } -//! ``` - +#![doc = include_str!("../README.md")] //---------------------------------------------------------------------------------------------------- Lints // Forbid lints. // Our code, and code generated (e.g macros) cannot overrule these. @@ -190,6 +49,7 @@ clippy::pedantic, clippy::nursery, clippy::cargo, + unused_crate_dependencies, unused_doc_comments, unused_mut, missing_docs, @@ -220,7 +80,14 @@ clippy::option_if_let_else, )] // Allow some lints when running in debug mode. -#![cfg_attr(debug_assertions, allow(clippy::todo, clippy::multiple_crate_versions))] +#![cfg_attr( + debug_assertions, + allow( + clippy::todo, + clippy::multiple_crate_versions, + // unused_crate_dependencies, + ) +)] // Allow some lints in tests. #![cfg_attr( test, @@ -247,47 +114,22 @@ compile_error!("Cuprate is only compatible with 64-bit CPUs"); // // Documentation for each module is located in the respective file. -mod backend; -pub use backend::ConcreteEnv; - pub mod config; mod constants; -pub use constants::{ - DATABASE_BACKEND, DATABASE_CORRUPT_MSG, DATABASE_DATA_FILENAME, DATABASE_LOCK_FILENAME, - DATABASE_VERSION, -}; +pub use constants::{DATABASE_CORRUPT_MSG, DATABASE_VERSION}; -mod database; -pub use database::{DatabaseIter, DatabaseRo, DatabaseRw}; +mod open_tables; +pub use open_tables::OpenTables; -mod env; -pub use env::{Env, EnvInner}; - -mod error; -pub use error::{InitError, RuntimeError}; - -pub(crate) mod free; - -pub mod resize; - -mod key; -pub use key::Key; - -mod storable; -pub use storable::{Storable, StorableBytes, StorableVec}; +mod free; +pub use free::open; pub mod ops; - -mod table; -pub use table::Table; - pub mod tables; - pub mod types; -mod transaction; -pub use transaction::{TxRo, TxRw}; +pub use cuprate_database; //---------------------------------------------------------------------------------------------------- Feature-gated #[cfg(feature = "service")] diff --git a/storage/blockchain/src/open_tables.rs b/storage/blockchain/src/open_tables.rs new file mode 100644 index 00000000..b98b86b1 --- /dev/null +++ b/storage/blockchain/src/open_tables.rs @@ -0,0 +1,188 @@ +//! TODO + +//---------------------------------------------------------------------------------------------------- Import +use cuprate_database::{EnvInner, RuntimeError, TxRo, TxRw}; + +use crate::tables::{TablesIter, TablesMut}; + +//---------------------------------------------------------------------------------------------------- Table function macro +/// `crate`-private macro for callings functions on all tables. +/// +/// This calls the function `$fn` with the optional +/// arguments `$args` on all tables - returning early +/// (within whatever scope this is called) if any +/// of the function calls error. +/// +/// Else, it evaluates to an `Ok((tuple, of, all, table, types, ...))`, +/// i.e., an `impl Table[Mut]` wrapped in `Ok`. +macro_rules! call_fn_on_all_tables_or_early_return { + ( + $($fn:ident $(::)?)* + ( + $($arg:ident),* $(,)? 
+ ) + ) => {{ + Ok(( + $($fn ::)*<$crate::tables::BlockInfos>($($arg),*)?, + $($fn ::)*<$crate::tables::BlockBlobs>($($arg),*)?, + $($fn ::)*<$crate::tables::BlockHeights>($($arg),*)?, + $($fn ::)*<$crate::tables::KeyImages>($($arg),*)?, + $($fn ::)*<$crate::tables::NumOutputs>($($arg),*)?, + $($fn ::)*<$crate::tables::PrunedTxBlobs>($($arg),*)?, + $($fn ::)*<$crate::tables::PrunableHashes>($($arg),*)?, + $($fn ::)*<$crate::tables::Outputs>($($arg),*)?, + $($fn ::)*<$crate::tables::PrunableTxBlobs>($($arg),*)?, + $($fn ::)*<$crate::tables::RctOutputs>($($arg),*)?, + $($fn ::)*<$crate::tables::TxBlobs>($($arg),*)?, + $($fn ::)*<$crate::tables::TxIds>($($arg),*)?, + $($fn ::)*<$crate::tables::TxHeights>($($arg),*)?, + $($fn ::)*<$crate::tables::TxOutputs>($($arg),*)?, + $($fn ::)*<$crate::tables::TxUnlockTime>($($arg),*)?, + )) + }}; +} +pub(crate) use call_fn_on_all_tables_or_early_return; + +//---------------------------------------------------------------------------------------------------- OpenTables +/// Open all tables at once. +/// +/// This trait encapsulates the functionality of opening all tables at once. +/// It can be seen as the "constructor" for the [`Tables`](crate::tables::Tables) object. +/// +/// Note that this is already implemented on [`cuprate_database::EnvInner`], thus: +/// - You don't need to implement this +/// - It can be called using `env_inner.open_tables()` notation +/// +/// # Example +/// ```rust +/// use cuprate_blockchain::{ +/// cuprate_database::{Env, EnvInner}, +/// config::ConfigBuilder, +/// tables::{Tables, TablesMut}, +/// OpenTables, +/// }; +/// +/// # fn main() -> Result<(), Box<dyn std::error::Error>> { +/// // Create a configuration for the database environment. +/// let tmp_dir = tempfile::tempdir()?; +/// let db_dir = tmp_dir.path().to_owned(); +/// let config = ConfigBuilder::new() +/// .db_directory(db_dir.into()) +/// .build(); +/// +/// // Initialize the database environment. +/// let env = cuprate_blockchain::open(config)?; +/// +/// // Open up a transaction. +/// let env_inner = env.env_inner(); +/// let tx_rw = env_inner.tx_rw()?; +/// +/// // Open _all_ tables in write mode using [`OpenTables::open_tables_mut`]. +/// // Note how this is being called on `env_inner`. +/// // | +/// // v +/// let mut tables = env_inner.open_tables_mut(&tx_rw)?; +/// # Ok(()) } +/// ``` +pub trait OpenTables<'env, Ro, Rw> +where + Self: 'env, + Ro: TxRo<'env>, + Rw: TxRw<'env>, +{ + /// Open all tables in read/iter mode. + /// + /// This calls [`EnvInner::open_db_ro`] on all database tables + /// and returns a structure that allows access to all tables. + /// + /// # Errors + /// This will only return [`RuntimeError::Io`] if it errors. + /// + /// As all tables are created upon [`crate::open`], + /// this function will never error because a table doesn't exist. + fn open_tables(&'env self, tx_ro: &Ro) -> Result<impl TablesIter, RuntimeError>; + + /// Open all tables in read-write mode. + /// + /// This calls [`EnvInner::open_db_rw`] on all database tables + /// and returns a structure that allows access to all tables. + /// + /// # Errors + /// This will only return [`RuntimeError::Io`] on errors. + fn open_tables_mut(&'env self, tx_rw: &Rw) -> Result<impl TablesMut, RuntimeError>; + + /// Create all database tables. + /// + /// This will create all the [`Table`](cuprate_database::Table)s + /// found in [`tables`](crate::tables). + /// + /// # Errors + /// This will only return [`RuntimeError::Io`] on errors. 
+ fn create_tables(&'env self, tx_rw: &Rw) -> Result<(), RuntimeError>; +} + +impl<'env, Ei, Ro, Rw> OpenTables<'env, Ro, Rw> for Ei +where + Ei: EnvInner<'env, Ro, Rw>, + Ro: TxRo<'env>, + Rw: TxRw<'env>, +{ + fn open_tables(&'env self, tx_ro: &Ro) -> Result<impl TablesIter, RuntimeError> { + call_fn_on_all_tables_or_early_return! { + Self::open_db_ro(self, tx_ro) + } + } + + fn open_tables_mut(&'env self, tx_rw: &Rw) -> Result<impl TablesMut, RuntimeError> { + call_fn_on_all_tables_or_early_return! { + Self::open_db_rw(self, tx_rw) + } + } + + fn create_tables(&'env self, tx_rw: &Rw) -> Result<(), RuntimeError> { + match call_fn_on_all_tables_or_early_return! { + Self::create_db(self, tx_rw) + } { + Ok(_) => Ok(()), + Err(e) => Err(e), + } + } +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + use std::borrow::Cow; + + use cuprate_database::{Env, EnvInner}; + + use crate::{config::ConfigBuilder, tests::tmp_concrete_env}; + + use super::*; + + /// Tests that [`crate::open`] creates all tables. + #[test] + fn test_all_tables_are_created() { + let (env, _tmp) = tmp_concrete_env(); + let env_inner = env.env_inner(); + let tx_ro = env_inner.tx_ro().unwrap(); + env_inner.open_tables(&tx_ro).unwrap(); + } + + /// Tests that directory [`cuprate_database::ConcreteEnv`] + /// usage does NOT create all tables. + #[test] + #[should_panic(expected = "`Result::unwrap()` on an `Err` value: TableNotFound")] + fn test_no_tables_are_created() { + let tempdir = tempfile::tempdir().unwrap(); + let config = ConfigBuilder::new() + .db_directory(Cow::Owned(tempdir.path().into())) + .low_power() + .build(); + let env = cuprate_database::ConcreteEnv::open(config.db_config).unwrap(); + + let env_inner = env.env_inner(); + let tx_ro = env_inner.tx_ro().unwrap(); + env_inner.open_tables(&tx_ro).unwrap(); + } +} diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index 4f16cfde..9097f0ee 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -4,12 +4,13 @@ use bytemuck::TransparentWrapper; use monero_serai::block::Block; +use cuprate_database::{ + RuntimeError, StorableVec, {DatabaseRo, DatabaseRw}, +}; use cuprate_helper::map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits}; use cuprate_types::{ExtendedBlockHeader, VerifiedBlockInformation}; use crate::{ - database::{DatabaseRo, DatabaseRw}, - error::RuntimeError, ops::{ blockchain::{chain_height, cumulative_generated_coins}, macros::doc_error, @@ -18,7 +19,6 @@ use crate::{ }, tables::{BlockHeights, BlockInfos, Tables, TablesMut}, types::{BlockHash, BlockHeight, BlockInfo}, - StorableVec, }; //---------------------------------------------------------------------------------------------------- `add_block_*` @@ -265,14 +265,15 @@ pub fn block_exists( mod test { use pretty_assertions::assert_eq; + use cuprate_database::{Env, EnvInner, TxRw}; use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3}; use super::*; + use crate::{ + open_tables::OpenTables, ops::tx::{get_tx, tx_exists}, tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen}, - transaction::TxRw, - Env, EnvInner, }; /// Tests all above block functions. diff --git a/storage/blockchain/src/ops/blockchain.rs b/storage/blockchain/src/ops/blockchain.rs index ce9cd69d..16e0a3c1 100644 --- a/storage/blockchain/src/ops/blockchain.rs +++ b/storage/blockchain/src/ops/blockchain.rs @@ -1,9 +1,9 @@ //! 
Blockchain functions - chain height, generated coins, etc. //---------------------------------------------------------------------------------------------------- Import +use cuprate_database::{DatabaseRo, RuntimeError}; + use crate::{ - database::DatabaseRo, - error::RuntimeError, ops::macros::doc_error, tables::{BlockHeights, BlockInfos}, types::BlockHeight, @@ -81,15 +81,16 @@ pub fn cumulative_generated_coins( mod test { use pretty_assertions::assert_eq; + use cuprate_database::{Env, EnvInner, TxRw}; use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3}; use super::*; + use crate::{ + open_tables::OpenTables, ops::block::add_block, tables::Tables, tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen}, - transaction::TxRw, - Env, EnvInner, }; /// Tests all above functions. diff --git a/storage/blockchain/src/ops/key_image.rs b/storage/blockchain/src/ops/key_image.rs index 5d0786c3..a518490e 100644 --- a/storage/blockchain/src/ops/key_image.rs +++ b/storage/blockchain/src/ops/key_image.rs @@ -1,9 +1,9 @@ //! Key image functions. //---------------------------------------------------------------------------------------------------- Import +use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError}; + use crate::{ - database::{DatabaseRo, DatabaseRw}, - error::RuntimeError, ops::macros::{doc_add_block_inner_invariant, doc_error}, tables::KeyImages, types::KeyImage, @@ -47,12 +47,14 @@ pub fn key_image_exists( mod test { use hex_literal::hex; + use cuprate_database::{Env, EnvInner, TxRw}; + use super::*; + use crate::{ + open_tables::OpenTables, tables::{Tables, TablesMut}, tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen}, - transaction::TxRw, - Env, EnvInner, }; /// Tests all above key-image functions. diff --git a/storage/blockchain/src/ops/mod.rs b/storage/blockchain/src/ops/mod.rs index a4f50dd0..58211202 100644 --- a/storage/blockchain/src/ops/mod.rs +++ b/storage/blockchain/src/ops/mod.rs @@ -20,7 +20,7 @@ //! it is up to the caller to decide what happens if one them return //! an error. //! -//! To maintain atomicity, transactions should be [`abort`](crate::transaction::TxRw::abort)ed +//! To maintain atomicity, transactions should be [`abort`](cuprate_database::TxRw::abort)ed //! if one of the functions failed. //! //! For example, if [`add_block()`](block::add_block) is called and returns an [`Err`], @@ -55,25 +55,28 @@ //! use hex_literal::hex; //! //! use cuprate_test_utils::data::block_v16_tx0; -//! //! use cuprate_blockchain::{ -//! ConcreteEnv, +//! cuprate_database::{ +//! ConcreteEnv, +//! Env, EnvInner, +//! DatabaseRo, DatabaseRw, TxRo, TxRw, +//! }, +//! OpenTables, //! config::ConfigBuilder, -//! Env, EnvInner, //! tables::{Tables, TablesMut}, -//! DatabaseRo, DatabaseRw, TxRo, TxRw, //! ops::block::{add_block, pop_block}, //! }; //! //! # fn main() -> Result<(), Box<dyn std::error::Error>> { //! // Create a configuration for the database environment. -//! let db_dir = tempfile::tempdir()?; +//! let tmp_dir = tempfile::tempdir()?; +//! let db_dir = tmp_dir.path().to_owned(); //! let config = ConfigBuilder::new() -//! .db_directory(db_dir.path().to_path_buf()) +//! .db_directory(db_dir.into()) //! .build(); //! //! // Initialize the database environment. -//! let env = ConcreteEnv::open(config)?; +//! let env = cuprate_blockchain::open(config)?; //! //! // Open up a transaction + tables for writing. //! 
let env_inner = env.env_inner(); diff --git a/storage/blockchain/src/ops/output.rs b/storage/blockchain/src/ops/output.rs index 5b7620e4..f08f7b30 100644 --- a/storage/blockchain/src/ops/output.rs +++ b/storage/blockchain/src/ops/output.rs @@ -4,12 +4,13 @@ use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, Scalar}; use monero_serai::{transaction::Timelock, H}; +use cuprate_database::{ + RuntimeError, {DatabaseRo, DatabaseRw}, +}; use cuprate_helper::map::u64_to_timelock; use cuprate_types::OutputOnChain; use crate::{ - database::{DatabaseRo, DatabaseRw}, - error::RuntimeError, ops::macros::{doc_add_block_inner_invariant, doc_error}, tables::{Outputs, RctOutputs, Tables, TablesMut, TxUnlockTime}, types::{Amount, AmountIndex, Output, OutputFlags, PreRctOutputId, RctOutput}, @@ -247,15 +248,18 @@ pub fn id_to_output_on_chain( #[cfg(test)] mod test { use super::*; + + use pretty_assertions::assert_eq; + + use cuprate_database::{Env, EnvInner}; + use crate::{ + open_tables::OpenTables, tables::{Tables, TablesMut}, tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen}, types::OutputFlags, - Env, EnvInner, }; - use pretty_assertions::assert_eq; - /// Dummy `Output`. const OUTPUT: Output = Output { key: [44; 32], diff --git a/storage/blockchain/src/ops/property.rs b/storage/blockchain/src/ops/property.rs index 15b5f878..7810000a 100644 --- a/storage/blockchain/src/ops/property.rs +++ b/storage/blockchain/src/ops/property.rs @@ -5,7 +5,10 @@ //---------------------------------------------------------------------------------------------------- Import use cuprate_pruning::PruningSeed; -use crate::{error::RuntimeError, ops::macros::doc_error}; +use cuprate_database::RuntimeError; + +use crate::ops::macros::doc_error; + //---------------------------------------------------------------------------------------------------- Free Functions /// SOMEDAY /// diff --git a/storage/blockchain/src/ops/tx.rs b/storage/blockchain/src/ops/tx.rs index b4f2984b..6edfbb21 100644 --- a/storage/blockchain/src/ops/tx.rs +++ b/storage/blockchain/src/ops/tx.rs @@ -5,9 +5,9 @@ use bytemuck::TransparentWrapper; use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, Scalar}; use monero_serai::transaction::{Input, Timelock, Transaction}; +use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec}; + use crate::{ - database::{DatabaseRo, DatabaseRw}, - error::RuntimeError, ops::{ key_image::{add_key_image, remove_key_image}, macros::{doc_add_block_inner_invariant, doc_error}, @@ -17,7 +17,6 @@ use crate::{ }, tables::{TablesMut, TxBlobs, TxIds}, types::{BlockHeight, Output, OutputFlags, PreRctOutputId, RctOutput, TxHash, TxId}, - StorableVec, }; //---------------------------------------------------------------------------------------------------- Private @@ -325,14 +324,17 @@ pub fn tx_exists( #[cfg(test)] mod test { use super::*; + + use pretty_assertions::assert_eq; + + use cuprate_database::{Env, EnvInner, TxRw}; + use cuprate_test_utils::data::{tx_v1_sig0, tx_v1_sig2, tx_v2_rct3}; + use crate::{ + open_tables::OpenTables, tables::Tables, tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen}, - transaction::TxRw, - Env, EnvInner, }; - use cuprate_test_utils::data::{tx_v1_sig0, tx_v1_sig2, tx_v2_rct3}; - use pretty_assertions::assert_eq; /// Tests all above tx functions when only inputting `Transaction` data (no Block). 
#[test] diff --git a/storage/blockchain/src/service/free.rs b/storage/blockchain/src/service/free.rs index 276ce6a8..3ff8d6eb 100644 --- a/storage/blockchain/src/service/free.rs +++ b/storage/blockchain/src/service/free.rs @@ -3,11 +3,11 @@ //---------------------------------------------------------------------------------------------------- Import use std::sync::Arc; +use cuprate_database::InitError; + use crate::{ config::Config, - error::InitError, service::{DatabaseReadHandle, DatabaseWriteHandle}, - ConcreteEnv, Env, }; //---------------------------------------------------------------------------------------------------- Init @@ -19,12 +19,12 @@ use crate::{ /// thread-pool and writer thread will exit automatically. /// /// # Errors -/// This will forward the error if [`Env::open`] failed. +/// This will forward the error if [`crate::open`] failed. pub fn init(config: Config) -> Result<(DatabaseReadHandle, DatabaseWriteHandle), InitError> { let reader_threads = config.reader_threads; // Initialize the database itself. - let db = Arc::new(ConcreteEnv::open(config)?); + let db = Arc::new(crate::open(config)?); // Spawn the Reader thread pool and Writer. let readers = DatabaseReadHandle::init(&db, reader_threads); diff --git a/storage/blockchain/src/service/mod.rs b/storage/blockchain/src/service/mod.rs index 507f7fc3..1d9d10b4 100644 --- a/storage/blockchain/src/service/mod.rs +++ b/storage/blockchain/src/service/mod.rs @@ -36,9 +36,9 @@ //! - The last [`DatabaseReadHandle`] is dropped => reader thread-pool exits //! - The last [`DatabaseWriteHandle`] is dropped => writer thread exits //! -//! Upon dropping the [`crate::ConcreteEnv`]: +//! Upon dropping the [`cuprate_database::ConcreteEnv`]: //! - All un-processed database transactions are completed -//! - All data gets flushed to disk (caused by [`Drop::drop`] impl on [`crate::ConcreteEnv`]) +//! - All data gets flushed to disk (caused by [`Drop::drop`] impl on `ConcreteEnv`) //! //! ## Request and Response //! To interact with the database (whether reading or writing data), @@ -66,14 +66,18 @@ //! use cuprate_types::blockchain::{BCReadRequest, BCWriteRequest, BCResponse}; //! use cuprate_test_utils::data::block_v16_tx0; //! -//! use cuprate_blockchain::{ConcreteEnv, config::ConfigBuilder, Env}; +//! use cuprate_blockchain::{ +//! cuprate_database::Env, +//! config::ConfigBuilder, +//! }; //! //! # #[tokio::main] //! # async fn main() -> Result<(), Box<dyn std::error::Error>> { //! // Create a configuration for the database environment. -//! let db_dir = tempfile::tempdir()?; +//! let tmp_dir = tempfile::tempdir()?; +//! let db_dir = tmp_dir.path().to_owned(); //! let config = ConfigBuilder::new() -//! .db_directory(db_dir.path().to_path_buf()) +//! .db_directory(db_dir.into()) //! .build(); //! //! // Initialize the database thread-pool. 
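The doc example in the `service/mod.rs` hunk above stops right after building the config. A minimal sketch of how usage typically continues, assuming the `cuprate_blockchain::service::init` re-export and a `BCReadRequest::ChainHeight` variant (neither is shown in this hunk, so treat both names as assumptions), relying on the `tower` imports from the example's opening lines:

```rust,ignore
// Initialize the database thread-pool + writer thread, getting back
// the two service handles (`init` is defined in `service/free.rs` above).
let (mut read_handle, _write_handle) = cuprate_blockchain::service::init(config)?;

// Send a read request through the `tower::Service` interface.
// `BCReadRequest::ChainHeight` is an assumed variant, used for illustration only.
use tower::{Service, ServiceExt};
let response = read_handle
    .ready()
    .await?
    .call(BCReadRequest::ChainHeight)
    .await?;
```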
diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index f8aafe8d..20aebf9c 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -13,6 +13,7 @@ use thread_local::ThreadLocal; use tokio::sync::{OwnedSemaphorePermit, Semaphore}; use tokio_util::sync::PollSemaphore; +use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError}; use cuprate_helper::asynch::InfallibleOneshotReceiver; use cuprate_types::{ blockchain::{BCReadRequest, BCResponse}, @@ -21,7 +22,7 @@ use cuprate_types::{ use crate::{ config::ReaderThreads, - error::RuntimeError, + open_tables::OpenTables, ops::block::block_exists, ops::{ block::{get_block_extended_header_from_height, get_block_info}, @@ -33,7 +34,6 @@ use crate::{ tables::{BlockHeights, BlockInfos, Tables}, types::BlockHash, types::{Amount, AmountIndex, BlockHeight, KeyImage, PreRctOutputId}, - ConcreteEnv, DatabaseRo, Env, EnvInner, }; //---------------------------------------------------------------------------------------------------- DatabaseReadHandle @@ -233,7 +233,7 @@ fn map_request( /// <https://github.com/Cuprate/cuprate/pull/113#discussion_r1576762346> #[inline] fn thread_local<T: Send>(env: &impl Env) -> ThreadLocal<T> { - ThreadLocal::with_capacity(env.config().reader_threads.as_threads().get()) + ThreadLocal::with_capacity(env.config().reader_threads.get()) } /// Take in a `ThreadLocal<impl Tables>` and return an `&impl Tables + Send`. diff --git a/storage/blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs index 1560deca..d1634749 100644 --- a/storage/blockchain/src/service/tests.rs +++ b/storage/blockchain/src/service/tests.rs @@ -7,6 +7,7 @@ //---------------------------------------------------------------------------------------------------- Use use std::{ + borrow::Cow, collections::{HashMap, HashSet}, sync::Arc, }; @@ -14,6 +15,7 @@ use std::{ use pretty_assertions::assert_eq; use tower::{Service, ServiceExt}; +use cuprate_database::{ConcreteEnv, DatabaseIter, DatabaseRo, Env, EnvInner, RuntimeError}; use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3}; use cuprate_types::{ blockchain::{BCReadRequest, BCResponse, BCWriteRequest}, @@ -22,6 +24,7 @@ use cuprate_types::{ use crate::{ config::ConfigBuilder, + open_tables::OpenTables, ops::{ block::{get_block_extended_header_from_height, get_block_info}, blockchain::chain_height, @@ -31,7 +34,6 @@ use crate::{ tables::{Tables, TablesIter}, tests::AssertTableLen, types::{Amount, AmountIndex, PreRctOutputId}, - ConcreteEnv, DatabaseIter, DatabaseRo, Env, EnvInner, RuntimeError, }; //---------------------------------------------------------------------------------------------------- Helper functions @@ -44,7 +46,7 @@ fn init_service() -> ( ) { let tempdir = tempfile::tempdir().unwrap(); let config = ConfigBuilder::new() - .db_directory(tempdir.path().into()) + .db_directory(Cow::Owned(tempdir.path().into())) .low_power() .build(); let (reader, writer) = init(config).unwrap(); diff --git a/storage/blockchain/src/service/types.rs b/storage/blockchain/src/service/types.rs index 08bc6acc..c6ee67e7 100644 --- a/storage/blockchain/src/service/types.rs +++ b/storage/blockchain/src/service/types.rs @@ -5,11 +5,10 @@ //---------------------------------------------------------------------------------------------------- Use use futures::channel::oneshot::Sender; +use cuprate_database::RuntimeError; use cuprate_helper::asynch::InfallibleOneshotReceiver; use 
cuprate_types::blockchain::BCResponse; -use crate::error::RuntimeError; - //---------------------------------------------------------------------------------------------------- Types /// The actual type of the response. /// diff --git a/storage/blockchain/src/service/write.rs b/storage/blockchain/src/service/write.rs index 8c2cc91e..42d96941 100644 --- a/storage/blockchain/src/service/write.rs +++ b/storage/blockchain/src/service/write.rs @@ -8,6 +8,7 @@ use std::{ use futures::channel::oneshot; +use cuprate_database::{ConcreteEnv, Env, EnvInner, RuntimeError, TxRw}; use cuprate_helper::asynch::InfallibleOneshotReceiver; use cuprate_types::{ blockchain::{BCResponse, BCWriteRequest}, @@ -15,11 +16,8 @@ use cuprate_types::{ }; use crate::{ - env::{Env, EnvInner}, - error::RuntimeError, + open_tables::OpenTables, service::types::{ResponseReceiver, ResponseResult, ResponseSender}, - transaction::TxRw, - ConcreteEnv, }; //---------------------------------------------------------------------------------------------------- Constants diff --git a/storage/blockchain/src/tables.rs b/storage/blockchain/src/tables.rs index 3bdad943..447faa6a 100644 --- a/storage/blockchain/src/tables.rs +++ b/storage/blockchain/src/tables.rs @@ -15,17 +15,15 @@ //! This module also contains a set of traits for //! accessing _all_ tables defined here at once. //! -//! For example, this is the object returned by [`EnvInner::open_tables`](crate::EnvInner::open_tables). +//! For example, this is the object returned by [`OpenTables::open_tables`](crate::OpenTables::open_tables). //---------------------------------------------------------------------------------------------------- Import -use crate::{ - database::{DatabaseIter, DatabaseRo, DatabaseRw}, - table::Table, - types::{ - Amount, AmountIndex, AmountIndices, BlockBlob, BlockHash, BlockHeight, BlockInfo, KeyImage, - Output, PreRctOutputId, PrunableBlob, PrunableHash, PrunedBlob, RctOutput, TxBlob, TxHash, - TxId, UnlockTime, - }, +use cuprate_database::{DatabaseIter, DatabaseRo, DatabaseRw, Table}; + +use crate::types::{ + Amount, AmountIndex, AmountIndices, BlockBlob, BlockHash, BlockHeight, BlockInfo, KeyImage, + Output, PreRctOutputId, PrunableBlob, PrunableHash, PrunedBlob, RctOutput, TxBlob, TxHash, + TxId, UnlockTime, }; //---------------------------------------------------------------------------------------------------- Sealed @@ -61,7 +59,7 @@ macro_rules! define_trait_tables { /// `(tuple, containing, all, table, types, ...)`. /// /// This is used to return a _single_ object from functions like - /// [`EnvInner::open_tables`](crate::EnvInner::open_tables) rather + /// [`OpenTables::open_tables`](crate::OpenTables::open_tables) rather /// than the tuple containing the tables itself. /// /// To replace `tuple.0` style indexing, `field_accessor_functions()` @@ -98,7 +96,7 @@ macro_rules! define_trait_tables { /// /// # Errors /// This returns errors on regular database errors. - fn all_tables_empty(&self) -> Result<bool, $crate::error::RuntimeError>; + fn all_tables_empty(&self) -> Result<bool, cuprate_database::RuntimeError>; } /// Object containing all opened [`Table`]s in read + iter mode. @@ -183,7 +181,7 @@ macro_rules! define_trait_tables { } )* - fn all_tables_empty(&self) -> Result<bool, $crate::error::RuntimeError> { + fn all_tables_empty(&self) -> Result<bool, cuprate_database::RuntimeError> { $( if !DatabaseRo::is_empty(&self.$index)? { return Ok(false); @@ -265,44 +263,6 @@ define_trait_tables! 
{ TxUnlockTime => 14, } -//---------------------------------------------------------------------------------------------------- Table function macro -/// `crate`-private macro for callings functions on all tables. -/// -/// This calls the function `$fn` with the optional -/// arguments `$args` on all tables - returning early -/// (within whatever scope this is called) if any -/// of the function calls error. -/// -/// Else, it evaluates to an `Ok((tuple, of, all, table, types, ...))`, -/// i.e., an `impl Table[Mut]` wrapped in `Ok`. -macro_rules! call_fn_on_all_tables_or_early_return { - ( - $($fn:ident $(::)?)* - ( - $($arg:ident),* $(,)? - ) - ) => {{ - Ok(( - $($fn ::)*<$crate::tables::BlockInfos>($($arg),*)?, - $($fn ::)*<$crate::tables::BlockBlobs>($($arg),*)?, - $($fn ::)*<$crate::tables::BlockHeights>($($arg),*)?, - $($fn ::)*<$crate::tables::KeyImages>($($arg),*)?, - $($fn ::)*<$crate::tables::NumOutputs>($($arg),*)?, - $($fn ::)*<$crate::tables::PrunedTxBlobs>($($arg),*)?, - $($fn ::)*<$crate::tables::PrunableHashes>($($arg),*)?, - $($fn ::)*<$crate::tables::Outputs>($($arg),*)?, - $($fn ::)*<$crate::tables::PrunableTxBlobs>($($arg),*)?, - $($fn ::)*<$crate::tables::RctOutputs>($($arg),*)?, - $($fn ::)*<$crate::tables::TxBlobs>($($arg),*)?, - $($fn ::)*<$crate::tables::TxIds>($($arg),*)?, - $($fn ::)*<$crate::tables::TxHeights>($($arg),*)?, - $($fn ::)*<$crate::tables::TxOutputs>($($arg),*)?, - $($fn ::)*<$crate::tables::TxUnlockTime>($($arg),*)?, - )) - }}; -} -pub(crate) use call_fn_on_all_tables_or_early_return; - //---------------------------------------------------------------------------------------------------- Table macro /// Create all tables, should be used _once_. /// @@ -332,6 +292,7 @@ macro_rules! tables { /// ## Table Name /// ```rust /// # use cuprate_blockchain::{*,tables::*}; + /// use cuprate_database::Table; #[doc = concat!( "assert_eq!(", stringify!([<$table:camel>]), @@ -363,9 +324,8 @@ macro_rules! tables { // - Keep this sorted A-Z (by table name) // - Tables are defined in plural to avoid name conflicts with types // - If adding/changing a table also edit: -// a) the tests in `src/backend/tests.rs` -// b) `Env::open` to make sure it creates the table (for all backends) -// c) `call_fn_on_all_tables_or_early_return!()` macro defined in this file +// - the tests in `src/backend/tests.rs` +// - `call_fn_on_all_tables_or_early_return!()` macro in `src/open_tables.rs` tables! { /// Serialized block blobs (bytes). /// diff --git a/storage/blockchain/src/tests.rs b/storage/blockchain/src/tests.rs index 90a74137..ec2f18eb 100644 --- a/storage/blockchain/src/tests.rs +++ b/storage/blockchain/src/tests.rs @@ -5,11 +5,13 @@ //! - only used internally //---------------------------------------------------------------------------------------------------- Import -use std::fmt::Debug; +use std::{borrow::Cow, fmt::Debug}; use pretty_assertions::assert_eq; -use crate::{config::ConfigBuilder, tables::Tables, ConcreteEnv, DatabaseRo, Env, EnvInner}; +use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner}; + +use crate::{config::ConfigBuilder, open_tables::OpenTables, tables::Tables}; //---------------------------------------------------------------------------------------------------- Struct /// Named struct to assert the length of all tables. 
@@ -67,10 +69,10 @@ impl AssertTableLen { pub(crate) fn tmp_concrete_env() -> (ConcreteEnv, tempfile::TempDir) { let tempdir = tempfile::tempdir().unwrap(); let config = ConfigBuilder::new() - .db_directory(tempdir.path().into()) + .db_directory(Cow::Owned(tempdir.path().into())) .low_power() .build(); - let env = ConcreteEnv::open(config).unwrap(); + let env = crate::open(config).unwrap(); (env, tempdir) } diff --git a/storage/blockchain/src/types.rs b/storage/blockchain/src/types.rs index 2bb9aa0e..f9319442 100644 --- a/storage/blockchain/src/types.rs +++ b/storage/blockchain/src/types.rs @@ -46,7 +46,7 @@ use bytemuck::{Pod, Zeroable}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use crate::storable::StorableVec; +use cuprate_database::StorableVec; //---------------------------------------------------------------------------------------------------- Aliases // These type aliases exist as many Monero-related types are the exact same. @@ -106,6 +106,8 @@ pub type UnlockTime = u64; /// ```rust /// # use std::borrow::*; /// # use cuprate_blockchain::{*, types::*}; +/// use cuprate_database::Storable; +/// /// // Assert Storable is correct. /// let a = PreRctOutputId { /// amount: 1, @@ -149,6 +151,8 @@ pub struct PreRctOutputId { /// ```rust /// # use std::borrow::*; /// # use cuprate_blockchain::{*, types::*}; +/// use cuprate_database::Storable; +/// /// // Assert Storable is correct. /// let a = BlockInfo { /// timestamp: 1, @@ -208,6 +212,8 @@ bitflags::bitflags! { /// ```rust /// # use std::borrow::*; /// # use cuprate_blockchain::{*, types::*}; + /// use cuprate_database::Storable; + /// /// // Assert Storable is correct. /// let a = OutputFlags::NON_ZERO_UNLOCK_TIME; /// let b = Storable::as_bytes(&a); @@ -237,6 +243,8 @@ bitflags::bitflags! { /// ```rust /// # use std::borrow::*; /// # use cuprate_blockchain::{*, types::*}; +/// use cuprate_database::Storable; +/// /// // Assert Storable is correct. /// let a = Output { /// key: [1; 32], @@ -278,6 +286,8 @@ pub struct Output { /// ```rust /// # use std::borrow::*; /// # use cuprate_blockchain::{*, types::*}; +/// use cuprate_database::Storable; +/// /// // Assert Storable is correct. /// let a = RctOutput { /// key: [1; 32], diff --git a/storage/database/Cargo.toml b/storage/database/Cargo.toml index a0a46384..887f1b60 100644 --- a/storage/database/Cargo.toml +++ b/storage/database/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "cuprate-database" -version = "0.0.0" +version = "0.0.1" edition = "2021" description = "Cuprate's database abstraction" license = "MIT" @@ -9,7 +9,26 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/storage/database" keywords = ["cuprate", "database"] [features] +default = ["heed"] +# default = ["redb"] +# default = ["redb-memory"] +heed = ["dep:heed"] +redb = ["dep:redb"] +redb-memory = ["redb"] [dependencies] +bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } +bytes = { workspace = true } +cfg-if = { workspace = true } +page_size = { version = "0.6.0" } # Needed for database resizes, they must be a multiple of the OS page size. +thiserror = { workspace = true } + +# Optional features. 
+heed = { version = "0.20.0", features = ["read-txn-no-tls"], optional = true }
+redb = { version = "2.1.0", optional = true }
+serde = { workspace = true, optional = true }
 
 [dev-dependencies]
+bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] }
+page_size = { version = "0.6.0" }
+tempfile = { version = "3.10.0" }
\ No newline at end of file
diff --git a/storage/database/README.md b/storage/database/README.md
new file mode 100644
index 00000000..d7a9b92f
--- /dev/null
+++ b/storage/database/README.md
@@ -0,0 +1,143 @@
+Cuprate's database abstraction.
+
+This documentation is mostly for practical usage of `cuprate-database`.
+
+For a high-level overview, see the database section in
+[Cuprate's architecture book](https://architecture.cuprate.org).
+
+If you need blockchain-specific capabilities, consider using the higher-level
+`cuprate-blockchain` crate which builds upon this one.
+
+# Purpose
+This crate abstracts various database backends with traits. The databases are:
+- `heed` (LMDB)
+- `redb`
+
+All backends have the following attributes:
+- [Embedded](https://en.wikipedia.org/wiki/Embedded_database)
+- [Multiversion concurrency control](https://en.wikipedia.org/wiki/Multiversion_concurrency_control)
+- [ACID](https://en.wikipedia.org/wiki/ACID)
+- Are `(key, value)` oriented and have the expected API (`get()`, `insert()`, `delete()`)
+- Are table oriented (`"table_name" -> (key, value)`)
+- Allow concurrent readers
+
+# Terminology
+To be more clear on some terms used in this crate:
+
+| Term | Meaning |
+|------------------|--------------------------------------|
+| `Env` | The 1 database environment, the "whole" thing
+| `DatabaseR{o,w}` | An _actively open_ readable/writable `key/value` store
+| `Table` | Solely the metadata of a `cuprate_database` (the `key` and `value` types, and the name)
+| `TxR{o,w}` | A read/write transaction
+| `Storable` | A data type that can be stored in the database
+
+The dataflow is `Env` -> `Tx` -> `cuprate_database`
+
+Which reads as:
+1. You have a database `Environment`
+1. You open up a `Transaction`
+1. You open a particular `Table` from that `Environment`, getting a `cuprate_database`
+1. You can now read/write data from/to that `cuprate_database`
+
+# Concrete types
+You should _not_ rely on the concrete type of any abstracted backend.
+
+For example, when using the `heed` backend, [`Env`]'s associated [`TxRw`] type
+is `RefCell<heed::RwTxn<'_>>`. In order to ensure compatibility with other backends
+and to not create backend-specific code, you should _not_ refer to that concrete type.
+
+Use generics and trait notation in these situations:
+- `impl<T: TxRw> Trait for Object`
+- `fn() -> impl TxRw`
+
+# `ConcreteEnv`
+This crate exposes [`ConcreteEnv`], which is a non-generic/non-dynamic,
+concrete object representing a database [`Env`]ironment.
+
+The actual backend for this type is determined via feature flags.
+
+This object existing means `E: Env` doesn't need to be spread all through the codebase;
+however, it also means some small invariants should be kept in mind.
+
+As `ConcreteEnv` is just a re-exposed type which has varying inner types,
+it means some properties will change depending on the backend used.
+
+For example:
+- [`std::mem::size_of::<ConcreteEnv>`]
+- [`std::mem::align_of::<ConcreteEnv>`]
+
+Things like these functions are affected by the backend and inner data,
+and should not be relied upon. This extends to any `struct/enum` that contains `ConcreteEnv`.
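A minimal sketch of this generic style, assuming only the `Env::config()` and `Config::reader_threads` API that appears elsewhere in this patch (the helper function itself is hypothetical):

```rust
use cuprate_database::Env;

/// Hypothetical helper: generic over any `Env` implementation,
/// so it compiles against whichever backend feature is enabled.
fn reader_thread_count(env: &impl Env) -> usize {
    // Only trait methods are used; nothing here names `ConcreteEnv`
    // or a backend type, so swapping `heed` for `redb` needs no changes.
    env.config().reader_threads.get()
}
```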
+ +`ConcreteEnv` invariants you can rely on: +- It implements [`Env`] +- Upon [`Drop::drop`], all database data will sync to disk + +Note that `ConcreteEnv` itself is not a clonable type, +it should be wrapped in [`std::sync::Arc`]. + +<!-- SOMEDAY: replace `ConcreteEnv` with `fn Env::open() -> impl Env`/ +and use `<E: Env>` everywhere it is stored instead. This would allow +generic-backed dynamic runtime selection of the database backend, i.e. +the user can select which database backend they use. --> + +# Feature flags +Different database backends are enabled by the feature flags: +- `heed` (LMDB) +- `redb` + +The default is `heed`. + +`tracing` is always enabled and cannot be disabled via feature-flag. +<!-- FIXME: tracing should be behind a feature flag --> + +# Examples +The below is an example of using `cuprate-database`. + +```rust +use cuprate_database::{ + ConcreteEnv, + config::ConfigBuilder, + Env, EnvInner, + DatabaseRo, DatabaseRw, TxRo, TxRw, +}; + +# fn main() -> Result<(), Box<dyn std::error::Error>> { +// Create a configuration for the database environment. +let tmp_dir = tempfile::tempdir()?; +let db_dir = tmp_dir.path().to_owned(); +let config = ConfigBuilder::new(db_dir.into()).build(); + +// Initialize the database environment. +let env = ConcreteEnv::open(config)?; + +// Define metadata for a table. +struct Table; +impl cuprate_database::Table for Table { + // The name of the table is "table". + const NAME: &'static str = "table"; + // The key type is a `u8`. + type Key = u8; + // The key type is a `u64`. + type Value = u64; +} + +// Open up a transaction + tables for writing. +let env_inner = env.env_inner(); +let tx_rw = env_inner.tx_rw()?; +// We must create the table first or the next line will error. +env_inner.create_db::<Table>(&tx_rw)?; +let mut table = env_inner.open_db_rw::<Table>(&tx_rw)?; + +// Write data to the table. +table.put(&0, &1)?; + +// Commit the data written. +drop(table); +TxRw::commit(tx_rw)?; + +// Read the data, assert it is correct. +let tx_ro = env_inner.tx_ro()?; +let table = env_inner.open_db_ro::<Table>(&tx_ro)?; +assert_eq!(table.first()?, (0, 1)); +# Ok(()) } +``` \ No newline at end of file diff --git a/storage/blockchain/src/backend/heed/database.rs b/storage/database/src/backend/heed/database.rs similarity index 100% rename from storage/blockchain/src/backend/heed/database.rs rename to storage/database/src/backend/heed/database.rs diff --git a/storage/blockchain/src/backend/heed/env.rs b/storage/database/src/backend/heed/env.rs similarity index 88% rename from storage/blockchain/src/backend/heed/env.rs rename to storage/database/src/backend/heed/env.rs index 703af4a8..14f9777d 100644 --- a/storage/blockchain/src/backend/heed/env.rs +++ b/storage/database/src/backend/heed/env.rs @@ -7,12 +7,11 @@ use std::{ sync::{RwLock, RwLockReadGuard}, }; -use heed::{DatabaseOpenOptions, EnvFlags, EnvOpenOptions}; +use heed::{EnvFlags, EnvOpenOptions}; use crate::{ backend::heed::{ database::{HeedTableRo, HeedTableRw}, - storable::StorableHeed, types::HeedDb, }, config::{Config, SyncMode}, @@ -21,13 +20,12 @@ use crate::{ error::{InitError, RuntimeError}, resize::ResizeAlgorithm, table::Table, - tables::call_fn_on_all_tables_or_early_return, }; //---------------------------------------------------------------------------------------------------- Consts /// Panic message when there's a table missing. 
const PANIC_MSG_MISSING_TABLE: &str = - "cuprate_blockchain::Env should uphold the invariant that all tables are already created"; + "cuprate_database::Env should uphold the invariant that all tables are already created"; //---------------------------------------------------------------------------------------------------- ConcreteEnv /// A strongly typed, concrete database environment, backed by `heed`. @@ -184,8 +182,7 @@ impl Env for ConcreteEnv { // For now: // - No other program using our DB exists // - Almost no-one has a 126+ thread CPU - let reader_threads = - u32::try_from(config.reader_threads.as_threads().get()).unwrap_or(u32::MAX); + let reader_threads = u32::try_from(config.reader_threads.get()).unwrap_or(u32::MAX); env_open_options.max_readers(if reader_threads < 110 { 126 } else { @@ -199,34 +196,6 @@ impl Env for ConcreteEnv { // <https://docs.rs/heed/0.20.0/heed/struct.EnvOpenOptions.html#method.open> let env = unsafe { env_open_options.open(config.db_directory())? }; - /// Function that creates the tables based off the passed `T: Table`. - fn create_table<T: Table>( - env: &heed::Env, - tx_rw: &mut heed::RwTxn<'_>, - ) -> Result<(), InitError> { - DatabaseOpenOptions::new(env) - .name(<T as Table>::NAME) - .types::<StorableHeed<<T as Table>::Key>, StorableHeed<<T as Table>::Value>>() - .create(tx_rw)?; - Ok(()) - } - - let mut tx_rw = env.write_txn()?; - // Create all tables. - // FIXME: this macro is kinda awkward. - { - let env = &env; - let tx_rw = &mut tx_rw; - match call_fn_on_all_tables_or_early_return!(create_table(env, tx_rw)) { - Ok(_) => (), - Err(e) => return Err(e), - } - } - - // INVARIANT: this should never return `ResizeNeeded` due to adding - // some tables since we added some leeway to the memory map above. - tx_rw.commit()?; - Ok(Self { env: RwLock::new(env), config, @@ -302,7 +271,7 @@ where Ok(HeedTableRo { db: self .open_database(tx_ro, Some(T::NAME))? - .expect(PANIC_MSG_MISSING_TABLE), + .ok_or(RuntimeError::TableNotFound)?, tx_ro, }) } @@ -312,17 +281,19 @@ where &self, tx_rw: &RefCell<heed::RwTxn<'env>>, ) -> Result<impl DatabaseRw<T>, RuntimeError> { - let tx_ro = tx_rw.borrow(); - // Open up a read/write database using our table's const metadata. Ok(HeedTableRw { - db: self - .open_database(&tx_ro, Some(T::NAME))? - .expect(PANIC_MSG_MISSING_TABLE), + db: self.create_database(&mut tx_rw.borrow_mut(), Some(T::NAME))?, tx_rw, }) } + fn create_db<T: Table>(&self, tx_rw: &RefCell<heed::RwTxn<'env>>) -> Result<(), RuntimeError> { + // INVARIANT: `heed` creates tables with `open_database` if they don't exist. + self.open_db_rw::<T>(tx_rw)?; + Ok(()) + } + #[inline] fn clear_db<T: Table>( &self, diff --git a/storage/blockchain/src/backend/heed/error.rs b/storage/database/src/backend/heed/error.rs similarity index 96% rename from storage/blockchain/src/backend/heed/error.rs rename to storage/database/src/backend/heed/error.rs index c809e51c..bbaeaf0e 100644 --- a/storage/blockchain/src/backend/heed/error.rs +++ b/storage/database/src/backend/heed/error.rs @@ -1,4 +1,4 @@ -//! Conversion from `heed::Error` -> `cuprate_blockchain`'s errors. +//! Conversion from `heed::Error` -> `cuprate_database`'s errors. 
//---------------------------------------------------------------------------------------------------- Use use crate::constants::DATABASE_CORRUPT_MSG; @@ -85,7 +85,7 @@ impl From<heed::Error> for crate::RuntimeError { E2::Corrupted | E2::PageNotFound => panic!("{mdb_error:#?}\n{DATABASE_CORRUPT_MSG}"), // These errors should not occur, and if they do, - // the best thing `cuprate_blockchain` can do for + // the best thing `cuprate_database` can do for // safety is to panic right here. E2::Panic | E2::PageFull @@ -134,12 +134,12 @@ impl From<heed::Error> for crate::RuntimeError { // Don't use a key that is `>511` bytes. // <http://www.lmdb.tech/doc/group__mdb.html#gaaf0be004f33828bf2fb09d77eb3cef94> | E2::BadValSize - => panic!("fix the database code! {mdb_error:#?}"), + => panic!("E2: fix the database code! {mdb_error:#?}"), }, // Only if we write incorrect code. E1::DatabaseClosing | E1::BadOpenOptions { .. } | E1::Encoding(_) | E1::Decoding(_) => { - panic!("fix the database code! {error:#?}") + panic!("E1: fix the database code! {error:#?}") } } } diff --git a/storage/blockchain/src/backend/heed/mod.rs b/storage/database/src/backend/heed/mod.rs similarity index 100% rename from storage/blockchain/src/backend/heed/mod.rs rename to storage/database/src/backend/heed/mod.rs diff --git a/storage/blockchain/src/backend/heed/storable.rs b/storage/database/src/backend/heed/storable.rs similarity index 96% rename from storage/blockchain/src/backend/heed/storable.rs rename to storage/database/src/backend/heed/storable.rs index ebd8f6e6..83442212 100644 --- a/storage/blockchain/src/backend/heed/storable.rs +++ b/storage/database/src/backend/heed/storable.rs @@ -1,4 +1,4 @@ -//! `cuprate_blockchain::Storable` <-> `heed` serde trait compatibility layer. +//! `cuprate_database::Storable` <-> `heed` serde trait compatibility layer. //---------------------------------------------------------------------------------------------------- Use use std::{borrow::Cow, marker::PhantomData}; @@ -9,7 +9,7 @@ use crate::storable::Storable; //---------------------------------------------------------------------------------------------------- StorableHeed /// The glue struct that implements `heed`'s (de)serialization -/// traits on any type that implements `cuprate_blockchain::Storable`. +/// traits on any type that implements `cuprate_database::Storable`. /// /// Never actually gets constructed, just used for trait bound translations. 
pub(super) struct StorableHeed<T>(PhantomData<T>) diff --git a/storage/blockchain/src/backend/heed/transaction.rs b/storage/database/src/backend/heed/transaction.rs similarity index 100% rename from storage/blockchain/src/backend/heed/transaction.rs rename to storage/database/src/backend/heed/transaction.rs diff --git a/storage/blockchain/src/backend/heed/types.rs b/storage/database/src/backend/heed/types.rs similarity index 100% rename from storage/blockchain/src/backend/heed/types.rs rename to storage/database/src/backend/heed/types.rs diff --git a/storage/blockchain/src/backend/mod.rs b/storage/database/src/backend/mod.rs similarity index 100% rename from storage/blockchain/src/backend/mod.rs rename to storage/database/src/backend/mod.rs diff --git a/storage/blockchain/src/backend/redb/database.rs b/storage/database/src/backend/redb/database.rs similarity index 100% rename from storage/blockchain/src/backend/redb/database.rs rename to storage/database/src/backend/redb/database.rs diff --git a/storage/blockchain/src/backend/redb/env.rs b/storage/database/src/backend/redb/env.rs similarity index 86% rename from storage/blockchain/src/backend/redb/env.rs rename to storage/database/src/backend/redb/env.rs index 67e430f8..3ff195c1 100644 --- a/storage/blockchain/src/backend/redb/env.rs +++ b/storage/database/src/backend/redb/env.rs @@ -8,7 +8,6 @@ use crate::{ env::{Env, EnvInner}, error::{InitError, RuntimeError}, table::Table, - tables::call_fn_on_all_tables_or_early_return, TxRw, }; @@ -22,7 +21,7 @@ pub struct ConcreteEnv { /// (and in current use). config: Config, - /// A cached, redb version of `cuprate_blockchain::config::SyncMode`. + /// A cached, redb version of `cuprate_database::config::SyncMode`. /// `redb` needs the sync mode to be set _per_ TX, so we /// will continue to use this value every `Env::tx_rw`. durability: redb::Durability, @@ -90,31 +89,6 @@ impl Env for ConcreteEnv { // `redb` creates tables if they don't exist. // <https://docs.rs/redb/latest/redb/struct.WriteTransaction.html#method.open_table> - /// Function that creates the tables based off the passed `T: Table`. - fn create_table<T: Table>(tx_rw: &redb::WriteTransaction) -> Result<(), InitError> { - let table: redb::TableDefinition< - 'static, - StorableRedb<<T as Table>::Key>, - StorableRedb<<T as Table>::Value>, - > = redb::TableDefinition::new(<T as Table>::NAME); - - // `redb` creates tables on open if not already created. - tx_rw.open_table(table)?; - Ok(()) - } - - // Create all tables. - // FIXME: this macro is kinda awkward. - let mut tx_rw = env.begin_write()?; - { - let tx_rw = &mut tx_rw; - match call_fn_on_all_tables_or_early_return!(create_table(tx_rw)) { - Ok(_) => (), - Err(e) => return Err(e), - } - } - tx_rw.commit()?; - // Check for file integrity. // FIXME: should we do this? is it slow? env.check_integrity()?; @@ -174,7 +148,6 @@ where let table: redb::TableDefinition<'static, StorableRedb<T::Key>, StorableRedb<T::Value>> = redb::TableDefinition::new(T::NAME); - // INVARIANT: Our `?` error conversion will panic if the table does not exist. Ok(tx_ro.open_table(table)?) } @@ -187,11 +160,17 @@ where let table: redb::TableDefinition<'static, StorableRedb<T::Key>, StorableRedb<T::Value>> = redb::TableDefinition::new(T::NAME); - // `redb` creates tables if they don't exist, so this should never panic. + // `redb` creates tables if they don't exist, so this shouldn't return `RuntimeError::TableNotFound`. 
// <https://docs.rs/redb/latest/redb/struct.WriteTransaction.html#method.open_table> Ok(tx_rw.open_table(table)?) } + fn create_db<T: Table>(&self, tx_rw: &redb::WriteTransaction) -> Result<(), RuntimeError> { + // INVARIANT: `redb` creates tables if they don't exist. + self.open_db_rw::<T>(tx_rw)?; + Ok(()) + } + #[inline] fn clear_db<T: Table>(&self, tx_rw: &mut redb::WriteTransaction) -> Result<(), RuntimeError> { let table: redb::TableDefinition< diff --git a/storage/blockchain/src/backend/redb/error.rs b/storage/database/src/backend/redb/error.rs similarity index 98% rename from storage/blockchain/src/backend/redb/error.rs rename to storage/database/src/backend/redb/error.rs index 1cc1456b..fc332655 100644 --- a/storage/blockchain/src/backend/redb/error.rs +++ b/storage/database/src/backend/redb/error.rs @@ -1,4 +1,4 @@ -//! Conversion from `redb`'s errors -> `cuprate_blockchain`'s errors. +//! Conversion from `redb`'s errors -> `cuprate_database`'s errors. //! //! HACK: There's a lot of `_ =>` usage here because //! `redb`'s errors are `#[non_exhaustive]`... @@ -131,12 +131,13 @@ impl From<redb::TableError> for RuntimeError { match error { E::Storage(error) => error.into(), + E::TableDoesNotExist(_) => Self::TableNotFound, + // Only if we write incorrect code. E::TableTypeMismatch { .. } | E::TableIsMultimap(_) | E::TableIsNotMultimap(_) | E::TypeDefinitionChanged { .. } - | E::TableDoesNotExist(_) | E::TableAlreadyOpen(..) => panic!("fix the database code! {error:#?}"), // HACK: Handle new errors as `redb` adds them. diff --git a/storage/blockchain/src/backend/redb/mod.rs b/storage/database/src/backend/redb/mod.rs similarity index 100% rename from storage/blockchain/src/backend/redb/mod.rs rename to storage/database/src/backend/redb/mod.rs diff --git a/storage/blockchain/src/backend/redb/storable.rs b/storage/database/src/backend/redb/storable.rs similarity index 98% rename from storage/blockchain/src/backend/redb/storable.rs rename to storage/database/src/backend/redb/storable.rs index efe77dc5..6735fec0 100644 --- a/storage/blockchain/src/backend/redb/storable.rs +++ b/storage/database/src/backend/redb/storable.rs @@ -1,4 +1,4 @@ -//! `cuprate_blockchain::Storable` <-> `redb` serde trait compatibility layer. +//! `cuprate_database::Storable` <-> `redb` serde trait compatibility layer. //---------------------------------------------------------------------------------------------------- Use use std::{cmp::Ordering, fmt::Debug, marker::PhantomData}; @@ -9,7 +9,7 @@ use crate::{key::Key, storable::Storable}; //---------------------------------------------------------------------------------------------------- StorableRedb /// The glue structs that implements `redb`'s (de)serialization -/// traits on any type that implements `cuprate_blockchain::Key`. +/// traits on any type that implements `cuprate_database::Key`. /// /// Never actually get constructed, just used for trait bound translations. 
#[derive(Debug)] diff --git a/storage/blockchain/src/backend/redb/transaction.rs b/storage/database/src/backend/redb/transaction.rs similarity index 100% rename from storage/blockchain/src/backend/redb/transaction.rs rename to storage/database/src/backend/redb/transaction.rs diff --git a/storage/blockchain/src/backend/redb/types.rs b/storage/database/src/backend/redb/types.rs similarity index 100% rename from storage/blockchain/src/backend/redb/types.rs rename to storage/database/src/backend/redb/types.rs diff --git a/storage/database/src/backend/tests.rs b/storage/database/src/backend/tests.rs new file mode 100644 index 00000000..df80b631 --- /dev/null +++ b/storage/database/src/backend/tests.rs @@ -0,0 +1,374 @@ +//! Tests for `cuprate_database`'s backends. +//! +//! These tests are fully trait-based, meaning there +//! is no reference to `backend/`-specific types. +//! +//! As such, which backend is tested is +//! dependant on the feature flags used. +//! +//! | Feature flag | Tested backend | +//! |---------------|----------------| +//! | Only `redb` | `redb` +//! | Anything else | `heed` +//! +//! `redb`, and it only must be enabled for it to be tested. + +//---------------------------------------------------------------------------------------------------- Import +use crate::{ + database::{DatabaseIter, DatabaseRo, DatabaseRw}, + env::{Env, EnvInner}, + error::RuntimeError, + resize::ResizeAlgorithm, + tests::{tmp_concrete_env, TestTable}, + transaction::{TxRo, TxRw}, + ConcreteEnv, +}; + +//---------------------------------------------------------------------------------------------------- Tests +/// Simply call [`Env::open`]. If this fails, something is really wrong. +#[test] +fn open() { + tmp_concrete_env(); +} + +/// Create database transactions, but don't write any data. +#[test] +fn tx() { + let (env, _tempdir) = tmp_concrete_env(); + let env_inner = env.env_inner(); + + TxRo::commit(env_inner.tx_ro().unwrap()).unwrap(); + TxRw::commit(env_inner.tx_rw().unwrap()).unwrap(); + TxRw::abort(env_inner.tx_rw().unwrap()).unwrap(); +} + +/// Test [`Env::open`] and creating/opening tables. +#[test] +fn open_db() { + let (env, _tempdir) = tmp_concrete_env(); + let env_inner = env.env_inner(); + + // Create table. + { + let tx_rw = env_inner.tx_rw().unwrap(); + env_inner.create_db::<TestTable>(&tx_rw).unwrap(); + TxRw::commit(tx_rw).unwrap(); + } + + let tx_ro = env_inner.tx_ro().unwrap(); + let tx_rw = env_inner.tx_rw().unwrap(); + + // Open table in read-only mode. + env_inner.open_db_ro::<TestTable>(&tx_ro).unwrap(); + TxRo::commit(tx_ro).unwrap(); + + // Open table in read/write mode. + env_inner.open_db_rw::<TestTable>(&tx_rw).unwrap(); + TxRw::commit(tx_rw).unwrap(); +} + +/// Assert that opening a read-only table before creating errors. +#[test] +fn open_ro_uncreated_table() { + let (env, _tempdir) = tmp_concrete_env(); + let env_inner = env.env_inner(); + let tx_ro = env_inner.tx_ro().unwrap(); + + // Open uncreated table. + let error = env_inner.open_db_ro::<TestTable>(&tx_ro); + assert!(matches!(error, Err(RuntimeError::TableNotFound))); +} + +/// Assert that opening a read/write table before creating is OK. +#[test] +fn open_rw_uncreated_table() { + let (env, _tempdir) = tmp_concrete_env(); + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw().unwrap(); + + // Open uncreated table. + let _table = env_inner.open_db_rw::<TestTable>(&tx_rw).unwrap(); +} + +/// Assert that opening a read-only table after creating is OK. 
+#[test] +fn open_ro_created_table() { + let (env, _tempdir) = tmp_concrete_env(); + let env_inner = env.env_inner(); + + // Assert uncreated table errors. + { + let tx_ro = env_inner.tx_ro().unwrap(); + let error = env_inner.open_db_ro::<TestTable>(&tx_ro); + assert!(matches!(error, Err(RuntimeError::TableNotFound))); + } + + // Create table. + { + let tx_rw = env_inner.tx_rw().unwrap(); + env_inner.create_db::<TestTable>(&tx_rw).unwrap(); + TxRw::commit(tx_rw).unwrap(); + } + + // Assert created table is now OK. + let tx_ro = env_inner.tx_ro().unwrap(); + let _table = env_inner.open_db_ro::<TestTable>(&tx_ro).unwrap(); +} + +/// Test `Env` resizes. +#[test] +fn resize() { + // This test is only valid for `Env`'s that need to resize manually. + if !ConcreteEnv::MANUAL_RESIZE { + return; + } + + let (env, _tempdir) = tmp_concrete_env(); + + // Resize by the OS page size. + let page_size = crate::resize::page_size(); + let old_size = env.current_map_size(); + env.resize_map(Some(ResizeAlgorithm::FixedBytes(page_size))); + + // Assert it resized exactly by the OS page size. + let new_size = env.current_map_size(); + assert_eq!(new_size, old_size + page_size.get()); +} + +/// Test that `Env`'s that don't manually resize. +#[test] +#[should_panic = "unreachable"] +fn non_manual_resize_1() { + if ConcreteEnv::MANUAL_RESIZE { + unreachable!(); + } + let (env, _tempdir) = tmp_concrete_env(); + env.resize_map(None); +} + +#[test] +#[should_panic = "unreachable"] +fn non_manual_resize_2() { + if ConcreteEnv::MANUAL_RESIZE { + unreachable!(); + } + let (env, _tempdir) = tmp_concrete_env(); + env.current_map_size(); +} + +/// Test all `DatabaseR{o,w}` operations. +#[test] +fn db_read_write() { + let (env, _tempdir) = tmp_concrete_env(); + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw().unwrap(); + let mut table = env_inner.open_db_rw::<TestTable>(&tx_rw).unwrap(); + + /// The (1st) key. + const KEY: u8 = 0; + /// The expected value. + const VALUE: u64 = 0; + /// How many `(key, value)` pairs will be inserted. + const N: u8 = 100; + + /// Assert a u64 is the same as `VALUE`. + fn assert_value(value: u64) { + assert_eq!(value, VALUE); + } + + assert!(table.is_empty().unwrap()); + + // Insert keys. + let mut key = KEY; + #[allow(clippy::explicit_counter_loop)] // we need the +1 side effect + for _ in 0..N { + table.put(&key, &VALUE).unwrap(); + key += 1; + } + + assert_eq!(table.len().unwrap(), u64::from(N)); + + // Assert the first/last `(key, value)`s are there. + { + assert!(table.contains(&KEY).unwrap()); + let get = table.get(&KEY).unwrap(); + assert_value(get); + + let first = table.first().unwrap().1; + assert_value(first); + + let last = table.last().unwrap().1; + assert_value(last); + } + + // Commit transactions, create new ones. + drop(table); + TxRw::commit(tx_rw).unwrap(); + let tx_ro = env_inner.tx_ro().unwrap(); + let table_ro = env_inner.open_db_ro::<TestTable>(&tx_ro).unwrap(); + let tx_rw = env_inner.tx_rw().unwrap(); + let mut table = env_inner.open_db_rw::<TestTable>(&tx_rw).unwrap(); + + // Assert the whole range is there. + { + let range = table_ro.get_range(..).unwrap(); + let mut i = 0; + for result in range { + let value = result.unwrap(); + assert_value(value); + i += 1; + } + assert_eq!(i, N); + } + + // `get_range()` tests. + let mut key = KEY; + key += N; + let range = KEY..key; + + // Assert count is correct. + assert_eq!( + N as usize, + table_ro.get_range(range.clone()).unwrap().count() + ); + + // Assert each returned value from the iterator is owned. 
+ { + let mut iter = table_ro.get_range(range.clone()).unwrap(); + let value = iter.next().unwrap().unwrap(); // 1. take value out + drop(iter); // 2. drop the `impl Iterator + 'a` + assert_value(value); // 3. assert even without the iterator, the value is alive + } + + // Assert each value is the same. + { + let mut iter = table_ro.get_range(range).unwrap(); + for _ in 0..N { + let value = iter.next().unwrap().unwrap(); + assert_value(value); + } + } + + // Assert `update()` works. + { + const NEW_VALUE: u64 = 999; + + assert_ne!(table.get(&KEY).unwrap(), NEW_VALUE); + + #[allow(unused_assignments)] + table + .update(&KEY, |mut value| { + value = NEW_VALUE; + Some(value) + }) + .unwrap(); + + assert_eq!(table.get(&KEY).unwrap(), NEW_VALUE); + } + + // Assert deleting works. + { + table.delete(&KEY).unwrap(); + let value = table.get(&KEY); + assert!(!table.contains(&KEY).unwrap()); + assert!(matches!(value, Err(RuntimeError::KeyNotFound))); + // Assert the other `(key, value)` pairs are still there. + let mut key = KEY; + key += N - 1; // we used inclusive `0..N` + let value = table.get(&key).unwrap(); + assert_value(value); + } + + // Assert `take()` works. + { + let mut key = KEY; + key += 1; + let value = table.take(&key).unwrap(); + assert_eq!(value, VALUE); + + let get = table.get(&KEY); + assert!(!table.contains(&key).unwrap()); + assert!(matches!(get, Err(RuntimeError::KeyNotFound))); + + // Assert the other `(key, value)` pairs are still there. + key += 1; + let value = table.get(&key).unwrap(); + assert_value(value); + } + + drop(table); + TxRw::commit(tx_rw).unwrap(); + + // Assert `clear_db()` works. + { + let mut tx_rw = env_inner.tx_rw().unwrap(); + env_inner.clear_db::<TestTable>(&mut tx_rw).unwrap(); + let table = env_inner.open_db_rw::<TestTable>(&tx_rw).unwrap(); + assert!(table.is_empty().unwrap()); + for n in 0..N { + let mut key = KEY; + key += n; + let value = table.get(&key); + assert!(matches!(value, Err(RuntimeError::KeyNotFound))); + assert!(!table.contains(&key).unwrap()); + } + + // Reader still sees old value. + assert!(!table_ro.is_empty().unwrap()); + + // Writer sees updated value (nothing). + assert!(table.is_empty().unwrap()); + } +} + +/// Assert that `key`'s in database tables are sorted in +/// an ordered B-Tree fashion, i.e. `min_value -> max_value`. +#[test] +fn tables_are_sorted() { + let (env, _tmp) = tmp_concrete_env(); + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw().unwrap(); + let mut table = env_inner.open_db_rw::<TestTable>(&tx_rw).unwrap(); + + // Insert `{5, 4, 3, 2, 1, 0}`, assert each new + // number inserted is the minimum `first()` value. + for key in (0..6).rev() { + table.put(&key, &123).unwrap(); + let (first, _) = table.first().unwrap(); + assert_eq!(first, key); + } + + drop(table); + TxRw::commit(tx_rw).unwrap(); + let tx_rw = env_inner.tx_rw().unwrap(); + + // Assert iterators are ordered. + { + let tx_ro = env_inner.tx_ro().unwrap(); + let table = env_inner.open_db_ro::<TestTable>(&tx_ro).unwrap(); + let iter = table.iter().unwrap(); + let keys = table.keys().unwrap(); + for ((i, iter), key) in (0..6).zip(iter).zip(keys) { + let (iter, _) = iter.unwrap(); + let key = key.unwrap(); + assert_eq!(i, iter); + assert_eq!(iter, key); + } + } + + let mut table = env_inner.open_db_rw::<TestTable>(&tx_rw).unwrap(); + + // Assert the `first()` values are the minimum, i.e. 
`{0, 1, 2}` + for key in 0..3 { + let (first, _) = table.first().unwrap(); + assert_eq!(first, key); + table.delete(&key).unwrap(); + } + + // Assert the `last()` values are the maximum, i.e. `{5, 4, 3}` + for key in (3..6).rev() { + let (last, _) = table.last().unwrap(); + assert_eq!(last, key); + table.delete(&key).unwrap(); + } +} diff --git a/storage/database/src/config/backend.rs b/storage/database/src/config/backend.rs new file mode 100644 index 00000000..ea92b35d --- /dev/null +++ b/storage/database/src/config/backend.rs @@ -0,0 +1,31 @@ +//! SOMEDAY + +//---------------------------------------------------------------------------------------------------- Import +use std::{ + borrow::Cow, + num::NonZeroUsize, + path::{Path, PathBuf}, +}; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +use cuprate_helper::fs::database_dir; + +use crate::{ + config::{ReaderThreads, SyncMode}, + constants::DATABASE_DATA_FILENAME, + resize::ResizeAlgorithm, +}; + +//---------------------------------------------------------------------------------------------------- Backend +/// SOMEDAY: allow runtime hot-swappable backends. +#[derive(Copy, Clone, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum Backend { + #[default] + /// SOMEDAY + Heed, + /// SOMEDAY + Redb, +} diff --git a/storage/database/src/config/config.rs b/storage/database/src/config/config.rs new file mode 100644 index 00000000..a5ecbb23 --- /dev/null +++ b/storage/database/src/config/config.rs @@ -0,0 +1,210 @@ +//! The main [`Config`] struct, holding all configurable values. + +//---------------------------------------------------------------------------------------------------- Import +use std::{borrow::Cow, num::NonZeroUsize, path::Path}; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +use crate::{config::SyncMode, constants::DATABASE_DATA_FILENAME, resize::ResizeAlgorithm}; + +//---------------------------------------------------------------------------------------------------- Constants +/// Default value for [`Config::reader_threads`]. +/// +/// ```rust +/// use cuprate_database::config::*; +/// assert_eq!(READER_THREADS_DEFAULT.get(), 126); +/// ``` +pub const READER_THREADS_DEFAULT: NonZeroUsize = match NonZeroUsize::new(126) { + Some(n) => n, + None => unreachable!(), +}; + +//---------------------------------------------------------------------------------------------------- ConfigBuilder +/// Builder for [`Config`]. +/// +// SOMEDAY: there's are many more options to add in the future. +#[derive(Debug, Clone, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct ConfigBuilder { + /// [`Config::db_directory`]. + db_directory: Cow<'static, Path>, + + /// [`Config::sync_mode`]. + sync_mode: Option<SyncMode>, + + /// [`Config::reader_threads`]. + reader_threads: Option<NonZeroUsize>, + + /// [`Config::resize_algorithm`]. + resize_algorithm: Option<ResizeAlgorithm>, +} + +impl ConfigBuilder { + /// Create a new [`ConfigBuilder`]. + /// + /// [`ConfigBuilder::build`] can be called immediately + /// after this function to use default values. + pub const fn new(db_directory: Cow<'static, Path>) -> Self { + Self { + db_directory, + sync_mode: None, + reader_threads: Some(READER_THREADS_DEFAULT), + resize_algorithm: None, + } + } + + /// Build into a [`Config`]. 
+ /// + /// # Default values + /// - [`READER_THREADS_DEFAULT`] is used for [`Config::reader_threads`] + /// - [`Default::default`] is used for all other values (except the `db_directory`) + pub fn build(self) -> Config { + // Add the database filename to the directory. + let db_file = { + let mut db_file = self.db_directory.to_path_buf(); + db_file.push(DATABASE_DATA_FILENAME); + Cow::Owned(db_file) + }; + + Config { + db_directory: self.db_directory, + db_file, + sync_mode: self.sync_mode.unwrap_or_default(), + reader_threads: self.reader_threads.unwrap_or(READER_THREADS_DEFAULT), + resize_algorithm: self.resize_algorithm.unwrap_or_default(), + } + } + + /// Set a custom database directory (and file) [`Path`]. + #[must_use] + pub fn db_directory(mut self, db_directory: Cow<'static, Path>) -> Self { + self.db_directory = db_directory; + self + } + + /// Tune the [`ConfigBuilder`] for the highest performing, + /// but also most resource-intensive & maybe risky settings. + /// + /// Good default for testing, and resource-available machines. + #[must_use] + pub fn fast(mut self) -> Self { + self.sync_mode = Some(SyncMode::Fast); + self.resize_algorithm = Some(ResizeAlgorithm::default()); + self + } + + /// Tune the [`ConfigBuilder`] for the lowest performing, + /// but also least resource-intensive settings. + /// + /// Good default for resource-limited machines, e.g. a cheap VPS. + #[must_use] + pub fn low_power(mut self) -> Self { + self.sync_mode = Some(SyncMode::default()); + self.resize_algorithm = Some(ResizeAlgorithm::default()); + self + } + + /// Set a custom [`SyncMode`]. + #[must_use] + pub const fn sync_mode(mut self, sync_mode: SyncMode) -> Self { + self.sync_mode = Some(sync_mode); + self + } + + /// Set a custom [`Config::reader_threads`]. + #[must_use] + pub const fn reader_threads(mut self, reader_threads: NonZeroUsize) -> Self { + self.reader_threads = Some(reader_threads); + self + } + + /// Set a custom [`ResizeAlgorithm`]. + #[must_use] + pub const fn resize_algorithm(mut self, resize_algorithm: ResizeAlgorithm) -> Self { + self.resize_algorithm = Some(resize_algorithm); + self + } +} + +//---------------------------------------------------------------------------------------------------- Config +/// Database [`Env`](crate::Env) configuration. +/// +/// This is the struct passed to [`Env::open`](crate::Env::open) that +/// allows the database to be configured in various ways. +/// +/// For construction, use [`ConfigBuilder`]. +/// +// SOMEDAY: there's are many more options to add in the future. +#[derive(Debug, Clone, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Config { + //------------------------ Database PATHs + // These are private since we don't want + // users messing with them after construction. + /// The directory used to store all database files. + /// + // SOMEDAY: we should also support `/etc/cuprated.conf`. + // This could be represented with an `enum DbPath { Default, Custom, Etc, }` + pub(crate) db_directory: Cow<'static, Path>, + /// The actual database data file. + /// + /// This is private, and created from the above `db_directory`. + pub(crate) db_file: Cow<'static, Path>, + + /// Disk synchronization mode. + pub sync_mode: SyncMode, + + /// Database reader thread count. + /// + /// Set the number of slots in the reader table. + /// + /// This is only used in LMDB, see + /// <https://github.com/LMDB/lmdb/blob/b8e54b4c31378932b69f1298972de54a565185b1/libraries/liblmdb/mdb.c#L794-L799>. 
+ /// + /// By default, this value is [`READER_THREADS_DEFAULT`]. + pub reader_threads: NonZeroUsize, + + /// Database memory map resizing algorithm. + /// + /// This is used as the default fallback, but + /// custom algorithms can be used as well with + /// [`Env::resize_map`](crate::Env::resize_map). + pub resize_algorithm: ResizeAlgorithm, +} + +impl Config { + /// Create a new [`Config`] with sane default settings. + /// + /// The [`Config::db_directory`] must be passed. + /// + /// All other values will be [`Default::default`]. + /// + /// ```rust + /// use cuprate_database::{config::*, resize::*, DATABASE_DATA_FILENAME}; + /// + /// let tmp_dir = tempfile::tempdir().unwrap(); + /// let db_directory = tmp_dir.path().to_owned(); + /// let config = Config::new(db_directory.clone().into()); + /// + /// assert_eq!(*config.db_directory(), db_directory); + /// assert!(config.db_file().starts_with(db_directory)); + /// assert!(config.db_file().ends_with(DATABASE_DATA_FILENAME)); + /// assert_eq!(config.sync_mode, SyncMode::default()); + /// assert_eq!(config.reader_threads, READER_THREADS_DEFAULT); + /// assert_eq!(config.resize_algorithm, ResizeAlgorithm::default()); + /// ``` + pub fn new(db_directory: Cow<'static, Path>) -> Self { + ConfigBuilder::new(db_directory).build() + } + + /// Return the absolute [`Path`] to the database directory. + pub const fn db_directory(&self) -> &Cow<'_, Path> { + &self.db_directory + } + + /// Return the absolute [`Path`] to the database data file. + pub const fn db_file(&self) -> &Cow<'_, Path> { + &self.db_file + } +} diff --git a/storage/database/src/config/mod.rs b/storage/database/src/config/mod.rs new file mode 100644 index 00000000..19a324e1 --- /dev/null +++ b/storage/database/src/config/mod.rs @@ -0,0 +1,40 @@ +//! Database [`Env`](crate::Env) configuration. +//! +//! This module contains the main [`Config`]uration struct +//! for the database [`Env`](crate::Env)ironment, and types +//! related to configuration settings. +//! +//! The main constructor is the [`ConfigBuilder`]. +//! +//! These configurations are processed at runtime, meaning +//! the `Env` can/will dynamically adjust its behavior +//! based on these values. +//! +//! # Example +//! ```rust +//! use cuprate_database::{ +//! ConcreteEnv, Env, +//! config::{ConfigBuilder, SyncMode} +//! }; +//! +//! # fn main() -> Result<(), Box<dyn std::error::Error>> { +//! let db_dir = tempfile::tempdir()?; +//! +//! let config = ConfigBuilder::new(db_dir.path().to_path_buf().into()) +//! // Use the fastest sync mode. +//! .sync_mode(SyncMode::Fast) +//! // Build into `Config` +//! .build(); +//! +//! // Open the database using this configuration. +//! let env = ConcreteEnv::open(config.clone())?; +//! // It's using the config we provided. +//! assert_eq!(env.config(), &config); +//! # Ok(()) } +//! ``` + +mod config; +pub use config::{Config, ConfigBuilder, READER_THREADS_DEFAULT}; + +mod sync_mode; +pub use sync_mode::SyncMode; diff --git a/storage/database/src/config/sync_mode.rs b/storage/database/src/config/sync_mode.rs new file mode 100644 index 00000000..1d203396 --- /dev/null +++ b/storage/database/src/config/sync_mode.rs @@ -0,0 +1,135 @@ +//! Database [`Env`](crate::Env) configuration. +//! +//! This module contains the main [`Config`]uration struct +//! for the database [`Env`](crate::Env)ironment, and data +//! structures related to any configuration setting. +//! +//! These configurations are processed at runtime, meaning +//! the `Env` can/will dynamically adjust its behavior +//! 
based on these values.
+
+//---------------------------------------------------------------------------------------------------- Import
+
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Serialize};
+
+//---------------------------------------------------------------------------------------------------- SyncMode
+/// Disk synchronization mode.
+///
+/// This controls how/when the database syncs its data to disk.
+///
+/// Regardless of the variant chosen, dropping [`Env`](crate::Env)
+/// will always cause it to fully sync to disk.
+///
+/// # Sync vs Async
+/// All variants except [`SyncMode::Async`] & [`SyncMode::Fast`]
+/// are `synchronous`, as in the database will wait until the OS has
+/// finished syncing all the data to disk before continuing.
+///
+/// `SyncMode::Async` & `SyncMode::Fast` are `asynchronous`, meaning
+/// the database will _NOT_ wait until the data is fully synced to disk
+/// before continuing. Note that this doesn't mean the database itself
+/// won't be synchronized between readers/writers, but rather that the
+/// data _on disk_ may not be immediately synchronized after a write.
+///
+/// Something like:
+/// ```rust,ignore
+/// db.put("key", value);
+/// db.get("key");
+/// ```
+/// will be fine, most likely pulling from memory instead of disk.
+///
+/// # SOMEDAY
+/// Dynamic syncs are not yet supported.
+///
+/// Only:
+///
+/// - [`SyncMode::Safe`]
+/// - [`SyncMode::Async`]
+/// - [`SyncMode::Fast`]
+///
+/// are supported; all other variants will panic on [`crate::Env::open`].
+#[derive(Copy, Clone, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum SyncMode {
+    /// Use [`SyncMode::Fast`] until fully synced,
+    /// then use [`SyncMode::Safe`].
+    ///
+    // # SOMEDAY: how to implement this?
+    // ref: <https://github.com/monero-project/monero/issues/1463>
+    // monerod-solution: <https://github.com/monero-project/monero/pull/1506>
+    // cuprate-issue: <https://github.com/Cuprate/cuprate/issues/78>
+    //
+    // We could:
+    // ```rust,ignore
+    // if current_db_block <= top_block.saturating_sub(N) {
+    //     // don't sync()
+    // } else {
+    //     // sync()
+    // }
+    // ```
+    // where N is some threshold we pick that is _close_ enough
+    // to being synced where we want to start being safer.
+    //
+    // Essentially, when we are in a certain % range of being finished,
+    // switch to safe mode, until then, go fast.
+    FastThenSafe,
+
+    #[default]
+    /// Fully sync to disk per transaction.
+    ///
+    /// Every database transaction commit will
+    /// fully sync all data to disk, _synchronously_,
+    /// so the database (writer) halts until synced.
+    ///
+    /// This is expected to be very slow.
+    ///
+    /// This matches:
+    /// - LMDB without any special sync flags
+    /// - [`redb::Durability::Immediate`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Immediate)
+    Safe,
+
+    /// Asynchronously sync to disk per transaction.
+    ///
+    /// This is the same as [`SyncMode::Safe`],
+    /// but the syncs will be asynchronous, i.e.
+    /// each transaction commit will sync to disk,
+    /// but only eventually, not necessarily immediately.
+    ///
+    /// This matches:
+    /// - [`MDB_MAPASYNC`](http://www.lmdb.tech/doc/group__mdb__env.html#gab034ed0d8e5938090aef5ee0997f7e94)
+    /// - [`redb::Durability::Eventual`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Eventual)
+    Async,
+
+    /// Fully sync to disk after we cross this transaction threshold.
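+    ///
+    /// As a rough sketch (the `1_000` is purely illustrative, and per the
+    /// `# SOMEDAY` note above this variant is not yet accepted by
+    /// [`crate::Env::open`]):
+    /// ```rust,ignore
+    /// // Only fully sync to disk once every 1_000 committed transactions.
+    /// let mode = SyncMode::Threshold(1_000);
+    /// ```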
+    ///
+    /// After committing [`usize`] amount of database
+    /// transactions, it will be synced to disk.
+    ///
+    /// `0` behaves the same as [`SyncMode::Safe`], and a ridiculously large
+    /// number like `usize::MAX` is practically the same as [`SyncMode::Fast`].
+    Threshold(usize),
+
+    /// Only flush at database shutdown.
+    ///
+    /// This is the fastest, but least safe option.
+    ///
+    /// It will cause the database to never _actively_ sync,
+    /// letting the OS decide when to flush data to disk.
+    ///
+    /// This matches:
+    /// - [`MDB_NOSYNC`](http://www.lmdb.tech/doc/group__mdb__env.html#ga5791dd1adb09123f82dd1f331209e12e) + [`MDB_MAPASYNC`](http://www.lmdb.tech/doc/group__mdb__env.html#gab034ed0d8e5938090aef5ee0997f7e94)
+    /// - [`redb::Durability::None`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.None)
+    ///
+    /// `monerod` reference: <https://github.com/monero-project/monero/blob/7b7958bbd9d76375c47dc418b4adabba0f0b1785/src/blockchain_db/lmdb/db_lmdb.cpp#L1380-L1381>
+    ///
+    /// # Corruption
+    /// In the case of a system crash, the database
+    /// may become corrupted when using this option.
+    //
+    // FIXME: we could call this `unsafe`
+    // and use that terminology in the config file
+    // so users know exactly what they are getting
+    // themselves into.
+    Fast,
+}
diff --git a/storage/database/src/constants.rs b/storage/database/src/constants.rs
new file mode 100644
index 00000000..903f5cd3
--- /dev/null
+++ b/storage/database/src/constants.rs
@@ -0,0 +1,74 @@
+//! General constants used throughout `cuprate-database`.
+
+//---------------------------------------------------------------------------------------------------- Import
+use cfg_if::cfg_if;
+
+//---------------------------------------------------------------------------------------------------- Error Messages
+/// Corrupt database error message.
+///
+/// The error message shown to end-users in panic
+/// messages if we think the database is corrupted.
+///
+/// This is meant to be user-friendly.
+pub const DATABASE_CORRUPT_MSG: &str = r"Cuprate has encountered a fatal error. The database may be corrupted.
+
+TODO: instructions on:
+1. What to do
+2. How to fix (re-sync, recover, etc)
+3. General advice for preventing corruption
+4. etc";
+
+//---------------------------------------------------------------------------------------------------- Misc
+/// Static string of the `crate` being used as the database backend.
+///
+/// | Backend | Value |
+/// |---------|-------|
+/// | `heed`  | `"heed"`
+/// | `redb`  | `"redb"`
+pub const DATABASE_BACKEND: &str = {
+    cfg_if! {
+        if #[cfg(all(feature = "redb", not(feature = "heed")))] {
+            "redb"
+        } else {
+            "heed"
+        }
+    }
+};
+
+/// Cuprate's database filename.
+///
+/// Used in [`Config::db_file`](crate::config::Config::db_file).
+///
+/// | Backend | Value |
+/// |---------|-------|
+/// | `heed`  | `"data.mdb"`
+/// | `redb`  | `"data.redb"`
+pub const DATABASE_DATA_FILENAME: &str = {
+    cfg_if! {
+        if #[cfg(all(feature = "redb", not(feature = "heed")))] {
+            "data.redb"
+        } else {
+            "data.mdb"
+        }
+    }
+};
+
+/// Cuprate's database lock filename.
+///
+/// | Backend | Value |
+/// |---------|-------|
+/// | `heed`  | `Some("lock.mdb")`
+/// | `redb`  | `None` (redb doesn't use a file lock)
+pub const DATABASE_LOCK_FILENAME: Option<&str> = {
+    cfg_if! {
+        if #[cfg(all(feature = "redb", not(feature = "heed")))] {
+            None
+        } else {
+            Some("lock.mdb")
+        }
+    }
+};
+
+//---------------------------------------------------------------------------------------------------- Tests
+#[cfg(test)]
+mod test {}
diff --git a/storage/blockchain/src/database.rs b/storage/database/src/database.rs
similarity index 100%
rename from storage/blockchain/src/database.rs
rename to storage/database/src/database.rs
diff --git a/storage/blockchain/src/env.rs b/storage/database/src/env.rs
similarity index 84%
rename from storage/blockchain/src/env.rs
rename to storage/database/src/env.rs
index 3a32666b..8491f58c 100644
--- a/storage/blockchain/src/env.rs
+++ b/storage/database/src/env.rs
@@ -9,7 +9,6 @@ use crate::{
     error::{InitError, RuntimeError},
     resize::ResizeAlgorithm,
     table::Table,
-    tables::{call_fn_on_all_tables_or_early_return, TablesIter, TablesMut},
     transaction::{TxRo, TxRw},
 };
 
@@ -81,13 +80,13 @@ pub trait Env: Sized {
     /// Open the database environment, using the passed [`Config`].
     ///
     /// # Invariants
-    /// This function **must** create all tables listed in [`crate::tables`].
+    /// This function does not create any tables.
     ///
-    /// The rest of the functions depend on the fact
-    /// they already exist, or else they will panic.
+    /// You must create all possible tables with [`EnvInner::create_db`]
+    /// before attempting to open any.
     ///
     /// # Errors
-    /// This will error if the database could not be opened.
+    /// This will error if the database file could not be opened.
     ///
     /// This is the only [`Env`] function that will return
     /// an [`InitError`] instead of a [`RuntimeError`].
@@ -180,10 +179,14 @@ pub trait Env: Sized {
 macro_rules! doc_table_error {
     () => {
         r"# Errors
-This will only return [`RuntimeError::Io`] if it errors.
+This will only return [`RuntimeError::Io`] on normal errors.
 
-As all tables are created upon [`Env::open`],
-this function will never error because a table doesn't exist."
+If the specified table has not been created before this function is called,
+this will return an error.
+
+Implementation detail you should NOT rely on:
+- This only panics on `heed`
+- `redb` will create the table if it does not exist"
     };
 }
 
@@ -196,6 +199,12 @@ this function will never error because a table doesn't exist."
 /// As noted in `Env::env_inner`, this is a `RwLockReadGuard`
 /// when using the `heed` backend, be aware of this and do
 /// not hold onto an `EnvInner` for a long time.
+///
+/// # Tables
+/// Note that when opening tables with [`EnvInner::open_db_ro`],
+/// they must be created first or else an error will be returned.
+///
+/// See [`EnvInner::open_db_rw`] and [`EnvInner::create_db`] for creating tables.
 pub trait EnvInner<'env, Ro, Rw>
 where
     Self: 'env,
@@ -229,7 +238,11 @@ where
     /// // (name, key/value type)
     /// ```
     ///
-    #[doc = doc_table_error!()]
+    /// # Errors
+    /// This will only return [`RuntimeError::Io`] on normal errors.
+    ///
+    /// If the specified table has not been created before this function is called,
+    /// this will return [`RuntimeError::TableNotFound`].
     fn open_db_ro<T: Table>(
         &self,
         tx_ro: &Ro,
@@ -246,32 +259,22 @@ where
     /// This will open the database [`Table`]
     /// passed as a generic to this function.
     ///
-    #[doc = doc_table_error!()]
+    /// # Errors
+    /// This will only return [`RuntimeError::Io`] on errors.
+    ///
+    /// Implementation details: Both `heed` & `redb` backends create
+    /// the table with this function if it does not already exist. 
For safety and + /// clear intent, you should still consider using [`EnvInner::create_db`] instead. fn open_db_rw<T: Table>(&self, tx_rw: &Rw) -> Result<impl DatabaseRw<T>, RuntimeError>; - /// Open all tables in read/iter mode. + /// Create a database table. /// - /// This calls [`EnvInner::open_db_ro`] on all database tables - /// and returns a structure that allows access to all tables. + /// This will create the database [`Table`] + /// passed as a generic to this function. /// - #[doc = doc_table_error!()] - fn open_tables(&self, tx_ro: &Ro) -> Result<impl TablesIter, RuntimeError> { - call_fn_on_all_tables_or_early_return! { - Self::open_db_ro(self, tx_ro) - } - } - - /// Open all tables in read-write mode. - /// - /// This calls [`EnvInner::open_db_rw`] on all database tables - /// and returns a structure that allows access to all tables. - /// - #[doc = doc_table_error!()] - fn open_tables_mut(&self, tx_rw: &Rw) -> Result<impl TablesMut, RuntimeError> { - call_fn_on_all_tables_or_early_return! { - Self::open_db_rw(self, tx_rw) - } - } + /// # Errors + /// This will only return [`RuntimeError::Io`] on errors. + fn create_db<T: Table>(&self, tx_rw: &Rw) -> Result<(), RuntimeError>; /// Clear all `(key, value)`'s from a database table. /// diff --git a/storage/blockchain/src/error.rs b/storage/database/src/error.rs similarity index 94% rename from storage/blockchain/src/error.rs rename to storage/database/src/error.rs index 6112d92f..386091d9 100644 --- a/storage/blockchain/src/error.rs +++ b/storage/database/src/error.rs @@ -66,7 +66,7 @@ pub enum InitError { /// 2. (De)serialization /// 3. Shutdown errors /// -/// as `cuprate_blockchain` upholds the invariant that: +/// as `cuprate_database` upholds the invariant that: /// /// 1. All tables exist /// 2. (De)serialization never fails @@ -88,6 +88,10 @@ pub enum RuntimeError { #[error("database memory map must be resized")] ResizeNeeded, + /// The given table did not exist in the database. + #[error("database table did not exist")] + TableNotFound, + /// A [`std::io::Error`]. #[error("I/O error: {0}")] Io(#[from] std::io::Error), diff --git a/storage/blockchain/src/key.rs b/storage/database/src/key.rs similarity index 97% rename from storage/blockchain/src/key.rs rename to storage/database/src/key.rs index daafc6bb..13f7cede 100644 --- a/storage/blockchain/src/key.rs +++ b/storage/database/src/key.rs @@ -23,7 +23,7 @@ pub trait Key: Storable + Sized { /// not a comparison of the key's value. /// /// ```rust - /// # use cuprate_blockchain::*; + /// # use cuprate_database::*; /// assert_eq!( /// <u64 as Key>::compare([0].as_slice(), [1].as_slice()), /// std::cmp::Ordering::Less, diff --git a/storage/database/src/lib.rs b/storage/database/src/lib.rs index 8b137891..1e15b584 100644 --- a/storage/database/src/lib.rs +++ b/storage/database/src/lib.rs @@ -1 +1,152 @@ +#![doc = include_str!("../README.md")] +//---------------------------------------------------------------------------------------------------- Lints +// Forbid lints. +// Our code, and code generated (e.g macros) cannot overrule these. +#![forbid( + // `unsafe` is allowed but it _must_ be + // commented with `SAFETY: reason`. + clippy::undocumented_unsafe_blocks, + // Never. + unused_unsafe, + redundant_semicolons, + unused_allocation, + coherence_leak_check, + while_true, + clippy::missing_docs_in_private_items, + + // Maybe can be put into `#[deny]`. 
+ unconditional_recursion, + for_loops_over_fallibles, + unused_braces, + unused_labels, + keyword_idents, + non_ascii_idents, + variant_size_differences, + single_use_lifetimes, + + // Probably can be put into `#[deny]`. + future_incompatible, + let_underscore, + break_with_label_and_loop, + duplicate_macro_attributes, + exported_private_dependencies, + large_assignments, + overlapping_range_endpoints, + semicolon_in_expressions_from_macros, + noop_method_call, + unreachable_pub, +)] +// Deny lints. +// Some of these are `#[allow]`'ed on a per-case basis. +#![deny( + clippy::all, + clippy::correctness, + clippy::suspicious, + clippy::style, + clippy::complexity, + clippy::perf, + clippy::pedantic, + clippy::nursery, + clippy::cargo, + unused_crate_dependencies, + unused_doc_comments, + unused_mut, + missing_docs, + deprecated, + unused_comparisons, + nonstandard_style +)] +#![allow( + // FIXME: this lint affects crates outside of + // `database/` for some reason, allow for now. + clippy::cargo_common_metadata, + + // FIXME: adding `#[must_use]` onto everything + // might just be more annoying than useful... + // although it is sometimes nice. + clippy::must_use_candidate, + + // FIXME: good lint but too many false positives + // with our `Env` + `RwLock` setup. + clippy::significant_drop_tightening, + + // FIXME: good lint but is less clear in most cases. + clippy::items_after_statements, + + clippy::module_name_repetitions, + clippy::module_inception, + clippy::redundant_pub_crate, + clippy::option_if_let_else, +)] +// Allow some lints when running in debug mode. +#![cfg_attr( + debug_assertions, + allow( + clippy::todo, + clippy::multiple_crate_versions, + // unused_crate_dependencies, + ) +)] +// Allow some lints in tests. +#![cfg_attr( + test, + allow( + clippy::cognitive_complexity, + clippy::needless_pass_by_value, + clippy::cast_possible_truncation, + clippy::too_many_lines + ) +)] + +//---------------------------------------------------------------------------------------------------- Public API +// Import private modules, export public types. +// +// Documentation for each module is located in the respective file. + +mod backend; +pub use backend::ConcreteEnv; + +pub mod config; + +mod constants; +pub use constants::{ + DATABASE_BACKEND, DATABASE_CORRUPT_MSG, DATABASE_DATA_FILENAME, DATABASE_LOCK_FILENAME, +}; + +mod database; +pub use database::{DatabaseIter, DatabaseRo, DatabaseRw}; + +mod env; +pub use env::{Env, EnvInner}; + +mod error; +pub use error::{InitError, RuntimeError}; + +pub mod resize; + +mod key; +pub use key::Key; + +mod storable; +pub use storable::{Storable, StorableBytes, StorableVec}; + +mod table; +pub use table::Table; + +mod transaction; +pub use transaction::{TxRo, TxRw}; + +//---------------------------------------------------------------------------------------------------- Private +#[cfg(test)] +pub(crate) mod tests; + +//---------------------------------------------------------------------------------------------------- +// HACK: needed to satisfy the `unused_crate_dependencies` lint. +cfg_if::cfg_if! { + if #[cfg(feature = "redb")] { + use redb as _; + } else { + use heed as _; + } +} diff --git a/storage/blockchain/src/resize.rs b/storage/database/src/resize.rs similarity index 96% rename from storage/blockchain/src/resize.rs rename to storage/database/src/resize.rs index 488325be..99d6d7e3 100644 --- a/storage/blockchain/src/resize.rs +++ b/storage/database/src/resize.rs @@ -50,7 +50,7 @@ impl ResizeAlgorithm { /// Returns [`Self::Monero`]. 
/// /// ```rust - /// # use cuprate_blockchain::resize::*; + /// # use cuprate_database::resize::*; /// assert!(matches!(ResizeAlgorithm::new(), ResizeAlgorithm::Monero)); /// ``` #[inline] @@ -75,7 +75,7 @@ impl Default for ResizeAlgorithm { /// Calls [`Self::new`]. /// /// ```rust - /// # use cuprate_blockchain::resize::*; + /// # use cuprate_database::resize::*; /// assert_eq!(ResizeAlgorithm::new(), ResizeAlgorithm::default()); /// ``` #[inline] @@ -113,7 +113,7 @@ pub fn page_size() -> NonZeroUsize { /// [^2]: `1_073_745_920` /// /// ```rust -/// # use cuprate_blockchain::resize::*; +/// # use cuprate_database::resize::*; /// // The value this function will increment by /// // (assuming page multiple of 4096). /// const N: usize = 1_073_741_824; @@ -129,7 +129,7 @@ pub fn page_size() -> NonZeroUsize { /// This function will panic if adding onto `current_size_bytes` overflows [`usize::MAX`]. /// /// ```rust,should_panic -/// # use cuprate_blockchain::resize::*; +/// # use cuprate_database::resize::*; /// // Ridiculous large numbers panic. /// monero(usize::MAX); /// ``` @@ -166,7 +166,7 @@ pub fn monero(current_size_bytes: usize) -> NonZeroUsize { /// and then round up to nearest OS page size. /// /// ```rust -/// # use cuprate_blockchain::resize::*; +/// # use cuprate_database::resize::*; /// let page_size: usize = page_size().get(); /// /// // Anything below the page size will round up to the page size. @@ -185,7 +185,7 @@ pub fn monero(current_size_bytes: usize) -> NonZeroUsize { /// This function will panic if adding onto `current_size_bytes` overflows [`usize::MAX`]. /// /// ```rust,should_panic -/// # use cuprate_blockchain::resize::*; +/// # use cuprate_database::resize::*; /// // Ridiculous large numbers panic. /// fixed_bytes(1, usize::MAX); /// ``` @@ -221,7 +221,7 @@ pub fn fixed_bytes(current_size_bytes: usize, add_bytes: usize) -> NonZeroUsize /// (rounded up to the OS page size). /// /// ```rust -/// # use cuprate_blockchain::resize::*; +/// # use cuprate_database::resize::*; /// let page_size: usize = page_size().get(); /// /// // Anything below the page size will round up to the page size. @@ -247,7 +247,7 @@ pub fn fixed_bytes(current_size_bytes: usize, add_bytes: usize) -> NonZeroUsize /// is closer to [`usize::MAX`] than the OS page size. /// /// ```rust,should_panic -/// # use cuprate_blockchain::resize::*; +/// # use cuprate_database::resize::*; /// // Ridiculous large numbers panic. /// percent(usize::MAX, 1.001); /// ``` diff --git a/storage/blockchain/src/storable.rs b/storage/database/src/storable.rs similarity index 96% rename from storage/blockchain/src/storable.rs rename to storage/database/src/storable.rs index 80d010c6..b5fa2f8a 100644 --- a/storage/blockchain/src/storable.rs +++ b/storage/database/src/storable.rs @@ -22,14 +22,10 @@ use bytes::Bytes; /// /// will automatically implement [`Storable`]. /// -/// This includes: -/// - Most primitive types -/// - All types in [`tables`](crate::tables) -/// /// See [`StorableVec`] & [`StorableBytes`] for storing slices of `T: Storable`. 
/// /// ```rust -/// # use cuprate_blockchain::*; +/// # use cuprate_database::*; /// # use std::borrow::*; /// let number: u64 = 0; /// @@ -77,7 +73,7 @@ pub trait Storable: Debug { /// /// # Examples /// ```rust - /// # use cuprate_blockchain::*; + /// # use cuprate_database::*; /// assert_eq!(<()>::BYTE_LENGTH, Some(0)); /// assert_eq!(u8::BYTE_LENGTH, Some(1)); /// assert_eq!(u16::BYTE_LENGTH, Some(2)); @@ -99,7 +95,7 @@ pub trait Storable: Debug { /// /// # Blanket implementation /// The blanket implementation that covers all types used - /// by `cuprate_blockchain` will simply bitwise copy `bytes` + /// by `cuprate_database` will simply bitwise copy `bytes` /// into `Self`. /// /// The bytes do not have be correctly aligned. @@ -136,7 +132,7 @@ where /// /// # Example /// ```rust -/// # use cuprate_blockchain::*; +/// # use cuprate_database::*; /// //---------------------------------------------------- u8 /// let vec: StorableVec<u8> = StorableVec(vec![0,1]); /// @@ -202,7 +198,7 @@ impl<T> Borrow<[T]> for StorableVec<T> { /// A [`Storable`] version of [`Bytes`]. /// /// ```rust -/// # use cuprate_blockchain::*; +/// # use cuprate_database::*; /// # use bytes::Bytes; /// let bytes: StorableBytes = StorableBytes(Bytes::from_static(&[0,1])); /// diff --git a/storage/blockchain/src/table.rs b/storage/database/src/table.rs similarity index 70% rename from storage/blockchain/src/table.rs rename to storage/database/src/table.rs index 966a9873..56e84ddd 100644 --- a/storage/blockchain/src/table.rs +++ b/storage/database/src/table.rs @@ -8,12 +8,7 @@ use crate::{key::Key, storable::Storable}; /// Database table metadata. /// /// Purely compile time information for database tables. -/// -/// ## Sealed -/// This trait is [`Sealed`](https://rust-lang.github.io/api-guidelines/future-proofing.html#sealed-traits-protect-against-downstream-implementations-c-sealed). -/// -/// It is only implemented on the types inside [`tables`][crate::tables]. -pub trait Table: crate::tables::private::Sealed + 'static { +pub trait Table: 'static { /// Name of the database table. const NAME: &'static str; diff --git a/storage/database/src/tests.rs b/storage/database/src/tests.rs new file mode 100644 index 00000000..81561073 --- /dev/null +++ b/storage/database/src/tests.rs @@ -0,0 +1,35 @@ +//! Utilities for `cuprate_database` testing. +//! +//! These types/fn's are only: +//! - enabled on #[cfg(test)] +//! - only used internally + +//---------------------------------------------------------------------------------------------------- Import +use std::borrow::Cow; + +use crate::{config::ConfigBuilder, table::Table, ConcreteEnv, Env}; + +//---------------------------------------------------------------------------------------------------- struct +/// A test table. +pub(crate) struct TestTable; + +impl Table for TestTable { + const NAME: &'static str = "test_table"; + type Key = u8; + type Value = u64; +} + +//---------------------------------------------------------------------------------------------------- fn +/// Create an `Env` in a temporarily directory. +/// The directory is automatically removed after the `TempDir` is dropped. +/// +/// FIXME: changing this to `-> impl Env` causes lifetime errors... 
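+///
+/// A rough usage sketch (illustrative only; it assumes `Env::env_inner`, a `tx_rw`
+/// write-transaction constructor on `EnvInner`, `EnvInner::create_db`, and the
+/// `TestTable` defined above):
+/// ```rust,ignore
+/// let (env, _tempdir) = tmp_concrete_env();
+/// let env_inner = env.env_inner();
+/// let tx_rw = env_inner.tx_rw().unwrap();
+/// // Tables must be created before they can be opened.
+/// env_inner.create_db::<TestTable>(&tx_rw).unwrap();
+/// ```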
+pub(crate) fn tmp_concrete_env() -> (ConcreteEnv, tempfile::TempDir) { + let tempdir = tempfile::tempdir().unwrap(); + let config = ConfigBuilder::new(Cow::Owned(tempdir.path().into())) + .low_power() + .build(); + let env = ConcreteEnv::open(config).unwrap(); + + (env, tempdir) +} diff --git a/storage/blockchain/src/transaction.rs b/storage/database/src/transaction.rs similarity index 100% rename from storage/blockchain/src/transaction.rs rename to storage/database/src/transaction.rs From 7e9891de5b54f9caf11fd1ed57a5c45daeb57a11 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 26 Jun 2024 22:59:11 +0100 Subject: [PATCH 11/11] build(deps): bump curve25519-dalek from 4.1.2 to 4.1.3 (#197) Bumps [curve25519-dalek](https://github.com/dalek-cryptography/curve25519-dalek) from 4.1.2 to 4.1.3. - [Release notes](https://github.com/dalek-cryptography/curve25519-dalek/releases) - [Commits](https://github.com/dalek-cryptography/curve25519-dalek/compare/curve25519-4.1.2...curve25519-4.1.3) --- updated-dependencies: - dependency-name: curve25519-dalek dependency-type: direct:production ... Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 11 ++--------- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8e6734d7..68ccc3ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -808,9 +808,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.2" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ "cfg-if", "cpufeatures", @@ -818,7 +818,6 @@ dependencies = [ "digest", "fiat-crypto", "group", - "platforms", "rand_core", "rustc_version", "subtle", @@ -1911,12 +1910,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "platforms" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" - [[package]] name = "ppv-lite86" version = "0.2.17" diff --git a/Cargo.toml b/Cargo.toml index 35aabc5b..b00a4b98 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,7 +56,7 @@ clap = { version = "4.4.7", default-features = false } chrono = { version = "0.4.31", default-features = false } crypto-bigint = { version = "0.5.5", default-features = false } crossbeam = { version = "0.8.4", default-features = false } -curve25519-dalek = { version = "4.1.1", default-features = false } +curve25519-dalek = { version = "4.1.3", default-features = false } dalek-ff-group = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false } dashmap = { version = "5.5.3", default-features = false } dirs = { version = "5.0.1", default-features = false }