Merge branch 'main' into tracing

hinto.janai 2024-05-17 19:56:10 -04:00
commit 112f4c18e4
No known key found for this signature in database
GPG key ID: D47CE05FA175A499
32 changed files with 2553 additions and 2670 deletions


@@ -8,6 +8,7 @@ on:
  workflow_dispatch:

env:
  # Show colored output in CI.
  CARGO_TERM_COLOR: always
  # Show full panics.
  RUST_BACKTRACE: "full"
@@ -15,6 +16,8 @@ env:
  RUST_MIN_STACK: 8000000
  # Fail on documentation warnings.
  RUSTDOCFLAGS: '-D warnings'
  # Enable debug information generation for build dependencies.
  CARGO_PROFILE_DEV_BUILD_OVERRIDE_DEBUG: true
jobs:
  # Run format separately.

@@ -53,6 +56,8 @@ jobs:
        include:
          - os: windows-latest
            shell: msys2 {0}
            # GNU Windows is used as we need
            # `unistd.h` and more in `cryptonight/`.
            rust: stable-x86_64-pc-windows-gnu
          - os: macos-latest
            shell: bash
@@ -105,6 +110,19 @@ jobs:
          update: true
          install: mingw-w64-x86_64-toolchain mingw-w64-x86_64-boost msys2-runtime-devel git mingw-w64-x86_64-cmake mingw-w64-x86_64-ninja

      # HACK: 2024-05-14
      # GCC 14+ fails to build `lmdb-master-sys` with no clear error message:
      # <https://github.com/Cuprate/cuprate/pull/127>
      #
      # - MSYS2 repos carry older versions of packages
      # - pacman lets us manually downgrade from package files
      # - Note that `gcc` requires `gcc-libs`
      - name: Downgrade to GCC 13.2 (Windows)
        if: matrix.os == 'windows-latest'
        run: |
          wget https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-gcc-13.2.0-6-any.pkg.tar.zst https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-gcc-libs-13.2.0-6-any.pkg.tar.zst
          pacman -U --noconfirm mingw-w64-x86_64-gcc-13.2.0-6-any.pkg.tar.zst mingw-w64-x86_64-gcc-libs-13.2.0-6-any.pkg.tar.zst
      - name: Documentation
        run: cargo doc --workspace --all-features --no-deps

Cargo.lock generated

@@ -577,6 +577,35 @@ dependencies = [
 "windows",
]

[[package]]
name = "cuprate-p2p"
version = "0.1.0"
dependencies = [
"bytes",
"cuprate-helper",
"cuprate-test-utils",
"dashmap",
"fixed-bytes",
"futures",
"hex",
"indexmap 2.2.6",
"monero-address-book",
"monero-p2p",
"monero-pruning",
"monero-serai",
"monero-wire",
"pin-project",
"rand",
"rand_distr",
"rayon",
"thiserror",
"tokio",
"tokio-stream",
"tokio-util",
"tower",
"tracing",
]
[[package]]
name = "cuprate-test-utils"
version = "0.1.0"
@@ -657,6 +686,34 @@ dependencies = [
 "zeroize",
]

[[package]]
name = "dandelion_tower"
version = "0.1.0"
dependencies = [
"futures",
"proptest",
"rand",
"rand_distr",
"thiserror",
"tokio",
"tokio-util",
"tower",
"tracing",
]
[[package]]
name = "dashmap"
version = "5.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856"
dependencies = [
"cfg-if",
"hashbrown 0.14.5",
"lock_api",
"once_cell",
"parking_lot_core",
]
[[package]]
name = "diff"
version = "0.1.13"


@@ -11,6 +11,8 @@ members = [
    "net/fixed-bytes",
    "net/levin",
    "net/monero-wire",
    "p2p/cuprate-p2p",
    "p2p/dandelion",
    "p2p/monero-p2p",
    "p2p/address-book",
    "pruning",
@@ -49,6 +51,7 @@ crypto-bigint = { version = "0.5.5", default-features = false }
crossbeam = { version = "0.8.4", default-features = false }
curve25519-dalek = { version = "4.1.1", default-features = false }
dalek-ff-group = { git = "https://github.com/Cuprate/serai.git", rev = "347d4cf", default-features = false }
dashmap = { version = "5.5.3", default-features = false }
dirs = { version = "5.0.1", default-features = false }
futures = { version = "0.3.29", default-features = false }
hex = { version = "0.4.3", default-features = false }
@@ -60,6 +63,7 @@ paste = { version = "1.0.14", default-features = false }
pin-project = { version = "1.1.3", default-features = false }
randomx-rs = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "0028464", default-features = false }
rand = { version = "0.8.5", default-features = false }
rand_distr = { version = "0.4.3", default-features = false }
rayon = { version = "1.9.0", default-features = false }
serde_bytes = { version = "0.11.12", default-features = false }
serde_json = { version = "1.0.108", default-features = false }


@@ -2,11 +2,11 @@
Cuprate's database implementation.

- [1. Documentation](#1-documentation)
- [2. File structure](#2-file-structure)
    - [2.1 `src/`](#21-src)
    - [2.2 `src/backend/`](#22-srcbackend)
    - [2.3 `src/config/`](#23-srcconfig)
    - [2.4 `src/ops/`](#24-srcops)
    - [2.5 `src/service/`](#25-srcservice)
- [3. Backends](#3-backends)
    - [3.1 heed](#31-heed)
@@ -18,12 +18,26 @@ Cuprate's database implementation.
    - [4.1 Backend](#41-backend)
    - [4.2 Trait](#42-trait)
    - [4.3 ConcreteEnv](#43-concreteenv)
    - [4.4 ops](#44-ops)
    - [4.5 service](#45-service)
- [5. The service](#5-the-service)
    - [5.1 Initialization](#51-initialization)
    - [5.2 Requests](#52-requests)
    - [5.3 Responses](#53-responses)
    - [5.4 Thread model](#54-thread-model)
    - [5.5 Shutdown](#55-shutdown)
- [6. Syncing](#6-syncing)
- [7. Resizing](#7-resizing)
- [8. (De)serialization](#8-deserialization)
- [9. Schema](#9-schema)
    - [9.1 Tables](#91-tables)
    - [9.2 Multimap tables](#92-multimap-tables)
- [10. Known issues and tradeoffs](#10-known-issues-and-tradeoffs)
    - [10.1 Traits abstracting backends](#101-traits-abstracting-backends)
    - [10.2 Hot-swappable backends](#102-hot-swappable-backends)
    - [10.3 Copying unaligned bytes](#103-copying-unaligned-bytes)
    - [10.4 Endianness](#104-endianness)
    - [10.5 Extra table data](#105-extra-table-data)

---
@@ -36,7 +50,7 @@ Documentation for `database/` is split into 3 locations:
| `cuprate-database` | Practical usage documentation/warnings/notes/etc |
| Source file `// comments` | Implementation-specific details (e.g. how many reader threads to spawn?) |

This README serves as the implementation design document.

For actual practical usage, `cuprate-database`'s types and general usage are documented via standard Rust tooling.
@@ -60,7 +74,7 @@ The code within `src/` is also littered with some `grep`-able comments containing
| `TODO` | This must be implemented; There should be 0 of these in production code |
| `SOMEDAY` | This should be implemented... someday |

## 2. File structure
A quick reference of the structure of the folders & files in `cuprate-database`.

Note that `lib.rs/mod.rs` files are purely for re-exporting/visibility/lints, and contain no code. Each sub-directory has a corresponding `mod.rs`.
@@ -150,11 +164,7 @@ The `async`hronous request/response API other Cuprate crates use instead of managing
Each database's implementations of those `trait`s are located in its respective folder in `src/backend/${DATABASE_NAME}/`.

### 3.1 heed
The default database used is [`heed`](https://github.com/meilisearch/heed) (LMDB). The upstream versions from [`crates.io`](https://crates.io/crates/heed) are used. `LMDB` should not need to be installed as `heed` has a build script that pulls it in automatically.

`heed`'s filenames inside Cuprate's database folder (`~/.local/share/cuprate/database/`) are:
@@ -164,8 +174,8 @@ The upstream versions from [`crates.io`](https://crates.io/crates/heed) are used
| `lock.mdb` | Database lock file |

`heed`-specific notes:
- [There is a maximum reader limit](https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1372). Other potential processes (e.g. `xmrblocks`) that are also reading the `data.mdb` file need to be accounted for
- [LMDB does not work on remote filesystem](https://github.com/LMDB/lmdb/blob/b8e54b4c31378932b69f1298972de54a565185b1/libraries/liblmdb/lmdb.h#L129)

### 3.2 redb
The 2nd database backend is the 100% Rust [`redb`](https://github.com/cberner/redb).
@@ -181,7 +191,7 @@ The upstream versions from [`crates.io`](https://crates.io/crates/redb) are used
<!-- TODO: document DB on remote filesystem (does redb allow this?) -->

### 3.3 redb-memory
This backend is 100% the same as `redb`, except that it uses `redb::backend::InMemoryBackend`, which is a database that completely resides in memory instead of a file.

All other details about this should be the same as the normal `redb` backend.
@@ -193,20 +203,20 @@ The default maximum value size is [1012 bytes](https://docs.rs/sanakirja/1.4.1/s
As such, it is not implemented.

### 3.5 MDBX
[`MDBX`](https://erthink.github.io/libmdbx) was a candidate as a backend; however, MDBX deprecated the custom key/value comparison functions, which makes it a bit trickier to implement [`9.2 Multimap tables`](#92-multimap-tables). It is also quite similar to the main backend LMDB (of which it was originally a fork).

As such, it is not implemented (yet).
## 4. Layers
`cuprate_database` is logically abstracted into 5 layers, with each layer being built upon the last.

Starting from the lowest:
1. Backend
2. Trait
3. ConcreteEnv
4. `ops`
5. `service`

<!-- TODO: insert image here after database/ split -->

### 4.1 Backend
@@ -249,49 +259,78 @@ The equivalent objects in the backends themselves are:
- [`heed::Env`](https://docs.rs/heed/0.20.0/heed/struct.Env.html)
- [`redb::Database`](https://docs.rs/redb/2.1.0/redb/struct.Database.html)

This is the main object used when handling the database directly, although that is not strictly necessary as a user if the [`4.5 service`](#45-service) layer is used.

### 4.4 ops
These are Monero-specific functions that use the abstracted `trait` forms of the database.
Instead of dealing with the database directly:
- `get()`
- `delete()`

the `ops` layer provides more abstract functions that deal with commonly used Monero operations:
- `add_block()`
- `pop_block()`
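
To make the contrast concrete, here is a rough sketch (the function and table names below are illustrative, not the exact `ops` API):

```rust,ignore
// Lower layers: many small, generic reads/writes.
let height: BlockHeight = block_heights_table.get(&block_hash)?;
key_images_table.put(&key_image, &())?;

// `ops` layer: one Monero-level operation that performs
// all of the required table writes internally.
add_block(&verified_block, &mut tables)?;
```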
### 4.5 service
The final layer abstracts the database completely into a [Monero-specific `async` request/response API](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/types/src/service.rs#L18-L78) using [`tower::Service`](https://docs.rs/tower/latest/tower/trait.Service.html).

For more information on this layer, see the next section: [`5. The service`](#5-the-service).

## 5. The service
The main API `cuprate_database` exposes for other crates to use is the `cuprate_database::service` module.

This module exposes an `async` request/response API with `tower::Service`, backed by a threadpool, that allows reading/writing Monero-related data from/to the database.

`cuprate_database::service` itself manages the database using a separate writer thread & reader thread-pool, and uses the previously mentioned [`4.4 ops`](#44-ops) functions when responding to requests.

### 5.1 Initialization
The service is started simply by calling: [`cuprate_database::service::init()`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/service/free.rs#L23).

This function initializes the database, spawns threads, and returns a:
- Read handle to the database (cloneable)
- Write handle to the database (not cloneable)

These "handles" implement the `tower::Service` trait, which allows sending requests and receiving responses `async`hronously.
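
A minimal sketch of initialization (assuming a prepared `Config` value; the exact signature may differ slightly):

```rust,ignore
// Spawn the database threads and receive the two handles.
let (read_handle, write_handle) = cuprate_database::service::init(config)?;

// The read handle is cheaply cloneable, e.g. one per async task...
let another_read_handle = read_handle.clone();

// ...while the write handle is a single, non-cloneable object.
```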
### 5.2 Requests
Along with the 2 handles, there are 2 types of requests:
- [`ReadRequest`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/types/src/service.rs#L23-L90)
- [`WriteRequest`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/types/src/service.rs#L93-L105)

`ReadRequest` is for retrieving various types of information from the database.

`WriteRequest` currently only has 1 variant: to write a block to the database.

### 5.3 Responses
After sending one of the above requests using the read/write handle, the value returned is _not_ the response, but an `async`hronous channel that will eventually return the response:
```rust,ignore
// Send a request.
//                              tower::Service::call()
//                                     V
let response_channel: Channel = read_handle.call(ReadRequest::ChainHeight)?;

// Await the response.
let response: Response = response_channel.await?;

// Assert the response is what we expected.
assert!(matches!(response, Response::ChainHeight(_)));
```

After `await`ing the returned channel, a `Response` will eventually be returned when the `service` threadpool has fetched the value from the database and sent it off.

Both read/write request variants match in name with `Response` variants, i.e.
- `ReadRequest::ChainHeight` leads to `Response::ChainHeight`
- `WriteRequest::WriteBlock` leads to `Response::WriteBlockOk`
### 5.4 Thread model
As mentioned in the [`4. Layers`](#4-layers) section, the base database abstractions themselves are not concerned with parallelism; they are mostly functions to be called from a single thread.

However, the `cuprate_database::service` API _does_ have a thread model backing it.

When [`cuprate_database::service`'s initialization function](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/free.rs#L33-L44) is called, threads will be spawned and maintained until the user drops (disconnects) the returned handles.

The current behavior for thread count is:
- [1 writer thread](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/write.rs#L52-L66)
- [As many reader threads as there are system threads](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/read.rs#L104-L126)
@@ -307,7 +346,27 @@ The reader threads are managed by [`rayon`](https://docs.rs/rayon).
For an example of where multiple reader threads are used: given a request that asks if any key-image within a set already exists, `cuprate_database` will [split that work between the threads with `rayon`](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/read.rs#L490-L503).
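
A hypothetical sketch of that fan-out (the names here are illustrative, not the real internals):

```rust,ignore
use rayon::prelude::*;

// Check each key image in parallel on the `rayon` threadpool;
// the request is answered once every image has been checked.
let spent: Vec<bool> = key_images
    .par_iter()
    .map(|key_image| key_image_exists(key_image, &tables))
    .collect::<Result<_, _>>()?;
```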
### 5.5 Shutdown
Once the read/write handles are `Drop`ed, the backing thread(pool) will gracefully exit, automatically.
Note that the writer thread and reader threadpool aren't connected whatsoever; dropping the write handle will make the writer thread exit, but the reader handle is free to be held onto and can continue to be read from - and vice-versa for the write handle.
## 6. Syncing
`cuprate_database`'s database has 5 disk syncing modes.
1. FastThenSafe
1. Safe
1. Async
1. Threshold
1. Fast
The default mode is `Safe`.
This means that upon each transaction commit, all the data that was written will be fully synced to disk. This is the slowest, but safest mode of operation.
Note that upon any database `Drop`, whether via `service` or dropping the database directly, the current implementation will sync to disk regardless of any configuration.
For more information on the other modes, read the documentation [here](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/config/sync_mode.rs#L63-L144).
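
As a hypothetical sketch of how a mode is selected (the builder and field names here are illustrative, not the exact config API):

```rust,ignore
// Hypothetical: choose the disk-sync mode when building the database config.
let config = ConfigBuilder::new(db_directory)
    .sync_mode(SyncMode::Safe) // the default
    .build();
```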
## 7. Resizing
Database backends that require manual resizing will, by default, use a similar algorithm as `monerod`'s.
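
For intuition, a minimal sketch of a `monerod`-style fixed-step resize, assuming a 1 GiB growth step rounded up to the OS page size (the real constants and rounding may differ):

```rust
/// Hypothetical sketch: grow the memory map by 1 GiB,
/// rounded up to the nearest OS page boundary.
fn monero_style_resize(current_size: usize, page_size: usize) -> usize {
    const GROWTH_STEP: usize = 1 << 30; // 1 GiB
    let new_size = current_size + GROWTH_STEP;
    new_size.div_ceil(page_size) * page_size
}
```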
@@ -327,6 +386,8 @@ All types stored inside the database are either bytes already, or are perfectly
As such, they do not incur heavy (de)serialization costs when storing/fetching them from the database. The main (de)serialization used is [`bytemuck`](https://docs.rs/bytemuck)'s traits and casting functions.

The size & layout of types is stable across compiler versions, as they are set and determined with [`#[repr(C)]`](https://doc.rust-lang.org/nomicon/other-reprs.html#reprc) and `bytemuck`'s derive macros such as [`bytemuck::Pod`](https://docs.rs/bytemuck/latest/bytemuck/derive.Pod.html).

Note that the data stored in the tables are still type-safe; we still refer to the key and values within our tables by the type.

The main deserialization `trait` for database storage is: [`cuprate_database::Storable`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L16-L115).
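
As a rough illustration of why this is cheap (using `bytemuck` directly with its `derive` feature; the real `Storable` implementations differ in detail):

```rust
use bytemuck::{Pod, Zeroable};

/// A `#[repr(C)]` plain-old-data type: its in-memory layout _is_ its serialized form.
#[derive(Copy, Clone, Debug, PartialEq, Pod, Zeroable)]
#[repr(C)]
struct ExampleKey {
    amount: u64,
    amount_index: u64,
}

fn main() {
    let key = ExampleKey { amount: 1, amount_index: 123 };

    // "Serialization" is just viewing the bytes...
    let bytes: &[u8] = bytemuck::bytes_of(&key);
    assert_eq!(bytes.len(), 16);

    // ...and "deserialization" is a copy from (possibly unaligned) bytes.
    let read_back: ExampleKey = bytemuck::pod_read_unaligned(bytes);
    assert_eq!(read_back, key);
}
```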
@@ -362,3 +423,176 @@ Compatibility structs also exist for any `Storable` containers:
- [`StorableBytes`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L208-L241)

Again, it's unfortunate that these must be owned, although in `service`'s use-case, they would have to be owned anyway.
## 9. Schema
The following section contains Cuprate's database schema. It may change throughout the development of Cuprate; as such, nothing here is final.
### 9.1 Tables
The `CamelCase` names of the table headers documented here (e.g. `TxIds`) are the actual type name of the table within `cuprate_database`.
Note that words written within `code blocks` indicate a real type defined and usable within `cuprate_database`. Other standard types like u64 and type aliases (TxId) are written normally.
Within `cuprate_database::tables`, the below table is essentially defined as-is with [a macro](https://github.com/Cuprate/cuprate/blob/31ce89412aa174fc33754f22c9a6d9ef5ddeda28/database/src/tables.rs#L369-L470).
Many of the stored values share the same underlying data type but differ semantically; as such, a map of the aliases used and their real data types is also provided below.
| Alias | Real Type |
|----------------------------------------------------|-----------|
| BlockHeight, Amount, AmountIndex, TxId, UnlockTime | u64
| BlockHash, KeyImage, TxHash, PrunableHash | [u8; 32]
| Table | Key | Value | Description |
|-------------------|----------------------|--------------------|-------------|
| `BlockBlobs` | BlockHeight | `StorableVec<u8>` | Maps a block's height to a serialized byte form of a block
| `BlockHeights` | BlockHash | BlockHeight | Maps a block's hash to its height
| `BlockInfos` | BlockHeight | `BlockInfo` | Contains metadata of all blocks
| `KeyImages` | KeyImage | () | This table is a set with no value, it stores transaction key images
| `NumOutputs` | Amount | u64 | Maps an output's amount to the number of outputs with that amount
| `Outputs` | `PreRctOutputId` | `Output` | This table contains legacy CryptoNote outputs which have clear amounts. This table will not contain an output with 0 amount.
| `PrunedTxBlobs` | TxId | `StorableVec<u8>` | Contains pruned transaction blobs (even if the database is not pruned)
| `PrunableTxBlobs` | TxId | `StorableVec<u8>` | Contains the prunable part of a transaction
| `PrunableHashes` | TxId | PrunableHash | Contains the hash of the prunable part of a transaction
| `RctOutputs` | AmountIndex | `RctOutput` | Contains RingCT outputs mapped from their global RCT index
| `TxBlobs` | TxId | `StorableVec<u8>` | Serialized transaction blobs (bytes)
| `TxIds` | TxHash | TxId | Maps a transaction's hash to its index/ID
| `TxHeights` | TxId | BlockHeight | Maps a transaction's ID to the height of the block it comes from
| `TxOutputs` | TxId | `StorableVec<u64>` | Gives the amount indices of a transaction's outputs
| `TxUnlockTime` | TxId | UnlockTime | Stores the unlock time of a transaction (only if it has a non-zero lock time)
The definitions for aliases and types (e.g. `RctOutput`) are within the [`cuprate_database::types`](https://github.com/Cuprate/cuprate/blob/31ce89412aa174fc33754f22c9a6d9ef5ddeda28/database/src/types.rs#L51) module.
<!-- TODO(Boog900): We could split this table again into `RingCT (non-miner) Outputs` and `RingCT (miner) Outputs` as for miner outputs we can store the amount instead of commitment saving 24 bytes per miner output. -->
### 9.2 Multimap tables
When referencing outputs, Monero will [use the amount and the amount index](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/blockchain_db/lmdb/db_lmdb.cpp#L3447-L3449). This means 2 keys are needed to reach an output.
With LMDB you can set the `DUP_SORT` flag on a table and then set the key/value to:
```rust
Key = KEY_PART_1
```
```rust
Value = {
KEY_PART_2,
VALUE // The actual value we are storing.
}
```
Then you can set a custom value sorting function that only takes `KEY_PART_2` into account; this is how `monerod` does it.
This requires that the underlying database supports:
- multimap tables
- custom sort functions on values
- setting a cursor on a specific key/value
---
Another way to implement this is as follows:
```rust
Key = { KEY_PART_1, KEY_PART_2 }
```
```rust
Value = VALUE
```
Then the key type is simply used to look up the value; this is how `cuprate_database` does it.
For example, the key/value pair for outputs is:
```rust
PreRctOutputId => Output
```
where `PreRctOutputId` looks like this:
```rust
struct PreRctOutputId {
amount: u64,
amount_index: u64,
}
```
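
Under this scheme, reaching an output is a single keyed lookup on the composite key (a sketch; the table-access syntax is illustrative):

```rust,ignore
// Look up output 5 of amount 1: both "keys" together form the one real key.
let key = PreRctOutputId { amount: 1, amount_index: 5 };
let output: Output = outputs_table.get(&key)?;
```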
## 10. Known issues and tradeoffs
`cuprate_database` makes many tradeoffs, whether due to:
- Prioritizing certain values over others
- Not having a better solution
- Being "good enough"
This is a list of the larger ones, along with issues that don't have answers yet.
### 10.1 Traits abstracting backends
Although all database backends used are very similar, they have some crucial differences in small implementation details that must be worked around when conforming them to `cuprate_database`'s traits.
Put simply: using `cuprate_database`'s traits is less efficient and more awkward than using the backend directly.
For example:
- [Data types must be wrapped in compatibility layers when they otherwise wouldn't be](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/backend/heed/env.rs#L101-L116)
- [There are types that only apply to a specific backend, but are visible to all](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/error.rs#L86-L89)
- [There are extra layers of abstraction to smoothen the differences between all backends](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/env.rs#L62-L68)
- [Existing functionality of backends must be taken away, as it isn't supported in the others](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/database.rs#L27-L34)
This is a _tradeoff_ that `cuprate_database` takes, as:
- The backend itself is usually not the source of bottlenecks in the greater system, as such, small inefficiencies are OK
- None of the lost functionality is crucial for operation
- The ability to use, test, and swap between multiple database backends is [worth it](https://github.com/Cuprate/cuprate/pull/35#issuecomment-1952804393)
### 10.2 Hot-swappable backends
Using a different backend is really as simple as re-building `cuprate_database` with a different feature flag:
```bash
# Use LMDB.
cargo build --package cuprate-database --features heed
# Use redb.
cargo build --package cuprate-database --features redb
```
This is "good enough" for now, however ideally, this hot-swapping of backends would be able to be done at _runtime_.
As it is now, `cuprate_database` cannot compile both backends and swap based on user input at runtime; it must be compiled with a certain backend, which will produce a binary with only that backend.
This also means things like [CI testing multiple backends is awkward](https://github.com/Cuprate/cuprate/blob/main/.github/workflows/ci.yml#L132-L136), as we must re-compile with different feature flags instead.
### 10.3 Copying unaligned bytes
As mentioned in [`8. (De)serialization`](#8-deserialization), bytes are _copied_ when they are turned into a type `T` due to unaligned bytes being returned from database backends.
Using a regular reference cast results in an improperly aligned type `T`; [such a type even existing causes undefined behavior](https://doc.rust-lang.org/reference/behavior-considered-undefined.html). In our case, `bytemuck` saves us by panicking before this occurs.
Thus, when using `cuprate_database`'s database traits, an _owned_ `T` is returned.
This is doubly unfortunate for `&[u8]`, as it does not even need deserialization.
For example, `StorableVec` could have been this:
```rust
enum StorableBytes<'a, T: Storable> {
Owned(T),
Ref(&'a T),
}
```
but this would require supporting both types that must be copied regardless and the occasional `&[u8]` that can be returned without casting. This was hard to do in a generic way; thus, all `[u8]`s are copied and returned as owned `StorableVec`s.
This is a _tradeoff_ `cuprate_database` takes as:
- `bytemuck::pod_read_unaligned` is cheap enough
- The main API, `service`, needs to return owned values anyway
- Having no references removes a lot of lifetime complexity
The alternative is either:
- Using proper (de)serialization instead of casting (which comes with its own costs)
- Somehow fixing the alignment issues in the backends mentioned previously
### 10.4 Endianness
`cuprate_database`'s (de)serialization and storage of bytes are native-endian, as in, byte storage order will depend on the machine it is running on.
As Cuprate's build-targets are all little-endian ([big-endian by default machines barely exist](https://en.wikipedia.org/wiki/Endianness#Hardware)), this doesn't matter much and the byte ordering can be seen as a constant.
Practically, this means `cuprated`'s database files can be transferred across computers, as can `monerod`'s.
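
A small demonstration of what native-endian means here (plain Rust, not `cuprate_database` code):

```rust
fn main() {
    let height: u64 = 3_000_000;

    // The bytes written to disk follow the machine's byte order...
    let on_disk = height.to_ne_bytes();

    // ...which, on all of Cuprate's (little-endian) build targets,
    // is identical to the little-endian encoding.
    #[cfg(target_endian = "little")]
    assert_eq!(on_disk, height.to_le_bytes());
}
```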
### 10.5 Extra table data
Some of `cuprate_database`'s tables differ from `monerod`'s tables. For example, the way [`9.2 Multimap tables`](#92-multimap-tables) are implemented requires that the primary key is stored _for all_ entries, compared to `monerod` only needing to store it once.
For example:
```rust
// `monerod` only stores `amount: 1` once,
// `cuprated` stores it each time it appears.
struct PreRctOutputId { amount: 1, amount_index: 0 }
struct PreRctOutputId { amount: 1, amount_index: 1 }
```
This means `cuprated`'s database will be slightly larger than `monerod`'s.
Multimap tables are tricky to implement across all backends, so the current method `cuprate_database` uses will be "good enough" until usage shows that it must be optimized.


@@ -401,16 +401,16 @@ tables! {
    NumOutputs,
    Amount => u64,

    /// Pre-RCT output data.
    Outputs,
    PreRctOutputId => Output,

    /// Pruned transaction blobs (bytes).
    ///
    /// Contains the pruned portion of serialized transaction data.
    PrunedTxBlobs,
    TxId => PrunedBlob,

    /// Prunable transaction blobs (bytes).
    ///
    /// Contains the prunable portion of serialized transaction data.


@@ -1,33 +0,0 @@
[package]
name = "cuprate-database"
version = "0.0.1"
edition = "2021"
license = "AGPL-3.0-only"
# All Contributors on github
authors=[
"SyntheticBird45 <@someoneelse495495:matrix.org>",
"Boog900"
]
[features]
mdbx = ["dep:libmdbx"]
hse = []
[dependencies]
monero = {workspace = true, features = ["serde"]}
tiny-keccak = { version = "2.0", features = ["sha3"] }
serde = { workspace = true}
thiserror = {workspace = true }
bincode = { workspace = true }
libmdbx = { version = "0.3.1", optional = true }
[build]
linker="clang"
rustflags=[
"-Clink-arg=-fuse-ld=mold",
"-Zcf-protection=full",
"-Zsanitizer=cfi",
"-Crelocation-model=pie",
"-Cstack-protector=all",
]


@@ -1,14 +0,0 @@
Copyright (C) 2023 Cuprate Contributors
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.


@@ -1,78 +0,0 @@
//! ### Encoding module
//! The encoding module contains a trait that permits compatibility between `monero-rs` consensus encoding/decoding logic and `bincode` traits.
//! The database tables only accept types that implement [`bincode::Encode`] and [`bincode::Decode`], and since we can't implement these on `monero-rs` types directly,
//! we use a wrapper struct `Compat<T>` that permits us to use `monero-rs`'s `consensus_encode`/`consensus_decode` functions under bincode traits.
//! The choice of `bincode` comes from performance measurements at encoding time: the `bincode` implementation was sometimes 5 times faster than the `monero-rs` impl.
use bincode::{de::read::Reader, enc::write::Writer};
use monero::consensus::{Decodable, Encodable};
use std::{fmt::Debug, io::Read, ops::Deref};
#[derive(Debug, Clone)]
/// A single-tuple struct used to contain monero-rs types that implement [`monero::consensus::Encodable`] and [`monero::consensus::Decodable`]
pub struct Compat<T: Encodable + Decodable>(pub T);
/// A wrapper around a [`bincode::de::read::Reader`] type. Permits us to use [`std::io::Read`] and feed monero-rs functions with an actual `&[u8]`
pub struct ReaderCompat<'src, R: Reader>(pub &'src mut R);
// Actual implementation of `std::io::read` for `bincode`'s `Reader` types
impl<'src, R: Reader> Read for ReaderCompat<'src, R> {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
self.0
.read(buf)
.map_err(|_| std::io::Error::new(std::io::ErrorKind::Other, "bincode reader Error"))?;
Ok(buf.len())
}
}
// Convenient implementation. `Deref` and `From`
impl<T: Encodable + Decodable> Deref for Compat<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T: Encodable + Decodable> From<T> for Compat<T> {
fn from(value: T) -> Self {
Compat(value)
}
}
// TODO: Investigate specialization optimization
// Implementation of `bincode::Decode` for monero-rs `Decodable` type
impl<T: Encodable + Decodable + Debug> bincode::Decode for Compat<T> {
fn decode<D: bincode::de::Decoder>(
decoder: &mut D,
) -> Result<Self, bincode::error::DecodeError> {
Ok(Compat(
Decodable::consensus_decode(&mut ReaderCompat(decoder.reader()))
.map_err(|_| bincode::error::DecodeError::Other("Monero-rs decoding failed"))?,
))
}
}
// Implementation of `bincode::BorrowDecode` for monero-rs `Decodable` type
impl<'de, T: Encodable + Decodable + Debug> bincode::BorrowDecode<'de> for Compat<T> {
fn borrow_decode<D: bincode::de::BorrowDecoder<'de>>(
decoder: &mut D,
) -> Result<Self, bincode::error::DecodeError> {
Ok(Compat(
Decodable::consensus_decode(&mut ReaderCompat(decoder.borrow_reader()))
.map_err(|_| bincode::error::DecodeError::Other("Monero-rs decoding failed"))?,
))
}
}
// Implementation of `bincode::Encode` for monero-rs `Encodable` type
impl<T: Encodable + Decodable + Debug> bincode::Encode for Compat<T> {
fn encode<E: bincode::enc::Encoder>(
&self,
encoder: &mut E,
) -> Result<(), bincode::error::EncodeError> {
let writer = encoder.writer();
let buf = monero::consensus::serialize(&self.0);
writer.write(&buf)
}
}


@@ -1,53 +0,0 @@
//! ### Error module
//! This module contains all error abstractions used by the database crate. By implementing [`From<E>`] for the specific errors of storage engine crates, it lets us
//! handle any type of error that can happen more easily. This module does **NOT** contain interpretation of these errors, as those are defined for the blockchain abstraction. This is another difference
//! from monerod, which interprets these errors directly in its database functions:
//! ```cpp
//! /**
//! * @brief A base class for BlockchainDB exceptions
//! */
//! class DB_EXCEPTION : public std::exception
//! ```
//! see `blockchain_db/blockchain_db.h` in monerod `src/` folder for more details.
#[derive(thiserror::Error, Debug)]
/// `DB_FAILURES` is an enum for backend-agnostic, internal database errors. The `From` trait must be implemented for the specific backend errors to map them to `DB_FAILURES`.
pub enum DB_FAILURES {
#[error("MDBX returned an error {0}")]
MDBX_Error(#[from] libmdbx::Error),
#[error("\n<DB_FAILURES::EncodingError> Failed to encode some data : `{0}`")]
SerializeIssue(DB_SERIAL),
#[error("\nObject already exist in the database : {0}")]
AlreadyExist(&'static str),
#[error("NotFound? {0}")]
NotFound(&'static str),
#[error("\n<DB_FAILURES::Other> `{0}`")]
Other(&'static str),
#[error(
"\n<DB_FAILURES::FailedToCommit> A transaction tried to commit to the db, but failed."
)]
FailedToCommit,
}
#[derive(thiserror::Error, Debug)]
pub enum DB_SERIAL {
#[error("An object failed to be serialized into bytes. It is likely an issue from monero-rs library. Please report this error on cuprate's github : https://github.com/Cuprate/cuprate/issues")]
ConsensusEncode,
#[error("Bytes failed to be deserialized into the requested object. It is likely an issue from monero-rs library. Please report this error on cuprate's github : https://github.com/Cuprate/cuprate/issues")]
ConsensusDecode(Vec<u8>),
#[error("monero-rs encoding|decoding logic failed : {0}")]
MoneroEncode(#[from] monero::consensus::encode::Error),
#[error("Bincode failed to decode a type from the database : {0}")]
BincodeDecode(#[from] bincode::error::DecodeError),
#[error("Bincode failed to encode a type for the database : {0}")]
BincodeEncode(#[from] bincode::error::EncodeError),
}


@@ -1,11 +0,0 @@
/* There is nothing here as no wrapper exist for HSE yet */
/* KVS supported functions :
-------------------------------------
hse_kvs_delete
hse_kvs_get
hse_kvs_name_get
hse_kvs_param_get
hse_kvs_prefix_delete
hse_kvs_put
*/

File diff suppressed because it is too large


@@ -1,221 +0,0 @@
// Copyright (C) 2023 Cuprate Contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! The cuprate-db crate implements (as its name suggests) the relations between the blockchain/txpool objects and their databases.
//! `lib.rs` contains all the generics, traits and specifications for interfaces between the blockchain and a backend-agnostic database.
//! Every other file in this folder is an implementation of these traits/methods for a real storage engine.
//!
//! At the moment, the only storage engine available is MDBX.
//! The next storage engine planned is HSE (Heterogeneous Storage Engine) from Micron.
//!
//! For more information, please consult these docs:
#![deny(unused_attributes)]
#![forbid(unsafe_code)]
#![allow(non_camel_case_types)]
#![deny(clippy::expect_used, clippy::panic)]
#![allow(dead_code, unused_macros)] // temporary
use monero::{util::ringct::RctSig, Block, BlockHeader, Hash};
use std::ops::Range;
use thiserror::Error;
#[cfg(feature = "mdbx")]
pub mod mdbx;
//#[cfg(feature = "hse")]
//pub mod hse;
pub mod encoding;
pub mod error;
pub mod interface;
pub mod table;
pub mod types;
const DEFAULT_BLOCKCHAIN_DATABASE_DIRECTORY: &str = "blockchain";
const DEFAULT_TXPOOL_DATABASE_DIRECTORY: &str = "txpool_mem";
const BINCODE_CONFIG: bincode::config::Configuration<
bincode::config::LittleEndian,
bincode::config::Fixint,
> = bincode::config::standard().with_fixed_int_encoding();
// ------------------------------------------| Database |------------------------------------------
pub mod database {
//! This module contains the Database abstraction trait. Any key/value storage engine implemented needs
//! to fulfill these associated types and functions in order to be usable. This module also contains the
//! Interface struct which is used by the DB Reactor to interact with the database.
use crate::{
error::DB_FAILURES,
transaction::{Transaction, WriteTransaction},
};
use std::{ops::Deref, path::PathBuf, sync::Arc};
/// The `Database` trait implements all the methods necessary to generate transactions as well as execute specific functions. It also implements generic associated types to identify the
/// different transaction modes (read & write) and its native errors.
pub trait Database<'a> {
type TX: Transaction<'a>;
type TXMut: WriteTransaction<'a>;
type Error: Into<DB_FAILURES>;
// Create a transaction from the database
fn tx(&'a self) -> Result<Self::TX, Self::Error>;
// Create a mutable transaction from the database
fn tx_mut(&'a self) -> Result<Self::TXMut, Self::Error>;
// Open a database from the specified path
fn open(path: PathBuf) -> Result<Self, Self::Error>
where
Self: std::marker::Sized;
// Check if the database is built.
fn check_all_tables_exist(&'a self) -> Result<(), Self::Error>;
// Build the database
fn build(&'a self) -> Result<(), Self::Error>;
}
/// `Interface` is a struct containing a shared pointer to the database and transactions to be used by the implemented methods of Interface.
pub struct Interface<'a, D: Database<'a>> {
pub db: Arc<D>,
pub tx: Option<<D as Database<'a>>::TXMut>,
}
// Convenient implementations for database
impl<'service, D: Database<'service>> Interface<'service, D> {
fn from(db: Arc<D>) -> Result<Self, DB_FAILURES> {
Ok(Self { db, tx: None })
}
fn open(&'service mut self) -> Result<(), DB_FAILURES> {
let tx = self.db.tx_mut().map_err(Into::into)?;
self.tx = Some(tx);
Ok(())
}
}
impl<'service, D: Database<'service>> Deref for Interface<'service, D> {
type Target = <D as Database<'service>>::TXMut;
fn deref(&self) -> &Self::Target {
return self.tx.as_ref().unwrap();
}
}
}
// ------------------------------------------| DatabaseTx |------------------------------------------
pub mod transaction {
//! This module contains the abstractions of Transactional Key/Value database functions.
//! Any key/value database/storage engine can be implemented easily for Cuprate as long as
//! these functions or equivalent logic exist for it.
use crate::{
error::DB_FAILURES,
table::{DupTable, Table},
};
// Abstraction of a read-only cursor, for simple tables
#[allow(clippy::type_complexity)]
pub trait Cursor<'t, T: Table> {
fn first(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES>;
fn get_cursor(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES>;
fn last(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES>;
fn next(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES>;
fn prev(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES>;
fn set(&mut self, key: &T::Key) -> Result<Option<T::Value>, DB_FAILURES>;
}
// Abstraction of a read-only cursor with support for duplicated tables. DupCursor inherit Cursor methods as
// a duplicated table can be treated as a simple table.
#[allow(clippy::type_complexity)]
pub trait DupCursor<'t, T: DupTable>: Cursor<'t, T> {
fn first_dup(&mut self) -> Result<Option<(T::SubKey, T::Value)>, DB_FAILURES>;
fn get_dup(
&mut self,
key: &T::Key,
subkey: &T::SubKey,
) -> Result<Option<T::Value>, DB_FAILURES>;
fn last_dup(&mut self) -> Result<Option<(T::SubKey, T::Value)>, DB_FAILURES>;
fn next_dup(&mut self) -> Result<Option<(T::Key, (T::SubKey, T::Value))>, DB_FAILURES>;
fn prev_dup(&mut self) -> Result<Option<(T::Key, (T::SubKey, T::Value))>, DB_FAILURES>;
}
// Abstraction of a read-write cursor, for simple tables. WriteCursor inherit Cursor methods.
pub trait WriteCursor<'t, T: Table>: Cursor<'t, T> {
fn put_cursor(&mut self, key: &T::Key, value: &T::Value) -> Result<(), DB_FAILURES>;
fn del(&mut self) -> Result<(), DB_FAILURES>;
}
// Abstraction of a read-write cursor with support for duplicated tables. DupWriteCursor inherit DupCursor and WriteCursor methods.
pub trait DupWriteCursor<'t, T: DupTable>: WriteCursor<'t, T> {
fn put_cursor_dup(
&mut self,
key: &T::Key,
subkey: &T::SubKey,
value: &T::Value,
) -> Result<(), DB_FAILURES>;
/// Delete all data associated with its key
fn del_nodup(&mut self) -> Result<(), DB_FAILURES>;
}
// Abstraction of a read-only transaction.
pub trait Transaction<'a>: Send + Sync {
type Cursor<T: Table>: Cursor<'a, T>;
type DupCursor<T: DupTable>: DupCursor<'a, T> + Cursor<'a, T>;
fn get<T: Table>(&self, key: &T::Key) -> Result<Option<T::Value>, DB_FAILURES>;
fn commit(self) -> Result<(), DB_FAILURES>;
fn cursor<T: Table>(&self) -> Result<Self::Cursor<T>, DB_FAILURES>;
fn cursor_dup<T: DupTable>(&self) -> Result<Self::DupCursor<T>, DB_FAILURES>;
fn num_entries<T: Table>(&self) -> Result<usize, DB_FAILURES>;
}
// Abstraction of a read-write transaction. WriteTransaction inherits Transaction methods.
pub trait WriteTransaction<'a>: Transaction<'a> {
type WriteCursor<T: Table>: WriteCursor<'a, T>;
type DupWriteCursor<T: DupTable>: DupWriteCursor<'a, T> + DupCursor<'a, T>;
fn put<T: Table>(&self, key: &T::Key, value: &T::Value) -> Result<(), DB_FAILURES>;
fn delete<T: Table>(
&self,
key: &T::Key,
value: &Option<T::Value>,
) -> Result<(), DB_FAILURES>;
fn clear<T: Table>(&self) -> Result<(), DB_FAILURES>;
fn write_cursor<T: Table>(&self) -> Result<Self::WriteCursor<T>, DB_FAILURES>;
fn write_cursor_dup<T: DupTable>(&self) -> Result<Self::DupWriteCursor<T>, DB_FAILURES>;
}
}


@@ -1,474 +0,0 @@
//! ### MDBX implementation
//! This module contains the implementation of all the database traits for the MDBX storage engine.
//! This include basic transactions methods, cursors and errors conversion.
use crate::{
database::Database,
error::{DB_FAILURES, DB_SERIAL},
table::{self, DupTable, Table},
transaction::{Transaction, WriteTransaction},
BINCODE_CONFIG,
};
use libmdbx::{
Cursor, DatabaseFlags, DatabaseKind, Geometry, Mode, PageSize, SyncMode, TableFlags,
TransactionKind, WriteFlags, RO, RW,
};
use std::ops::Range;
// Constant used in mdbx implementation
const MDBX_DEFAULT_SYNC_MODE: SyncMode = SyncMode::Durable;
const MDBX_MAX_MAP_SIZE: usize = 4 * 1024usize.pow(3); // 4GB
const MDBX_GROWTH_STEP: isize = 100 * 1024isize.pow(2); // 100MB
const MDBX_PAGE_SIZE: Option<PageSize> = None;
const MDBX_GEOMETRY: Geometry<Range<usize>> = Geometry {
size: Some(0..MDBX_MAX_MAP_SIZE),
growth_step: Some(MDBX_GROWTH_STEP),
shrink_threshold: None,
page_size: MDBX_PAGE_SIZE,
};
/// [`mdbx_decode`] is a function that deserializes the supplied bytes using the `bincode::decode_from_slice(src, BINCODE_CONFIG)`
/// function. Returns `Err(DB_FAILURES::SerializeIssue(DB_SERIAL::BincodeDecode(err)))` if it failed to decode the value. It is used for clarity purposes.
fn mdbx_decode<T: bincode::Decode>(src: &[u8]) -> Result<(T, usize), DB_FAILURES> {
bincode::decode_from_slice(src, BINCODE_CONFIG)
.map_err(|e| DB_FAILURES::SerializeIssue(DB_SERIAL::BincodeDecode(e)))
}
/// [`mdbx_encode`] is a function that serializes a given value into a vector using the `bincode::encode_to_vec(src, BINCODE_CONFIG)`
/// function. Returns `Err(DB_FAILURES::SerializeIssue(DB_SERIAL::BincodeEncode(err)))` if it failed to encode the value. It is used for clarity purposes.
fn mdbx_encode<T: bincode::Encode>(src: &T) -> Result<Vec<u8>, DB_FAILURES> {
bincode::encode_to_vec(src, BINCODE_CONFIG)
.map_err(|e| DB_FAILURES::SerializeIssue(DB_SERIAL::BincodeEncode(e)))
}
/// [`mdbx_open_table`] is a simple function used for syntax clarity. It tries to open the table, and returns a `DB_FAILURES` if it failed.
fn mdbx_open_table<'db, K: TransactionKind, E: DatabaseKind, T: Table>(
tx: &'db libmdbx::Transaction<'db, K, E>,
) -> Result<libmdbx::Table, DB_FAILURES> {
tx.open_table(Some(T::TABLE_NAME))
.map_err(std::convert::Into::<DB_FAILURES>::into)
}
/// [`cursor_pair_decode`] is a function defining a conditional return used in (almost) every cursor function. If a key/value pair actually exists at the cursor,
/// the two values are decoded using the `mdbx_decode` function. Returns `Err(DB_FAILURES::SerializeIssue(DB_SERIAL::BincodeDecode(err)))` if it failed to decode a value.
/// It is used for clarity purposes.
fn cursor_pair_decode<L: bincode::Decode, R: bincode::Decode>(
pair: Option<(Vec<u8>, Vec<u8>)>,
) -> Result<Option<(L, R)>, DB_FAILURES> {
if let Some(pair) = pair {
let decoded_key = mdbx_decode(pair.0.as_slice())?;
let decoded_value = mdbx_decode(pair.1.as_slice())?;
Ok(Some((decoded_key.0, decoded_value.0)))
} else {
Ok(None)
}
}
// Implementation of the database trait with mdbx types
impl<'a, E> Database<'a> for libmdbx::Database<E>
where
E: DatabaseKind,
{
type TX = libmdbx::Transaction<'a, RO, E>;
type TXMut = libmdbx::Transaction<'a, RW, E>;
type Error = libmdbx::Error;
// Open a Read-Only transaction
fn tx(&'a self) -> Result<Self::TX, Self::Error> {
self.begin_ro_txn()
}
// Open a Read-Write transaction
fn tx_mut(&'a self) -> Result<Self::TXMut, Self::Error> {
self.begin_rw_txn()
}
// Open the database with the given path
fn open(path: std::path::PathBuf) -> Result<Self, Self::Error> {
let db: libmdbx::Database<E> = libmdbx::Database::new()
.set_flags(DatabaseFlags::from(Mode::ReadWrite {
sync_mode: MDBX_DEFAULT_SYNC_MODE,
}))
.set_geometry(MDBX_GEOMETRY)
.set_max_readers(32)
.set_max_tables(15)
.open(path.as_path())?;
Ok(db)
}
// Open each tables to verify if the database is complete.
fn check_all_tables_exist(&'a self) -> Result<(), Self::Error> {
let ro_tx = self.begin_ro_txn()?;
// ----- BLOCKS -----
ro_tx.open_table(Some(table::blockhash::TABLE_NAME))?;
ro_tx.open_table(Some(table::blockmetadata::TABLE_NAME))?;
ro_tx.open_table(Some(table::blocks::TABLE_NAME))?;
ro_tx.open_table(Some(table::altblock::TABLE_NAME))?;
// ------ TXNs ------
ro_tx.open_table(Some(table::txspruned::TABLE_NAME))?;
ro_tx.open_table(Some(table::txsprunablehash::TABLE_NAME))?;
ro_tx.open_table(Some(table::txsprunabletip::TABLE_NAME))?;
ro_tx.open_table(Some(table::txsprunable::TABLE_NAME))?;
ro_tx.open_table(Some(table::txsoutputs::TABLE_NAME))?;
ro_tx.open_table(Some(table::txsidentifier::TABLE_NAME))?;
// ---- OUTPUTS -----
ro_tx.open_table(Some(table::prerctoutputmetadata::TABLE_NAME))?;
ro_tx.open_table(Some(table::outputmetadata::TABLE_NAME))?;
// ---- SPT KEYS ----
ro_tx.open_table(Some(table::spentkeys::TABLE_NAME))?;
// --- PROPERTIES ---
ro_tx.open_table(Some(table::properties::TABLE_NAME))?;
Ok(())
}
// Construct the table of the database
fn build(&'a self) -> Result<(), Self::Error> {
let rw_tx = self.begin_rw_txn()?;
// Constructing the tables
// ----- BLOCKS -----
rw_tx.create_table(
Some(table::blockhash::TABLE_NAME),
TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
)?;
rw_tx.create_table(
Some(table::blockmetadata::TABLE_NAME),
TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
)?;
rw_tx.create_table(Some(table::blocks::TABLE_NAME), TableFlags::INTEGER_KEY)?;
rw_tx.create_table(Some(table::altblock::TABLE_NAME), TableFlags::INTEGER_KEY)?;
// ------ TXNs ------
rw_tx.create_table(Some(table::txspruned::TABLE_NAME), TableFlags::INTEGER_KEY)?;
rw_tx.create_table(
Some(table::txsprunable::TABLE_NAME),
TableFlags::INTEGER_KEY,
)?;
rw_tx.create_table(
Some(table::txsprunablehash::TABLE_NAME),
TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
)?;
rw_tx.create_table(
Some(table::txsprunabletip::TABLE_NAME),
TableFlags::INTEGER_KEY,
)?;
rw_tx.create_table(
Some(table::txsoutputs::TABLE_NAME),
TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
)?;
rw_tx.create_table(
Some(table::txsidentifier::TABLE_NAME),
TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
)?;
// ---- OUTPUTS -----
rw_tx.create_table(
Some(table::prerctoutputmetadata::TABLE_NAME),
TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
)?;
rw_tx.create_table(
Some(table::outputmetadata::TABLE_NAME),
TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
)?;
// ---- SPT KEYS ----
rw_tx.create_table(
Some(table::spentkeys::TABLE_NAME),
TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
)?;
// --- PROPERTIES ---
rw_tx.create_table(Some(table::properties::TABLE_NAME), TableFlags::INTEGER_KEY)?;
rw_tx.commit()?;
Ok(())
}
}
// Implementation of the Cursor trait for mdbx's Cursors
impl<'a, T, R> crate::transaction::Cursor<'a, T> for Cursor<'a, R>
where
T: Table,
R: TransactionKind,
{
fn first(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES> {
let pair = self
.first::<Vec<u8>, Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
cursor_pair_decode(pair)
}
fn get_cursor(
&mut self,
) -> Result<Option<(<T as Table>::Key, <T as Table>::Value)>, DB_FAILURES> {
let pair = self
.get_current::<Vec<u8>, Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
cursor_pair_decode(pair)
}
fn last(&mut self) -> Result<Option<(<T as Table>::Key, <T as Table>::Value)>, DB_FAILURES> {
let pair = self
.last::<Vec<u8>, Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
cursor_pair_decode(pair)
}
fn next(&mut self) -> Result<Option<(<T as Table>::Key, <T as Table>::Value)>, DB_FAILURES> {
let pair = self
.next::<Vec<u8>, Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
cursor_pair_decode(pair)
}
fn prev(&mut self) -> Result<Option<(<T as Table>::Key, <T as Table>::Value)>, DB_FAILURES> {
let pair = self
.prev::<Vec<u8>, Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
cursor_pair_decode(pair)
}
fn set(&mut self, key: &T::Key) -> Result<Option<<T as Table>::Value>, DB_FAILURES> {
let encoded_key = mdbx_encode(key)?;
let value = self
.set::<Vec<u8>>(&encoded_key)
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
if let Some(value) = value {
return Ok(Some(mdbx_decode(value.as_slice())?.0));
}
Ok(None)
}
}
// Implementation of the DupCursor trait for mdbx's Cursors
impl<'t, T, R> crate::transaction::DupCursor<'t, T> for Cursor<'t, R>
where
R: TransactionKind,
T: DupTable,
{
fn first_dup(&mut self) -> Result<Option<(T::SubKey, T::Value)>, DB_FAILURES> {
let value = self
.first_dup::<Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
if let Some(value) = value {
return Ok(Some(mdbx_decode(value.as_slice())?.0));
}
Ok(None)
}
fn get_dup(
&mut self,
key: &T::Key,
subkey: &T::SubKey,
) -> Result<Option<<T>::Value>, DB_FAILURES> {
let (encoded_key, encoded_subkey) = (mdbx_encode(key)?, mdbx_encode(subkey)?);
let value = self
.get_both::<Vec<u8>>(&encoded_key, &encoded_subkey)
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
if let Some(value) = value {
return Ok(Some(mdbx_decode(value.as_slice())?.0));
}
Ok(None)
}
fn last_dup(&mut self) -> Result<Option<(T::SubKey, T::Value)>, DB_FAILURES> {
let value = self
.last_dup::<Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
if let Some(value) = value {
return Ok(Some(mdbx_decode(value.as_slice())?.0));
}
Ok(None)
}
fn next_dup(&mut self) -> Result<Option<(T::Key, (T::SubKey, T::Value))>, DB_FAILURES> {
let pair = self
.next_dup::<Vec<u8>, Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
if let Some(pair) = pair {
let (decoded_key, decoded_value) = (
mdbx_decode(pair.0.as_slice())?,
mdbx_decode(pair.1.as_slice())?,
);
return Ok(Some((decoded_key.0, decoded_value.0)));
}
Ok(None)
}
fn prev_dup(&mut self) -> Result<Option<(T::Key, (T::SubKey, T::Value))>, DB_FAILURES> {
let pair = self
.prev_dup::<Vec<u8>, Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
if let Some(pair) = pair {
let (decoded_key, decoded_value) = (
mdbx_decode(pair.0.as_slice())?,
mdbx_decode(pair.1.as_slice())?,
);
return Ok(Some((decoded_key.0, decoded_value.0)));
}
Ok(None)
}
}
// Implementation of the WriteCursor trait for mdbx's Cursors in RW permission
impl<'a, T> crate::transaction::WriteCursor<'a, T> for Cursor<'a, RW>
where
T: Table,
{
fn put_cursor(&mut self, key: &T::Key, value: &T::Value) -> Result<(), DB_FAILURES> {
let (encoded_key, encoded_value) = (mdbx_encode(key)?, mdbx_encode(value)?);
self.put(&encoded_key, &encoded_value, WriteFlags::empty())
.map_err(Into::into)
}
fn del(&mut self) -> Result<(), DB_FAILURES> {
self.del(WriteFlags::empty()).map_err(Into::into)
}
}
// Implementation of the DupWriteCursor trait for mdbx's Cursors in RW permission
impl<'a, T> crate::transaction::DupWriteCursor<'a, T> for Cursor<'a, RW>
where
T: DupTable,
{
fn put_cursor_dup(
&mut self,
key: &<T>::Key,
subkey: &<T as DupTable>::SubKey,
value: &<T>::Value,
) -> Result<(), DB_FAILURES> {
let (encoded_key, mut encoded_subkey, mut encoded_value) =
(mdbx_encode(key)?, mdbx_encode(subkey)?, mdbx_encode(value)?);
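// DUPSORT data slots hold `subkey || value`: the encoded value is appended
// to the encoded subkey, and the result is written as the data under `key`.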
encoded_subkey.append(&mut encoded_value);
self.put(
encoded_key.as_slice(),
encoded_subkey.as_slice(),
WriteFlags::empty(),
)
.map_err(Into::into)
}
fn del_nodup(&mut self) -> Result<(), DB_FAILURES> {
self.del(WriteFlags::NO_DUP_DATA).map_err(Into::into)
}
}
// Implementation of the Transaction trait for mdbx's Transactions
impl<'a, E, R: TransactionKind> Transaction<'a> for libmdbx::Transaction<'_, R, E>
where
E: DatabaseKind,
{
type Cursor<T: Table> = Cursor<'a, R>;
type DupCursor<T: DupTable> = Cursor<'a, R>;
fn get<T: Table>(&self, key: &T::Key) -> Result<Option<T::Value>, DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
let encoded_key = mdbx_encode(key)?;
let value = self
.get::<Vec<u8>>(&table, &encoded_key)
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
if let Some(value) = value {
return Ok(Some(mdbx_decode(value.as_slice())?.0));
}
Ok(None)
}
fn cursor<T: Table>(&self) -> Result<Self::Cursor<T>, DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
self.cursor(&table).map_err(Into::into)
}
fn commit(self) -> Result<(), DB_FAILURES> {
let b = self
.commit()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
if b {
Ok(())
} else {
Err(DB_FAILURES::FailedToCommit)
}
}
fn cursor_dup<T: DupTable>(&self) -> Result<Self::DupCursor<T>, DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
self.cursor(&table).map_err(Into::into)
}
fn num_entries<T: Table>(&self) -> Result<usize, DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
let stat = self.table_stat(&table)?;
Ok(stat.entries())
}
}
// Implementation of the Transaction trait for mdbx's Transactions with RW permissions
impl<'a, E> WriteTransaction<'a> for libmdbx::Transaction<'a, RW, E>
where
E: DatabaseKind,
{
type WriteCursor<T: Table> = Cursor<'a, RW>;
type DupWriteCursor<T: DupTable> = Cursor<'a, RW>;
fn put<T: Table>(&self, key: &T::Key, value: &T::Value) -> Result<(), DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
let (encoded_key, encoded_value) = (mdbx_encode(key)?, mdbx_encode(value)?);
self.put(&table, encoded_key, encoded_value, WriteFlags::empty())
.map_err(Into::into)
}
fn delete<T: Table>(&self, key: &T::Key, value: &Option<T::Value>) -> Result<(), DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
let encoded_key = mdbx_encode(key)?;
if let Some(value) = value {
let encoded_value = mdbx_encode(value)?;
return self
.del(&table, encoded_key, Some(encoded_value.as_slice()))
.map(|_| ())
.map_err(Into::into);
}
self.del(&table, encoded_key, None)
.map(|_| ())
.map_err(Into::into)
}
fn clear<T: Table>(&self) -> Result<(), DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
self.clear_table(&table).map_err(Into::into)
}
fn write_cursor<T: Table>(&self) -> Result<Self::WriteCursor<T>, DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
self.cursor(&table).map_err(Into::into)
}
fn write_cursor_dup<T: DupTable>(&self) -> Result<Self::DupWriteCursor<T>, DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
self.cursor(&table).map_err(Into::into)
}
}

View file

@ -1,181 +0,0 @@
//! ### Table module
//! This module contains the definitions of the [`Table`] and [`DupTable`] traits, and the actual tables used in the database.
//! [`DupTable`] is just a marker trait declaring that a table supports DUPSORT|DUPFIXED operations (as of now we don't know the equivalent for HSE).
//! Every table is defined with docs explaining its purpose and the types of its key and data.
//! For more details please look at Cuprate's book: <link to cuprate book>
use crate::{
encoding::Compat,
types::{
/*OutTx,*/ AltBlock, BlockMetadata, /*RctOutkey,*/ OutputMetadata,
TransactionPruned, TxIndex, /*OutAmountIdx,*/ /*KeyImage,*/ TxOutputIdx,
},
};
use bincode::{de::Decode, enc::Encode};
use monero::{blockdata::transaction::KeyImage, Block, Hash};
/// A trait describing how to interact with a table in the database. It is implemented on an empty struct to specify the table's name and associated types. These associated
/// types are used to simplify the deserialization process.
pub trait Table: Send + Sync + 'static + Clone {
// name of the table
const TABLE_NAME: &'static str;
    // Definition of the table's key & value types
type Key: Encode + Decode;
type Value: Encode + Decode;
}
/// A trait implementing a table with duplicated data support.
pub trait DupTable: Table {
// Subkey of the table (prefix of the data)
type SubKey: Encode + Decode;
}
/// This declarative macro declares a new empty struct and implements [`Table`] on it with the specified name, key and value types.
macro_rules! impl_table {
( $(#[$docs:meta])* $table:ident , $key:ty , $value:ty ) => {
#[derive(Clone)]
$(#[$docs])*
pub(crate) struct $table;
impl Table for $table {
const TABLE_NAME: &'static str = stringify!($table);
type Key = $key;
type Value = $value;
}
};
}
/// This declarative macro extends the original `impl_table!` macro by also implementing the [`DupTable`] trait.
macro_rules! impl_duptable {
($(#[$docs:meta])* $table:ident, $key:ty, $subkey:ty, $value:ty) => {
impl_table!($(#[$docs])* $table, $key, $value);
impl DupTable for $table {
type SubKey = $subkey;
}
};
}
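For reference, a sketch of what one invocation expands to (the real expansion also forwards the doc attributes onto the struct):

```rust
// Roughly what `impl_duptable!(blockhash, (), Compat<Hash>, u64)` expands to:
#[derive(Clone)]
pub(crate) struct blockhash;

impl Table for blockhash {
    // `stringify!` turns the identifier into the table name.
    const TABLE_NAME: &'static str = "blockhash";
    type Key = ();
    type Value = u64;
}

impl DupTable for blockhash {
    type SubKey = Compat<Hash>;
}
```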
// ------------------------------------------| Tables definition |------------------------------------------
// ----- BLOCKS -----
impl_duptable!(
    /// `blockhash` is a table defining a relation between a block's hash and its height. Its primary use is to quickly find a block's height from its hash.
blockhash,
(),
Compat<Hash>,
u64
);
impl_duptable!(
    /// `blockmetadata` stores block metadata alongside the corresponding block hash. The block metadata contains the total_coins_generated, weight, long_term_block_weight & cumulative RingCT count.
blockmetadata,
(),
u64,
BlockMetadata
);
impl_table!(
    /// `blocks` stores blocks' bodies, which can be fetched by height. A block's body contains the coinbase transaction and the hashes of its mined transactions.
blocks,
u64,
Compat<Block>
);
/*
impl_table!(
/// `blockhfversion` keeps track of each block's hard fork version. If an outdated node continues to run after a hard fork, it needs to know, after updating, which blocks need to be updated.
blockhfversion, u64, u8);
*/
impl_table!(
    /// `altblock` is a table that permits the storage of blocks from an alternative chain, which may cause a re-org. These blocks can be fetched by their corresponding hash.
altblock,
Compat<Hash>,
AltBlock
);
// ------- TXNs -------
impl_table!(
    /// `txspruned` is a table storing [`TransactionPruned`] (pruned transactions). These can be fetched by the corresponding transaction ID.
txspruned,
u64,
TransactionPruned
);
impl_table!(
    /// `txsprunable` is a table storing the prunable part of transactions (signatures and RctSig), stored as raw bytes. These can be fetched by the corresponding transaction ID.
txsprunable,
u64,
Vec<u8>
);
impl_duptable!(
    /// `txsprunablehash` is a table storing the hashes of the prunable part of transactions. These hashes can be fetched by the corresponding transaction ID.
txsprunablehash,
u64,
(),
Compat<Hash>
);
impl_table!(
    /// `txsprunabletip` is a table used for optimization purposes. It records the height of the block a transaction belongs to, for as long as that block is within the tip blocks. These entries can be fetched by the corresponding transaction ID.
txsprunabletip,
u64,
u64
);
impl_duptable!(
    /// `txsoutputs` is a table storing the output indices used in a transaction. These can be fetched by the corresponding transaction ID.
txsoutputs,
u64,
(),
TxOutputIdx
);
impl_duptable!(
    /// `txsidentifier` is a table defining a relation between the hash of a transaction and its transaction indices. It is primarily used to quickly find a tx's ID from its hash.
txsidentifier,
Compat<Hash>,
(),
TxIndex
);
// ---- OUTPUTS ----
impl_duptable!(
    /// `prerctoutputmetadata` is a duplicated table storing Pre-RingCT outputs' metadata. The key is the output's amount, and the subkey is its amount index.
prerctoutputmetadata,
u64,
u64,
OutputMetadata
);
impl_duptable!(
    /// `outputmetadata` is a table storing RingCT outputs' metadata. The key is the output's amount index, since the amount is always 0 for RingCT outputs.
outputmetadata,
(),
u64,
OutputMetadata
);
// ---- SPT KEYS ----
impl_duptable!(
    /// `spentkeys` is a table storing every KeyImage that has been used in an input. As these KeyImages can't be reused, they need to be marked.
spentkeys,
(),
Compat<KeyImage>,
()
);
// ---- PROPERTIES ----
impl_table!(
    /// `properties` is a table storing the database's properties, such as its version.
properties,
u32,
u32
);

View file

@ -1,516 +0,0 @@
//! ### Types module
//! This module contains the definitions and implementations of some of the structures stored in the database.
//! Some of these types are just wrappers for convenience or re-definitions of `monero-rs` database types (see Boog900/monero-rs, "db" branch).
//! Since the database does not use dummy keys, these redefined structs are the same as monerod's without the prefix data used as a key.
//! All these types implement [`bincode::Encode`] and [`bincode::Decode`]. They can store `monero-rs` types in their fields; in that case, those fields
//! use the [`Compat<T>`] wrapper.
use crate::encoding::{Compat, ReaderCompat};
use bincode::{enc::write::Writer, Decode, Encode};
use monero::{
consensus::{encode, Decodable},
util::ringct::{Key, RctSig, RctSigBase, RctSigPrunable, RctType, Signature},
Block, Hash, PublicKey, Transaction, TransactionPrefix, TxIn,
};
// ---- BLOCKS ----
#[derive(Clone, Debug, Encode, Decode)]
/// [`BlockMetadata`] is a struct containing a block's metadata, such as the block's `timestamp`, the `total_coins_generated` at this height, its `weight`, its `cumulative_difficulty`
/// (which monerod splits into `diff_lo` and `diff_hi`), the `block_hash`, the cumulative RingCT output count (`cum_rct`) and its long term weight (`long_term_block_weight`). monerod's equivalent struct is `mdb_block_info_4`.
/// This struct is used in the [`crate::table::blockmetadata`] table.
pub struct BlockMetadata {
/// Block's timestamp (the time at which it started to be mined)
pub timestamp: u64,
/// Total monero supply, this block included
pub total_coins_generated: u64,
/// Block's weight (sum of all transactions weights)
pub weight: u64,
    /// Block's cumulative_difficulty. In monerod this field would have been split into two `u64`s, since C++ doesn't *natively* support `uint128_t`/`u128`
pub cumulative_difficulty: u128,
/// Block's hash
pub block_hash: Compat<Hash>,
/// Cumulative number of RingCT outputs up to this block
pub cum_rct: u64,
/// Block's long term weight
pub long_term_block_weight: u64,
}
#[derive(Clone, Debug, Encode, Decode)]
/// [`AltBlock`] is a struct containing an alternative `block` (defining an alternative mainchain) and its metadata (`block_height`, `cumulative_weight`,
/// `cumulative_difficulty_low`, `cumulative_difficulty_high`, `already_generated_coins`).
/// This struct is used in [`crate::table::altblock`] table.
pub struct AltBlock {
/// Alternative block's height.
pub height: u64,
/// Cumulative weight median at this block
pub cumulative_weight: u64,
/// Cumulative difficulty
pub cumulative_difficulty: u128,
/// Total generated coins excluding this block's coinbase reward + fees
pub already_generated_coins: u64,
/// Actual block data, with Prefix and Transactions.
    /// It is worth noting that monerod's implementation does not contain the block in its struct, but still appends it at the end of the metadata.
pub block: Compat<Block>,
}
// ---- TRANSACTIONS ----
#[derive(Clone, Debug)]
/// [`TransactionPruned`] is, as its name suggests, the pruned part of a transaction: the transaction prefix and the base of its RingCT signatures.
/// This struct is used in the [`crate::table::txspruned`] table.
pub struct TransactionPruned {
/// The transaction prefix.
pub prefix: TransactionPrefix,
/// The RingCT ring, will only contain the 'sig' field.
pub rct_signatures: RctSig,
}
impl bincode::Decode for TransactionPruned {
fn decode<D: bincode::de::Decoder>(
decoder: &mut D,
) -> Result<Self, bincode::error::DecodeError> {
let mut r = ReaderCompat(decoder.reader());
        // We first decode the TransactionPrefix and get the number of inputs/outputs
let prefix: TransactionPrefix = Decodable::consensus_decode(&mut r)
.map_err(|_| bincode::error::DecodeError::Other("Monero-rs decoding failed"))?;
let (inputs, outputs) = (prefix.inputs.len(), prefix.outputs.len());
        // Handle the prefix according to its version
match *prefix.version {
            // First transaction format, Pre-RingCT, so the RingCT ring is None
1 => Ok(TransactionPruned {
prefix,
rct_signatures: RctSig { sig: None, p: None },
}),
_ => {
let mut rct_signatures = RctSig { sig: None, p: None };
// No inputs so no RingCT
if inputs == 0 {
return Ok(TransactionPruned {
prefix,
rct_signatures,
});
}
// Otherwise get the RingCT ring for the tx inputs
if let Some(sig) = RctSigBase::consensus_decode(&mut r, inputs, outputs)
.map_err(|_| bincode::error::DecodeError::Other("Monero-rs decoding failed"))?
{
rct_signatures = RctSig {
sig: Some(sig),
p: None,
};
}
// And we return it
Ok(TransactionPruned {
prefix,
rct_signatures,
})
}
}
}
}
impl bincode::Encode for TransactionPruned {
fn encode<E: bincode::enc::Encoder>(
&self,
encoder: &mut E,
) -> Result<(), bincode::error::EncodeError> {
let writer = encoder.writer();
// Encoding the Transaction prefix first
let buf = monero::consensus::serialize(&self.prefix);
writer.write(&buf)?;
match *self.prefix.version {
            1 => {} // First transaction format, Pre-RingCT, so there is no RingCT ring to add
_ => {
if let Some(sig) = &self.rct_signatures.sig {
// If there is ring then we append it at the end
let buf = monero::consensus::serialize(sig);
writer.write(&buf)?;
}
}
}
Ok(())
}
}
impl TransactionPruned {
    /// Turns a pruned transaction into a normal transaction, given the missing prunable data
pub fn into_transaction(self, prunable: &[u8]) -> Result<Transaction, encode::Error> {
let mut r = std::io::Cursor::new(prunable);
match *self.prefix.version {
// Pre-RingCT transactions
1 => {
let signatures: Result<Vec<Vec<Signature>>, encode::Error> = self
.prefix
.inputs
.iter()
.filter_map(|input| match input {
TxIn::ToKey { key_offsets, .. } => {
let sigs: Result<Vec<Signature>, encode::Error> = key_offsets
.iter()
.map(|_| Decodable::consensus_decode(&mut r))
.collect();
Some(sigs)
}
_ => None,
})
.collect();
Ok(Transaction {
prefix: self.prefix,
signatures: signatures?,
rct_signatures: RctSig { sig: None, p: None },
})
}
// Post-RingCT Transactions
_ => {
let signatures = Vec::new();
let mut rct_signatures = RctSig { sig: None, p: None };
if self.prefix.inputs.is_empty() {
return Ok(Transaction {
prefix: self.prefix,
signatures,
rct_signatures: RctSig { sig: None, p: None },
});
}
if let Some(sig) = self.rct_signatures.sig {
let p = {
if sig.rct_type != RctType::Null {
let mixin_size = if !self.prefix.inputs.is_empty() {
match &self.prefix.inputs[0] {
TxIn::ToKey { key_offsets, .. } => key_offsets.len() - 1,
_ => 0,
}
} else {
0
};
RctSigPrunable::consensus_decode(
&mut r,
sig.rct_type,
self.prefix.inputs.len(),
self.prefix.outputs.len(),
mixin_size,
)?
} else {
None
}
};
rct_signatures = RctSig { sig: Some(sig), p };
}
Ok(Transaction {
prefix: self.prefix,
signatures,
rct_signatures,
})
}
}
}
}
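As a usage sketch (a hypothetical helper relying on this module's imports, not part of the crate): rebuilding a full transaction from its two stored halves, assuming the pruned half was read from `table::txspruned` and the prunable bytes from `table::txsprunable` for the same transaction ID:

```rust
fn rebuild_transaction(
    pruned: TransactionPruned,
    prunable_blob: &[u8],
) -> Result<Transaction, encode::Error> {
    // V1 transactions decode their ring signatures from the blob; post-RingCT
    // transactions decode an `RctSigPrunable` from it instead.
    pruned.into_transaction(prunable_blob)
}
```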
pub fn get_transaction_prunable_blob<W: std::io::Write + ?Sized>(
tx: &monero::Transaction,
w: &mut W,
) -> Result<usize, std::io::Error> {
let mut len = 0;
match tx.prefix.version.0 {
1 => {
for sig in tx.signatures.iter() {
for c in sig {
len += monero::consensus::encode::Encodable::consensus_encode(c, w)?;
}
}
}
_ => {
if let Some(sig) = &tx.rct_signatures.sig {
if let Some(p) = &tx.rct_signatures.p {
len += p.consensus_encode(w, sig.rct_type)?;
}
}
}
}
Ok(len)
}
pub fn calculate_prunable_hash(tx: &monero::Transaction, tx_prunable_blob: &[u8]) -> Option<Hash> {
    // V1 transactions don't have a prunable hash
if tx.prefix.version.0 == 1 {
return None;
}
// Checking if it's a miner tx
if let TxIn::Gen { height: _ } = &tx.prefix.inputs[0] {
if tx.prefix.inputs.len() == 1 {
// Returning miner tx's empty hash
return Some(Hash::from_slice(&[
0x70, 0xa4, 0x85, 0x5d, 0x04, 0xd8, 0xfa, 0x7b, 0x3b, 0x27, 0x82, 0xca, 0x53, 0xb6,
0x00, 0xe5, 0xc0, 0x03, 0xc7, 0xdc, 0xb2, 0x7d, 0x7e, 0x92, 0x3c, 0x23, 0xf7, 0x86,
0x01, 0x46, 0xd2, 0xc5,
]));
}
};
// Calculating the hash
Some(Hash::new(tx_prunable_blob))
}
#[derive(Clone, Debug, Encode, Decode)]
/// [`TxIndex`] is a struct used in the [`crate::table::txsidentifier`] table. It stores a transaction's `unlock_time`, the `height` of the block
/// it belongs to, and its transaction ID (`tx_id`)
pub struct TxIndex {
/// Transaction ID
pub tx_id: u64,
/// The unlock time of this transaction (the height at which it is unlocked, it is not a timestamp)
pub unlock_time: u64,
    /// The height of the block this transaction belongs to
pub height: u64, // TODO USELESS already in txs_prunable_tip
}
#[derive(Clone, Debug, Encode, Decode)]
/// [`TxOutputIdx`] is a single-element tuple struct containing the indices (amount and amount indices) of a transaction's outputs. It is defined for clarity on its role.
/// This struct is used in [`crate::table::txsoutputs`] table.
pub struct TxOutputIdx(pub Vec<u64>);
// ---- OUTPUTS ----
#[derive(Clone, Debug, Encode, Decode)]
/// [`RctOutkey`] is a struct containing RingCT metadata and an output ID. It is equivalent to monerod's `output_data_t` struct.
/// This struct is used in [`crate::table::outputamounts`].
pub struct RctOutkey {
// /// amount_index
//pub amount_index: u64,
/// The output's ID
pub output_id: u64,
/// The output's public key (for spend verification)
pub pubkey: Compat<PublicKey>,
/// The output's unlock time (the height at which it is unlocked, it is not a timestamp)
pub unlock_time: u64,
    /// The height of the block containing this output
pub height: u64,
/// The output's amount commitment (for spend verification)
    /// For compatibility with Pre-RingCT outputs, this field is an option. In fact, monerod distinguishes between the `pre_rct_output_data_t` and `output_data_t` structs like this:
/// ```cpp
/// // This MUST be identical to output_data_t, without the extra rct data at the end
/// struct pre_rct_output_data_t
/// ```
pub commitment: Option<Compat<Key>>,
}
#[derive(Clone, Debug, Encode, Decode)]
/// [`OutputMetadata`] is a struct containing an output's metadata. It is used in [`crate::table::outputmetadata`]. It merges monerod's
/// `out_tx_index` tuple with the `output_data_t` structure, without the output ID.
pub struct OutputMetadata {
pub tx_hash: Compat<Hash>,
pub local_index: u64,
pub pubkey: Option<Compat<PublicKey>>,
pub unlock_time: u64,
pub height: u64,
pub commitment: Option<Compat<Key>>,
}
//#[derive(Clone, Debug, Encode, Decode)]
//// [`OutAmountIdx`] is a struct tuple used to contain the two keys used in [`crate::table::outputamounts`] table.
//// In monerod, the database key is the amount while the *cursor key* (the amount index) is the prefix of the actual data being returned.
//// As we prefer not to use cursors with partial data, we prefer to concatenate these two into a unique key
//pub struct OutAmountIdx(u64,u64);
// MAYBE NOT FINALLY
//#[derive(Clone, Debug, Encode, Decode)]
// /// [`OutTx`] is a struct containing the hash of the transaction whose output belongs to, and the local index of this output.
// /// This struct is used in [`crate::table::outputinherit`].
/*pub struct OutTx {
/// Output's transaction hash
pub tx_hash: Compat<Hash>,
/// Local index of the output
pub local_index: u64,
}*/
#[cfg(test)]
mod tests {
use monero::Hash;
use super::get_transaction_prunable_blob;
#[test]
fn calculate_tx_prunable_hash() {
let prunable_blob: Vec<u8> = vec![
1, 113, 10, 7, 87, 70, 119, 97, 244, 126, 155, 133, 254, 167, 60, 204, 134, 45, 71, 17,
87, 21, 252, 8, 218, 233, 219, 192, 84, 181, 196, 74, 213, 2, 246, 222, 66, 45, 152,
159, 156, 19, 224, 251, 110, 154, 188, 91, 129, 53, 251, 82, 134, 46, 93, 119, 136, 35,
13, 190, 235, 231, 44, 183, 134, 221, 12, 131, 222, 209, 246, 52, 14, 33, 94, 173, 251,
233, 18, 154, 91, 72, 229, 180, 43, 35, 152, 130, 38, 82, 56, 179, 36, 168, 54, 41, 62,
49, 208, 35, 245, 29, 27, 81, 72, 140, 104, 4, 59, 22, 120, 252, 67, 197, 130, 245, 93,
100, 129, 134, 19, 137, 228, 237, 166, 89, 5, 42, 1, 110, 139, 39, 81, 89, 159, 40,
239, 211, 251, 108, 82, 68, 125, 182, 75, 152, 129, 74, 73, 208, 215, 15, 63, 3, 106,
168, 35, 56, 126, 66, 2, 189, 53, 201, 77, 187, 102, 127, 154, 60, 209, 33, 217, 109,
81, 217, 183, 252, 114, 90, 245, 21, 229, 174, 254, 177, 147, 130, 74, 49, 118, 203,
14, 7, 118, 221, 81, 181, 78, 97, 224, 76, 160, 134, 73, 206, 204, 199, 201, 30, 201,
77, 4, 78, 237, 167, 76, 92, 104, 247, 247, 203, 141, 243, 72, 52, 83, 61, 35, 147,
231, 124, 21, 115, 81, 83, 67, 222, 61, 225, 171, 66, 243, 185, 195, 51, 72, 243, 80,
104, 4, 166, 54, 199, 235, 193, 175, 4, 242, 42, 146, 170, 90, 212, 101, 208, 113, 58,
65, 121, 55, 179, 206, 92, 50, 94, 171, 33, 67, 108, 220, 19, 193, 155, 30, 58, 46, 9,
227, 48, 246, 187, 82, 230, 61, 64, 95, 197, 183, 150, 62, 203, 252, 36, 157, 135, 160,
120, 189, 52, 94, 186, 93, 5, 36, 120, 160, 62, 254, 178, 101, 11, 228, 63, 128, 249,
182, 56, 100, 9, 5, 2, 81, 243, 229, 245, 43, 234, 35, 216, 212, 46, 165, 251, 183,
133, 10, 76, 172, 95, 106, 231, 13, 216, 222, 15, 92, 122, 103, 68, 238, 190, 108, 124,
138, 62, 255, 243, 22, 209, 2, 138, 45, 178, 101, 240, 18, 186, 71, 239, 137, 191, 134,
128, 221, 181, 173, 242, 111, 117, 45, 255, 138, 101, 79, 242, 42, 4, 144, 245, 193,
79, 14, 44, 201, 223, 0, 193, 123, 75, 155, 140, 248, 0, 226, 246, 230, 126, 7, 32,
107, 173, 193, 206, 184, 11, 33, 148, 104, 32, 79, 149, 71, 68, 150, 6, 47, 90, 231,
151, 14, 121, 196, 169, 249, 117, 154, 167, 139, 103, 62, 97, 250, 131, 160, 92, 239,
18, 236, 110, 184, 102, 30, 194, 175, 243, 145, 169, 183, 163, 141, 244, 186, 172, 251,
3, 78, 165, 33, 12, 2, 136, 180, 178, 83, 117, 0, 184, 170, 255, 69, 131, 123, 8, 212,
158, 162, 119, 137, 146, 63, 95, 133, 186, 91, 255, 152, 187, 107, 113, 147, 51, 219,
207, 5, 160, 169, 97, 9, 1, 202, 152, 186, 128, 160, 110, 120, 7, 176, 103, 87, 30,
137, 240, 67, 55, 79, 147, 223, 45, 177, 210, 101, 225, 22, 25, 129, 111, 101, 21, 213,
20, 254, 36, 57, 67, 70, 93, 192, 11, 180, 75, 99, 185, 77, 75, 74, 63, 182, 183, 208,
16, 69, 237, 96, 76, 96, 212, 242, 6, 169, 14, 250, 168, 129, 18, 141, 240, 101, 196,
96, 120, 88, 90, 51, 77, 12, 133, 212, 192, 107, 131, 238, 34, 237, 93, 157, 108, 13,
255, 187, 163, 106, 148, 108, 105, 244, 243, 174, 189, 180, 48, 102, 57, 170, 118, 211,
110, 126, 222, 165, 93, 36, 157, 90, 14, 135, 184, 197, 185, 7, 99, 199, 224, 225, 243,
212, 116, 149, 137, 186, 16, 196, 73, 23, 11, 248, 248, 67, 167, 149, 154, 64, 76, 218,
119, 135, 239, 34, 48, 66, 57, 109, 246, 3, 141, 169, 42, 157, 222, 21, 40, 183, 168,
97, 195, 106, 244, 229, 61, 122, 136, 59, 255, 120, 86, 30, 63, 226, 18, 65, 218, 188,
195, 217, 85, 12, 211, 221, 188, 27, 8, 98, 103, 211, 213, 217, 65, 82, 229, 145, 80,
147, 220, 57, 143, 20, 189, 253, 106, 13, 21, 170, 60, 24, 48, 162, 234, 0, 240, 226,
4, 28, 76, 93, 56, 3, 187, 223, 58, 31, 184, 58, 234, 198, 140, 223, 217, 1, 147, 94,
218, 199, 154, 121, 137, 44, 229, 0, 1, 10, 133, 250, 140, 64, 150, 89, 64, 112, 178,
221, 87, 19, 24, 104, 252, 28, 65, 207, 28, 195, 217, 73, 12, 16, 83, 55, 199, 84, 117,
175, 123, 13, 234, 10, 54, 63, 245, 161, 74, 235, 92, 189, 247, 47, 62, 176, 41, 159,
40, 250, 116, 63, 33, 193, 78, 72, 29, 215, 9, 191, 233, 243, 87, 14, 195, 7, 89, 101,
0, 28, 0, 234, 205, 59, 142, 119, 119, 52, 143, 80, 151, 211, 184, 235, 98, 222, 206,
170, 166, 4, 155, 3, 235, 26, 62, 8, 171, 19, 14, 53, 245, 77, 114, 175, 246, 170, 139,
227, 212, 141, 72, 223, 134, 63, 91, 26, 12, 78, 253, 198, 162, 152, 202, 207, 170,
254, 8, 4, 4, 175, 207, 84, 10, 108, 179, 157, 132, 110, 76, 201, 247, 227, 158, 106,
59, 41, 206, 229, 128, 2, 60, 203, 65, 71, 160, 232, 186, 227, 51, 12, 142, 85, 93, 89,
234, 236, 157, 230, 247, 167, 99, 7, 37, 146, 13, 53, 39, 255, 209, 177, 179, 17, 131,
59, 16, 75, 180, 21, 119, 88, 4, 12, 49, 140, 3, 110, 235, 231, 92, 13, 41, 137, 21,
37, 46, 138, 44, 250, 44, 161, 179, 114, 94, 63, 207, 192, 81, 234, 35, 125, 54, 2,
214, 10, 57, 116, 154, 150, 147, 223, 232, 36, 108, 152, 145, 157, 132, 190, 103, 233,
155, 141, 243, 249, 120, 72, 168, 14, 196, 35, 54, 107, 167, 218, 209, 1, 209, 197,
187, 242, 76, 86, 229, 114, 131, 196, 69, 171, 118, 28, 51, 192, 146, 14, 140, 84, 66,
155, 237, 194, 167, 121, 160, 166, 198, 166, 57, 13, 66, 162, 234, 148, 102, 133, 111,
18, 166, 77, 156, 75, 84, 220, 80, 35, 81, 141, 23, 197, 162, 23, 167, 187, 187, 187,
137, 184, 96, 140, 162, 6, 49, 63, 39, 84, 107, 85, 202, 168, 51, 194, 214, 132, 253,
253, 189, 231, 1, 226, 118, 104, 84, 147, 244, 58, 233, 250, 66, 26, 109, 223, 34, 2,
2, 112, 141, 147, 230, 134, 73, 45, 105, 180, 223, 52, 95, 40, 235, 209, 50, 67, 193,
22, 176, 176, 128, 140, 238, 252, 129, 220, 175, 79, 133, 12, 123, 209, 64, 5, 160, 39,
47, 66, 122, 245, 65, 102, 133, 58, 74, 138, 153, 217, 48, 59, 84, 135, 117, 92, 131,
44, 109, 40, 105, 69, 29, 14, 142, 71, 87, 112, 68, 134, 0, 14, 158, 14, 68, 15, 180,
150, 108, 49, 196, 94, 82, 27, 208, 163, 103, 81, 85, 124, 61, 242, 151, 29, 74, 87,
134, 166, 145, 186, 110, 207, 162, 99, 92, 133, 121, 137, 124, 90, 134, 5, 249, 231,
181, 222, 38, 170, 141, 113, 204, 172, 169, 173, 63, 81, 170, 76,
];
let prunable_hash = Hash::from_slice(&[
0x5c, 0x5e, 0x69, 0xd8, 0xfc, 0x0d, 0x22, 0x6a, 0x60, 0x91, 0x47, 0xda, 0x98, 0x36,
0x06, 0x00, 0xf4, 0xea, 0x49, 0xcc, 0x49, 0x45, 0x2c, 0x5e, 0xf8, 0xba, 0x20, 0xf5,
0x93, 0xd4, 0x80, 0x7d,
]);
assert_eq!(prunable_hash, Hash::new(prunable_blob));
}
#[test]
fn get_prunable_tx_blob() {
let mut pruned_p_blob: Vec<u8> = vec![
2, 0, 1, 2, 0, 16, 180, 149, 135, 30, 237, 231, 156, 1, 132, 145, 47, 182, 251, 153, 1,
225, 234, 94, 219, 134, 23, 222, 210, 30, 208, 213, 12, 136, 158, 5, 159, 148, 15, 206,
144, 2, 132, 63, 135, 22, 151, 8, 134, 8, 178, 26, 194, 111, 101, 192, 45, 104, 18,
115, 178, 194, 100, 255, 227, 10, 253, 165, 53, 62, 81, 67, 202, 169, 56, 99, 42, 146,
175, 137, 85, 195, 27, 151, 2, 0, 3, 207, 28, 183, 85, 7, 58, 81, 205, 53, 9, 191, 141,
209, 70, 58, 30, 38, 225, 212, 68, 14, 4, 216, 204, 101, 163, 66, 156, 101, 143, 255,
196, 134, 0, 3, 254, 66, 159, 187, 180, 41, 78, 252, 85, 255, 154, 55, 239, 222, 199,
37, 159, 210, 71, 186, 188, 46, 134, 181, 236, 221, 173, 43, 93, 50, 138, 249, 221, 44,
1, 34, 67, 111, 182, 199, 28, 219, 56, 238, 143, 188, 101, 103, 205, 139, 160, 144,
226, 34, 92, 235, 221, 75, 38, 7, 104, 255, 108, 208, 1, 184, 169, 2, 9, 1, 84, 62, 77,
107, 119, 22, 148, 222, 6, 128, 128, 211, 14, 242, 200, 16, 137, 239, 249, 55, 59, 16,
193, 192, 140, 240, 153, 129, 228, 115, 222, 247, 41, 128, 219, 241, 249, 198, 214, 75,
31, 82, 225, 1, 158, 183, 226, 220, 126, 228, 191, 211, 79, 43, 220, 95, 124, 109, 14,
162, 170, 68, 37, 62, 21, 139, 182, 246, 152, 36, 156, 172, 197, 20, 145, 85, 9, 8,
106, 237, 112, 63, 189, 172, 145, 49, 234, 68, 152, 200, 241, 0, 37,
];
let prunable_blob: Vec<u8> = vec![
1, 113, 10, 7, 87, 70, 119, 97, 244, 126, 155, 133, 254, 167, 60, 204, 134, 45, 71, 17,
87, 21, 252, 8, 218, 233, 219, 192, 84, 181, 196, 74, 213, 2, 246, 222, 66, 45, 152,
159, 156, 19, 224, 251, 110, 154, 188, 91, 129, 53, 251, 82, 134, 46, 93, 119, 136, 35,
13, 190, 235, 231, 44, 183, 134, 221, 12, 131, 222, 209, 246, 52, 14, 33, 94, 173, 251,
233, 18, 154, 91, 72, 229, 180, 43, 35, 152, 130, 38, 82, 56, 179, 36, 168, 54, 41, 62,
49, 208, 35, 245, 29, 27, 81, 72, 140, 104, 4, 59, 22, 120, 252, 67, 197, 130, 245, 93,
100, 129, 134, 19, 137, 228, 237, 166, 89, 5, 42, 1, 110, 139, 39, 81, 89, 159, 40,
239, 211, 251, 108, 82, 68, 125, 182, 75, 152, 129, 74, 73, 208, 215, 15, 63, 3, 106,
168, 35, 56, 126, 66, 2, 189, 53, 201, 77, 187, 102, 127, 154, 60, 209, 33, 217, 109,
81, 217, 183, 252, 114, 90, 245, 21, 229, 174, 254, 177, 147, 130, 74, 49, 118, 203,
14, 7, 118, 221, 81, 181, 78, 97, 224, 76, 160, 134, 73, 206, 204, 199, 201, 30, 201,
77, 4, 78, 237, 167, 76, 92, 104, 247, 247, 203, 141, 243, 72, 52, 83, 61, 35, 147,
231, 124, 21, 115, 81, 83, 67, 222, 61, 225, 171, 66, 243, 185, 195, 51, 72, 243, 80,
104, 4, 166, 54, 199, 235, 193, 175, 4, 242, 42, 146, 170, 90, 212, 101, 208, 113, 58,
65, 121, 55, 179, 206, 92, 50, 94, 171, 33, 67, 108, 220, 19, 193, 155, 30, 58, 46, 9,
227, 48, 246, 187, 82, 230, 61, 64, 95, 197, 183, 150, 62, 203, 252, 36, 157, 135, 160,
120, 189, 52, 94, 186, 93, 5, 36, 120, 160, 62, 254, 178, 101, 11, 228, 63, 128, 249,
182, 56, 100, 9, 5, 2, 81, 243, 229, 245, 43, 234, 35, 216, 212, 46, 165, 251, 183,
133, 10, 76, 172, 95, 106, 231, 13, 216, 222, 15, 92, 122, 103, 68, 238, 190, 108, 124,
138, 62, 255, 243, 22, 209, 2, 138, 45, 178, 101, 240, 18, 186, 71, 239, 137, 191, 134,
128, 221, 181, 173, 242, 111, 117, 45, 255, 138, 101, 79, 242, 42, 4, 144, 245, 193,
79, 14, 44, 201, 223, 0, 193, 123, 75, 155, 140, 248, 0, 226, 246, 230, 126, 7, 32,
107, 173, 193, 206, 184, 11, 33, 148, 104, 32, 79, 149, 71, 68, 150, 6, 47, 90, 231,
151, 14, 121, 196, 169, 249, 117, 154, 167, 139, 103, 62, 97, 250, 131, 160, 92, 239,
18, 236, 110, 184, 102, 30, 194, 175, 243, 145, 169, 183, 163, 141, 244, 186, 172, 251,
3, 78, 165, 33, 12, 2, 136, 180, 178, 83, 117, 0, 184, 170, 255, 69, 131, 123, 8, 212,
158, 162, 119, 137, 146, 63, 95, 133, 186, 91, 255, 152, 187, 107, 113, 147, 51, 219,
207, 5, 160, 169, 97, 9, 1, 202, 152, 186, 128, 160, 110, 120, 7, 176, 103, 87, 30,
137, 240, 67, 55, 79, 147, 223, 45, 177, 210, 101, 225, 22, 25, 129, 111, 101, 21, 213,
20, 254, 36, 57, 67, 70, 93, 192, 11, 180, 75, 99, 185, 77, 75, 74, 63, 182, 183, 208,
16, 69, 237, 96, 76, 96, 212, 242, 6, 169, 14, 250, 168, 129, 18, 141, 240, 101, 196,
96, 120, 88, 90, 51, 77, 12, 133, 212, 192, 107, 131, 238, 34, 237, 93, 157, 108, 13,
255, 187, 163, 106, 148, 108, 105, 244, 243, 174, 189, 180, 48, 102, 57, 170, 118, 211,
110, 126, 222, 165, 93, 36, 157, 90, 14, 135, 184, 197, 185, 7, 99, 199, 224, 225, 243,
212, 116, 149, 137, 186, 16, 196, 73, 23, 11, 248, 248, 67, 167, 149, 154, 64, 76, 218,
119, 135, 239, 34, 48, 66, 57, 109, 246, 3, 141, 169, 42, 157, 222, 21, 40, 183, 168,
97, 195, 106, 244, 229, 61, 122, 136, 59, 255, 120, 86, 30, 63, 226, 18, 65, 218, 188,
195, 217, 85, 12, 211, 221, 188, 27, 8, 98, 103, 211, 213, 217, 65, 82, 229, 145, 80,
147, 220, 57, 143, 20, 189, 253, 106, 13, 21, 170, 60, 24, 48, 162, 234, 0, 240, 226,
4, 28, 76, 93, 56, 3, 187, 223, 58, 31, 184, 58, 234, 198, 140, 223, 217, 1, 147, 94,
218, 199, 154, 121, 137, 44, 229, 0, 1, 10, 133, 250, 140, 64, 150, 89, 64, 112, 178,
221, 87, 19, 24, 104, 252, 28, 65, 207, 28, 195, 217, 73, 12, 16, 83, 55, 199, 84, 117,
175, 123, 13, 234, 10, 54, 63, 245, 161, 74, 235, 92, 189, 247, 47, 62, 176, 41, 159,
40, 250, 116, 63, 33, 193, 78, 72, 29, 215, 9, 191, 233, 243, 87, 14, 195, 7, 89, 101,
0, 28, 0, 234, 205, 59, 142, 119, 119, 52, 143, 80, 151, 211, 184, 235, 98, 222, 206,
170, 166, 4, 155, 3, 235, 26, 62, 8, 171, 19, 14, 53, 245, 77, 114, 175, 246, 170, 139,
227, 212, 141, 72, 223, 134, 63, 91, 26, 12, 78, 253, 198, 162, 152, 202, 207, 170,
254, 8, 4, 4, 175, 207, 84, 10, 108, 179, 157, 132, 110, 76, 201, 247, 227, 158, 106,
59, 41, 206, 229, 128, 2, 60, 203, 65, 71, 160, 232, 186, 227, 51, 12, 142, 85, 93, 89,
234, 236, 157, 230, 247, 167, 99, 7, 37, 146, 13, 53, 39, 255, 209, 177, 179, 17, 131,
59, 16, 75, 180, 21, 119, 88, 4, 12, 49, 140, 3, 110, 235, 231, 92, 13, 41, 137, 21,
37, 46, 138, 44, 250, 44, 161, 179, 114, 94, 63, 207, 192, 81, 234, 35, 125, 54, 2,
214, 10, 57, 116, 154, 150, 147, 223, 232, 36, 108, 152, 145, 157, 132, 190, 103, 233,
155, 141, 243, 249, 120, 72, 168, 14, 196, 35, 54, 107, 167, 218, 209, 1, 209, 197,
187, 242, 76, 86, 229, 114, 131, 196, 69, 171, 118, 28, 51, 192, 146, 14, 140, 84, 66,
155, 237, 194, 167, 121, 160, 166, 198, 166, 57, 13, 66, 162, 234, 148, 102, 133, 111,
18, 166, 77, 156, 75, 84, 220, 80, 35, 81, 141, 23, 197, 162, 23, 167, 187, 187, 187,
137, 184, 96, 140, 162, 6, 49, 63, 39, 84, 107, 85, 202, 168, 51, 194, 214, 132, 253,
253, 189, 231, 1, 226, 118, 104, 84, 147, 244, 58, 233, 250, 66, 26, 109, 223, 34, 2,
2, 112, 141, 147, 230, 134, 73, 45, 105, 180, 223, 52, 95, 40, 235, 209, 50, 67, 193,
22, 176, 176, 128, 140, 238, 252, 129, 220, 175, 79, 133, 12, 123, 209, 64, 5, 160, 39,
47, 66, 122, 245, 65, 102, 133, 58, 74, 138, 153, 217, 48, 59, 84, 135, 117, 92, 131,
44, 109, 40, 105, 69, 29, 14, 142, 71, 87, 112, 68, 134, 0, 14, 158, 14, 68, 15, 180,
150, 108, 49, 196, 94, 82, 27, 208, 163, 103, 81, 85, 124, 61, 242, 151, 29, 74, 87,
134, 166, 145, 186, 110, 207, 162, 99, 92, 133, 121, 137, 124, 90, 134, 5, 249, 231,
181, 222, 38, 170, 141, 113, 204, 172, 169, 173, 63, 81, 170, 76,
];
let mut tx_blob: Vec<u8> = Vec::new();
tx_blob.append(&mut pruned_p_blob);
tx_blob.append(&mut prunable_blob.clone());
let mut buf = Vec::new();
#[allow(clippy::expect_used)]
let tx: monero::Transaction =
            monero::consensus::encode::deserialize(&tx_blob).expect("failed to deserialize");
#[allow(clippy::expect_used)]
get_transaction_prunable_blob(&tx, &mut buf).expect("failed to get out prunable blob");
assert_eq!(prunable_blob, buf);
}
}

View file

@ -0,0 +1,36 @@
[package]
name = "cuprate-p2p"
version = "0.1.0"
edition = "2021"
license = "MIT"
authors = ["Boog900"]
[dependencies]
fixed-bytes = { path = "../../net/fixed-bytes" }
monero-wire = { path = "../../net/monero-wire" }
monero-p2p = { path = "../monero-p2p", features = ["borsh"] }
monero-address-book = { path = "../address-book" }
monero-pruning = { path = "../../pruning" }
cuprate-helper = { path = "../../helper", features = ["asynch"] }
monero-serai = { workspace = true, features = ["std"] }
tower = { workspace = true }
tokio = { workspace = true, features = ["rt"] }
rayon = { workspace = true }
tokio-util = { workspace = true }
tokio-stream = { workspace = true, features = ["sync", "time"] }
futures = { workspace = true, features = ["std"] }
pin-project = { workspace = true }
dashmap = { workspace = true }
thiserror = { workspace = true }
bytes = { workspace = true, features = ["std"] }
indexmap = { workspace = true, features = ["std"] }
rand = { workspace = true, features = ["std", "std_rng"] }
rand_distr = { workspace = true, features = ["std"] }
hex = { workspace = true, features = ["std"] }
tracing = { workspace = true, features = ["std", "attributes"] }
[dev-dependencies]
cuprate-test-utils = { path = "../../test-utils" }

View file

@ -0,0 +1,148 @@
//! # Client Pool.
//!
//! The [`ClientPool`] is a pool of currently connected peers that can be pulled from.
//! It does _not_ necessarily contain every connected peer as another place could have
//! taken a peer from the pool.
//!
//! When taking peers from the pool they are wrapped in [`ClientPoolDropGuard`], which
//! returns the peer to the pool when it is dropped.
//!
//! Internally the pool is a [`DashMap`], which means care should be taken in `async` code,
//! as it internally uses blocking `RwLock`s.
//!
use std::sync::Arc;
use dashmap::{DashMap, DashSet};
use tokio::sync::mpsc;
use monero_p2p::{
client::{Client, InternalPeerID},
handles::ConnectionHandle,
ConnectionDirection, NetworkZone,
};
mod disconnect_monitor;
mod drop_guard_client;
pub use drop_guard_client::ClientPoolDropGuard;
/// The client pool, which holds currently connected free peers.
///
/// See the [module docs](self) for more.
pub struct ClientPool<N: NetworkZone> {
/// The connected [`Client`]s.
clients: DashMap<InternalPeerID<N::Addr>, Client<N>>,
    /// A set of outbound clients. As these sets allow access/mutation from different threads,
    /// a peer ID being in here does not mean the peer is necessarily in `clients`, as it could have been removed
    /// by another thread. However, if a peer is both in here and in `clients`, it is definitely
    /// an outbound peer.
outbound_clients: DashSet<InternalPeerID<N::Addr>>,
/// A channel to send new peer ids down to monitor for disconnect.
new_connection_tx: mpsc::UnboundedSender<(ConnectionHandle, InternalPeerID<N::Addr>)>,
}
impl<N: NetworkZone> ClientPool<N> {
/// Returns a new [`ClientPool`] wrapped in an [`Arc`].
pub fn new() -> Arc<ClientPool<N>> {
let (tx, rx) = mpsc::unbounded_channel();
let pool = Arc::new(ClientPool {
clients: DashMap::new(),
outbound_clients: DashSet::new(),
new_connection_tx: tx,
});
tokio::spawn(disconnect_monitor::disconnect_monitor(rx, pool.clone()));
pool
}
    /// Adds a [`Client`] to the pool; the client must have previously been taken from the
    /// pool.
///
/// See [`ClientPool::add_new_client`] to add a [`Client`] which was not taken from the pool before.
///
/// # Panics
/// This function panics if `client` already exists in the pool.
fn add_client(&self, client: Client<N>) {
let handle = client.info.handle.clone();
let id = client.info.id;
// Fast path: if the client is disconnected don't add it to the peer set.
if handle.is_closed() {
return;
}
if client.info.direction == ConnectionDirection::OutBound {
self.outbound_clients.insert(id);
}
let res = self.clients.insert(id, client);
assert!(res.is_none());
// We have to check this again otherwise we could have a race condition where a
// peer is disconnected after the first check, the disconnect monitor tries to remove it,
// and then it is added to the pool.
if handle.is_closed() {
self.remove_client(&id);
}
}
    /// Adds a _new_ [`Client`] to the pool; this client should be a new connection, not one
    /// already taken from the pool.
///
/// # Panics
/// This function panics if `client` already exists in the pool.
pub fn add_new_client(&self, client: Client<N>) {
self.new_connection_tx
.send((client.info.handle.clone(), client.info.id))
.unwrap();
self.add_client(client);
}
/// Remove a [`Client`] from the pool.
///
/// [`None`] is returned if the client did not exist in the pool.
fn remove_client(&self, peer: &InternalPeerID<N::Addr>) -> Option<Client<N>> {
self.outbound_clients.remove(peer);
self.clients.remove(peer).map(|(_, client)| client)
}
/// Borrows a [`Client`] from the pool.
///
/// The [`Client`] is wrapped in [`ClientPoolDropGuard`] which
/// will return the client to the pool when it's dropped.
///
/// See [`Self::borrow_clients`] for borrowing multiple clients.
pub fn borrow_client(
self: &Arc<Self>,
peer: &InternalPeerID<N::Addr>,
) -> Option<ClientPoolDropGuard<N>> {
self.remove_client(peer).map(|client| ClientPoolDropGuard {
pool: Arc::clone(self),
client: Some(client),
})
}
/// Borrows multiple [`Client`]s from the pool.
///
/// Note that the returned iterator is not guaranteed to contain every peer asked for.
///
/// See [`Self::borrow_client`] for borrowing a single client.
#[allow(private_interfaces)] // TODO: Remove me when 2024 Rust
pub fn borrow_clients<'a, 'b>(
self: &'a Arc<Self>,
peers: &'b [InternalPeerID<N::Addr>],
) -> impl Iterator<Item = ClientPoolDropGuard<N>> + Captures<(&'a (), &'b ())> {
peers.iter().filter_map(|peer| self.borrow_client(peer))
}
}
/// TODO: Remove me when 2024 Rust
///
/// <https://rust-lang.github.io/rfcs/3498-lifetime-capture-rules-2024.html#the-captures-trick>
trait Captures<U> {}
impl<T: ?Sized, U> Captures<U> for T {}
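As a usage sketch of the borrow/return flow above (a hypothetical helper relying on this module's imports, not part of the crate):

```rust
fn is_peer_borrowable<N: NetworkZone>(
    pool: &Arc<ClientPool<N>>,
    peer: &InternalPeerID<N::Addr>,
) -> bool {
    match pool.borrow_client(peer) {
        Some(client) => {
            // `ClientPoolDropGuard` derefs to `Client<N>`, so the connection
            // can be used directly while the guard is alive.
            let alive = !client.info.handle.is_closed();
            // `client` is dropped here, returning the `Client` to the pool.
            alive
        }
        None => false,
    }
}
```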

View file

@ -0,0 +1,72 @@
//! # Disconnect Monitor
//!
//! This module contains the [`disconnect_monitor`] task, which monitors connected peers
//! and removes them from the [`ClientPool`] when they disconnect.
use std::{
future::Future,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use futures::{stream::FuturesUnordered, StreamExt};
use tokio::sync::mpsc;
use tokio_util::sync::WaitForCancellationFutureOwned;
use tracing::instrument;
use monero_p2p::{client::InternalPeerID, handles::ConnectionHandle, NetworkZone};
use super::ClientPool;
/// The disconnect monitor task.
#[instrument(level = "info", skip_all)]
pub async fn disconnect_monitor<N: NetworkZone>(
mut new_connection_rx: mpsc::UnboundedReceiver<(ConnectionHandle, InternalPeerID<N::Addr>)>,
client_pool: Arc<ClientPool<N>>,
) {
tracing::info!("Starting peer disconnect monitor.");
let mut futs: FuturesUnordered<PeerDisconnectFut<N>> = FuturesUnordered::new();
loop {
tokio::select! {
Some((con_handle, peer_id)) = new_connection_rx.recv() => {
tracing::debug!("Monitoring {peer_id} for disconnect");
futs.push(PeerDisconnectFut {
closed_fut: con_handle.closed(),
peer_id: Some(peer_id),
});
}
Some(peer_id) = futs.next() => {
tracing::debug!("{peer_id} has disconnected, removing from client pool.");
client_pool.remove_client(&peer_id);
}
else => {
tracing::info!("Peer disconnect monitor shutting down.");
return;
}
}
}
}
/// A [`Future`] that resolves when a peer disconnects.
#[pin_project::pin_project]
struct PeerDisconnectFut<N: NetworkZone> {
/// The inner [`Future`] that resolves when a peer disconnects.
#[pin]
closed_fut: WaitForCancellationFutureOwned,
    /// The peer's ID.
peer_id: Option<InternalPeerID<N::Addr>>,
}
impl<N: NetworkZone> Future for PeerDisconnectFut<N> {
type Output = InternalPeerID<N::Addr>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
this.closed_fut
.poll(cx)
.map(|_| this.peer_id.take().unwrap())
}
}

View file

@ -0,0 +1,41 @@
use std::{
ops::{Deref, DerefMut},
sync::Arc,
};
use monero_p2p::{client::Client, NetworkZone};
use crate::client_pool::ClientPool;
/// A wrapper around [`Client`] which returns the client to the [`ClientPool`] when dropped.
pub struct ClientPoolDropGuard<N: NetworkZone> {
/// The [`ClientPool`] to return the peer to.
pub(super) pool: Arc<ClientPool<N>>,
/// The [`Client`].
///
    /// This is set to [`Some`] when this guard is created, then
    /// [`take`](Option::take)n and returned to the pool when dropped.
pub(super) client: Option<Client<N>>,
}
impl<N: NetworkZone> Deref for ClientPoolDropGuard<N> {
type Target = Client<N>;
fn deref(&self) -> &Self::Target {
self.client.as_ref().unwrap()
}
}
impl<N: NetworkZone> DerefMut for ClientPoolDropGuard<N> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.client.as_mut().unwrap()
}
}
impl<N: NetworkZone> Drop for ClientPoolDropGuard<N> {
fn drop(&mut self) {
let client = self.client.take().unwrap();
self.pool.add_client(client);
}
}

View file

@ -0,0 +1,12 @@
/// P2P config.
#[derive(Clone, Debug)]
pub struct P2PConfig {
    /// The number of outbound connections to make and try to keep.
pub outbound_connections: usize,
    /// The number of extra connections we can make if we are under load from the rest of Cuprate.
pub extra_outbound_connections: usize,
    /// The percent of outbound peers that should be gray, i.e. peers never connected to before.
///
/// Only values 0..=1 are valid.
pub gray_peers_percent: f64,
}
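For illustration, a hypothetical configuration (the numbers below are made up, not recommendations):

```rust
let config = P2PConfig {
    // Keep 12 outbound connections alive.
    outbound_connections: 12,
    // Allow up to 4 extra connections under load.
    extra_outbound_connections: 4,
    // Aim 70% of new outbound connections at gray peers.
    gray_peers_percent: 0.7,
};
```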

View file

@ -0,0 +1,291 @@
//! Outbound Connection Maintainer.
//!
//! This module handles maintaining the number of outbound connections defined in the [`P2PConfig`].
//! It also handles making extra connections when the peer set is under load or when we need data that
//! no connected peer has.
use std::sync::Arc;
use rand::{distributions::Bernoulli, prelude::*};
use tokio::{
sync::{mpsc, OwnedSemaphorePermit, Semaphore},
task::JoinSet,
time::{sleep, timeout},
};
use tower::{Service, ServiceExt};
use tracing::instrument;
use monero_p2p::{
client::{Client, ConnectRequest, HandshakeError},
services::{AddressBookRequest, AddressBookResponse},
AddressBook, NetworkZone,
};
use crate::{
client_pool::ClientPool,
config::P2PConfig,
constants::{HANDSHAKE_TIMEOUT, MAX_SEED_CONNECTIONS, OUTBOUND_CONNECTION_ATTEMPT_TIMEOUT},
};
enum OutboundConnectorError {
MaxConnections,
FailedToConnectToSeeds,
NoAvailablePeers,
}
/// A request from the peer set to make an outbound connection.
///
/// This will only be sent when the peer set is under load from the rest of Cuprate or the peer
/// set needs specific data that none of the currently connected peers have.
pub struct MakeConnectionRequest {
    /// A block we need that no connected peer has, due to pruning.
block_needed: Option<u64>,
}
/// The outbound connection count keeper.
///
/// This handles maintaining a minimum number of connections and making extra connections when needed, up to a maximum.
pub struct OutboundConnectionKeeper<N: NetworkZone, A, C> {
/// The pool of currently connected peers.
pub client_pool: Arc<ClientPool<N>>,
/// The channel that tells us to make new _extra_ outbound connections.
pub make_connection_rx: mpsc::Receiver<MakeConnectionRequest>,
/// The address book service
pub address_book_svc: A,
/// The service to connect to a specific peer.
pub connector_svc: C,
    /// A semaphore to keep the number of outbound peers constant.
pub outbound_semaphore: Arc<Semaphore>,
    /// The number of peers we have connected to because we needed more peers. If the `outbound_semaphore`
    /// is full and we need to connect to more peers, for blocks or because not enough peers are ready,
    /// we add a permit to the semaphore and keep track of it here, up to a limit set in the config.
pub extra_peers: usize,
/// The p2p config.
pub config: P2PConfig,
    /// The [`Bernoulli`] distribution which, when sampled, will return `true` if we should connect to a gray peer or
    /// `false` if we should connect to a white peer.
///
/// This is weighted to the percentage given in `config`.
pub peer_type_gen: Bernoulli,
}
impl<N, A, C> OutboundConnectionKeeper<N, A, C>
where
N: NetworkZone,
A: AddressBook<N>,
C: Service<ConnectRequest<N>, Response = Client<N>, Error = HandshakeError>,
C::Future: Send + 'static,
{
pub fn new(
config: P2PConfig,
client_pool: Arc<ClientPool<N>>,
make_connection_rx: mpsc::Receiver<MakeConnectionRequest>,
address_book_svc: A,
connector_svc: C,
) -> Self {
let peer_type_gen = Bernoulli::new(config.gray_peers_percent)
.expect("Gray peer percent is incorrect should be 0..=1");
Self {
client_pool,
make_connection_rx,
address_book_svc,
connector_svc,
outbound_semaphore: Arc::new(Semaphore::new(config.outbound_connections)),
extra_peers: 0,
config,
peer_type_gen,
}
}
/// Connects to random seeds to get peers and immediately disconnects
#[instrument(level = "info", skip(self))]
async fn connect_to_random_seeds(&mut self) -> Result<(), OutboundConnectorError> {
let seeds = N::SEEDS.choose_multiple(&mut thread_rng(), MAX_SEED_CONNECTIONS);
if seeds.len() == 0 {
panic!("No seed nodes available to get peers from");
}
// This isn't really needed here to limit connections as the seed nodes will be dropped when we have got
// peers from them.
let semaphore = Arc::new(Semaphore::new(seeds.len()));
let mut allowed_errors = seeds.len();
let mut handshake_futs = JoinSet::new();
for seed in seeds {
tracing::info!("Getting peers from seed node: {}", seed);
let fut = timeout(
HANDSHAKE_TIMEOUT,
self.connector_svc
.ready()
.await
.expect("Connector had an error in `poll_ready`")
.call(ConnectRequest {
addr: *seed,
permit: semaphore
.clone()
.try_acquire_owned()
.expect("This must have enough permits as we just set the amount."),
}),
);
// Spawn the handshake on a separate task with a timeout, so we don't get stuck connecting to a peer.
handshake_futs.spawn(fut);
}
while let Some(res) = handshake_futs.join_next().await {
if matches!(res, Err(_) | Ok(Err(_)) | Ok(Ok(Err(_)))) {
allowed_errors -= 1;
}
}
if allowed_errors == 0 {
Err(OutboundConnectorError::FailedToConnectToSeeds)
} else {
Ok(())
}
}
/// Connects to a given outbound peer.
#[instrument(level = "info", skip(self, permit), fields(%addr))]
async fn connect_to_outbound_peer(&mut self, permit: OwnedSemaphorePermit, addr: N::Addr) {
let client_pool = self.client_pool.clone();
let connection_fut = self
.connector_svc
.ready()
.await
.expect("Connector had an error in `poll_ready`")
.call(ConnectRequest { addr, permit });
tokio::spawn(async move {
if let Ok(Ok(peer)) = timeout(HANDSHAKE_TIMEOUT, connection_fut).await {
client_pool.add_new_client(peer);
}
});
}
/// Handles a request from the peer set for more peers.
async fn handle_peer_request(
&mut self,
req: &MakeConnectionRequest,
) -> Result<(), OutboundConnectorError> {
// try to get a permit.
let permit = self
.outbound_semaphore
.clone()
.try_acquire_owned()
.or_else(|_| {
// if we can't get a permit add one if we are below the max number of connections.
if self.extra_peers >= self.config.extra_outbound_connections {
// If we can't add a permit return an error.
Err(OutboundConnectorError::MaxConnections)
} else {
self.outbound_semaphore.add_permits(1);
self.extra_peers += 1;
Ok(self.outbound_semaphore.clone().try_acquire_owned().unwrap())
}
})?;
// try to get a random peer on any network zone from the address book.
let peer = self
.address_book_svc
.ready()
.await
.expect("Error in address book!")
.call(AddressBookRequest::TakeRandomPeer {
height: req.block_needed,
})
.await;
match peer {
Err(_) => {
// TODO: We should probably send peer requests to our connected peers rather than go to seeds.
tracing::warn!("No peers in address book which are available and have the data we need. Getting peers from seed nodes.");
self.connect_to_random_seeds().await?;
Err(OutboundConnectorError::NoAvailablePeers)
}
Ok(AddressBookResponse::Peer(peer)) => {
self.connect_to_outbound_peer(permit, peer.adr).await;
Ok(())
}
Ok(_) => panic!("peer list sent incorrect response!"),
}
}
/// Handles a free permit, by either connecting to a new peer or by removing a permit if we are above the
/// minimum number of outbound connections.
#[instrument(level = "debug", skip(self, permit))]
async fn handle_free_permit(
&mut self,
permit: OwnedSemaphorePermit,
) -> Result<(), OutboundConnectorError> {
if self.extra_peers > 0 {
tracing::debug!(
"Permit available but we are over the minimum number of peers, forgetting permit."
);
permit.forget();
self.extra_peers -= 1;
return Ok(());
}
tracing::debug!("Permit available, making outbound connection.");
let req = if self.peer_type_gen.sample(&mut thread_rng()) {
AddressBookRequest::TakeRandomGrayPeer { height: None }
} else {
// This will try white peers first then gray.
AddressBookRequest::TakeRandomPeer { height: None }
};
let Ok(AddressBookResponse::Peer(peer)) = self
.address_book_svc
.ready()
.await
.expect("Error in address book!")
.call(req)
.await
else {
tracing::warn!("No peers in peer list to make connection to.");
self.connect_to_random_seeds().await?;
return Err(OutboundConnectorError::NoAvailablePeers);
};
self.connect_to_outbound_peer(permit, peer.adr).await;
Ok(())
}
/// Runs the outbound connection count keeper.
pub async fn run(mut self) {
tracing::info!(
"Starting outbound connection maintainer, target outbound connections: {}",
self.config.outbound_connections
);
loop {
tokio::select! {
biased;
peer_req = self.make_connection_rx.recv() => {
let Some(peer_req) = peer_req else {
tracing::info!("Shutting down outbound connector, make connection channel closed.");
return;
};
// We can't really do much about errors in this function.
let _ = self.handle_peer_request(&peer_req).await;
},
                // This future is not cancellation safe, as you would lose your space in the queue, but since we are the only place
                // that actually acquires permits, that should be ok.
Ok(permit) = self.outbound_semaphore.clone().acquire_owned() => {
if self.handle_free_permit(permit).await.is_err() {
                        // If we got an error then we still have a free permit, so to prevent this from just looping
                        // uncontrollably, add a timeout.
sleep(OUTBOUND_CONNECTION_ATTEMPT_TIMEOUT).await;
}
}
}
}
}
}
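A minimal wiring sketch (hypothetical names, relying on this module's imports), assuming `address_book` and `connector` are already-built services satisfying the bounds above:

```rust
async fn run_maintainer<N, A, C>(
    config: P2PConfig,
    pool: Arc<ClientPool<N>>,
    address_book: A,
    connector: C,
) where
    N: NetworkZone,
    A: AddressBook<N>,
    C: Service<ConnectRequest<N>, Response = Client<N>, Error = HandshakeError>,
    C::Future: Send + 'static,
{
    // The peer set would hold on to `_make_connection_tx` to ask for extra peers.
    let (_make_connection_tx, make_connection_rx) = mpsc::channel(3);
    OutboundConnectionKeeper::new(config, pool, make_connection_rx, address_book, connector)
        .run()
        .await;
}
```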

View file

@ -0,0 +1,10 @@
use std::time::Duration;
/// The timeout we set on handshakes.
pub(crate) const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(30);
/// The maximum number of connections to make to seed nodes when we need peers.
pub(crate) const MAX_SEED_CONNECTIONS: usize = 3;
/// The timeout for when we fail to find a peer to connect to.
pub(crate) const OUTBOUND_CONNECTION_ATTEMPT_TIMEOUT: Duration = Duration::from_secs(5);

View file

@ -0,0 +1,15 @@
//! Cuprate's P2P Crate.
//!
//! This crate contains a [`ClientPool`](client_pool::ClientPool) which holds connected peers on a single [`NetworkZone`](monero_p2p::NetworkZone).
//!
//! This crate also contains the different routing methods that control how messages should be sent, i.e. broadcast to all
//! peers, or sent to a single peer.
//!
#![allow(dead_code)]
pub mod client_pool;
pub mod config;
pub mod connection_maintainer;
mod constants;
pub use config::P2PConfig;

27
p2p/dandelion/Cargo.toml Normal file
View file

@ -0,0 +1,27 @@
[package]
name = "dandelion_tower"
version = "0.1.0"
edition = "2021"
license = "MIT"
authors = ["Boog900"]
[features]
default = ["txpool"]
txpool = ["dep:rand_distr", "dep:tokio-util", "dep:tokio"]
[dependencies]
tower = { workspace = true, features = ["discover", "util"] }
tracing = { workspace = true, features = ["std"] }
futures = { workspace = true, features = ["std"] }
tokio = { workspace = true, features = ["rt", "sync", "macros"], optional = true }
tokio-util = { workspace = true, features = ["time"], optional = true }
rand = { workspace = true, features = ["std", "std_rng"] }
rand_distr = { workspace = true, features = ["std"], optional = true }
thiserror = { workspace = true }
[dev-dependencies]
tokio = { workspace = true, features = ["rt-multi-thread", "macros", "sync"] }
proptest = { workspace = true, features = ["default"] }

149
p2p/dandelion/src/config.rs Normal file
View file

@ -0,0 +1,149 @@
use std::{
ops::{Mul, Neg},
time::Duration,
};
/// Used when calculating the embargo timeout with the formula: `(-k*(k-1)*hop)/(2*ln(1-ep))`.
///
/// `(1 - ep)` is the probability that a transaction travels for `k` hops before a node's embargo timeout fires; this constant is `(1 - ep)`.
const EMBARGO_FULL_TRAVEL_PROBABILITY: f64 = 0.90;
/// The graph type to use for dandelion routing, the dandelion paper recommends [`Graph::FourRegular`].
///
/// The decision between line graphs and 4-regular graphs depends on the priorities of the system: if
/// linkability of transactions is a first-order concern then line graphs may be better, however 4-regular graphs
/// can give constant-order privacy benefits against adversaries with knowledge of the graph.
///
/// See appendix C of the dandelion++ paper.
#[derive(Default, Debug, Copy, Clone)]
pub enum Graph {
/// Line graph.
///
/// When this is selected one peer will be chosen from the outbound peers each epoch to route transactions
/// to.
///
/// In general this is not recommended over [`Graph::FourRegular`] but may be better for certain systems.
Line,
/// Quasi-4-Regular.
///
/// When this is selected two peers will be chosen from the outbound peers each epoch, each stem transaction
/// received will then be sent to one of these two peers. Transactions from the same node will always go to the
/// same peer.
#[default]
FourRegular,
}
/// The config used to initialize dandelion.
///
/// One notable item missing from the config is `Tbase`, AKA the timeout parameter used to prevent black hole
/// attacks. It is omitted from the config for simplicity; `Tbase` is calculated using the formula provided
/// in the D++ paper:
///
/// `(-k*(k-1)*hop)/(2*log(1-ep))`
///
/// Where `k` is calculated from the fluff probability, `hop` is `time_between_hop` and `ep` is fixed at `0.1`.
///
#[derive(Debug, Clone, Copy)]
pub struct DandelionConfig {
/// The time it takes for a stem transaction to pass through a node, including network latency.
///
/// It's better to be safe and use a slightly higher value rather than a lower one.
pub time_between_hop: Duration,
/// The duration of an epoch.
pub epoch_duration: Duration,
/// `q` in the dandelion paper: the probability that a node will be in the fluff state for
/// a certain epoch.
///
/// The dandelion paper recommends making this value small, but the smaller this value, the higher
/// the broadcast latency.
///
/// It is recommended for this value to be <= `0.2`; this value *MUST* be in the range `0.0..=1.0`.
pub fluff_probability: f64,
/// The graph type.
pub graph: Graph,
}
impl DandelionConfig {
/// Returns the number of outbound peers to use to stem transactions.
///
/// This value depends on the [`Graph`] chosen.
pub fn number_of_stems(&self) -> usize {
match self.graph {
Graph::Line => 1,
Graph::FourRegular => 2,
}
}
/// Returns the average embargo timeout, `Tbase` in the dandelion++ paper.
///
/// This is the average embargo timeout for _this node only_; with `k` nodes each setting an embargo timeout
/// using the exponential distribution, the average time until one of them fluffs is `Tbase / k`.
pub fn average_embargo_timeout(&self) -> Duration {
// we set k equal to the expected stem length with this fluff probability.
let k = self.expected_stem_length();
let time_between_hop = self.time_between_hop.as_secs_f64();
Duration::from_secs_f64(
// (-k*(k-1)*hop)/(2*ln(1-ep))
((k.neg() * (k - 1.0) * time_between_hop)
/ EMBARGO_FULL_TRAVEL_PROBABILITY.ln().mul(2.0))
.ceil(),
)
}
/// Returns the expected length of a stem.
pub fn expected_stem_length(&self) -> f64 {
self.fluff_probability.recip()
}
}
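// A worked instance of the embargo timeout formula above (an editorial sketch,
// not crate API), using the monerod-like parameters from the
// `monerod_average_embargo_timeout` test below:
//
//   k     = expected stem length = 1 / fluff_probability = 1 / 0.125 = 8
//   hop   = 0.175s
//   Tbase = (-k * (k - 1) * hop) / (2 * ln(0.90))
//         = (-8 * 7 * 0.175) / (2 * -0.10536...)
//         ≈ 46.5s, which `.ceil()` rounds up to 47s.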
#[cfg(test)]
mod tests {
use std::{
f64::consts::E,
ops::{Mul, Neg},
time::Duration,
};
use proptest::{prop_assert, proptest};
use super::*;
#[test]
fn monerod_average_embargo_timeout() {
let cfg = DandelionConfig {
time_between_hop: Duration::from_millis(175),
epoch_duration: Default::default(),
fluff_probability: 0.125,
graph: Default::default(),
};
assert_eq!(cfg.average_embargo_timeout(), Duration::from_secs(47));
}
proptest! {
#[test]
fn embargo_full_travel_probability_correct(time_between_hop in 1_u64..1_000_000, fluff_probability in 0.000001..1.0) {
let cfg = DandelionConfig {
time_between_hop: Duration::from_millis(time_between_hop),
epoch_duration: Default::default(),
fluff_probability,
graph: Default::default(),
};
// Assert that the `average_embargo_timeout` is high enough that the probability of `k` nodes
// not diffusing before the expected diffusion is greater than or equal to `EMBARGO_FULL_TRAVEL_PROBABILITY`,
//
// using the formula from appendix B.5.
let k = cfg.expected_stem_length();
let time_between_hop = cfg.time_between_hop.as_secs_f64();
let average_embargo_timeout = cfg.average_embargo_timeout().as_secs_f64();
let probability =
E.powf((k.neg() * (k - 1.0) * time_between_hop) / average_embargo_timeout.mul(2.0));
prop_assert!(probability >= EMBARGO_FULL_TRAVEL_PROBABILITY, "probability = {probability}, average_embargo_timeout = {average_embargo_timeout}");
}
}
}

70
p2p/dandelion/src/lib.rs Normal file
View file

@ -0,0 +1,70 @@
//! # Dandelion Tower
//!
//! This crate implements [dandelion++](https://arxiv.org/pdf/1805.11060.pdf), using [`tower`].
//!
//! This crate provides 2 [`tower::Service`]s, a [`DandelionRouter`] and a [`DandelionPool`](pool::DandelionPool).
//! The router is pretty minimal and only handles the data absolutely necessary to route transactions, whereas the
//! pool keeps track of all data necessary for dandelion++ but requires you to provide a backing tx-pool.
//!
//! This split was done not because the [`DandelionPool`](pool::DandelionPool) is unnecessary but because it is hard
//! to cover a wide range of projects when abstracting over the tx-pool. Not using the [`DandelionPool`](pool::DandelionPool)
//! requires you to implement part of the paper yourself.
//!
//! # Features
//!
//! This crate has only one feature, `txpool`, which enables [`DandelionPool`](pool::DandelionPool).
//!
//! # Needed Services
//!
//! To use this crate you need to provide a few types.
//!
//! ## Diffuse Service
//!
//! This service should implement diffusion: sending the transaction to every peer, with each peer
//! having a timer sampled from the exponential distribution, and batch-sending all txs that were queued in that time.
//!
//! The diffuse service should have a request of [`DiffuseRequest`](traits::DiffuseRequest) and its error
//! should be [`tower::BoxError`].
//!
//! ## Outbound Peer Discoverer
//!
//! The outbound peer [`Discover`](tower::discover::Discover) should provide a stream of randomly selected outbound
//! peers; these peers will then be used to route stem txs to.
//!
//! The peers will not be returned anywhere, so it is recommended to wrap them in some sort of drop guard that returns
//! them back to a peer set.
//!
//! ## Peer Service
//!
//! This service represents a connection to an individual peer; this should be returned from the Outbound Peer
//! Discoverer. It should immediately send the transaction to the peer when requested, i.e. it should _not_ set
//! a timer.
//!
//! The peer service should have a request of [`StemRequest`](traits::StemRequest) and its error
//! should be [`tower::BoxError`].
//!
//! ## Backing Pool
//!
//! ([`DandelionPool`](pool::DandelionPool) only)
//!
//! This service is a backing tx-pool, in memory or on disk.
//! The backing pool should have a request of [`TxStoreRequest`](traits::TxStoreRequest) and a response of
//! [`TxStoreResponse`](traits::TxStoreResponse), with an error of [`tower::BoxError`].
//!
//! Users should keep a handle to the backing pool to request data from it. When requesting data you _must_
//! make sure you only look in the public pool if you are going to be giving data to peers, as stem transactions
//! must stay private.
//!
//! When removing data, for example because of a new block, you can remove from both pools provided it doesn't leak
//! any data about stem transactions. You will probably want to set up a task that monitors the tx pool for stuck transactions,
//! transactions that slipped in just as one was removed, etc.; this crate does not handle that.
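//!
//! # Example: Stub Services
//!
//! A minimal sketch of the diffuse and peer services built with [`tower::service_fn`],
//! assuming a placeholder `Vec<u8>` transaction type (a real implementation would batch
//! and delay diffusion as described above):
//!
//! ```rust,ignore
//! use dandelion_tower::traits::{DiffuseRequest, StemRequest};
//! use tower::util::service_fn;
//!
//! // Diffuse service: broadcast the tx to all connected peers (timers omitted here).
//! let diffuse_svc = service_fn(|DiffuseRequest(tx): DiffuseRequest<Vec<u8>>| async move {
//!     // Send `tx` to every peer, each with an exponentially distributed delay.
//!     Ok::<(), tower::BoxError>(())
//! });
//!
//! // Peer service: immediately forward the tx to a single peer, with no timer.
//! let peer_svc = service_fn(|StemRequest(tx): StemRequest<Vec<u8>>| async move {
//!     Ok::<(), tower::BoxError>(())
//! });
//! ```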
mod config;
#[cfg(feature = "txpool")]
pub mod pool;
mod router;
#[cfg(test)]
mod tests;
pub mod traits;
pub use config::*;
pub use router::*;

510
p2p/dandelion/src/pool.rs Normal file
View file

@ -0,0 +1,510 @@
//! # Dandelion++ Pool
//!
//! This module contains [`DandelionPool`] which is a thin wrapper around a backing transaction store,
//! which fully implements the dandelion++ protocol.
//!
//! ### How To Get Txs From [`DandelionPool`].
//!
//! [`DandelionPool`] does not provide a full tx-pool API. You cannot retrieve transactions from it or
//! check what transactions are in it; to do this you must keep a handle to the backing transaction store
//! yourself.
//!
//! The reason for this is that the [`DandelionPool`] would only be passing these requests on to the backing
//! pool, so it makes sense to remove the "middle man".
//!
//! ### Keep Stem Transactions Hidden
//!
//! When using your handle to the backing store, remember to keep transactions in the stem pool hidden:
//! handle any requests to the tx-pool as if the stem side of the pool does not exist.
//!
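//! ### Example
//!
//! A sketch of handing a fluffed tx to the pool, mirroring this crate's tests; the
//! `router`, `backing_pool` and `config` values are assumed to be set up as described
//! on [`start_dandelion_pool`]:
//!
//! ```rust,ignore
//! let mut pool_svc = start_dandelion_pool(15, router, backing_pool, config);
//!
//! pool_svc
//!     .ready()
//!     .await?
//!     .call(IncomingTx {
//!         tx: 0_usize,
//!         tx_id: 1_usize,
//!         tx_state: TxState::Fluff,
//!     })
//!     .await?;
//! ```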
use std::{
collections::{HashMap, HashSet},
future::Future,
hash::Hash,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use futures::{FutureExt, StreamExt};
use rand::prelude::*;
use rand_distr::Exp;
use tokio::{
sync::{mpsc, oneshot},
task::JoinSet,
};
use tokio_util::{sync::PollSender, time::DelayQueue};
use tower::{Service, ServiceExt};
use tracing::Instrument;
use crate::{
traits::{TxStoreRequest, TxStoreResponse},
DandelionConfig, DandelionRouteReq, DandelionRouterError, State, TxState,
};
/// Start the [`DandelionPool`].
///
/// This function spawns the [`DandelionPool`] and returns [`DandelionPoolService`] which can be used to send
/// requests to the pool.
///
/// ### Args
///
/// - `buffer_size` is the size of the channel's buffer between the [`DandelionPoolService`] and [`DandelionPool`].
/// - `dandelion_router` is the router service, kept generic instead of [`DandelionRouter`](crate::DandelionRouter) to allow
/// users to customise routing functionality.
/// - `backing_pool` is the backing transaction storage service.
/// - `config` is [`DandelionConfig`].
pub fn start_dandelion_pool<P, R, Tx, TxID, PID>(
buffer_size: usize,
dandelion_router: R,
backing_pool: P,
config: DandelionConfig,
) -> DandelionPoolService<Tx, TxID, PID>
where
Tx: Clone + Send + 'static,
TxID: Hash + Eq + Clone + Send + 'static,
PID: Hash + Eq + Clone + Send + 'static,
P: Service<
TxStoreRequest<Tx, TxID>,
Response = TxStoreResponse<Tx, TxID>,
Error = tower::BoxError,
> + Send
+ 'static,
P::Future: Send + 'static,
R: Service<DandelionRouteReq<Tx, PID>, Response = State, Error = DandelionRouterError>
+ Send
+ 'static,
R::Future: Send + 'static,
{
let (tx, rx) = mpsc::channel(buffer_size);
let pool = DandelionPool {
dandelion_router,
backing_pool,
routing_set: JoinSet::new(),
stem_origins: HashMap::new(),
embargo_timers: DelayQueue::new(),
embargo_dist: Exp::new(1.0 / config.average_embargo_timeout().as_secs_f64()).unwrap(),
config,
_tx: PhantomData,
};
let span = tracing::debug_span!("dandelion_pool");
tokio::spawn(pool.run(rx).instrument(span));
DandelionPoolService {
tx: PollSender::new(tx),
}
}
#[derive(Copy, Clone, Debug, thiserror::Error)]
#[error("The dandelion pool was shutdown")]
pub struct DandelionPoolShutDown;
/// An incoming transaction for the [`DandelionPool`] to handle.
///
/// Users may notice there is no way to check if the dandelion pool wants a tx according to an inventory message, as seen
/// in Bitcoin; there is only a request for a full tx. Users should look in the *public* backing pool to handle inv messages,
/// and request txs even if they are in the stem pool.
pub struct IncomingTx<Tx, TxID, PID> {
/// The transaction.
///
/// It is recommended to put this in an [`Arc`](std::sync::Arc) as it needs to be cloned to send to the backing
/// tx pool and [`DandelionRouter`](crate::DandelionRouter).
pub tx: Tx,
/// The transaction ID.
pub tx_id: TxID,
/// The routing state of this transaction.
pub tx_state: TxState<PID>,
}
/// The dandelion tx pool service.
#[derive(Clone)]
pub struct DandelionPoolService<Tx, TxID, PID> {
/// The channel to [`DandelionPool`].
tx: PollSender<(IncomingTx<Tx, TxID, PID>, oneshot::Sender<()>)>,
}
impl<Tx, TxID, PID> Service<IncomingTx<Tx, TxID, PID>> for DandelionPoolService<Tx, TxID, PID>
where
Tx: Clone + Send,
TxID: Hash + Eq + Clone + Send + 'static,
PID: Hash + Eq + Clone + Send + 'static,
{
type Response = ();
type Error = DandelionPoolShutDown;
type Future =
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.tx.poll_reserve(cx).map_err(|_| DandelionPoolShutDown)
}
fn call(&mut self, req: IncomingTx<Tx, TxID, PID>) -> Self::Future {
// Although the channel isn't sending anything, we want to wait for the request to be handled before continuing.
let (tx, rx) = oneshot::channel();
let res = self
.tx
.send_item((req, tx))
.map_err(|_| DandelionPoolShutDown);
async move {
res?;
rx.await.expect("Oneshot dropped before response!");
Ok(())
}
.boxed()
}
}
/// The dandelion++ tx pool.
///
/// See the [module docs](self) for more.
pub struct DandelionPool<P, R, Tx, TxID, PID> {
/// The dandelion++ router
dandelion_router: R,
/// The backing tx storage.
backing_pool: P,
/// The set of tasks that are running the future returned from `dandelion_router`.
routing_set: JoinSet<(TxID, Result<State, TxState<PID>>)>,
/// The origin of stem transactions.
stem_origins: HashMap<TxID, HashSet<PID>>,
/// Current stem pool embargo timers.
embargo_timers: DelayQueue<TxID>,
/// The distribution to sample to get embargo timers.
embargo_dist: Exp<f64>,
/// The d++ config.
config: DandelionConfig,
_tx: PhantomData<Tx>,
}
impl<P, R, Tx, TxID, PID> DandelionPool<P, R, Tx, TxID, PID>
where
Tx: Clone + Send,
TxID: Hash + Eq + Clone + Send + 'static,
PID: Hash + Eq + Clone + Send + 'static,
P: Service<
TxStoreRequest<Tx, TxID>,
Response = TxStoreResponse<Tx, TxID>,
Error = tower::BoxError,
>,
P::Future: Send + 'static,
R: Service<DandelionRouteReq<Tx, PID>, Response = State, Error = DandelionRouterError>,
R::Future: Send + 'static,
{
/// Stores the tx in the backing pool's stem pool, setting the embargo timer and stem origin, and stemming the tx.
async fn store_tx_and_stem(
&mut self,
tx: Tx,
tx_id: TxID,
from: Option<PID>,
) -> Result<(), tower::BoxError> {
self.backing_pool
.ready()
.await?
.call(TxStoreRequest::Store(
tx.clone(),
tx_id.clone(),
State::Stem,
))
.await?;
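// Sample the embargo timer from an exponential distribution whose mean is the
// config's average embargo timeout (`embargo_dist` was built with rate `1 / Tbase`).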
let embargo_timer = self.embargo_dist.sample(&mut thread_rng());
tracing::debug!(
"Setting embargo timer for stem tx: {} seconds.",
embargo_timer
);
self.embargo_timers
.insert(tx_id.clone(), Duration::from_secs_f64(embargo_timer));
self.stem_tx(tx, tx_id, from).await
}
/// Stems the tx, setting the stem origin if it wasn't already set.
///
/// This function does not add the tx to the backing pool.
async fn stem_tx(
&mut self,
tx: Tx,
tx_id: TxID,
from: Option<PID>,
) -> Result<(), tower::BoxError> {
if let Some(peer) = &from {
self.stem_origins
.entry(tx_id.clone())
.or_default()
.insert(peer.clone());
}
let state = from
.map(|from| TxState::Stem { from })
.unwrap_or(TxState::Local);
let fut = self
.dandelion_router
.ready()
.await?
.call(DandelionRouteReq {
tx,
state: state.clone(),
});
self.routing_set
.spawn(fut.map(|res| (tx_id, res.map_err(|_| state))));
Ok(())
}
/// Stores the tx in the backing pool and fluffs the tx, removing the stem data for this tx.
async fn store_and_fluff_tx(&mut self, tx: Tx, tx_id: TxID) -> Result<(), tower::BoxError> {
// Fluff the tx first to prevent timing attacks where we could fluff at different average times
// depending on whether the tx was already in the stem pool or not.
// Massively overkill, but this is a minimal change.
self.fluff_tx(tx.clone(), tx_id.clone()).await?;
// Remove the tx from the maps used during the stem phase.
self.stem_origins.remove(&tx_id);
self.backing_pool
.ready()
.await?
.call(TxStoreRequest::Store(tx, tx_id, State::Fluff))
.await?;
// The key for this is *Not* the tx_id, it is given on insert, so just keep the timer in the
// map. These timers should be relatively short, so it shouldn't be a problem.
//self.embargo_timers.try_remove(&tx_id);
Ok(())
}
/// Fluffs a tx, does not add the tx to the tx pool.
async fn fluff_tx(&mut self, tx: Tx, tx_id: TxID) -> Result<(), tower::BoxError> {
let fut = self
.dandelion_router
.ready()
.await?
.call(DandelionRouteReq {
tx,
state: TxState::Fluff,
});
self.routing_set
.spawn(fut.map(|res| (tx_id, res.map_err(|_| TxState::Fluff))));
Ok(())
}
/// Handles an incoming [`IncomingTx`].
async fn handle_incoming_tx(
&mut self,
tx: Tx,
tx_state: TxState<PID>,
tx_id: TxID,
) -> Result<(), tower::BoxError> {
let TxStoreResponse::Contains(have_tx) = self
.backing_pool
.ready()
.await?
.call(TxStoreRequest::Contains(tx_id.clone()))
.await?
else {
panic!("Backing tx pool responded with wrong response for request.");
};
// If we have already fluffed this tx then we don't need to do anything.
if have_tx == Some(State::Fluff) {
tracing::debug!("Already fluffed incoming tx, ignoring.");
return Ok(());
}
match tx_state {
TxState::Stem { from } => {
if self
.stem_origins
.get(&tx_id)
.is_some_and(|peers| peers.contains(&from))
{
tracing::debug!("Received stem tx twice from same peer, fluffing it");
// The same peer sent us a tx twice, fluff it.
self.promote_and_fluff_tx(tx_id).await
} else {
// This could be a new tx or it could have already been stemmed, but we still stem it again
// unless the same peer sends us a tx twice.
tracing::debug!("Stemming incoming tx");
self.store_tx_and_stem(tx, tx_id, Some(from)).await
}
}
TxState::Fluff => {
tracing::debug!("Fluffing incoming tx");
self.store_and_fluff_tx(tx, tx_id).await
}
TxState::Local => {
// If we have already stemmed this tx then there is nothing to do.
if have_tx.is_some() {
tracing::debug!("Received a local tx that we already have, skipping");
return Ok(());
}
tracing::debug!("Steming local transaction");
self.store_tx_and_stem(tx, tx_id, None).await
}
}
}
/// Promotes a tx to the public pool.
async fn promote_tx(&mut self, tx_id: TxID) -> Result<(), tower::BoxError> {
// Remove the tx from the maps used during the stem phase.
self.stem_origins.remove(&tx_id);
// The key for this is *Not* the tx_id, it is given on insert, so just keep the timer in the
// map. These timers should be relatively short, so it shouldn't be a problem.
//self.embargo_timers.try_remove(&tx_id);
self.backing_pool
.ready()
.await?
.call(TxStoreRequest::Promote(tx_id))
.await?;
Ok(())
}
/// Promotes a tx to the public fluff pool and fluffs the tx.
async fn promote_and_fluff_tx(&mut self, tx_id: TxID) -> Result<(), tower::BoxError> {
tracing::debug!("Promoting transaction to public pool and fluffing it.");
let TxStoreResponse::Transaction(tx) = self
.backing_pool
.ready()
.await?
.call(TxStoreRequest::Get(tx_id.clone()))
.await?
else {
panic!("Backing tx pool responded with wrong response for request.");
};
let Some((tx, state)) = tx else {
tracing::debug!("Could not find tx, skipping.");
return Ok(());
};
if state == State::Fluff {
tracing::debug!("Transaction already fluffed, skipping.");
return Ok(());
}
self.promote_tx(tx_id.clone()).await?;
self.fluff_tx(tx, tx_id).await
}
/// Returns a tx stored in the fluff _OR_ stem pool.
async fn get_tx_from_pool(&mut self, tx_id: TxID) -> Result<Option<Tx>, tower::BoxError> {
let TxStoreResponse::Transaction(tx) = self
.backing_pool
.ready()
.await?
.call(TxStoreRequest::Get(tx_id))
.await?
else {
panic!("Backing tx pool responded with wrong response for request.");
};
Ok(tx.map(|tx| tx.0))
}
/// Starts the [`DandelionPool`].
async fn run(
mut self,
mut rx: mpsc::Receiver<(IncomingTx<Tx, TxID, PID>, oneshot::Sender<()>)>,
) {
tracing::debug!("Starting dandelion++ tx-pool, config: {:?}", self.config);
// On start up we just fluff all txs left in the stem pool.
let Ok(TxStoreResponse::IDs(ids)) = (&mut self.backing_pool)
.oneshot(TxStoreRequest::IDsInStemPool)
.await
else {
tracing::error!("Failed to get transactions in stem pool.");
return;
};
tracing::debug!(
"Fluffing {} txs that are currently in the stem pool",
ids.len()
);
for id in ids {
if let Err(e) = self.promote_and_fluff_tx(id).await {
tracing::error!("Failed to fluff tx in the stem pool at start up, {e}.");
return;
}
}
loop {
tracing::trace!("Waiting for next event.");
tokio::select! {
// biased to handle current txs before routing new ones.
biased;
Some(fired) = self.embargo_timers.next() => {
tracing::debug!("Embargo timer fired, did not see stem tx in time.");
let tx_id = fired.into_inner();
if let Err(e) = self.promote_and_fluff_tx(tx_id).await {
tracing::error!("Error handling fired embargo timer: {e}");
return;
}
}
Some(Ok((tx_id, res))) = self.routing_set.join_next() => {
tracing::trace!("Received d++ routing result.");
let res = match res {
Ok(State::Fluff) => {
tracing::debug!("Transaction was fluffed upgrading it to the public pool.");
self.promote_tx(tx_id).await
}
Err(tx_state) => {
tracing::debug!("Error routing transaction, trying again.");
match self.get_tx_from_pool(tx_id.clone()).await {
Ok(Some(tx)) => match tx_state {
TxState::Fluff => self.fluff_tx(tx, tx_id).await,
TxState::Stem { from } => self.stem_tx(tx, tx_id, Some(from)).await,
TxState::Local => self.stem_tx(tx, tx_id, None).await,
}
Err(e) => Err(e),
_ => continue,
}
}
Ok(State::Stem) => continue,
};
if let Err(e) = res {
tracing::error!("Error handling transaction routing return: {e}");
return;
}
}
req = rx.recv() => {
tracing::debug!("Received new tx to route.");
let Some((IncomingTx { tx, tx_state, tx_id }, res_tx)) = req else {
return;
};
if let Err(e) = self.handle_incoming_tx(tx, tx_state, tx_id).await {
let _ = res_tx.send(());
tracing::error!("Error handling transaction in dandelion pool: {e}");
return;
}
let _ = res_tx.send(());
}
}
}
}
}

348
p2p/dandelion/src/router.rs Normal file
View file

@ -0,0 +1,348 @@
//! # Dandelion++ Router
//!
//! This module contains [`DandelionRouter`], a [`Service`] that handles keeping the
//! current dandelion++ [`State`] and deciding where to send transactions based on their [`TxState`].
//!
//! ### What The Router Does Not Do
//!
//! It does not handle anything to do with keeping transactions long term, i.e. embargo timers and handling
//! loops in the stem. It is up to implementers to do this if they decide not to use [`DandelionPool`](crate::pool::DandelionPool).
//!
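//! ### Example
//!
//! A sketch of routing a stem tx, mirroring this crate's tests; `broadcast_svc`,
//! `outbound_peer_svc`, `config` and `peer_id` are assumed to be provided by the
//! caller and to satisfy the bounds on [`DandelionRouter::new`]:
//!
//! ```rust,ignore
//! let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
//!
//! let state = router
//!     .ready()
//!     .await?
//!     .call(DandelionRouteReq {
//!         tx: 0_usize,
//!         state: TxState::Stem { from: peer_id },
//!     })
//!     .await?;
//! ```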
use std::{
collections::HashMap,
future::Future,
hash::Hash,
marker::PhantomData,
pin::Pin,
task::{ready, Context, Poll},
time::Instant,
};
use futures::TryFutureExt;
use rand::{distributions::Bernoulli, prelude::*, thread_rng};
use tower::{
discover::{Change, Discover},
Service,
};
use crate::{
traits::{DiffuseRequest, StemRequest},
DandelionConfig,
};
/// An error returned from the [`DandelionRouter`]
#[derive(thiserror::Error, Debug)]
pub enum DandelionRouterError {
/// This error is probably recoverable, so the request should be retried.
#[error("Peer chosen to route stem txs to had an err: {0}.")]
PeerError(tower::BoxError),
/// The broadcast service returned an error.
#[error("Broadcast service returned an err: {0}.")]
BroadcastError(tower::BoxError),
/// The outbound peer discoverer returned an error, this is critical.
#[error("The outbound peer discoverer returned an err: {0}.")]
OutboundPeerDiscoverError(tower::BoxError),
/// The outbound peer discoverer returned [`None`].
#[error("The outbound peer discoverer exited.")]
OutboundPeerDiscoverExited,
}
/// The dandelion++ state.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum State {
/// Fluff state; in this state we are diffusing stem transactions to all peers.
Fluff,
/// Stem state; in this state we are stemming stem transactions to a single outbound peer.
Stem,
}
/// The routing state of a transaction.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum TxState<ID> {
/// Fluff state.
Fluff,
/// Stem state.
Stem {
/// The ID of the peer who sent us this transaction.
from: ID,
},
/// Local - the transaction originated from our node.
Local,
}
/// A request to route a transaction.
pub struct DandelionRouteReq<Tx, ID> {
/// The transaction.
pub tx: Tx,
/// The transaction state.
pub state: TxState<ID>,
}
/// The dandelion router service.
pub struct DandelionRouter<P, B, ID, S, Tx> {
// `stem_peers` is `pub(crate)` for tests.
/// A [`Discover`] where we can get outbound peers from.
outbound_peer_discover: Pin<Box<P>>,
/// A [`Service`] which handles broadcasting (diffusing) transactions.
broadcast_svc: B,
/// The current state.
current_state: State,
/// The time at which this epoch started.
epoch_start: Instant,
/// The stem route our local transactions will be sent to.
local_route: Option<ID>,
/// A [`HashMap`] linking a peer's ID to the ID of its assigned stem peer in `stem_peers`.
stem_routes: HashMap<ID, ID>,
/// Peers we are using for stemming.
///
/// This will contain peers, even in [`State::Fluff`] to allow us to stem [`TxState::Local`]
/// transactions.
pub(crate) stem_peers: HashMap<ID, S>,
/// The distribution to sample to get the [`State`]; `true` is [`State::Fluff`].
state_dist: Bernoulli,
/// The config.
config: DandelionConfig,
/// The router's tracing span.
span: tracing::Span,
_tx: PhantomData<Tx>,
}
impl<Tx, ID, P, B, S> DandelionRouter<P, B, ID, S, Tx>
where
ID: Hash + Eq + Clone,
P: Discover<Key = ID, Service = S, Error = tower::BoxError>,
B: Service<DiffuseRequest<Tx>, Error = tower::BoxError>,
S: Service<StemRequest<Tx>, Error = tower::BoxError>,
{
/// Creates a new [`DandelionRouter`], with the provided services and config.
///
/// # Panics
/// This function panics if [`DandelionConfig::fluff_probability`] is not `0.0..=1.0`.
pub fn new(broadcast_svc: B, outbound_peer_discover: P, config: DandelionConfig) -> Self {
// get the current state
let state_dist = Bernoulli::new(config.fluff_probability)
.expect("Fluff probability was not between 0 and 1");
let current_state = if state_dist.sample(&mut thread_rng()) {
State::Fluff
} else {
State::Stem
};
DandelionRouter {
outbound_peer_discover: Box::pin(outbound_peer_discover),
broadcast_svc,
current_state,
epoch_start: Instant::now(),
local_route: None,
stem_routes: HashMap::new(),
stem_peers: HashMap::new(),
state_dist,
config,
span: tracing::debug_span!("dandelion_router", state = ?current_state),
_tx: PhantomData,
}
}
/// This function gets the number of outbound peers required for the selected [`Graph`](crate::Graph) from the [`Discover`].
fn poll_prepare_graph(
&mut self,
cx: &mut Context<'_>,
) -> Poll<Result<(), DandelionRouterError>> {
let peers_needed = match self.current_state {
State::Stem => self.config.number_of_stems(),
// When in the fluff state we only need one peer, the one for our txs.
State::Fluff => 1,
};
while self.stem_peers.len() < peers_needed {
match ready!(self
.outbound_peer_discover
.as_mut()
.poll_discover(cx)
.map_err(DandelionRouterError::OutboundPeerDiscoverError))
.ok_or(DandelionRouterError::OutboundPeerDiscoverExited)??
{
Change::Insert(key, svc) => {
self.stem_peers.insert(key, svc);
}
Change::Remove(key) => {
self.stem_peers.remove(&key);
}
}
}
Poll::Ready(Ok(()))
}
fn fluff_tx(&mut self, tx: Tx) -> B::Future {
self.broadcast_svc.call(DiffuseRequest(tx))
}
fn stem_tx(&mut self, tx: Tx, from: ID) -> S::Future {
loop {
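// Get or create the stem route for this peer; if the previously chosen peer has
// since been removed from `stem_peers`, clear the route and pick a new one.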
let stem_route = self.stem_routes.entry(from.clone()).or_insert_with(|| {
self.stem_peers
.iter()
.choose(&mut thread_rng())
.expect("No peers in `stem_peers` was poll_ready called?")
.0
.clone()
});
let Some(peer) = self.stem_peers.get_mut(stem_route) else {
self.stem_routes.remove(&from);
continue;
};
return peer.call(StemRequest(tx));
}
}
fn stem_local_tx(&mut self, tx: Tx) -> S::Future {
loop {
let stem_route = self.local_route.get_or_insert_with(|| {
self.stem_peers
.iter()
.choose(&mut thread_rng())
.expect("No peers in `stem_peers` was poll_ready called?")
.0
.clone()
});
let Some(peer) = self.stem_peers.get_mut(stem_route) else {
self.local_route.take();
continue;
};
return peer.call(StemRequest(tx));
}
}
}
/*
## Generics ##
Tx: The tx type
ID: Peer Id type - unique identifier for nodes.
P: Peer Set discover - where we can get outbound peers from
B: Broadcast service - where we send txs to get diffused.
S: The Peer service - handles routing messages to a single node.
*/
impl<Tx, ID, P, B, S> Service<DandelionRouteReq<Tx, ID>> for DandelionRouter<P, B, ID, S, Tx>
where
ID: Hash + Eq + Clone,
P: Discover<Key = ID, Service = S, Error = tower::BoxError>,
B: Service<DiffuseRequest<Tx>, Error = tower::BoxError>,
B::Future: Send + 'static,
S: Service<StemRequest<Tx>, Error = tower::BoxError>,
S::Future: Send + 'static,
{
type Response = State;
type Error = DandelionRouterError;
type Future =
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.epoch_start.elapsed() > self.config.epoch_duration {
// clear all the stem routing data.
self.stem_peers.clear();
self.stem_routes.clear();
self.local_route.take();
self.current_state = if self.state_dist.sample(&mut thread_rng()) {
State::Fluff
} else {
State::Stem
};
self.span
.record("state", format!("{:?}", self.current_state));
tracing::debug!(parent: &self.span, "Starting new d++ epoch",);
self.epoch_start = Instant::now();
}
let mut peers_pending = false;
let span = &self.span;
self.stem_peers
.retain(|_, peer_svc| match peer_svc.poll_ready(cx) {
Poll::Ready(res) => res
.inspect_err(|e| {
tracing::debug!(
parent: span,
"Peer returned an error on `poll_ready`: {e}, removing from router.",
)
})
.is_ok(),
Poll::Pending => {
// Pending peers should be kept - they have not errored yet.
peers_pending = true;
true
}
});
if peers_pending {
return Poll::Pending;
}
// Now that we have removed the failed peers, check if we still have enough for the chosen graph.
ready!(self.poll_prepare_graph(cx)?);
ready!(self
.broadcast_svc
.poll_ready(cx)
.map_err(DandelionRouterError::BroadcastError)?);
Poll::Ready(Ok(()))
}
fn call(&mut self, req: DandelionRouteReq<Tx, ID>) -> Self::Future {
tracing::trace!(parent: &self.span, "Handling route request.");
match req.state {
TxState::Fluff => Box::pin(
self.fluff_tx(req.tx)
.map_ok(|_| State::Fluff)
.map_err(DandelionRouterError::BroadcastError),
),
TxState::Stem { from } => match self.current_state {
State::Fluff => {
tracing::debug!(parent: &self.span, "Fluffing stem tx.");
Box::pin(
self.fluff_tx(req.tx)
.map_ok(|_| State::Fluff)
.map_err(DandelionRouterError::BroadcastError),
)
}
State::Stem => {
tracing::trace!(parent: &self.span, "Stemming transaction");
Box::pin(
self.stem_tx(req.tx, from)
.map_ok(|_| State::Stem)
.map_err(DandelionRouterError::PeerError),
)
}
},
TxState::Local => {
tracing::debug!(parent: &self.span, "Stemming local tx.");
Box::pin(
self.stem_local_tx(req.tx)
.map_ok(|_| State::Stem)
.map_err(DandelionRouterError::PeerError),
)
}
}
}
}

View file

@ -0,0 +1,130 @@
mod pool;
mod router;
use std::{collections::HashMap, future::Future, hash::Hash, sync::Arc};
use futures::TryStreamExt;
use tokio::sync::mpsc::{self, UnboundedReceiver};
use tower::{
discover::{Discover, ServiceList},
util::service_fn,
Service, ServiceExt,
};
use crate::{
traits::{TxStoreRequest, TxStoreResponse},
State,
};
pub fn mock_discover_svc<Req: Send + 'static>() -> (
impl Discover<
Key = usize,
Service = impl Service<
Req,
Future = impl Future<Output = Result<(), tower::BoxError>> + Send + 'static,
Error = tower::BoxError,
> + Send
+ 'static,
Error = tower::BoxError,
>,
UnboundedReceiver<(u64, Req)>,
) {
let (tx, rx) = mpsc::unbounded_channel();
let discover = ServiceList::new((0..).map(move |i| {
let tx_2 = tx.clone();
service_fn(move |req| {
tx_2.send((i, req)).unwrap();
async move { Ok::<(), tower::BoxError>(()) }
})
}))
.map_err(Into::into);
(discover, rx)
}
pub fn mock_broadcast_svc<Req: Send + 'static>() -> (
impl Service<
Req,
Future = impl Future<Output = Result<(), tower::BoxError>> + Send + 'static,
Error = tower::BoxError,
> + Send
+ 'static,
UnboundedReceiver<Req>,
) {
let (tx, rx) = mpsc::unbounded_channel();
(
service_fn(move |req| {
tx.send(req).unwrap();
async move { Ok::<(), tower::BoxError>(()) }
}),
rx,
)
}
#[allow(clippy::type_complexity)] // just test code.
pub fn mock_in_memory_backing_pool<
Tx: Clone + Send + 'static,
TxID: Clone + Hash + Eq + Send + 'static,
>() -> (
impl Service<
TxStoreRequest<Tx, TxID>,
Response = TxStoreResponse<Tx, TxID>,
Future = impl Future<Output = Result<TxStoreResponse<Tx, TxID>, tower::BoxError>>
+ Send
+ 'static,
Error = tower::BoxError,
> + Send
+ 'static,
Arc<std::sync::Mutex<HashMap<TxID, (Tx, State)>>>,
) {
let txs = Arc::new(std::sync::Mutex::new(HashMap::new()));
let txs_2 = txs.clone();
(
service_fn(move |req: TxStoreRequest<Tx, TxID>| {
let txs = txs.clone();
async move {
match req {
TxStoreRequest::Store(tx, tx_id, state) => {
txs.lock().unwrap().insert(tx_id, (tx, state));
Ok(TxStoreResponse::Ok)
}
TxStoreRequest::Get(tx_id) => {
let tx_state = txs.lock().unwrap().get(&tx_id).cloned();
Ok(TxStoreResponse::Transaction(tx_state))
}
TxStoreRequest::Contains(tx_id) => Ok(TxStoreResponse::Contains(
txs.lock().unwrap().get(&tx_id).map(|res| res.1),
)),
TxStoreRequest::IDsInStemPool => {
// horribly inefficient, but it's test code :)
let ids = txs
.lock()
.unwrap()
.iter()
.filter(|(_, (_, state))| matches!(state, State::Stem))
.map(|tx| tx.0.clone())
.collect::<Vec<_>>();
Ok(TxStoreResponse::IDs(ids))
}
TxStoreRequest::Promote(tx_id) => {
let _ = txs
.lock()
.unwrap()
.get_mut(&tx_id)
.map(|tx| tx.1 = State::Fluff);
Ok(TxStoreResponse::Ok)
}
}
}
}),
txs_2,
)
}

View file

@ -0,0 +1,42 @@
use std::time::Duration;
use crate::{
pool::{start_dandelion_pool, IncomingTx},
DandelionConfig, DandelionRouter, Graph, TxState,
};
use super::*;
#[tokio::test]
async fn basic_functionality() {
let config = DandelionConfig {
time_between_hop: Duration::from_millis(175),
epoch_duration: Duration::from_secs(0), // make every poll ready change state
fluff_probability: 0.2,
graph: Graph::FourRegular,
};
let (broadcast_svc, mut broadcast_rx) = mock_broadcast_svc();
let (outbound_peer_svc, _outbound_rx) = mock_discover_svc();
let router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
let (pool_svc, pool) = mock_in_memory_backing_pool();
let mut pool_svc = start_dandelion_pool(15, router, pool_svc, config);
pool_svc
.ready()
.await
.unwrap()
.call(IncomingTx {
tx: 0_usize,
tx_id: 1_usize,
tx_state: TxState::Fluff,
})
.await
.unwrap();
assert!(pool.lock().unwrap().contains_key(&1));
assert!(broadcast_rx.try_recv().is_ok())
}

View file

@ -0,0 +1,237 @@
use std::time::Duration;
use tower::{Service, ServiceExt};
use crate::{DandelionConfig, DandelionRouteReq, DandelionRouter, Graph, TxState};
use super::*;
/// make sure the number of stem peers is correct.
#[tokio::test]
async fn number_stems_correct() {
let mut config = DandelionConfig {
time_between_hop: Duration::from_millis(175),
epoch_duration: Duration::from_secs(60_000),
fluff_probability: 0.0, // we want to be in stem state
graph: Graph::FourRegular,
};
let (broadcast_svc, _broadcast_rx) = mock_broadcast_svc();
let (outbound_peer_svc, _outbound_rx) = mock_discover_svc();
let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
const FROM_PEER: usize = 20;
// send a request to make the generic bound inference work, without specifying types.
router
.ready()
.await
.unwrap()
.call(DandelionRouteReq {
tx: 0_usize,
state: TxState::Stem { from: FROM_PEER },
})
.await
.unwrap();
assert_eq!(router.stem_peers.len(), 2); // Graph::FourRegular
config.graph = Graph::Line;
let (broadcast_svc, _broadcast_rx) = mock_broadcast_svc();
let (outbound_peer_svc, _outbound_rx) = mock_discover_svc();
let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
// send a request to make the generic bound inference work, without specifying types.
router
.ready()
.await
.unwrap()
.call(DandelionRouteReq {
tx: 0_usize,
state: TxState::Stem { from: FROM_PEER },
})
.await
.unwrap();
assert_eq!(router.stem_peers.len(), 1); // Graph::Line
}
/// make sure a tx from the same peer goes to the same peer.
#[tokio::test]
async fn routes_consistent() {
let config = DandelionConfig {
time_between_hop: Duration::from_millis(175),
epoch_duration: Duration::from_secs(60_000),
fluff_probability: 0.0, // we want this test to always stem
graph: Graph::FourRegular,
};
let (broadcast_svc, mut broadcast_rx) = mock_broadcast_svc();
let (outbound_peer_svc, mut outbound_rx) = mock_discover_svc();
let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
const FROM_PEER: usize = 20;
// The router will panic if it attempts to fluff.
broadcast_rx.close();
for _ in 0..30 {
router
.ready()
.await
.unwrap()
.call(DandelionRouteReq {
tx: 0_usize,
state: TxState::Stem { from: FROM_PEER },
})
.await
.unwrap();
}
let mut stem_peer = None;
let mut total_txs = 0;
while let Ok((peer_id, _)) = outbound_rx.try_recv() {
let stem_peer = stem_peer.get_or_insert(peer_id);
// make sure all peer ids are the same (so the same svc got all txs).
assert_eq!(*stem_peer, peer_id);
total_txs += 1;
}
assert_eq!(total_txs, 30);
}
/// make sure local txs always stem - even in fluff state.
#[tokio::test]
async fn local_always_stem() {
let config = DandelionConfig {
time_between_hop: Duration::from_millis(175),
epoch_duration: Duration::from_secs(60_000),
fluff_probability: 1.0, // we want this test to always fluff
graph: Graph::FourRegular,
};
let (broadcast_svc, mut broadcast_rx) = mock_broadcast_svc();
let (outbound_peer_svc, mut outbound_rx) = mock_discover_svc();
let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
// The router will panic if it attempts to fluff.
broadcast_rx.close();
for _ in 0..30 {
router
.ready()
.await
.unwrap()
.call(DandelionRouteReq {
tx: 0_usize,
state: TxState::Local,
})
.await
.unwrap();
}
let mut stem_peer = None;
let mut total_txs = 0;
while let Ok((peer_id, _)) = outbound_rx.try_recv() {
let stem_peer = stem_peer.get_or_insert(peer_id);
// make sure all peer ids are the same (so the same svc got all txs).
assert_eq!(*stem_peer, peer_id);
total_txs += 1;
}
assert_eq!(total_txs, 30);
}
/// make sure stem txs fluff when in the fluff state.
#[tokio::test]
async fn stem_txs_fluff_in_state_fluff() {
let config = DandelionConfig {
time_between_hop: Duration::from_millis(175),
epoch_duration: Duration::from_secs(60_000),
fluff_probability: 1.0, // we want this test to always fluff
graph: Graph::FourRegular,
};
let (broadcast_svc, mut broadcast_rx) = mock_broadcast_svc();
let (outbound_peer_svc, mut outbound_rx) = mock_discover_svc();
let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
const FROM_PEER: usize = 20;
// The router will panic if it attempts to stem.
outbound_rx.close();
for _ in 0..30 {
router
.ready()
.await
.unwrap()
.call(DandelionRouteReq {
tx: 0_usize,
state: TxState::Stem { from: FROM_PEER },
})
.await
.unwrap();
}
let mut total_txs = 0;
while broadcast_rx.try_recv().is_ok() {
total_txs += 1;
}
assert_eq!(total_txs, 30);
}
/// make sure we get all txs sent to the router out in a stem or a fluff.
#[tokio::test]
async fn random_routing() {
let config = DandelionConfig {
time_between_hop: Duration::from_millis(175),
epoch_duration: Duration::from_secs(0), // make every poll ready change state
fluff_probability: 0.2,
graph: Graph::FourRegular,
};
let (broadcast_svc, mut broadcast_rx) = mock_broadcast_svc();
let (outbound_peer_svc, mut outbound_rx) = mock_discover_svc();
let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
for _ in 0..3000 {
router
.ready()
.await
.unwrap()
.call(DandelionRouteReq {
tx: 0_usize,
state: TxState::Stem {
from: rand::random(),
},
})
.await
.unwrap();
}
let mut total_txs = 0;
while broadcast_rx.try_recv().is_ok() {
total_txs += 1;
}
while outbound_rx.try_recv().is_ok() {
total_txs += 1;
}
assert_eq!(total_txs, 3000);
}

View file

@ -0,0 +1,49 @@
/// A request to diffuse a transaction to all connected peers.
///
/// This crate does not handle diffusion; it is left to implementers.
pub struct DiffuseRequest<Tx>(pub Tx);
/// A request sent to a single peer to stem this transaction.
pub struct StemRequest<Tx>(pub Tx);
#[cfg(feature = "txpool")]
/// A request sent to the backing transaction pool storage.
pub enum TxStoreRequest<Tx, TxID> {
/// A request to store a transaction, with the ID to store it under and the pool to store it in.
///
/// If the tx is already in the pool then do nothing, unless the tx is in the stem pool and this request's
/// state is fluff, in which case move it to the fluff pool.
Store(Tx, TxID, crate::State),
/// A request to retrieve a `Tx` with the given ID from the pool; this should not remove the tx from the pool.
///
/// Must return [`TxStoreResponse::Transaction`]
Get(TxID),
/// Promote a transaction from the stem pool to the public pool.
///
/// If the tx is already in the fluff pool do nothing.
///
/// This should not error if the tx isn't in the pool at all.
Promote(TxID),
/// A request to check if a transaction is in the pool.
///
/// Must return [`TxStoreResponse::Contains`]
Contains(TxID),
/// Returns the IDs of all the transactions in the stem pool.
///
/// Must return [`TxStoreResponse::IDs`]
IDsInStemPool,
}
#[cfg(feature = "txpool")]
/// A response sent back from the backing transaction pool.
pub enum TxStoreResponse<Tx, TxID> {
/// A generic ok response.
Ok,
/// A response containing an [`Option`]: `Some` with the pool the transaction is in if it is in the pool,
/// or `None` if it is not.
Contains(Option<crate::State>),
/// A response containing a requested transaction.
Transaction(Option<(Tx, crate::State)>),
/// A list of transaction IDs.
IDs(Vec<TxID>),
}
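// A minimal sketch of how a backing store might dispatch these requests (mirroring
// `mock_in_memory_backing_pool` in the test module); `txs` is an assumed
// `HashMap<TxID, (Tx, crate::State)>`:
//
// match req {
//     TxStoreRequest::Store(tx, tx_id, state) => {
//         txs.insert(tx_id, (tx, state));
//         TxStoreResponse::Ok
//     }
//     TxStoreRequest::Get(tx_id) => TxStoreResponse::Transaction(txs.get(&tx_id).cloned()),
//     TxStoreRequest::Contains(tx_id) => TxStoreResponse::Contains(txs.get(&tx_id).map(|(_, s)| *s)),
//     // ... `Promote` and `IDsInStemPool` handled similarly.
// }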