mirror of
https://github.com/Cuprate/cuprate.git
synced 2025-01-22 10:44:36 +00:00
Merge branch 'main' into peer-set2
This commit is contained in:
commit
63a3207316
65 changed files with 3034 additions and 1144 deletions
22
.github/workflows/ci.yml
vendored
22
.github/workflows/ci.yml
vendored
|
@ -53,10 +53,13 @@ jobs:
|
||||||
include:
|
include:
|
||||||
- os: windows-latest
|
- os: windows-latest
|
||||||
shell: msys2 {0}
|
shell: msys2 {0}
|
||||||
|
rust: stable-x86_64-pc-windows-gnu
|
||||||
- os: macos-latest
|
- os: macos-latest
|
||||||
shell: bash
|
shell: bash
|
||||||
|
rust: stable
|
||||||
- os: ubuntu-latest
|
- os: ubuntu-latest
|
||||||
shell: bash
|
shell: bash
|
||||||
|
rust: stable
|
||||||
|
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
|
@ -68,13 +71,16 @@ jobs:
|
||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
|
|
||||||
|
- name: Install Rust
|
||||||
|
uses: dtolnay/rust-toolchain@master
|
||||||
|
with:
|
||||||
|
toolchain: ${{ matrix.rust }}
|
||||||
|
components: clippy
|
||||||
|
|
||||||
- name: Cache
|
- name: Cache
|
||||||
uses: actions/cache@v3
|
uses: actions/cache@v3
|
||||||
with:
|
with:
|
||||||
path: |
|
path: target
|
||||||
target
|
|
||||||
~/.cargo
|
|
||||||
~/.rustup
|
|
||||||
key: ${{ matrix.os }}
|
key: ${{ matrix.os }}
|
||||||
|
|
||||||
- name: Download monerod
|
- name: Download monerod
|
||||||
|
@ -99,12 +105,6 @@ jobs:
|
||||||
update: true
|
update: true
|
||||||
install: mingw-w64-x86_64-toolchain mingw-w64-x86_64-boost msys2-runtime-devel git mingw-w64-x86_64-cmake mingw-w64-x86_64-ninja
|
install: mingw-w64-x86_64-toolchain mingw-w64-x86_64-boost msys2-runtime-devel git mingw-w64-x86_64-cmake mingw-w64-x86_64-ninja
|
||||||
|
|
||||||
- name: Switch target (Windows)
|
|
||||||
if: matrix.os == 'windows-latest'
|
|
||||||
run: |
|
|
||||||
rustup toolchain install stable-x86_64-pc-windows-gnu -c clippy --no-self-update
|
|
||||||
rustup default stable-x86_64-pc-windows-gnu
|
|
||||||
|
|
||||||
- name: Documentation
|
- name: Documentation
|
||||||
run: cargo doc --workspace --all-features --no-deps
|
run: cargo doc --workspace --all-features --no-deps
|
||||||
|
|
||||||
|
@ -116,7 +116,7 @@ jobs:
|
||||||
run: |
|
run: |
|
||||||
cargo test --all-features --workspace
|
cargo test --all-features --workspace
|
||||||
cargo test --package cuprate-database --no-default-features --features redb --features service
|
cargo test --package cuprate-database --no-default-features --features redb --features service
|
||||||
|
|
||||||
# TODO: upload binaries with `actions/upload-artifact@v3`
|
# TODO: upload binaries with `actions/upload-artifact@v3`
|
||||||
- name: Build
|
- name: Build
|
||||||
run: cargo build --all-features --all-targets --workspace
|
run: cargo build --all-features --all-targets --workspace
|
||||||
|
|
50
Cargo.lock
generated
50
Cargo.lock
generated
|
@ -576,36 +576,6 @@ dependencies = [
|
||||||
"windows",
|
"windows",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "cuprate-p2p"
|
|
||||||
version = "0.1.0"
|
|
||||||
dependencies = [
|
|
||||||
"bytes",
|
|
||||||
"cuprate-helper",
|
|
||||||
"cuprate-test-utils",
|
|
||||||
"dashmap",
|
|
||||||
"fixed-bytes",
|
|
||||||
"futures",
|
|
||||||
"hex",
|
|
||||||
"indexmap 2.2.6",
|
|
||||||
"monero-address-book",
|
|
||||||
"monero-p2p",
|
|
||||||
"monero-pruning",
|
|
||||||
"monero-serai",
|
|
||||||
"monero-wire",
|
|
||||||
"pin-project",
|
|
||||||
"rand",
|
|
||||||
"rand_distr",
|
|
||||||
"rayon",
|
|
||||||
"thiserror",
|
|
||||||
"tokio",
|
|
||||||
"tokio-stream",
|
|
||||||
"tokio-util",
|
|
||||||
"tower",
|
|
||||||
"tracing",
|
|
||||||
"tracing-subscriber",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cuprate-test-utils"
|
name = "cuprate-test-utils"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
|
@ -687,16 +657,18 @@ dependencies = [
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "dashmap"
|
name = "dandelion_tower"
|
||||||
version = "5.5.3"
|
version = "0.1.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856"
|
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
"futures",
|
||||||
"hashbrown 0.14.5",
|
"proptest",
|
||||||
"lock_api",
|
"rand",
|
||||||
"once_cell",
|
"rand_distr",
|
||||||
"parking_lot_core",
|
"thiserror",
|
||||||
|
"tokio",
|
||||||
|
"tokio-util",
|
||||||
|
"tower",
|
||||||
|
"tracing",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
|
|
|
@ -12,6 +12,7 @@ members = [
|
||||||
"net/levin",
|
"net/levin",
|
||||||
"net/monero-wire",
|
"net/monero-wire",
|
||||||
"p2p/cuprate-p2p",
|
"p2p/cuprate-p2p",
|
||||||
|
"p2p/dandelion",
|
||||||
"p2p/monero-p2p",
|
"p2p/monero-p2p",
|
||||||
"p2p/address-book",
|
"p2p/address-book",
|
||||||
"pruning",
|
"pruning",
|
||||||
|
|
|
@ -1,33 +1,34 @@
|
||||||
# Database
|
# Database
|
||||||
Cuprate's database implementation.
|
Cuprate's database implementation.
|
||||||
|
|
||||||
<!-- Did you know markdown automatically increments number lists, even if they are all 1...? -->
|
- [1. Documentation](#1-documentation)
|
||||||
1. [Documentation](#documentation)
|
- [2. File Structure](#2-file-structure)
|
||||||
1. [File Structure](#file-structure)
|
- [2.1 `src/`](#21-src)
|
||||||
- [`src/`](#src)
|
- [2.2 `src/backend/`](#22-srcbackend)
|
||||||
- [`src/ops`](#src-ops)
|
- [2.3 `src/config`](#23-srcconfig)
|
||||||
- [`src/service/`](#src-service)
|
- [2.4 `src/ops`](#24-srcops)
|
||||||
- [`src/backend/`](#src-backend)
|
- [2.5 `src/service/`](#25-srcservice)
|
||||||
1. [Backends](#backends)
|
- [3. Backends](#3-backends)
|
||||||
- [`heed`](#heed)
|
- [3.1 heed](#31-heed)
|
||||||
- [`redb`](#redb)
|
- [3.2 redb](#32-redb)
|
||||||
- [`redb-memory`](#redb-memory)
|
- [3.3 redb-memory](#33-redb-memory)
|
||||||
- [`sanakirja`](#sanakirja)
|
- [3.4 sanakirja](#34-sanakirja)
|
||||||
- [`MDBX`](#mdbx)
|
- [3.5 MDBX](#35-mdbx)
|
||||||
1. [Layers](#layers)
|
- [4. Layers](#4-layers)
|
||||||
- [Database](#database)
|
- [4.1 Backend](#41-backend)
|
||||||
- [Trait](#trait)
|
- [4.2 Trait](#42-trait)
|
||||||
- [ConcreteEnv](#concreteenv)
|
- [4.3 ConcreteEnv](#43-concreteenv)
|
||||||
- [Thread-pool](#thread-pool)
|
- [4.4 `ops`](#44-ops)
|
||||||
- [Service](#service)
|
- [4.5 `service`](#45-service)
|
||||||
1. [Resizing](#resizing)
|
- [5. Syncing](#5-Syncing)
|
||||||
1. [Flushing](#flushing)
|
- [6. Thread model](#6-thread-model)
|
||||||
1. [(De)serialization](#deserialization)
|
- [7. Resizing](#7-resizing)
|
||||||
|
- [8. (De)serialization](#8-deserialization)
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
# Documentation
|
## 1. Documentation
|
||||||
In general, documentation for `database/` is split into 3:
|
Documentation for `database/` is split into 3 locations:
|
||||||
|
|
||||||
| Documentation location | Purpose |
|
| Documentation location | Purpose |
|
||||||
|---------------------------|---------|
|
|---------------------------|---------|
|
||||||
|
@ -59,65 +60,41 @@ The code within `src/` is also littered with some `grep`-able comments containin
|
||||||
| `TODO` | This must be implemented; There should be 0 of these in production code
|
| `TODO` | This must be implemented; There should be 0 of these in production code
|
||||||
| `SOMEDAY` | This should be implemented... someday
|
| `SOMEDAY` | This should be implemented... someday
|
||||||
|
|
||||||
# File Structure
|
## 2. File Structure
|
||||||
A quick reference of the structure of the folders & files in `cuprate-database`.
|
A quick reference of the structure of the folders & files in `cuprate-database`.
|
||||||
|
|
||||||
Note that `lib.rs/mod.rs` files are purely for re-exporting/visibility/lints, and contain no code. Each sub-directory has a corresponding `mod.rs`.
|
Note that `lib.rs/mod.rs` files are purely for re-exporting/visibility/lints, and contain no code. Each sub-directory has a corresponding `mod.rs`.
|
||||||
|
|
||||||
## `src/`
|
### 2.1 `src/`
|
||||||
The top-level `src/` files.
|
The top-level `src/` files.
|
||||||
|
|
||||||
| File | Purpose |
|
| File | Purpose |
|
||||||
|---------------------|---------|
|
|------------------------|---------|
|
||||||
| `config.rs` | Database `Env` configuration
|
| `constants.rs` | General constants used throughout `cuprate-database`
|
||||||
| `constants.rs` | General constants used throughout `cuprate-database`
|
| `database.rs` | Abstracted database; `trait DatabaseR{o,w}`
|
||||||
| `database.rs` | Abstracted database; `trait DatabaseR{o,w}`
|
| `env.rs` | Abstracted database environment; `trait Env`
|
||||||
| `env.rs` | Abstracted database environment; `trait Env`
|
| `error.rs` | Database error types
|
||||||
| `error.rs` | Database error types
|
| `free.rs` | General free functions (related to the database)
|
||||||
| `free.rs` | General free functions (related to the database)
|
| `key.rs` | Abstracted database keys; `trait Key`
|
||||||
| `key.rs` | Abstracted database keys; `trait Key`
|
| `resize.rs` | Database resizing algorithms
|
||||||
| `resize.rs` | Database resizing algorithms
|
| `storable.rs` | Data (de)serialization; `trait Storable`
|
||||||
| `storable.rs` | Data (de)serialization; `trait Storable`
|
| `table.rs` | Database table abstraction; `trait Table`
|
||||||
| `table.rs` | Database table abstraction; `trait Table`
|
| `tables.rs` | All the table definitions used by `cuprate-database`
|
||||||
| `tables.rs` | All the table definitions used by `cuprate-database`
|
| `tests.rs` | Utilities for `cuprate_database` testing
|
||||||
| `transaction.rs` | Database transaction abstraction; `trait TxR{o,w}`
|
| `transaction.rs` | Database transaction abstraction; `trait TxR{o,w}`
|
||||||
| `types.rs` | Database table schema types
|
| `types.rs` | Database-specific types
|
||||||
|
| `unsafe_unsendable.rs` | Marker type to impl `Send` for objects not `Send`
|
||||||
|
|
||||||
## `src/ops/`
|
### 2.2 `src/backend/`
|
||||||
This folder contains the `cupate_database::ops` module.
|
This folder contains the implementation for actual databases used as the backend for `cuprate-database`.
|
||||||
|
|
||||||
TODO: more detailed descriptions.
|
|
||||||
|
|
||||||
| File | Purpose |
|
|
||||||
|-----------------|---------|
|
|
||||||
| `alt_block.rs` | Alternative blocks
|
|
||||||
| `block.rs` | Blocks
|
|
||||||
| `blockchain.rs` | Blockchain-related
|
|
||||||
| `output.rs` | Outputs
|
|
||||||
| `property.rs` | Properties
|
|
||||||
| `spent_key.rs` | Spent keys
|
|
||||||
| `tx.rs` | Transactions
|
|
||||||
|
|
||||||
## `src/service/`
|
|
||||||
This folder contains the `cupate_database::service` module.
|
|
||||||
|
|
||||||
| File | Purpose |
|
|
||||||
|----------------|---------|
|
|
||||||
| `free.rs` | General free functions used (related to `cuprate_database::service`)
|
|
||||||
| `read.rs` | Read thread-pool definitions and logic
|
|
||||||
| `tests.rs` | Thread-pool tests and test helper functions
|
|
||||||
| `types.rs` | `cuprate_database::service`-related type aliases
|
|
||||||
| `write.rs` | Write thread-pool definitions and logic
|
|
||||||
|
|
||||||
## `src/backend/`
|
|
||||||
This folder contains the actual database crates used as the backend for `cuprate-database`.
|
|
||||||
|
|
||||||
Each backend has its own folder.
|
Each backend has its own folder.
|
||||||
|
|
||||||
| Folder | Purpose |
|
| Folder/File | Purpose |
|
||||||
|--------------|---------|
|
|-------------|---------|
|
||||||
| `heed/` | Backend using using forked [`heed`](https://github.com/Cuprate/heed)
|
| `heed/` | Backend using using [`heed`](https://github.com/meilisearch/heed) (LMDB)
|
||||||
| `sanakirja/` | Backend using [`sanakirja`](https://docs.rs/sanakirja)
|
| `redb/` | Backend using [`redb`](https://github.com/cberner/redb)
|
||||||
|
| `tests.rs` | Backend-agnostic tests
|
||||||
|
|
||||||
All backends follow the same file structure:
|
All backends follow the same file structure:
|
||||||
|
|
||||||
|
@ -127,18 +104,56 @@ All backends follow the same file structure:
|
||||||
| `env.rs` | Implementation of `trait Env`
|
| `env.rs` | Implementation of `trait Env`
|
||||||
| `error.rs` | Implementation of backend's errors to `cuprate_database`'s error types
|
| `error.rs` | Implementation of backend's errors to `cuprate_database`'s error types
|
||||||
| `storable.rs` | Compatibility layer between `cuprate_database::Storable` and backend-specific (de)serialization
|
| `storable.rs` | Compatibility layer between `cuprate_database::Storable` and backend-specific (de)serialization
|
||||||
| `tests.rs` | Tests for the specific backend
|
|
||||||
| `transaction.rs` | Implementation of `trait TxR{o,w}`
|
| `transaction.rs` | Implementation of `trait TxR{o,w}`
|
||||||
| `types.rs` | Type aliases for long backend-specific types
|
| `types.rs` | Type aliases for long backend-specific types
|
||||||
|
|
||||||
# Backends
|
### 2.3 `src/config/`
|
||||||
`cuprate-database`'s `trait`s abstract over various actual databases.
|
This folder contains the `cupate_database::config` module; configuration options for the database.
|
||||||
|
|
||||||
Each database's implementation is located in its respective file in `src/backend/${DATABASE_NAME}.rs`.
|
| File | Purpose |
|
||||||
|
|---------------------|---------|
|
||||||
|
| `config.rs` | Main database `Config` struct
|
||||||
|
| `reader_threads.rs` | Reader thread configuration for `service` thread-pool
|
||||||
|
| `sync_mode.rs` | Disk sync configuration for backends
|
||||||
|
|
||||||
## `heed`
|
### 2.4 `src/ops/`
|
||||||
|
This folder contains the `cupate_database::ops` module.
|
||||||
|
|
||||||
|
These are higher-level functions abstracted over the database, that are Monero-related.
|
||||||
|
|
||||||
|
| File | Purpose |
|
||||||
|
|-----------------|---------|
|
||||||
|
| `block.rs` | Block related (main functions)
|
||||||
|
| `blockchain.rs` | Blockchain related (height, cumulative values, etc)
|
||||||
|
| `key_image.rs` | Key image related
|
||||||
|
| `macros.rs` | Macros specific to `ops/`
|
||||||
|
| `output.rs` | Output related
|
||||||
|
| `property.rs` | Database properties (pruned, version, etc)
|
||||||
|
| `tx.rs` | Transaction related
|
||||||
|
|
||||||
|
### 2.5 `src/service/`
|
||||||
|
This folder contains the `cupate_database::service` module.
|
||||||
|
|
||||||
|
The `async`hronous request/response API other Cuprate crates use instead of managing the database directly themselves.
|
||||||
|
|
||||||
|
| File | Purpose |
|
||||||
|
|----------------|---------|
|
||||||
|
| `free.rs` | General free functions used (related to `cuprate_database::service`)
|
||||||
|
| `read.rs` | Read thread-pool definitions and logic
|
||||||
|
| `tests.rs` | Thread-pool tests and test helper functions
|
||||||
|
| `types.rs` | `cuprate_database::service`-related type aliases
|
||||||
|
| `write.rs` | Writer thread definitions and logic
|
||||||
|
|
||||||
|
## 3. Backends
|
||||||
|
`cuprate-database`'s `trait`s allow abstracting over the actual database, such that any backend in particular could be used.
|
||||||
|
|
||||||
|
Each database's implementation for those `trait`'s are located in its respective folder in `src/backend/${DATABASE_NAME}/`.
|
||||||
|
|
||||||
|
### 3.1 heed
|
||||||
The default database used is [`heed`](https://github.com/meilisearch/heed) (LMDB).
|
The default database used is [`heed`](https://github.com/meilisearch/heed) (LMDB).
|
||||||
|
|
||||||
|
The upstream versions from [`crates.io`](https://crates.io/crates/heed) are used.
|
||||||
|
|
||||||
`LMDB` should not need to be installed as `heed` has a build script that pulls it in automatically.
|
`LMDB` should not need to be installed as `heed` has a build script that pulls it in automatically.
|
||||||
|
|
||||||
`heed`'s filenames inside Cuprate's database folder (`~/.local/share/cuprate/database/`) are:
|
`heed`'s filenames inside Cuprate's database folder (`~/.local/share/cuprate/database/`) are:
|
||||||
|
@ -148,11 +163,11 @@ The default database used is [`heed`](https://github.com/meilisearch/heed) (LMDB
|
||||||
| `data.mdb` | Main data file
|
| `data.mdb` | Main data file
|
||||||
| `lock.mdb` | Database lock file
|
| `lock.mdb` | Database lock file
|
||||||
|
|
||||||
TODO: document max readers limit: https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1372. Other potential processes (e.g. `xmrblocks`) that are also reading the `data.mdb` file need to be accounted for.
|
`heed`-specific notes:
|
||||||
|
- [There is a maximum reader limit](https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1372). Other potential processes (e.g. `xmrblocks`) that are also reading the `data.mdb` file need to be accounted for.
|
||||||
|
- [LMDB does not work on remote filesystem](https://github.com/LMDB/lmdb/blob/b8e54b4c31378932b69f1298972de54a565185b1/libraries/liblmdb/lmdb.h#L129).
|
||||||
|
|
||||||
TODO: document DB on remote filesystem: https://github.com/LMDB/lmdb/blob/b8e54b4c31378932b69f1298972de54a565185b1/libraries/liblmdb/lmdb.h#L129.
|
### 3.2 redb
|
||||||
|
|
||||||
## `redb`
|
|
||||||
The 2nd database backend is the 100% Rust [`redb`](https://github.com/cberner/redb).
|
The 2nd database backend is the 100% Rust [`redb`](https://github.com/cberner/redb).
|
||||||
|
|
||||||
The upstream versions from [`crates.io`](https://crates.io/crates/redb) are used.
|
The upstream versions from [`crates.io`](https://crates.io/crates/redb) are used.
|
||||||
|
@ -163,45 +178,187 @@ The upstream versions from [`crates.io`](https://crates.io/crates/redb) are used
|
||||||
|-------------|---------|
|
|-------------|---------|
|
||||||
| `data.redb` | Main data file
|
| `data.redb` | Main data file
|
||||||
|
|
||||||
TODO: document DB on remote filesystem (does redb allow this?)
|
<!-- TODO: document DB on remote filesystem (does redb allow this?) -->
|
||||||
|
|
||||||
## `redb-memory`
|
### 3.3 redb-memory
|
||||||
This backend is 100% the same as `redb`, although, it uses `redb::backend::InMemoryBackend` which is a key-value store that completely resides in memory instead of a file.
|
This backend is 100% the same as `redb`, although, it uses `redb::backend::InMemoryBackend` which is a key-value store that completely resides in memory instead of a file.
|
||||||
|
|
||||||
All other details about this should be the same as the normal `redb` backend.
|
All other details about this should be the same as the normal `redb` backend.
|
||||||
|
|
||||||
## `sanakirja`
|
### 3.4 sanakirja
|
||||||
[`sanakirja`](https://docs.rs/sanakirja) was a candidate as a backend, however there were problems with maximum value sizes.
|
[`sanakirja`](https://docs.rs/sanakirja) was a candidate as a backend, however there were problems with maximum value sizes.
|
||||||
|
|
||||||
The default maximum value size is [1012 bytes](https://docs.rs/sanakirja/1.4.1/sanakirja/trait.Storable.html) which was too small for our requirements. Using [`sanakirja::Slice`](https://docs.rs/sanakirja/1.4.1/sanakirja/union.Slice.html) and [sanakirja::UnsizedStorage](https://docs.rs/sanakirja/1.4.1/sanakirja/trait.UnsizedStorable.html) was attempted, but there were bugs found when inserting a value in-between `512..=4096` bytes.
|
The default maximum value size is [1012 bytes](https://docs.rs/sanakirja/1.4.1/sanakirja/trait.Storable.html) which was too small for our requirements. Using [`sanakirja::Slice`](https://docs.rs/sanakirja/1.4.1/sanakirja/union.Slice.html) and [sanakirja::UnsizedStorage](https://docs.rs/sanakirja/1.4.1/sanakirja/trait.UnsizedStorable.html) was attempted, but there were bugs found when inserting a value in-between `512..=4096` bytes.
|
||||||
|
|
||||||
As such, it is not implemented.
|
As such, it is not implemented.
|
||||||
|
|
||||||
## `MDBX`
|
### 3.5 MDBX
|
||||||
[`MDBX`](https://erthink.github.io/libmdbx) was a candidate as a backend, however MDBX deprecated the custom key/value comparison functions, this makes it a bit trickier to implement duplicate tables. It is also quite similar to the main backend LMDB (of which it was originally a fork of).
|
[`MDBX`](https://erthink.github.io/libmdbx) was a candidate as a backend, however MDBX deprecated the custom key/value comparison functions, this makes it a bit trickier to implement duplicate tables. It is also quite similar to the main backend LMDB (of which it was originally a fork of).
|
||||||
|
|
||||||
As such, it is not implemented (yet).
|
As such, it is not implemented (yet).
|
||||||
|
|
||||||
# Layers
|
## 4. Layers
|
||||||
TODO: update with accurate information when ready, update image.
|
`cuprate_database` is logically abstracted into 5 layers, starting from the lowest:
|
||||||
|
1. Backend
|
||||||
|
2. Trait
|
||||||
|
3. ConcreteEnv
|
||||||
|
4. `ops`
|
||||||
|
5. `service`
|
||||||
|
|
||||||
## Database
|
Each layer is built upon the last.
|
||||||
## Trait
|
|
||||||
## ConcreteEnv
|
|
||||||
## Thread
|
|
||||||
## Service
|
|
||||||
|
|
||||||
# Resizing
|
<!-- TODO: insert image here after database/ split -->
|
||||||
TODO: document resize algorithm:
|
|
||||||
- Exactly when it occurs
|
|
||||||
- How much bytes are added
|
|
||||||
|
|
||||||
All backends follow the same algorithm.
|
### 4.1 Backend
|
||||||
|
This is the actual database backend implementation (or a Rust shim over one).
|
||||||
|
|
||||||
# Flushing
|
Examples:
|
||||||
TODO: document disk flushing behavior.
|
- `heed` (LMDB)
|
||||||
- Config options
|
- `redb`
|
||||||
- Backend-specific behavior
|
|
||||||
|
|
||||||
# (De)serialization
|
`cuprate_database` itself just uses a backend, it does not implement one.
|
||||||
TODO: document `Storable` and how databases (de)serialize types when storing/fetching.
|
|
||||||
|
All backends have the following attributes:
|
||||||
|
- [Embedded](https://en.wikipedia.org/wiki/Embedded_database)
|
||||||
|
- [Multiversion concurrency control](https://en.wikipedia.org/wiki/Multiversion_concurrency_control)
|
||||||
|
- [ACID](https://en.wikipedia.org/wiki/ACID)
|
||||||
|
- Are `(key, value)` oriented and have the expected API (`get()`, `insert()`, `delete()`)
|
||||||
|
- Are table oriented (`"table_name" -> (key, value)`)
|
||||||
|
- Allows concurrent readers
|
||||||
|
|
||||||
|
### 4.2 Trait
|
||||||
|
`cuprate_database` provides a set of `trait`s that abstract over the various database backends.
|
||||||
|
|
||||||
|
This allows the function signatures and behavior to stay the same but allows for swapping out databases in an easier fashion.
|
||||||
|
|
||||||
|
All common behavior of the backend's are encapsulated here and used instead of using the backend directly.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
- [`trait Env`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/env.rs)
|
||||||
|
- [`trait {TxRo, TxRw}`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/transaction.rs)
|
||||||
|
- [`trait {DatabaseRo, DatabaseRw}`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/database.rs)
|
||||||
|
|
||||||
|
For example, instead of calling `LMDB` or `redb`'s `get()` function directly, `DatabaseRo::get()` is called.
|
||||||
|
|
||||||
|
### 4.3 ConcreteEnv
|
||||||
|
This is the non-generic, concrete `struct` provided by `cuprate_database` that contains all the data necessary to operate the database. The actual database backend `ConcreteEnv` will use internally depends on which backend feature is used.
|
||||||
|
|
||||||
|
`ConcreteEnv` implements `trait Env`, which opens the door to all the other traits.
|
||||||
|
|
||||||
|
The equivalent objects in the backends themselves are:
|
||||||
|
- [`heed::Env`](https://docs.rs/heed/0.20.0/heed/struct.Env.html)
|
||||||
|
- [`redb::Database`](https://docs.rs/redb/2.1.0/redb/struct.Database.html)
|
||||||
|
|
||||||
|
This is the main object used when handling the database directly, although that is not strictly necessary as a user if the `service` layer is used.
|
||||||
|
|
||||||
|
### 4.4 `ops`
|
||||||
|
These are Monero-specific functions that use the abstracted `trait` forms of the database.
|
||||||
|
|
||||||
|
Instead of dealing with the database directly (`get()`, `delete()`), the `ops` layer provides more abstract functions that deal with commonly used Monero operations (`add_block()`, `pop_block()`).
|
||||||
|
|
||||||
|
### 4.5 `service`
|
||||||
|
The final layer abstracts the database completely into a [Monero-specific `async` request/response API](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/types/src/service.rs#L18-L78), using [`tower::Service`](https://docs.rs/tower/latest/tower/trait.Service.html).
|
||||||
|
|
||||||
|
It handles the database using a separate writer thread & reader thread-pool, and uses the previously mentioned `ops` functions when responding to requests.
|
||||||
|
|
||||||
|
Instead of handling the database directly, this layer provides read/write handles that allow:
|
||||||
|
- Sending requests for data (e.g. Outputs)
|
||||||
|
- Receiving responses
|
||||||
|
|
||||||
|
For more information on the backing thread-pool, see [`Thread model`](#6-thread-model).
|
||||||
|
|
||||||
|
## 5. Syncing
|
||||||
|
`cuprate_database`'s database has 5 disk syncing modes.
|
||||||
|
|
||||||
|
1. FastThenSafe
|
||||||
|
1. Safe
|
||||||
|
1. Async
|
||||||
|
1. Threshold
|
||||||
|
1. Fast
|
||||||
|
|
||||||
|
The default mode is `Safe`.
|
||||||
|
|
||||||
|
This means that upon each transaction commit, all the data that was written will be fully synced to disk. This is the slowest, but safest mode of operation.
|
||||||
|
|
||||||
|
Note that upon any database `Drop`, whether via `service` or dropping the database directly, the current implementation will sync to disk regardless of any configuration.
|
||||||
|
|
||||||
|
For more information on the other modes, read the documentation [here](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/config/sync_mode.rs#L63-L144).
|
||||||
|
|
||||||
|
## 6. Thread model
|
||||||
|
As noted in the [`Layers`](#layers) section, the base database abstractions themselves are not concerned with parallelism, they are mostly functions to be called from a single-thread.
|
||||||
|
|
||||||
|
However, the actual API `cuprate_database` exposes for practical usage for the main `cuprated` binary (and other `async` use-cases) is the asynchronous `service` API, which _does_ have a thread model backing it.
|
||||||
|
|
||||||
|
As such, when [`cuprate_database::service`'s initialization function](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/free.rs#L33-L44) is called, threads will be spawned and maintained until the user drops (disconnects) the returned handles.
|
||||||
|
|
||||||
|
The current behavior is:
|
||||||
|
- [1 writer thread](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/write.rs#L52-L66)
|
||||||
|
- [As many reader threads as there are system threads](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/read.rs#L104-L126)
|
||||||
|
|
||||||
|
For example, on a system with 32-threads, `cuprate_database` will spawn:
|
||||||
|
- 1 writer thread
|
||||||
|
- 32 reader threads
|
||||||
|
|
||||||
|
whose sole responsibility is to listen for database requests, access the database (potentially in parallel), and return a response.
|
||||||
|
|
||||||
|
Note that the `1 system thread = 1 reader thread` model is only the default setting, the reader thread count can be configured by the user to be any number between `1 .. amount_of_system_threads`.
|
||||||
|
|
||||||
|
The reader threads are managed by [`rayon`](https://docs.rs/rayon).
|
||||||
|
|
||||||
|
For an example of where multiple reader threads are used: given a request that asks if any key-image within a set already exists, `cuprate_database` will [split that work between the threads with `rayon`](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/read.rs#L490-L503).
|
||||||
|
|
||||||
|
Once the [handles](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/free.rs#L33) to these threads are `Drop`ed, the backing thread(pool) will gracefully exit, automatically.
|
||||||
|
|
||||||
|
## 7. Resizing
|
||||||
|
Database backends that require manually resizing will, by default, use a similar algorithm as `monerod`'s.
|
||||||
|
|
||||||
|
Note that this only relates to the `service` module, where the database is handled by `cuprate_database` itself, not the user. In the case of a user directly using `cuprate_database`, it is up to them on how to resize.
|
||||||
|
|
||||||
|
Within `service`, the resizing logic defined [here](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/service/write.rs#L139-L201) does the following:
|
||||||
|
|
||||||
|
- If there's not enough space to fit a write request's data, start a resize
|
||||||
|
- Each resize adds around [`1_073_745_920`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L104-L160) bytes to the current map size
|
||||||
|
- A resize will be attempted `3` times before failing
|
||||||
|
|
||||||
|
There are other [resizing algorithms](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L38-L47) that define how the database's memory map grows, although currently the behavior of [`monerod`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L104-L160) is closely followed.
|
||||||
|
|
||||||
|
## 8. (De)serialization
|
||||||
|
All types stored inside the database are either bytes already, or are perfectly bitcast-able.
|
||||||
|
|
||||||
|
As such, they do not incur heavy (de)serialization costs when storing/fetching them from the database. The main (de)serialization used is [`bytemuck`](https://docs.rs/bytemuck)'s traits and casting functions.
|
||||||
|
|
||||||
|
Note that the data stored in the tables are still type-safe; we still refer to the key and values within our tables by the type.
|
||||||
|
|
||||||
|
The main deserialization `trait` for database storage is: [`cuprate_database::Storable`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L16-L115).
|
||||||
|
|
||||||
|
- Before storage, the type is [simply cast into bytes](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L125)
|
||||||
|
- When fetching, the bytes are [simply cast into the type](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L130)
|
||||||
|
|
||||||
|
When a type is casted into bytes, [the reference is casted](https://docs.rs/bytemuck/latest/bytemuck/fn.bytes_of.html), i.e. this is zero-cost serialization.
|
||||||
|
|
||||||
|
However, it is worth noting that when bytes are casted into the type, [it is copied](https://docs.rs/bytemuck/latest/bytemuck/fn.pod_read_unaligned.html). This is due to byte alignment guarantee issues with both backends, see:
|
||||||
|
- https://github.com/AltSysrq/lmdb-zero/issues/8
|
||||||
|
- https://github.com/cberner/redb/issues/360
|
||||||
|
|
||||||
|
Without this, `bytemuck` will panic with [`TargetAlignmentGreaterAndInputNotAligned`](https://docs.rs/bytemuck/latest/bytemuck/enum.PodCastError.html#variant.TargetAlignmentGreaterAndInputNotAligned) when casting.
|
||||||
|
|
||||||
|
Copying the bytes fixes this problem, although it is more costly than necessary. However, in the main use-case for `cuprate_database` (the `service` module) the bytes would need to be owned regardless as the `Request/Response` API uses owned data types (`T`, `Vec<T>`, `HashMap<K, V>`, etc).
|
||||||
|
|
||||||
|
Practically speaking, this means lower-level database functions that normally look like such:
|
||||||
|
```rust
|
||||||
|
fn get(key: &Key) -> &Value;
|
||||||
|
```
|
||||||
|
end up looking like this in `cuprate_database`:
|
||||||
|
```rust
|
||||||
|
fn get(key: &Key) -> Value;
|
||||||
|
```
|
||||||
|
|
||||||
|
Since each backend has its own (de)serialization methods, our types are wrapped in compatibility types that map our `Storable` functions into whatever is required for the backend, e.g:
|
||||||
|
- [`StorableHeed<T>`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/backend/heed/storable.rs#L11-L45)
|
||||||
|
- [`StorableRedb<T>`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/backend/redb/storable.rs#L11-L30)
|
||||||
|
|
||||||
|
Compatibility structs also exist for any `Storable` containers:
|
||||||
|
- [`StorableVec<T>`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L135-L191)
|
||||||
|
- [`StorableBytes`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L208-L241)
|
||||||
|
|
||||||
|
Again, it's unfortunate that these must be owned, although in `service`'s use-case, they would have to be owned anyway.
|
|
@ -1,16 +1,10 @@
|
||||||
//! Implementation of `trait Database` for `heed`.
|
//! Implementation of `trait Database` for `heed`.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::{
|
use std::{cell::RefCell, ops::RangeBounds};
|
||||||
borrow::{Borrow, Cow},
|
|
||||||
cell::RefCell,
|
|
||||||
fmt::Debug,
|
|
||||||
ops::RangeBounds,
|
|
||||||
sync::RwLockReadGuard,
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
backend::heed::{storable::StorableHeed, types::HeedDb},
|
backend::heed::types::HeedDb,
|
||||||
database::{DatabaseIter, DatabaseRo, DatabaseRw},
|
database::{DatabaseIter, DatabaseRo, DatabaseRw},
|
||||||
error::RuntimeError,
|
error::RuntimeError,
|
||||||
table::Table,
|
table::Table,
|
||||||
|
|
|
@ -3,10 +3,8 @@
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::{
|
use std::{
|
||||||
cell::RefCell,
|
cell::RefCell,
|
||||||
fmt::Debug,
|
|
||||||
num::NonZeroUsize,
|
num::NonZeroUsize,
|
||||||
ops::Deref,
|
sync::{RwLock, RwLockReadGuard},
|
||||||
sync::{RwLock, RwLockReadGuard, RwLockWriteGuard},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
use heed::{DatabaseOpenOptions, EnvFlags, EnvOpenOptions};
|
use heed::{DatabaseOpenOptions, EnvFlags, EnvOpenOptions};
|
||||||
|
@ -23,10 +21,11 @@ use crate::{
|
||||||
error::{InitError, RuntimeError},
|
error::{InitError, RuntimeError},
|
||||||
resize::ResizeAlgorithm,
|
resize::ResizeAlgorithm,
|
||||||
table::Table,
|
table::Table,
|
||||||
|
tables::call_fn_on_all_tables_or_early_return,
|
||||||
};
|
};
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Consts
|
//---------------------------------------------------------------------------------------------------- Consts
|
||||||
/// TODO
|
/// Panic message when there's a table missing.
|
||||||
const PANIC_MSG_MISSING_TABLE: &str =
|
const PANIC_MSG_MISSING_TABLE: &str =
|
||||||
"cuprate_database::Env should uphold the invariant that all tables are already created";
|
"cuprate_database::Env should uphold the invariant that all tables are already created";
|
||||||
|
|
||||||
|
@ -49,7 +48,7 @@ pub struct ConcreteEnv {
|
||||||
/// `reader_count` would be spinned on until 0, at which point
|
/// `reader_count` would be spinned on until 0, at which point
|
||||||
/// we are safe to resize.
|
/// we are safe to resize.
|
||||||
///
|
///
|
||||||
/// Although, 3 atomic operations (check atomic bool, reader_count++, reader_count--)
|
/// Although, 3 atomic operations (check atomic bool, `reader_count++`, `reader_count--`)
|
||||||
/// turns out to be roughly as expensive as acquiring a non-contended `RwLock`,
|
/// turns out to be roughly as expensive as acquiring a non-contended `RwLock`,
|
||||||
/// the CPU sleeping instead of spinning is much better too.
|
/// the CPU sleeping instead of spinning is much better too.
|
||||||
///
|
///
|
||||||
|
@ -68,7 +67,7 @@ impl Drop for ConcreteEnv {
|
||||||
fn drop(&mut self) {
|
fn drop(&mut self) {
|
||||||
// INVARIANT: drop(ConcreteEnv) must sync.
|
// INVARIANT: drop(ConcreteEnv) must sync.
|
||||||
//
|
//
|
||||||
// TODO:
|
// SOMEDAY:
|
||||||
// "if the environment has the MDB_NOSYNC flag set the flushes will be omitted,
|
// "if the environment has the MDB_NOSYNC flag set the flushes will be omitted,
|
||||||
// and with MDB_MAPASYNC they will be asynchronous."
|
// and with MDB_MAPASYNC they will be asynchronous."
|
||||||
// <http://www.lmdb.tech/doc/group__mdb.html#ga85e61f05aa68b520cc6c3b981dba5037>
|
// <http://www.lmdb.tech/doc/group__mdb.html#ga85e61f05aa68b520cc6c3b981dba5037>
|
||||||
|
@ -76,7 +75,7 @@ impl Drop for ConcreteEnv {
|
||||||
// We need to do `mdb_env_set_flags(&env, MDB_NOSYNC|MDB_ASYNCMAP, 0)`
|
// We need to do `mdb_env_set_flags(&env, MDB_NOSYNC|MDB_ASYNCMAP, 0)`
|
||||||
// to clear the no sync and async flags such that the below `self.sync()`
|
// to clear the no sync and async flags such that the below `self.sync()`
|
||||||
// _actually_ synchronously syncs.
|
// _actually_ synchronously syncs.
|
||||||
if let Err(e) = crate::Env::sync(self) {
|
if let Err(_e) = crate::Env::sync(self) {
|
||||||
// TODO: log error?
|
// TODO: log error?
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -118,10 +117,11 @@ impl Env for ConcreteEnv {
|
||||||
|
|
||||||
#[cold]
|
#[cold]
|
||||||
#[inline(never)] // called once.
|
#[inline(never)] // called once.
|
||||||
#[allow(clippy::items_after_statements)]
|
|
||||||
fn open(config: Config) -> Result<Self, InitError> {
|
fn open(config: Config) -> Result<Self, InitError> {
|
||||||
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
|
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
|
||||||
|
|
||||||
|
let mut env_open_options = EnvOpenOptions::new();
|
||||||
|
|
||||||
// Map our `Config` sync mode to the LMDB environment flags.
|
// Map our `Config` sync mode to the LMDB environment flags.
|
||||||
//
|
//
|
||||||
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
|
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
|
||||||
|
@ -129,11 +129,21 @@ impl Env for ConcreteEnv {
|
||||||
SyncMode::Safe => EnvFlags::empty(),
|
SyncMode::Safe => EnvFlags::empty(),
|
||||||
SyncMode::Async => EnvFlags::MAP_ASYNC,
|
SyncMode::Async => EnvFlags::MAP_ASYNC,
|
||||||
SyncMode::Fast => EnvFlags::NO_SYNC | EnvFlags::WRITE_MAP | EnvFlags::MAP_ASYNC,
|
SyncMode::Fast => EnvFlags::NO_SYNC | EnvFlags::WRITE_MAP | EnvFlags::MAP_ASYNC,
|
||||||
// TODO: dynamic syncs are not implemented.
|
// SOMEDAY: dynamic syncs are not implemented.
|
||||||
SyncMode::FastThenSafe | SyncMode::Threshold(_) => unimplemented!(),
|
SyncMode::FastThenSafe | SyncMode::Threshold(_) => unimplemented!(),
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut env_open_options = EnvOpenOptions::new();
|
// SAFETY: the flags we're setting are 'unsafe'
|
||||||
|
// from a data durability perspective, although,
|
||||||
|
// the user config wanted this.
|
||||||
|
//
|
||||||
|
// MAYBE: We may need to open/create tables with certain flags
|
||||||
|
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
|
||||||
|
// MAYBE: Set comparison functions for certain tables
|
||||||
|
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
|
||||||
|
unsafe {
|
||||||
|
env_open_options.flags(flags);
|
||||||
|
}
|
||||||
|
|
||||||
// Set the memory map size to
|
// Set the memory map size to
|
||||||
// (current disk size) + (a bit of leeway)
|
// (current disk size) + (a bit of leeway)
|
||||||
|
@ -152,7 +162,7 @@ impl Env for ConcreteEnv {
|
||||||
|
|
||||||
// Set the max amount of database tables.
|
// Set the max amount of database tables.
|
||||||
// We know at compile time how many tables there are.
|
// We know at compile time how many tables there are.
|
||||||
// TODO: ...how many?
|
// SOMEDAY: ...how many?
|
||||||
env_open_options.max_dbs(32);
|
env_open_options.max_dbs(32);
|
||||||
|
|
||||||
// LMDB documentation:
|
// LMDB documentation:
|
||||||
|
@ -167,19 +177,19 @@ impl Env for ConcreteEnv {
|
||||||
// - Use at least 126 reader threads
|
// - Use at least 126 reader threads
|
||||||
// - Add 16 extra reader threads if <126
|
// - Add 16 extra reader threads if <126
|
||||||
//
|
//
|
||||||
// TODO: This behavior is from `monerod`:
|
// FIXME: This behavior is from `monerod`:
|
||||||
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
|
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
|
||||||
// I believe this could be adjusted percentage-wise so very high
|
// I believe this could be adjusted percentage-wise so very high
|
||||||
// thread PCs can benefit from something like (cuprated + anything that uses the DB in the future).
|
// thread PCs can benefit from something like (cuprated + anything that uses the DB in the future).
|
||||||
// For now:
|
// For now:
|
||||||
// - No other program using our DB exists
|
// - No other program using our DB exists
|
||||||
// - Almost no-one has a 126+ thread CPU
|
// - Almost no-one has a 126+ thread CPU
|
||||||
#[allow(clippy::cast_possible_truncation)] // no-one has `u32::MAX`+ threads
|
let reader_threads =
|
||||||
let reader_threads = config.reader_threads.as_threads().get() as u32;
|
u32::try_from(config.reader_threads.as_threads().get()).unwrap_or(u32::MAX);
|
||||||
env_open_options.max_readers(if reader_threads < 110 {
|
env_open_options.max_readers(if reader_threads < 110 {
|
||||||
126
|
126
|
||||||
} else {
|
} else {
|
||||||
reader_threads + 16
|
reader_threads.saturating_add(16)
|
||||||
});
|
});
|
||||||
|
|
||||||
// Create the database directory if it doesn't exist.
|
// Create the database directory if it doesn't exist.
|
||||||
|
@ -189,18 +199,11 @@ impl Env for ConcreteEnv {
|
||||||
// <https://docs.rs/heed/0.20.0/heed/struct.EnvOpenOptions.html#method.open>
|
// <https://docs.rs/heed/0.20.0/heed/struct.EnvOpenOptions.html#method.open>
|
||||||
let env = unsafe { env_open_options.open(config.db_directory())? };
|
let env = unsafe { env_open_options.open(config.db_directory())? };
|
||||||
|
|
||||||
// TODO: Open/create tables with certain flags
|
|
||||||
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
|
|
||||||
// `heed` creates the database if it didn't exist.
|
|
||||||
// <https://docs.rs/heed/0.20.0-alpha.9/src/heed/env.rs.html#223-229>
|
|
||||||
|
|
||||||
/// Function that creates the tables based off the passed `T: Table`.
|
/// Function that creates the tables based off the passed `T: Table`.
|
||||||
fn create_table<T: Table>(
|
fn create_table<T: Table>(
|
||||||
env: &heed::Env,
|
env: &heed::Env,
|
||||||
tx_rw: &mut heed::RwTxn<'_>,
|
tx_rw: &mut heed::RwTxn<'_>,
|
||||||
) -> Result<(), InitError> {
|
) -> Result<(), InitError> {
|
||||||
println!("create_table(): {}", T::NAME); // TODO: use tracing.
|
|
||||||
|
|
||||||
DatabaseOpenOptions::new(env)
|
DatabaseOpenOptions::new(env)
|
||||||
.name(<T as Table>::NAME)
|
.name(<T as Table>::NAME)
|
||||||
.types::<StorableHeed<<T as Table>::Key>, StorableHeed<<T as Table>::Value>>()
|
.types::<StorableHeed<<T as Table>::Key>, StorableHeed<<T as Table>::Value>>()
|
||||||
|
@ -208,31 +211,17 @@ impl Env for ConcreteEnv {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
use crate::tables::{
|
|
||||||
BlockBlobs, BlockHeights, BlockInfos, KeyImages, NumOutputs, Outputs, PrunableHashes,
|
|
||||||
PrunableTxBlobs, PrunedTxBlobs, RctOutputs, TxBlobs, TxHeights, TxIds, TxOutputs,
|
|
||||||
TxUnlockTime,
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut tx_rw = env.write_txn()?;
|
let mut tx_rw = env.write_txn()?;
|
||||||
create_table::<BlockBlobs>(&env, &mut tx_rw)?;
|
// Create all tables.
|
||||||
create_table::<BlockHeights>(&env, &mut tx_rw)?;
|
// FIXME: this macro is kinda awkward.
|
||||||
create_table::<BlockInfos>(&env, &mut tx_rw)?;
|
{
|
||||||
create_table::<KeyImages>(&env, &mut tx_rw)?;
|
let env = &env;
|
||||||
create_table::<NumOutputs>(&env, &mut tx_rw)?;
|
let tx_rw = &mut tx_rw;
|
||||||
create_table::<Outputs>(&env, &mut tx_rw)?;
|
match call_fn_on_all_tables_or_early_return!(create_table(env, tx_rw)) {
|
||||||
create_table::<PrunableHashes>(&env, &mut tx_rw)?;
|
Ok(_) => (),
|
||||||
create_table::<PrunableTxBlobs>(&env, &mut tx_rw)?;
|
Err(e) => return Err(e),
|
||||||
create_table::<PrunedTxBlobs>(&env, &mut tx_rw)?;
|
}
|
||||||
create_table::<RctOutputs>(&env, &mut tx_rw)?;
|
}
|
||||||
create_table::<TxBlobs>(&env, &mut tx_rw)?;
|
|
||||||
create_table::<TxHeights>(&env, &mut tx_rw)?;
|
|
||||||
create_table::<TxIds>(&env, &mut tx_rw)?;
|
|
||||||
create_table::<TxOutputs>(&env, &mut tx_rw)?;
|
|
||||||
create_table::<TxUnlockTime>(&env, &mut tx_rw)?;
|
|
||||||
|
|
||||||
// TODO: Set dupsort and comparison functions for certain tables
|
|
||||||
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
|
|
||||||
|
|
||||||
// INVARIANT: this should never return `ResizeNeeded` due to adding
|
// INVARIANT: this should never return `ResizeNeeded` due to adding
|
||||||
// some tables since we added some leeway to the memory map above.
|
// some tables since we added some leeway to the memory map above.
|
||||||
|
|
|
@ -20,7 +20,6 @@ impl From<heed::Error> for crate::InitError {
|
||||||
E1::Mdb(mdb_error) => match mdb_error {
|
E1::Mdb(mdb_error) => match mdb_error {
|
||||||
E2::Invalid => Self::Invalid,
|
E2::Invalid => Self::Invalid,
|
||||||
E2::VersionMismatch => Self::InvalidVersion,
|
E2::VersionMismatch => Self::InvalidVersion,
|
||||||
E2::Other(c_int) => Self::Unknown(Box::new(mdb_error)),
|
|
||||||
|
|
||||||
// "Located page was wrong type".
|
// "Located page was wrong type".
|
||||||
// <https://docs.rs/heed/latest/heed/enum.MdbError.html#variant.Corrupted>
|
// <https://docs.rs/heed/latest/heed/enum.MdbError.html#variant.Corrupted>
|
||||||
|
@ -31,6 +30,7 @@ impl From<heed::Error> for crate::InitError {
|
||||||
|
|
||||||
// These errors shouldn't be returned on database init.
|
// These errors shouldn't be returned on database init.
|
||||||
E2::Incompatible
|
E2::Incompatible
|
||||||
|
| E2::Other(_)
|
||||||
| E2::BadTxn
|
| E2::BadTxn
|
||||||
| E2::Problem
|
| E2::Problem
|
||||||
| E2::KeyExist
|
| E2::KeyExist
|
||||||
|
@ -108,7 +108,7 @@ impl From<heed::Error> for crate::RuntimeError {
|
||||||
// occurring indicates we did _not_ do that, which is a bug
|
// occurring indicates we did _not_ do that, which is a bug
|
||||||
// and we should panic.
|
// and we should panic.
|
||||||
//
|
//
|
||||||
// TODO: This can also mean _another_ process wrote to our
|
// FIXME: This can also mean _another_ process wrote to our
|
||||||
// LMDB file and increased the size. I don't think we need to accommodate for this.
|
// LMDB file and increased the size. I don't think we need to accommodate for this.
|
||||||
// <http://www.lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5>
|
// <http://www.lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5>
|
||||||
// Although `monerod` reacts to that instead of `MDB_MAP_FULL`
|
// Although `monerod` reacts to that instead of `MDB_MAP_FULL`
|
||||||
|
|
|
@ -1,11 +1,11 @@
|
||||||
//! `cuprate_database::Storable` <-> `heed` serde trait compatibility layer.
|
//! `cuprate_database::Storable` <-> `heed` serde trait compatibility layer.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Use
|
//---------------------------------------------------------------------------------------------------- Use
|
||||||
use std::{borrow::Cow, fmt::Debug, marker::PhantomData};
|
use std::{borrow::Cow, marker::PhantomData};
|
||||||
|
|
||||||
use heed::{types::Bytes, BoxedError, BytesDecode, BytesEncode, Database};
|
use heed::{BoxedError, BytesDecode, BytesEncode};
|
||||||
|
|
||||||
use crate::{storable::Storable, storable::StorableVec};
|
use crate::storable::Storable;
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- StorableHeed
|
//---------------------------------------------------------------------------------------------------- StorableHeed
|
||||||
/// The glue struct that implements `heed`'s (de)serialization
|
/// The glue struct that implements `heed`'s (de)serialization
|
||||||
|
@ -47,6 +47,8 @@ where
|
||||||
//---------------------------------------------------------------------------------------------------- Tests
|
//---------------------------------------------------------------------------------------------------- Tests
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
|
use std::fmt::Debug;
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::{StorableBytes, StorableVec};
|
use crate::{StorableBytes, StorableVec};
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
//! Implementation of `trait TxRo/TxRw` for `heed`.
|
//! Implementation of `trait TxRo/TxRw` for `heed`.
|
||||||
|
|
||||||
use std::{cell::RefCell, ops::Deref, sync::RwLockReadGuard};
|
use std::cell::RefCell;
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use crate::{
|
use crate::{
|
||||||
|
|
|
@ -1,13 +1,4 @@
|
||||||
//! Database backends.
|
//! Database backends.
|
||||||
//!
|
|
||||||
//! TODO:
|
|
||||||
//! Create a test backend backed by `std::collections::HashMap`.
|
|
||||||
//!
|
|
||||||
//! The full type could be something like `HashMap<&'static str, HashMap<K, V>>`.
|
|
||||||
//! where the `str` is the table name, and the containing hashmap are are the
|
|
||||||
//! key and values.
|
|
||||||
//!
|
|
||||||
//! Not sure how duplicate keys will work.
|
|
||||||
|
|
||||||
cfg_if::cfg_if! {
|
cfg_if::cfg_if! {
|
||||||
// If both backends are enabled, fallback to `heed`.
|
// If both backends are enabled, fallback to `heed`.
|
||||||
|
|
|
@ -1,12 +1,7 @@
|
||||||
//! Implementation of `trait DatabaseR{o,w}` for `redb`.
|
//! Implementation of `trait DatabaseR{o,w}` for `redb`.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::{
|
use std::ops::RangeBounds;
|
||||||
borrow::{Borrow, Cow},
|
|
||||||
fmt::Debug,
|
|
||||||
marker::PhantomData,
|
|
||||||
ops::{Bound, Deref, RangeBounds},
|
|
||||||
};
|
|
||||||
|
|
||||||
use redb::ReadableTable;
|
use redb::ReadableTable;
|
||||||
|
|
||||||
|
@ -17,7 +12,6 @@ use crate::{
|
||||||
},
|
},
|
||||||
database::{DatabaseIter, DatabaseRo, DatabaseRw},
|
database::{DatabaseIter, DatabaseRo, DatabaseRw},
|
||||||
error::RuntimeError,
|
error::RuntimeError,
|
||||||
storable::Storable,
|
|
||||||
table::Table,
|
table::Table,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -1,18 +1,14 @@
|
||||||
//! Implementation of `trait Env` for `redb`.
|
//! Implementation of `trait Env` for `redb`.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::{fmt::Debug, ops::Deref, path::Path, sync::Arc};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
backend::redb::{
|
backend::redb::storable::StorableRedb,
|
||||||
storable::StorableRedb,
|
|
||||||
types::{RedbTableRo, RedbTableRw},
|
|
||||||
},
|
|
||||||
config::{Config, SyncMode},
|
config::{Config, SyncMode},
|
||||||
database::{DatabaseIter, DatabaseRo, DatabaseRw},
|
database::{DatabaseIter, DatabaseRo, DatabaseRw},
|
||||||
env::{Env, EnvInner},
|
env::{Env, EnvInner},
|
||||||
error::{InitError, RuntimeError},
|
error::{InitError, RuntimeError},
|
||||||
table::Table,
|
table::Table,
|
||||||
|
tables::call_fn_on_all_tables_or_early_return,
|
||||||
TxRw,
|
TxRw,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -36,7 +32,8 @@ impl Drop for ConcreteEnv {
|
||||||
fn drop(&mut self) {
|
fn drop(&mut self) {
|
||||||
// INVARIANT: drop(ConcreteEnv) must sync.
|
// INVARIANT: drop(ConcreteEnv) must sync.
|
||||||
if let Err(e) = self.sync() {
|
if let Err(e) = self.sync() {
|
||||||
// TODO: log error?
|
// TODO: use tracing
|
||||||
|
println!("{e:#?}");
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: log that we are dropping the database.
|
// TODO: log that we are dropping the database.
|
||||||
|
@ -53,23 +50,22 @@ impl Env for ConcreteEnv {
|
||||||
|
|
||||||
#[cold]
|
#[cold]
|
||||||
#[inline(never)] // called once.
|
#[inline(never)] // called once.
|
||||||
#[allow(clippy::items_after_statements)]
|
|
||||||
fn open(config: Config) -> Result<Self, InitError> {
|
fn open(config: Config) -> Result<Self, InitError> {
|
||||||
// TODO: dynamic syncs are not implemented.
|
// SOMEDAY: dynamic syncs are not implemented.
|
||||||
let durability = match config.sync_mode {
|
let durability = match config.sync_mode {
|
||||||
// TODO: There's also `redb::Durability::Paranoid`:
|
// FIXME: There's also `redb::Durability::Paranoid`:
|
||||||
// <https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Paranoid>
|
// <https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Paranoid>
|
||||||
// should we use that instead of Immediate?
|
// should we use that instead of Immediate?
|
||||||
SyncMode::Safe => redb::Durability::Immediate,
|
SyncMode::Safe => redb::Durability::Immediate,
|
||||||
SyncMode::Async => redb::Durability::Eventual,
|
SyncMode::Async => redb::Durability::Eventual,
|
||||||
SyncMode::Fast => redb::Durability::None,
|
SyncMode::Fast => redb::Durability::None,
|
||||||
// TODO: dynamic syncs are not implemented.
|
// SOMEDAY: dynamic syncs are not implemented.
|
||||||
SyncMode::FastThenSafe | SyncMode::Threshold(_) => unimplemented!(),
|
SyncMode::FastThenSafe | SyncMode::Threshold(_) => unimplemented!(),
|
||||||
};
|
};
|
||||||
|
|
||||||
let env_builder = redb::Builder::new();
|
let env_builder = redb::Builder::new();
|
||||||
|
|
||||||
// TODO: we can set cache sizes with:
|
// FIXME: we can set cache sizes with:
|
||||||
// env_builder.set_cache(bytes);
|
// env_builder.set_cache(bytes);
|
||||||
|
|
||||||
// Use the in-memory backend if the feature is enabled.
|
// Use the in-memory backend if the feature is enabled.
|
||||||
|
@ -96,8 +92,6 @@ impl Env for ConcreteEnv {
|
||||||
|
|
||||||
/// Function that creates the tables based off the passed `T: Table`.
|
/// Function that creates the tables based off the passed `T: Table`.
|
||||||
fn create_table<T: Table>(tx_rw: &redb::WriteTransaction) -> Result<(), InitError> {
|
fn create_table<T: Table>(tx_rw: &redb::WriteTransaction) -> Result<(), InitError> {
|
||||||
println!("create_table(): {}", T::NAME); // TODO: use tracing.
|
|
||||||
|
|
||||||
let table: redb::TableDefinition<
|
let table: redb::TableDefinition<
|
||||||
'static,
|
'static,
|
||||||
StorableRedb<<T as Table>::Key>,
|
StorableRedb<<T as Table>::Key>,
|
||||||
|
@ -109,32 +103,20 @@ impl Env for ConcreteEnv {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
use crate::tables::{
|
// Create all tables.
|
||||||
BlockBlobs, BlockHeights, BlockInfos, KeyImages, NumOutputs, Outputs, PrunableHashes,
|
// FIXME: this macro is kinda awkward.
|
||||||
PrunableTxBlobs, PrunedTxBlobs, RctOutputs, TxBlobs, TxHeights, TxIds, TxOutputs,
|
let mut tx_rw = env.begin_write()?;
|
||||||
TxUnlockTime,
|
{
|
||||||
};
|
let tx_rw = &mut tx_rw;
|
||||||
|
match call_fn_on_all_tables_or_early_return!(create_table(tx_rw)) {
|
||||||
let tx_rw = env.begin_write()?;
|
Ok(_) => (),
|
||||||
create_table::<BlockBlobs>(&tx_rw)?;
|
Err(e) => return Err(e),
|
||||||
create_table::<BlockHeights>(&tx_rw)?;
|
}
|
||||||
create_table::<BlockInfos>(&tx_rw)?;
|
}
|
||||||
create_table::<KeyImages>(&tx_rw)?;
|
|
||||||
create_table::<NumOutputs>(&tx_rw)?;
|
|
||||||
create_table::<Outputs>(&tx_rw)?;
|
|
||||||
create_table::<PrunableHashes>(&tx_rw)?;
|
|
||||||
create_table::<PrunableTxBlobs>(&tx_rw)?;
|
|
||||||
create_table::<PrunedTxBlobs>(&tx_rw)?;
|
|
||||||
create_table::<RctOutputs>(&tx_rw)?;
|
|
||||||
create_table::<TxBlobs>(&tx_rw)?;
|
|
||||||
create_table::<TxHeights>(&tx_rw)?;
|
|
||||||
create_table::<TxIds>(&tx_rw)?;
|
|
||||||
create_table::<TxOutputs>(&tx_rw)?;
|
|
||||||
create_table::<TxUnlockTime>(&tx_rw)?;
|
|
||||||
tx_rw.commit()?;
|
tx_rw.commit()?;
|
||||||
|
|
||||||
// Check for file integrity.
|
// Check for file integrity.
|
||||||
// TODO: should we do this? is it slow?
|
// FIXME: should we do this? is it slow?
|
||||||
env.check_integrity()?;
|
env.check_integrity()?;
|
||||||
|
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
|
|
|
@ -45,7 +45,7 @@ impl From<redb::StorageError> for InitError {
|
||||||
|
|
||||||
match error {
|
match error {
|
||||||
E::Io(e) => Self::Io(e),
|
E::Io(e) => Self::Io(e),
|
||||||
E::Corrupted(s) => Self::Corrupt,
|
E::Corrupted(_) => Self::Corrupt,
|
||||||
// HACK: Handle new errors as `redb` adds them.
|
// HACK: Handle new errors as `redb` adds them.
|
||||||
_ => Self::Unknown(Box::new(error)),
|
_ => Self::Unknown(Box::new(error)),
|
||||||
}
|
}
|
||||||
|
@ -56,8 +56,6 @@ impl From<redb::TransactionError> for InitError {
|
||||||
/// Created by `redb` in:
|
/// Created by `redb` in:
|
||||||
/// - [`redb::Database::begin_write`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.begin_write)
|
/// - [`redb::Database::begin_write`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.begin_write)
|
||||||
fn from(error: redb::TransactionError) -> Self {
|
fn from(error: redb::TransactionError) -> Self {
|
||||||
use redb::StorageError as E;
|
|
||||||
|
|
||||||
match error {
|
match error {
|
||||||
redb::TransactionError::Storage(error) => error.into(),
|
redb::TransactionError::Storage(error) => error.into(),
|
||||||
// HACK: Handle new errors as `redb` adds them.
|
// HACK: Handle new errors as `redb` adds them.
|
||||||
|
@ -70,7 +68,6 @@ impl From<redb::TableError> for InitError {
|
||||||
/// Created by `redb` in:
|
/// Created by `redb` in:
|
||||||
/// - [`redb::WriteTransaction::open_table`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.open_table)
|
/// - [`redb::WriteTransaction::open_table`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.open_table)
|
||||||
fn from(error: redb::TableError) -> Self {
|
fn from(error: redb::TableError) -> Self {
|
||||||
use redb::StorageError as E2;
|
|
||||||
use redb::TableError as E;
|
use redb::TableError as E;
|
||||||
|
|
||||||
match error {
|
match error {
|
||||||
|
@ -85,8 +82,6 @@ impl From<redb::CommitError> for InitError {
|
||||||
/// Created by `redb` in:
|
/// Created by `redb` in:
|
||||||
/// - [`redb::WriteTransaction::commit`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.commit)
|
/// - [`redb::WriteTransaction::commit`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.commit)
|
||||||
fn from(error: redb::CommitError) -> Self {
|
fn from(error: redb::CommitError) -> Self {
|
||||||
use redb::StorageError as E;
|
|
||||||
|
|
||||||
match error {
|
match error {
|
||||||
redb::CommitError::Storage(error) => error.into(),
|
redb::CommitError::Storage(error) => error.into(),
|
||||||
// HACK: Handle new errors as `redb` adds them.
|
// HACK: Handle new errors as `redb` adds them.
|
||||||
|
@ -102,8 +97,6 @@ impl From<redb::TransactionError> for RuntimeError {
|
||||||
/// - [`redb::Database::begin_write`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.begin_write)
|
/// - [`redb::Database::begin_write`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.begin_write)
|
||||||
/// - [`redb::Database::begin_read`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.begin_read)
|
/// - [`redb::Database::begin_read`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.begin_read)
|
||||||
fn from(error: redb::TransactionError) -> Self {
|
fn from(error: redb::TransactionError) -> Self {
|
||||||
use redb::StorageError as E;
|
|
||||||
|
|
||||||
match error {
|
match error {
|
||||||
redb::TransactionError::Storage(error) => error.into(),
|
redb::TransactionError::Storage(error) => error.into(),
|
||||||
|
|
||||||
|
@ -118,8 +111,6 @@ impl From<redb::CommitError> for RuntimeError {
|
||||||
/// Created by `redb` in:
|
/// Created by `redb` in:
|
||||||
/// - [`redb::WriteTransaction::commit`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.commit)
|
/// - [`redb::WriteTransaction::commit`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.commit)
|
||||||
fn from(error: redb::CommitError) -> Self {
|
fn from(error: redb::CommitError) -> Self {
|
||||||
use redb::StorageError as E;
|
|
||||||
|
|
||||||
match error {
|
match error {
|
||||||
redb::CommitError::Storage(error) => error.into(),
|
redb::CommitError::Storage(error) => error.into(),
|
||||||
|
|
||||||
|
@ -135,7 +126,6 @@ impl From<redb::TableError> for RuntimeError {
|
||||||
/// - [`redb::WriteTransaction::open_table`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.open_table)
|
/// - [`redb::WriteTransaction::open_table`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.open_table)
|
||||||
/// - [`redb::ReadTransaction::open_table`](https://docs.rs/redb/1.5.0/redb/struct.ReadTransaction.html#method.open_table)
|
/// - [`redb::ReadTransaction::open_table`](https://docs.rs/redb/1.5.0/redb/struct.ReadTransaction.html#method.open_table)
|
||||||
fn from(error: redb::TableError) -> Self {
|
fn from(error: redb::TableError) -> Self {
|
||||||
use redb::StorageError as E2;
|
|
||||||
use redb::TableError as E;
|
use redb::TableError as E;
|
||||||
|
|
||||||
match error {
|
match error {
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
//! `cuprate_database::Storable` <-> `redb` serde trait compatibility layer.
|
//! `cuprate_database::Storable` <-> `redb` serde trait compatibility layer.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Use
|
//---------------------------------------------------------------------------------------------------- Use
|
||||||
use std::{any::Any, borrow::Cow, cmp::Ordering, fmt::Debug, marker::PhantomData};
|
use std::{cmp::Ordering, fmt::Debug, marker::PhantomData};
|
||||||
|
|
||||||
use redb::TypeName;
|
use redb::TypeName;
|
||||||
|
|
||||||
|
|
|
@ -2,8 +2,6 @@
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use crate::{
|
use crate::{
|
||||||
config::SyncMode,
|
|
||||||
env::Env,
|
|
||||||
error::RuntimeError,
|
error::RuntimeError,
|
||||||
transaction::{TxRo, TxRw},
|
transaction::{TxRo, TxRw},
|
||||||
};
|
};
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
//! `redb` type aliases.
|
//! `redb` type aliases.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Types
|
//---------------------------------------------------------------------------------------------------- Types
|
||||||
use crate::{backend::redb::storable::StorableRedb, table::Table};
|
use crate::backend::redb::storable::StorableRedb;
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Types
|
//---------------------------------------------------------------------------------------------------- Types
|
||||||
/// The concrete type for readable `redb` tables.
|
/// The concrete type for readable `redb` tables.
|
||||||
|
|
|
@ -13,28 +13,20 @@
|
||||||
//!
|
//!
|
||||||
//! `redb`, and it only must be enabled for it to be tested.
|
//! `redb`, and it only must be enabled for it to be tested.
|
||||||
|
|
||||||
#![allow(
|
|
||||||
clippy::items_after_statements,
|
|
||||||
clippy::significant_drop_tightening,
|
|
||||||
clippy::cast_possible_truncation
|
|
||||||
)]
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::borrow::{Borrow, Cow};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
config::{Config, SyncMode},
|
|
||||||
database::{DatabaseIter, DatabaseRo, DatabaseRw},
|
database::{DatabaseIter, DatabaseRo, DatabaseRw},
|
||||||
env::{Env, EnvInner},
|
env::{Env, EnvInner},
|
||||||
error::{InitError, RuntimeError},
|
error::RuntimeError,
|
||||||
resize::ResizeAlgorithm,
|
resize::ResizeAlgorithm,
|
||||||
storable::StorableVec,
|
storable::StorableVec,
|
||||||
table::Table,
|
|
||||||
tables::{
|
tables::{
|
||||||
BlockBlobs, BlockHeights, BlockInfos, KeyImages, NumOutputs, Outputs, PrunableHashes,
|
BlockBlobs, BlockHeights, BlockInfos, KeyImages, NumOutputs, Outputs, PrunableHashes,
|
||||||
PrunableTxBlobs, PrunedTxBlobs, RctOutputs, TxBlobs, TxHeights, TxIds, TxOutputs,
|
PrunableTxBlobs, PrunedTxBlobs, RctOutputs, TxBlobs, TxHeights, TxIds, TxOutputs,
|
||||||
TxUnlockTime,
|
TxUnlockTime,
|
||||||
},
|
},
|
||||||
|
tables::{TablesIter, TablesMut},
|
||||||
tests::tmp_concrete_env,
|
tests::tmp_concrete_env,
|
||||||
transaction::{TxRo, TxRw},
|
transaction::{TxRo, TxRw},
|
||||||
types::{
|
types::{
|
||||||
|
@ -155,7 +147,6 @@ fn non_manual_resize_2() {
|
||||||
|
|
||||||
/// Test all `DatabaseR{o,w}` operations.
|
/// Test all `DatabaseR{o,w}` operations.
|
||||||
#[test]
|
#[test]
|
||||||
#[allow(clippy::too_many_lines)]
|
|
||||||
fn db_read_write() {
|
fn db_read_write() {
|
||||||
let (env, _tempdir) = tmp_concrete_env();
|
let (env, _tempdir) = tmp_concrete_env();
|
||||||
let env_inner = env.env_inner();
|
let env_inner = env.env_inner();
|
||||||
|
@ -191,7 +182,7 @@ fn db_read_write() {
|
||||||
|
|
||||||
// Insert keys.
|
// Insert keys.
|
||||||
let mut key = KEY;
|
let mut key = KEY;
|
||||||
for i in 0..N {
|
for _ in 0..N {
|
||||||
table.put(&key, &VALUE).unwrap();
|
table.put(&key, &VALUE).unwrap();
|
||||||
key.amount += 1;
|
key.amount += 1;
|
||||||
}
|
}
|
||||||
|
@ -331,6 +322,60 @@ fn db_read_write() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Assert that `key`'s in database tables are sorted in
|
||||||
|
/// an ordered B-Tree fashion, i.e. `min_value -> max_value`.
|
||||||
|
#[test]
|
||||||
|
fn tables_are_sorted() {
|
||||||
|
let (env, _tmp) = tmp_concrete_env();
|
||||||
|
let env_inner = env.env_inner();
|
||||||
|
let tx_rw = env_inner.tx_rw().unwrap();
|
||||||
|
let mut tables_mut = env_inner.open_tables_mut(&tx_rw).unwrap();
|
||||||
|
|
||||||
|
// Insert `{5, 4, 3, 2, 1, 0}`, assert each new
|
||||||
|
// number inserted is the minimum `first()` value.
|
||||||
|
for key in (0..6).rev() {
|
||||||
|
tables_mut.num_outputs_mut().put(&key, &123).unwrap();
|
||||||
|
let (first, _) = tables_mut.num_outputs_mut().first().unwrap();
|
||||||
|
assert_eq!(first, key);
|
||||||
|
}
|
||||||
|
|
||||||
|
drop(tables_mut);
|
||||||
|
TxRw::commit(tx_rw).unwrap();
|
||||||
|
let tx_rw = env_inner.tx_rw().unwrap();
|
||||||
|
|
||||||
|
// Assert iterators are ordered.
|
||||||
|
{
|
||||||
|
let tx_ro = env_inner.tx_ro().unwrap();
|
||||||
|
let tables = env_inner.open_tables(&tx_ro).unwrap();
|
||||||
|
let t = tables.num_outputs_iter();
|
||||||
|
let iter = t.iter().unwrap();
|
||||||
|
let keys = t.keys().unwrap();
|
||||||
|
for ((i, iter), key) in (0..6).zip(iter).zip(keys) {
|
||||||
|
let (iter, _) = iter.unwrap();
|
||||||
|
let key = key.unwrap();
|
||||||
|
assert_eq!(i, iter);
|
||||||
|
assert_eq!(iter, key);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut tables_mut = env_inner.open_tables_mut(&tx_rw).unwrap();
|
||||||
|
let t = tables_mut.num_outputs_mut();
|
||||||
|
|
||||||
|
// Assert the `first()` values are the minimum, i.e. `{0, 1, 2}`
|
||||||
|
for key in 0..3 {
|
||||||
|
let (first, _) = t.first().unwrap();
|
||||||
|
assert_eq!(first, key);
|
||||||
|
t.delete(&key).unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Assert the `last()` values are the maximum, i.e. `{5, 4, 3}`
|
||||||
|
for key in (3..6).rev() {
|
||||||
|
let (last, _) = tables_mut.num_outputs_mut().last().unwrap();
|
||||||
|
assert_eq!(last, key);
|
||||||
|
tables_mut.num_outputs_mut().delete(&key).unwrap();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Table Tests
|
//---------------------------------------------------------------------------------------------------- Table Tests
|
||||||
/// Test multiple tables and their key + values.
|
/// Test multiple tables and their key + values.
|
||||||
///
|
///
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
//! TODO
|
//! SOMEDAY
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::{
|
use std::{
|
||||||
|
@ -19,13 +19,13 @@ use crate::{
|
||||||
};
|
};
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Backend
|
//---------------------------------------------------------------------------------------------------- Backend
|
||||||
/// TODO
|
/// SOMEDAY: allow runtime hot-swappable backends.
|
||||||
#[derive(Copy, Clone, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)]
|
#[derive(Copy, Clone, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)]
|
||||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||||
pub enum Backend {
|
pub enum Backend {
|
||||||
#[default]
|
#[default]
|
||||||
/// TODO
|
/// SOMEDAY
|
||||||
Heed,
|
Heed,
|
||||||
/// TODO
|
/// SOMEDAY
|
||||||
Redb,
|
Redb,
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,17 +1,8 @@
|
||||||
//! Database [`Env`](crate::Env) configuration.
|
//! The main [`Config`] struct, holding all configurable values.
|
||||||
//!
|
|
||||||
//! This module contains the main [`Config`]uration struct
|
|
||||||
//! for the database [`Env`](crate::Env)ironment, and data
|
|
||||||
//! structures related to any configuration setting.
|
|
||||||
//!
|
|
||||||
//! These configurations are processed at runtime, meaning
|
|
||||||
//! the `Env` can/will dynamically adjust its behavior
|
|
||||||
//! based on these values.
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::{
|
use std::{
|
||||||
borrow::Cow,
|
borrow::Cow,
|
||||||
num::NonZeroUsize,
|
|
||||||
path::{Path, PathBuf},
|
path::{Path, PathBuf},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -26,13 +17,143 @@ use crate::{
|
||||||
resize::ResizeAlgorithm,
|
resize::ResizeAlgorithm,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
//---------------------------------------------------------------------------------------------------- ConfigBuilder
|
||||||
|
/// Builder for [`Config`].
|
||||||
|
///
|
||||||
|
// SOMEDAY: there's are many more options to add in the future.
|
||||||
|
#[derive(Debug, Clone, PartialEq, PartialOrd)]
|
||||||
|
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||||
|
pub struct ConfigBuilder {
|
||||||
|
/// [`Config::db_directory`].
|
||||||
|
db_directory: Option<Cow<'static, Path>>,
|
||||||
|
|
||||||
|
/// [`Config::sync_mode`].
|
||||||
|
sync_mode: Option<SyncMode>,
|
||||||
|
|
||||||
|
/// [`Config::reader_threads`].
|
||||||
|
reader_threads: Option<ReaderThreads>,
|
||||||
|
|
||||||
|
/// [`Config::resize_algorithm`].
|
||||||
|
resize_algorithm: Option<ResizeAlgorithm>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ConfigBuilder {
|
||||||
|
/// Create a new [`ConfigBuilder`].
|
||||||
|
///
|
||||||
|
/// [`ConfigBuilder::build`] can be called immediately
|
||||||
|
/// after this function to use default values.
|
||||||
|
pub const fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
db_directory: None,
|
||||||
|
sync_mode: None,
|
||||||
|
reader_threads: None,
|
||||||
|
resize_algorithm: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build into a [`Config`].
|
||||||
|
///
|
||||||
|
/// # Default values
|
||||||
|
/// If [`ConfigBuilder::db_directory`] was not called,
|
||||||
|
/// the default [`cuprate_database_dir`] will be used.
|
||||||
|
///
|
||||||
|
/// For all other values, [`Default::default`] is used.
|
||||||
|
pub fn build(self) -> Config {
|
||||||
|
// INVARIANT: all PATH safety checks are done
|
||||||
|
// in `helper::fs`. No need to do them here.
|
||||||
|
let db_directory = self
|
||||||
|
.db_directory
|
||||||
|
.unwrap_or_else(|| Cow::Borrowed(cuprate_database_dir()));
|
||||||
|
|
||||||
|
// Add the database filename to the directory.
|
||||||
|
let db_file = {
|
||||||
|
let mut db_file = db_directory.to_path_buf();
|
||||||
|
db_file.push(DATABASE_DATA_FILENAME);
|
||||||
|
Cow::Owned(db_file)
|
||||||
|
};
|
||||||
|
|
||||||
|
Config {
|
||||||
|
db_directory,
|
||||||
|
db_file,
|
||||||
|
sync_mode: self.sync_mode.unwrap_or_default(),
|
||||||
|
reader_threads: self.reader_threads.unwrap_or_default(),
|
||||||
|
resize_algorithm: self.resize_algorithm.unwrap_or_default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set a custom database directory (and file) [`Path`].
|
||||||
|
#[must_use]
|
||||||
|
pub fn db_directory(mut self, db_directory: PathBuf) -> Self {
|
||||||
|
self.db_directory = Some(Cow::Owned(db_directory));
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Tune the [`ConfigBuilder`] for the highest performing,
|
||||||
|
/// but also most resource-intensive & maybe risky settings.
|
||||||
|
///
|
||||||
|
/// Good default for testing, and resource-available machines.
|
||||||
|
#[must_use]
|
||||||
|
pub fn fast(mut self) -> Self {
|
||||||
|
self.sync_mode = Some(SyncMode::Fast);
|
||||||
|
self.reader_threads = Some(ReaderThreads::OnePerThread);
|
||||||
|
self.resize_algorithm = Some(ResizeAlgorithm::default());
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Tune the [`ConfigBuilder`] for the lowest performing,
|
||||||
|
/// but also least resource-intensive settings.
|
||||||
|
///
|
||||||
|
/// Good default for resource-limited machines, e.g. a cheap VPS.
|
||||||
|
#[must_use]
|
||||||
|
pub fn low_power(mut self) -> Self {
|
||||||
|
self.sync_mode = Some(SyncMode::default());
|
||||||
|
self.reader_threads = Some(ReaderThreads::One);
|
||||||
|
self.resize_algorithm = Some(ResizeAlgorithm::default());
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set a custom [`SyncMode`].
|
||||||
|
#[must_use]
|
||||||
|
pub const fn sync_mode(mut self, sync_mode: SyncMode) -> Self {
|
||||||
|
self.sync_mode = Some(sync_mode);
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set a custom [`ReaderThreads`].
|
||||||
|
#[must_use]
|
||||||
|
pub const fn reader_threads(mut self, reader_threads: ReaderThreads) -> Self {
|
||||||
|
self.reader_threads = Some(reader_threads);
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set a custom [`ResizeAlgorithm`].
|
||||||
|
#[must_use]
|
||||||
|
pub const fn resize_algorithm(mut self, resize_algorithm: ResizeAlgorithm) -> Self {
|
||||||
|
self.resize_algorithm = Some(resize_algorithm);
|
||||||
|
self
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for ConfigBuilder {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
db_directory: Some(Cow::Borrowed(cuprate_database_dir())),
|
||||||
|
sync_mode: Some(SyncMode::default()),
|
||||||
|
reader_threads: Some(ReaderThreads::default()),
|
||||||
|
resize_algorithm: Some(ResizeAlgorithm::default()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Config
|
//---------------------------------------------------------------------------------------------------- Config
|
||||||
/// Database [`Env`](crate::Env) configuration.
|
/// Database [`Env`](crate::Env) configuration.
|
||||||
///
|
///
|
||||||
/// This is the struct passed to [`Env::open`](crate::Env::open) that
|
/// This is the struct passed to [`Env::open`](crate::Env::open) that
|
||||||
/// allows the database to be configured in various ways.
|
/// allows the database to be configured in various ways.
|
||||||
///
|
///
|
||||||
/// TODO: there's probably more options to add.
|
/// For construction, either use [`ConfigBuilder`] or [`Config::default`].
|
||||||
|
///
|
||||||
|
// SOMEDAY: there's are many more options to add in the future.
|
||||||
#[derive(Debug, Clone, PartialEq, PartialOrd)]
|
#[derive(Debug, Clone, PartialEq, PartialOrd)]
|
||||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||||
pub struct Config {
|
pub struct Config {
|
||||||
|
@ -44,8 +165,8 @@ pub struct Config {
|
||||||
/// By default, if no value is provided in the [`Config`]
|
/// By default, if no value is provided in the [`Config`]
|
||||||
/// constructor functions, this will be [`cuprate_database_dir`].
|
/// constructor functions, this will be [`cuprate_database_dir`].
|
||||||
///
|
///
|
||||||
/// TODO: we should also support `/etc/cuprated.conf`.
|
// SOMEDAY: we should also support `/etc/cuprated.conf`.
|
||||||
/// This could be represented with an `enum DbPath { Default, Custom, Etc, }`
|
// This could be represented with an `enum DbPath { Default, Custom, Etc, }`
|
||||||
pub(crate) db_directory: Cow<'static, Path>,
|
pub(crate) db_directory: Cow<'static, Path>,
|
||||||
/// The actual database data file.
|
/// The actual database data file.
|
||||||
///
|
///
|
||||||
|
@ -67,111 +188,50 @@ pub struct Config {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Config {
|
impl Config {
|
||||||
/// Private function to acquire [`Config::db_file`]
|
|
||||||
/// from the user provided (or default) [`Config::db_directory`].
|
|
||||||
///
|
|
||||||
/// As the database data file PATH is just the directory + the filename,
|
|
||||||
/// we only need the directory from the user/Config, and can add it here.
|
|
||||||
fn return_db_dir_and_file(
|
|
||||||
db_directory: Option<PathBuf>,
|
|
||||||
) -> (Cow<'static, Path>, Cow<'static, Path>) {
|
|
||||||
// INVARIANT: all PATH safety checks are done
|
|
||||||
// in `helper::fs`. No need to do them here.
|
|
||||||
let db_directory =
|
|
||||||
db_directory.map_or_else(|| Cow::Borrowed(cuprate_database_dir()), Cow::Owned);
|
|
||||||
|
|
||||||
// Add the database filename to the directory.
|
|
||||||
let mut db_file = db_directory.to_path_buf();
|
|
||||||
db_file.push(DATABASE_DATA_FILENAME);
|
|
||||||
|
|
||||||
(db_directory, Cow::Owned(db_file))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create a new [`Config`] with sane default settings.
|
/// Create a new [`Config`] with sane default settings.
|
||||||
///
|
///
|
||||||
/// # `db_directory`
|
/// The [`Config::db_directory`] will be [`cuprate_database_dir`].
|
||||||
/// If this is `Some`, it will be used as the
|
|
||||||
/// directory that contains all database files.
|
|
||||||
///
|
///
|
||||||
/// If `None`, it will use the default directory [`cuprate_database_dir`].
|
/// All other values will be [`Default::default`].
|
||||||
pub fn new(db_directory: Option<PathBuf>) -> Self {
|
|
||||||
let (db_directory, db_file) = Self::return_db_dir_and_file(db_directory);
|
|
||||||
Self {
|
|
||||||
db_directory,
|
|
||||||
db_file,
|
|
||||||
sync_mode: SyncMode::default(),
|
|
||||||
reader_threads: ReaderThreads::OnePerThread,
|
|
||||||
resize_algorithm: ResizeAlgorithm::default(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create a [`Config`] with the highest performing,
|
|
||||||
/// but also most resource-intensive & maybe risky settings.
|
|
||||||
///
|
///
|
||||||
/// Good default for testing, and resource-available machines.
|
/// Same as [`Config::default`].
|
||||||
///
|
///
|
||||||
/// # `db_directory`
|
/// ```rust
|
||||||
/// If this is `Some`, it will be used as the
|
/// use cuprate_database::{config::*, resize::*, DATABASE_DATA_FILENAME};
|
||||||
/// directory that contains all database files.
|
/// use cuprate_helper::fs::*;
|
||||||
///
|
///
|
||||||
/// If `None`, it will use the default directory [`cuprate_database_dir`].
|
/// let config = Config::new();
|
||||||
pub fn fast(db_directory: Option<PathBuf>) -> Self {
|
|
||||||
let (db_directory, db_file) = Self::return_db_dir_and_file(db_directory);
|
|
||||||
Self {
|
|
||||||
db_directory,
|
|
||||||
db_file,
|
|
||||||
sync_mode: SyncMode::Fast,
|
|
||||||
reader_threads: ReaderThreads::OnePerThread,
|
|
||||||
resize_algorithm: ResizeAlgorithm::default(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create a [`Config`] with the lowest performing,
|
|
||||||
/// but also least resource-intensive settings.
|
|
||||||
///
|
///
|
||||||
/// Good default for resource-limited machines, e.g. a cheap VPS.
|
/// assert_eq!(config.db_directory(), cuprate_database_dir());
|
||||||
///
|
/// assert!(config.db_file().starts_with(cuprate_database_dir()));
|
||||||
/// # `db_directory`
|
/// assert!(config.db_file().ends_with(DATABASE_DATA_FILENAME));
|
||||||
/// If this is `Some`, it will be used as the
|
/// assert_eq!(config.sync_mode, SyncMode::default());
|
||||||
/// directory that contains all database files.
|
/// assert_eq!(config.reader_threads, ReaderThreads::default());
|
||||||
///
|
/// assert_eq!(config.resize_algorithm, ResizeAlgorithm::default());
|
||||||
/// If `None`, it will use the default directory [`cuprate_database_dir`].
|
/// ```
|
||||||
pub fn low_power(db_directory: Option<PathBuf>) -> Self {
|
pub fn new() -> Self {
|
||||||
let (db_directory, db_file) = Self::return_db_dir_and_file(db_directory);
|
ConfigBuilder::default().build()
|
||||||
Self {
|
|
||||||
db_directory,
|
|
||||||
db_file,
|
|
||||||
sync_mode: SyncMode::default(),
|
|
||||||
reader_threads: ReaderThreads::One,
|
|
||||||
resize_algorithm: ResizeAlgorithm::default(),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the absolute [`Path`] to the database directory.
|
/// Return the absolute [`Path`] to the database directory.
|
||||||
///
|
|
||||||
/// This will be the `db_directory` given
|
|
||||||
/// (or default) during [`Config`] construction.
|
|
||||||
pub const fn db_directory(&self) -> &Cow<'_, Path> {
|
pub const fn db_directory(&self) -> &Cow<'_, Path> {
|
||||||
&self.db_directory
|
&self.db_directory
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the absolute [`Path`] to the database data file.
|
/// Return the absolute [`Path`] to the database data file.
|
||||||
///
|
|
||||||
/// This will be based off the `db_directory` given
|
|
||||||
/// (or default) during [`Config`] construction.
|
|
||||||
pub const fn db_file(&self) -> &Cow<'_, Path> {
|
pub const fn db_file(&self) -> &Cow<'_, Path> {
|
||||||
&self.db_file
|
&self.db_file
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for Config {
|
impl Default for Config {
|
||||||
/// Same as `Self::new(None)`.
|
/// Same as [`Config::new`].
|
||||||
///
|
///
|
||||||
/// ```rust
|
/// ```rust
|
||||||
/// # use cuprate_database::config::*;
|
/// # use cuprate_database::config::*;
|
||||||
/// assert_eq!(Config::default(), Config::new(None));
|
/// assert_eq!(Config::default(), Config::new());
|
||||||
/// ```
|
/// ```
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self::new(None)
|
Self::new()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,7 +1,44 @@
|
||||||
//! TODO
|
//! Database [`Env`](crate::Env) configuration.
|
||||||
|
//!
|
||||||
|
//! This module contains the main [`Config`]uration struct
|
||||||
|
//! for the database [`Env`](crate::Env)ironment, and types
|
||||||
|
//! related to configuration settings.
|
||||||
|
//!
|
||||||
|
//! The main constructor is the [`ConfigBuilder`].
|
||||||
|
//!
|
||||||
|
//! These configurations are processed at runtime, meaning
|
||||||
|
//! the `Env` can/will dynamically adjust its behavior
|
||||||
|
//! based on these values.
|
||||||
|
//!
|
||||||
|
//! # Example
|
||||||
|
//! ```rust
|
||||||
|
//! use cuprate_database::{
|
||||||
|
//! Env,
|
||||||
|
//! config::{ConfigBuilder, ReaderThreads, SyncMode}
|
||||||
|
//! };
|
||||||
|
//!
|
||||||
|
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
//! let db_dir = tempfile::tempdir()?;
|
||||||
|
//!
|
||||||
|
//! let config = ConfigBuilder::new()
|
||||||
|
//! // Use a custom database directory.
|
||||||
|
//! .db_directory(db_dir.path().to_path_buf())
|
||||||
|
//! // Use as many reader threads as possible (when using `service`).
|
||||||
|
//! .reader_threads(ReaderThreads::OnePerThread)
|
||||||
|
//! // Use the fastest sync mode.
|
||||||
|
//! .sync_mode(SyncMode::Fast)
|
||||||
|
//! // Build into `Config`
|
||||||
|
//! .build();
|
||||||
|
//!
|
||||||
|
//! // Start a database `service` using this configuration.
|
||||||
|
//! let (reader_handle, _) = cuprate_database::service::init(config.clone())?;
|
||||||
|
//! // It's using the config we provided.
|
||||||
|
//! assert_eq!(reader_handle.env().config(), &config);
|
||||||
|
//! # Ok(()) }
|
||||||
|
//! ```
|
||||||
|
|
||||||
mod config;
|
mod config;
|
||||||
pub use config::Config;
|
pub use config::{Config, ConfigBuilder};
|
||||||
|
|
||||||
mod reader_threads;
|
mod reader_threads;
|
||||||
pub use reader_threads::ReaderThreads;
|
pub use reader_threads::ReaderThreads;
|
||||||
|
|
|
@ -9,25 +9,19 @@
|
||||||
//! based on these values.
|
//! based on these values.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::{
|
use std::num::NonZeroUsize;
|
||||||
borrow::Cow,
|
|
||||||
num::NonZeroUsize,
|
|
||||||
path::{Path, PathBuf},
|
|
||||||
};
|
|
||||||
|
|
||||||
#[cfg(feature = "serde")]
|
#[cfg(feature = "serde")]
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use cuprate_helper::fs::cuprate_database_dir;
|
|
||||||
|
|
||||||
use crate::{constants::DATABASE_DATA_FILENAME, resize::ResizeAlgorithm};
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- ReaderThreads
|
//---------------------------------------------------------------------------------------------------- ReaderThreads
|
||||||
/// Amount of database reader threads to spawn.
|
/// Amount of database reader threads to spawn when using [`service`](crate::service).
|
||||||
///
|
///
|
||||||
/// This controls how many reader thread [`crate::service`]'s
|
/// This controls how many reader thread `service`'s
|
||||||
/// thread-pool will spawn to receive and send requests/responses.
|
/// thread-pool will spawn to receive and send requests/responses.
|
||||||
///
|
///
|
||||||
|
/// It does nothing outside of `service`.
|
||||||
|
///
|
||||||
/// It will always be at least 1, up until the amount of threads on the machine.
|
/// It will always be at least 1, up until the amount of threads on the machine.
|
||||||
///
|
///
|
||||||
/// The main function used to extract an actual
|
/// The main function used to extract an actual
|
||||||
|
@ -38,8 +32,8 @@ pub enum ReaderThreads {
|
||||||
#[default]
|
#[default]
|
||||||
/// Spawn 1 reader thread per available thread on the machine.
|
/// Spawn 1 reader thread per available thread on the machine.
|
||||||
///
|
///
|
||||||
/// For example, a `16-core, 32-thread` Ryzen 5950x will
|
/// For example, a `32-thread` system will spawn
|
||||||
/// spawn `32` reader threads using this setting.
|
/// `32` reader threads using this setting.
|
||||||
OnePerThread,
|
OnePerThread,
|
||||||
|
|
||||||
/// Only spawn 1 reader thread.
|
/// Only spawn 1 reader thread.
|
||||||
|
|
|
@ -9,19 +9,10 @@
|
||||||
//! based on these values.
|
//! based on these values.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::{
|
|
||||||
borrow::Cow,
|
|
||||||
num::NonZeroUsize,
|
|
||||||
path::{Path, PathBuf},
|
|
||||||
};
|
|
||||||
|
|
||||||
#[cfg(feature = "serde")]
|
#[cfg(feature = "serde")]
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use cuprate_helper::fs::cuprate_database_dir;
|
|
||||||
|
|
||||||
use crate::{constants::DATABASE_DATA_FILENAME, resize::ResizeAlgorithm};
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- SyncMode
|
//---------------------------------------------------------------------------------------------------- SyncMode
|
||||||
/// Disk synchronization mode.
|
/// Disk synchronization mode.
|
||||||
///
|
///
|
||||||
|
@ -48,7 +39,7 @@ use crate::{constants::DATABASE_DATA_FILENAME, resize::ResizeAlgorithm};
|
||||||
/// ```
|
/// ```
|
||||||
/// will be fine, most likely pulling from memory instead of disk.
|
/// will be fine, most likely pulling from memory instead of disk.
|
||||||
///
|
///
|
||||||
/// # TODO
|
/// # SOMEDAY
|
||||||
/// Dynamic sync's are not yet supported.
|
/// Dynamic sync's are not yet supported.
|
||||||
///
|
///
|
||||||
/// Only:
|
/// Only:
|
||||||
|
@ -64,24 +55,24 @@ pub enum SyncMode {
|
||||||
/// Use [`SyncMode::Fast`] until fully synced,
|
/// Use [`SyncMode::Fast`] until fully synced,
|
||||||
/// then use [`SyncMode::Safe`].
|
/// then use [`SyncMode::Safe`].
|
||||||
///
|
///
|
||||||
/// # TODO: how to implement this?
|
// # SOMEDAY: how to implement this?
|
||||||
/// ref: <https://github.com/monero-project/monero/issues/1463>
|
// ref: <https://github.com/monero-project/monero/issues/1463>
|
||||||
/// monerod-solution: <https://github.com/monero-project/monero/pull/1506>
|
// monerod-solution: <https://github.com/monero-project/monero/pull/1506>
|
||||||
/// cuprate-issue: <https://github.com/Cuprate/cuprate/issues/78>
|
// cuprate-issue: <https://github.com/Cuprate/cuprate/issues/78>
|
||||||
///
|
//
|
||||||
/// We could:
|
// We could:
|
||||||
/// ```rust,ignore
|
// ```rust,ignore
|
||||||
/// if current_db_block <= top_block.saturating_sub(N) {
|
// if current_db_block <= top_block.saturating_sub(N) {
|
||||||
/// // don't sync()
|
// // don't sync()
|
||||||
/// } else {
|
// } else {
|
||||||
/// // sync()
|
// // sync()
|
||||||
/// }
|
// }
|
||||||
/// ```
|
// ```
|
||||||
/// where N is some threshold we pick that is _close_ enough
|
// where N is some threshold we pick that is _close_ enough
|
||||||
/// to being synced where we want to start being safer.
|
// to being synced where we want to start being safer.
|
||||||
///
|
//
|
||||||
/// Essentially, when we are in a certain % range of being finished,
|
// Essentially, when we are in a certain % range of being finished,
|
||||||
/// switch to safe mode, until then, go fast.
|
// switch to safe mode, until then, go fast.
|
||||||
FastThenSafe,
|
FastThenSafe,
|
||||||
|
|
||||||
#[default]
|
#[default]
|
||||||
|
@ -136,7 +127,7 @@ pub enum SyncMode {
|
||||||
/// In the case of a system crash, the database
|
/// In the case of a system crash, the database
|
||||||
/// may become corrupted when using this option.
|
/// may become corrupted when using this option.
|
||||||
//
|
//
|
||||||
// TODO: we could call this `unsafe`
|
// FIXME: we could call this `unsafe`
|
||||||
// and use that terminology in the config file
|
// and use that terminology in the config file
|
||||||
// so users know exactly what they are getting
|
// so users know exactly what they are getting
|
||||||
// themselves into.
|
// themselves into.
|
||||||
|
|
|
@ -35,8 +35,8 @@ TODO: instructions on:
|
||||||
///
|
///
|
||||||
/// | Backend | Value |
|
/// | Backend | Value |
|
||||||
/// |---------|-------|
|
/// |---------|-------|
|
||||||
/// | `heed` | "heed"
|
/// | `heed` | `"heed"`
|
||||||
/// | `redb` | "redb"
|
/// | `redb` | `"redb"`
|
||||||
pub const DATABASE_BACKEND: &str = {
|
pub const DATABASE_BACKEND: &str = {
|
||||||
cfg_if! {
|
cfg_if! {
|
||||||
if #[cfg(all(feature = "redb", not(feature = "heed")))] {
|
if #[cfg(all(feature = "redb", not(feature = "heed")))] {
|
||||||
|
@ -53,8 +53,8 @@ pub const DATABASE_BACKEND: &str = {
|
||||||
///
|
///
|
||||||
/// | Backend | Value |
|
/// | Backend | Value |
|
||||||
/// |---------|-------|
|
/// |---------|-------|
|
||||||
/// | `heed` | "data.mdb"
|
/// | `heed` | `"data.mdb"`
|
||||||
/// | `redb` | "data.redb"
|
/// | `redb` | `"data.redb"`
|
||||||
pub const DATABASE_DATA_FILENAME: &str = {
|
pub const DATABASE_DATA_FILENAME: &str = {
|
||||||
cfg_if! {
|
cfg_if! {
|
||||||
if #[cfg(all(feature = "redb", not(feature = "heed")))] {
|
if #[cfg(all(feature = "redb", not(feature = "heed")))] {
|
||||||
|
@ -69,8 +69,8 @@ pub const DATABASE_DATA_FILENAME: &str = {
|
||||||
///
|
///
|
||||||
/// | Backend | Value |
|
/// | Backend | Value |
|
||||||
/// |---------|-------|
|
/// |---------|-------|
|
||||||
/// | `heed` | Some("lock.mdb")
|
/// | `heed` | `Some("lock.mdb")`
|
||||||
/// | `redb` | None (redb doesn't use a file lock)
|
/// | `redb` | `None` (redb doesn't use a file lock)
|
||||||
pub const DATABASE_LOCK_FILENAME: Option<&str> = {
|
pub const DATABASE_LOCK_FILENAME: Option<&str> = {
|
||||||
cfg_if! {
|
cfg_if! {
|
||||||
if #[cfg(all(feature = "redb", not(feature = "heed")))] {
|
if #[cfg(all(feature = "redb", not(feature = "heed")))] {
|
||||||
|
|
|
@ -1,33 +1,38 @@
|
||||||
//! Abstracted database; `trait DatabaseRo` & `trait DatabaseRw`.
|
//! Abstracted database table operations; `trait DatabaseRo` & `trait DatabaseRw`.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::{
|
use std::ops::RangeBounds;
|
||||||
borrow::{Borrow, Cow},
|
|
||||||
fmt::Debug,
|
|
||||||
ops::{Deref, RangeBounds},
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{error::RuntimeError, table::Table};
|
||||||
error::RuntimeError,
|
|
||||||
table::Table,
|
|
||||||
transaction::{TxRo, TxRw},
|
|
||||||
};
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- DatabaseIter
|
//---------------------------------------------------------------------------------------------------- DatabaseIter
|
||||||
|
/// Generic post-fix documentation for `DatabaseIter` methods.
|
||||||
|
macro_rules! doc_iter {
|
||||||
|
() => {
|
||||||
|
r"Although the returned iterator itself is tied to the lifetime
|
||||||
|
of `&self`, the returned values from the iterator are _owned_.
|
||||||
|
|
||||||
|
# Errors
|
||||||
|
The construction of the iterator itself may error.
|
||||||
|
|
||||||
|
Each iteration of the iterator has the potential to error as well."
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
/// Database (key-value store) read-only iteration abstraction.
|
/// Database (key-value store) read-only iteration abstraction.
|
||||||
///
|
///
|
||||||
/// These are read-only iteration-related operations that
|
/// These are read-only iteration-related operations that
|
||||||
/// can only be called from [`DatabaseRo`] objects.
|
/// can only be called from [`DatabaseRo`] objects.
|
||||||
///
|
///
|
||||||
/// # Hack
|
/// # Hack
|
||||||
/// This is a HACK to get around the fact our read/write tables
|
/// This is a HACK to get around the fact [`DatabaseRw`] tables
|
||||||
/// cannot safely return values returning lifetimes, as such,
|
/// cannot safely return values returning lifetimes, as such,
|
||||||
/// only read-only tables implement this trait.
|
/// only read-only tables implement this trait.
|
||||||
///
|
///
|
||||||
/// - <https://github.com/Cuprate/cuprate/pull/102#discussion_r1548695610>
|
/// - <https://github.com/Cuprate/cuprate/pull/102#discussion_r1548695610>
|
||||||
/// - <https://github.com/Cuprate/cuprate/pull/104>
|
/// - <https://github.com/Cuprate/cuprate/pull/104>
|
||||||
pub trait DatabaseIter<T: Table> {
|
pub trait DatabaseIter<T: Table> {
|
||||||
/// Get an iterator of value's corresponding to a range of keys.
|
/// Get an [`Iterator`] of value's corresponding to a range of keys.
|
||||||
///
|
///
|
||||||
/// For example:
|
/// For example:
|
||||||
/// ```rust,ignore
|
/// ```rust,ignore
|
||||||
|
@ -39,12 +44,7 @@ pub trait DatabaseIter<T: Table> {
|
||||||
/// Although the returned iterator itself is tied to the lifetime
|
/// Although the returned iterator itself is tied to the lifetime
|
||||||
/// of `&'a self`, the returned values from the iterator are _owned_.
|
/// of `&'a self`, the returned values from the iterator are _owned_.
|
||||||
///
|
///
|
||||||
/// # Errors
|
#[doc = doc_iter!()]
|
||||||
/// Each key in the `range` has the potential to error, for example,
|
|
||||||
/// if a particular key in the `range` does not exist,
|
|
||||||
/// [`RuntimeError::KeyNotFound`] wrapped in [`Err`] will be returned
|
|
||||||
/// from the iterator.
|
|
||||||
#[allow(clippy::iter_not_returning_iterator)]
|
|
||||||
fn get_range<'a, Range>(
|
fn get_range<'a, Range>(
|
||||||
&'a self,
|
&'a self,
|
||||||
range: Range,
|
range: Range,
|
||||||
|
@ -52,32 +52,36 @@ pub trait DatabaseIter<T: Table> {
|
||||||
where
|
where
|
||||||
Range: RangeBounds<T::Key> + 'a;
|
Range: RangeBounds<T::Key> + 'a;
|
||||||
|
|
||||||
/// TODO
|
/// Get an [`Iterator`] that returns the `(key, value)` types for this database.
|
||||||
///
|
#[doc = doc_iter!()]
|
||||||
/// # Errors
|
|
||||||
/// TODO
|
|
||||||
#[allow(clippy::iter_not_returning_iterator)]
|
#[allow(clippy::iter_not_returning_iterator)]
|
||||||
fn iter(
|
fn iter(
|
||||||
&self,
|
&self,
|
||||||
) -> Result<impl Iterator<Item = Result<(T::Key, T::Value), RuntimeError>> + '_, RuntimeError>;
|
) -> Result<impl Iterator<Item = Result<(T::Key, T::Value), RuntimeError>> + '_, RuntimeError>;
|
||||||
|
|
||||||
/// TODO
|
/// Get an [`Iterator`] that returns _only_ the `key` type for this database.
|
||||||
///
|
#[doc = doc_iter!()]
|
||||||
/// # Errors
|
|
||||||
/// TODO
|
|
||||||
fn keys(&self)
|
fn keys(&self)
|
||||||
-> Result<impl Iterator<Item = Result<T::Key, RuntimeError>> + '_, RuntimeError>;
|
-> Result<impl Iterator<Item = Result<T::Key, RuntimeError>> + '_, RuntimeError>;
|
||||||
|
|
||||||
/// TODO
|
/// Get an [`Iterator`] that returns _only_ the `value` type for this database.
|
||||||
///
|
#[doc = doc_iter!()]
|
||||||
/// # Errors
|
|
||||||
/// TODO
|
|
||||||
fn values(
|
fn values(
|
||||||
&self,
|
&self,
|
||||||
) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + '_, RuntimeError>;
|
) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + '_, RuntimeError>;
|
||||||
}
|
}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- DatabaseRo
|
//---------------------------------------------------------------------------------------------------- DatabaseRo
|
||||||
|
/// Generic post-fix documentation for `DatabaseR{o,w}` methods.
|
||||||
|
macro_rules! doc_database {
|
||||||
|
() => {
|
||||||
|
r"# Errors
|
||||||
|
This will return [`RuntimeError::KeyNotFound`] if:
|
||||||
|
- Input does not exist OR
|
||||||
|
- Database is empty"
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
/// Database (key-value store) read abstraction.
|
/// Database (key-value store) read abstraction.
|
||||||
///
|
///
|
||||||
/// This is a read-only database table,
|
/// This is a read-only database table,
|
||||||
|
@ -106,19 +110,16 @@ pub trait DatabaseIter<T: Table> {
|
||||||
/// - <https://doc.rust-lang.org/nomicon/send-and-sync.html>
|
/// - <https://doc.rust-lang.org/nomicon/send-and-sync.html>
|
||||||
pub unsafe trait DatabaseRo<T: Table> {
|
pub unsafe trait DatabaseRo<T: Table> {
|
||||||
/// Get the value corresponding to a key.
|
/// Get the value corresponding to a key.
|
||||||
///
|
#[doc = doc_database!()]
|
||||||
/// The returned value is _owned_.
|
|
||||||
///
|
|
||||||
/// # Errors
|
|
||||||
/// This will return [`RuntimeError::KeyNotFound`] wrapped in [`Err`] if `key` does not exist.
|
|
||||||
///
|
|
||||||
/// It will return other [`RuntimeError`]'s on things like IO errors as well.
|
|
||||||
fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError>;
|
fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError>;
|
||||||
|
|
||||||
/// TODO
|
/// Returns `true` if the database contains a value for the specified key.
|
||||||
///
|
///
|
||||||
/// # Errors
|
/// # Errors
|
||||||
/// TODO
|
/// Note that this will _never_ return `Err(RuntimeError::KeyNotFound)`,
|
||||||
|
/// as in that case, `Ok(false)` will be returned.
|
||||||
|
///
|
||||||
|
/// Other errors may still occur.
|
||||||
fn contains(&self, key: &T::Key) -> Result<bool, RuntimeError> {
|
fn contains(&self, key: &T::Key) -> Result<bool, RuntimeError> {
|
||||||
match self.get(key) {
|
match self.get(key) {
|
||||||
Ok(_) => Ok(true),
|
Ok(_) => Ok(true),
|
||||||
|
@ -127,28 +128,24 @@ pub unsafe trait DatabaseRo<T: Table> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// TODO
|
/// Returns the number of `(key, value)` pairs in the database.
|
||||||
///
|
///
|
||||||
/// # Errors
|
/// # Errors
|
||||||
/// TODO
|
/// This will never return [`RuntimeError::KeyNotFound`].
|
||||||
fn len(&self) -> Result<u64, RuntimeError>;
|
fn len(&self) -> Result<u64, RuntimeError>;
|
||||||
|
|
||||||
/// TODO
|
/// Returns the first `(key, value)` pair in the database.
|
||||||
///
|
#[doc = doc_database!()]
|
||||||
/// # Errors
|
|
||||||
/// TODO
|
|
||||||
fn first(&self) -> Result<(T::Key, T::Value), RuntimeError>;
|
fn first(&self) -> Result<(T::Key, T::Value), RuntimeError>;
|
||||||
|
|
||||||
/// TODO
|
/// Returns the last `(key, value)` pair in the database.
|
||||||
///
|
#[doc = doc_database!()]
|
||||||
/// # Errors
|
|
||||||
/// TODO
|
|
||||||
fn last(&self) -> Result<(T::Key, T::Value), RuntimeError>;
|
fn last(&self) -> Result<(T::Key, T::Value), RuntimeError>;
|
||||||
|
|
||||||
/// TODO
|
/// Returns `true` if the database contains no `(key, value)` pairs.
|
||||||
///
|
///
|
||||||
/// # Errors
|
/// # Errors
|
||||||
/// TODO
|
/// This can only return [`RuntimeError::Io`] on errors.
|
||||||
fn is_empty(&self) -> Result<bool, RuntimeError>;
|
fn is_empty(&self) -> Result<bool, RuntimeError>;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -161,7 +158,8 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
|
||||||
///
|
///
|
||||||
/// This will overwrite any existing key-value pairs.
|
/// This will overwrite any existing key-value pairs.
|
||||||
///
|
///
|
||||||
/// # Errors
|
#[doc = doc_database!()]
|
||||||
|
///
|
||||||
/// This will never [`RuntimeError::KeyExists`].
|
/// This will never [`RuntimeError::KeyExists`].
|
||||||
fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError>;
|
fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError>;
|
||||||
|
|
||||||
|
@ -169,8 +167,9 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
|
||||||
///
|
///
|
||||||
/// This will return `Ok(())` if the key does not exist.
|
/// This will return `Ok(())` if the key does not exist.
|
||||||
///
|
///
|
||||||
/// # Errors
|
#[doc = doc_database!()]
|
||||||
/// This will never [`RuntimeError::KeyNotFound`].
|
///
|
||||||
|
/// This will never [`RuntimeError::KeyExists`].
|
||||||
fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError>;
|
fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError>;
|
||||||
|
|
||||||
/// Delete and return a key-value pair in the database.
|
/// Delete and return a key-value pair in the database.
|
||||||
|
@ -178,8 +177,7 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
|
||||||
/// This is the same as [`DatabaseRw::delete`], however,
|
/// This is the same as [`DatabaseRw::delete`], however,
|
||||||
/// it will serialize the `T::Value` and return it.
|
/// it will serialize the `T::Value` and return it.
|
||||||
///
|
///
|
||||||
/// # Errors
|
#[doc = doc_database!()]
|
||||||
/// This will return [`RuntimeError::KeyNotFound`] wrapped in [`Err`] if `key` does not exist.
|
|
||||||
fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError>;
|
fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError>;
|
||||||
|
|
||||||
/// Fetch the value, and apply a function to it - or delete the entry.
|
/// Fetch the value, and apply a function to it - or delete the entry.
|
||||||
|
@ -193,8 +191,7 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
|
||||||
/// - If `f` returns `Some(value)`, that will be [`DatabaseRw::put`] as the new value
|
/// - If `f` returns `Some(value)`, that will be [`DatabaseRw::put`] as the new value
|
||||||
/// - If `f` returns `None`, the entry will be [`DatabaseRw::delete`]d
|
/// - If `f` returns `None`, the entry will be [`DatabaseRw::delete`]d
|
||||||
///
|
///
|
||||||
/// # Errors
|
#[doc = doc_database!()]
|
||||||
/// This will return [`RuntimeError::KeyNotFound`] wrapped in [`Err`] if `key` does not exist.
|
|
||||||
fn update<F>(&mut self, key: &T::Key, mut f: F) -> Result<(), RuntimeError>
|
fn update<F>(&mut self, key: &T::Key, mut f: F) -> Result<(), RuntimeError>
|
||||||
where
|
where
|
||||||
F: FnMut(T::Value) -> Option<T::Value>,
|
F: FnMut(T::Value) -> Option<T::Value>,
|
||||||
|
@ -207,15 +204,13 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// TODO
|
/// Removes and returns the first `(key, value)` pair in the database.
|
||||||
///
|
///
|
||||||
/// # Errors
|
#[doc = doc_database!()]
|
||||||
/// TODO
|
|
||||||
fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError>;
|
fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError>;
|
||||||
|
|
||||||
/// TODO
|
/// Removes and returns the last `(key, value)` pair in the database.
|
||||||
///
|
///
|
||||||
/// # Errors
|
#[doc = doc_database!()]
|
||||||
/// TODO
|
|
||||||
fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError>;
|
fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError>;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
//! Abstracted database environment; `trait Env`.
|
//! Abstracted database environment; `trait Env`.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::{fmt::Debug, num::NonZeroUsize, ops::Deref};
|
use std::num::NonZeroUsize;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
config::Config,
|
config::Config,
|
||||||
|
@ -9,11 +9,7 @@ use crate::{
|
||||||
error::{InitError, RuntimeError},
|
error::{InitError, RuntimeError},
|
||||||
resize::ResizeAlgorithm,
|
resize::ResizeAlgorithm,
|
||||||
table::Table,
|
table::Table,
|
||||||
tables::{
|
tables::{call_fn_on_all_tables_or_early_return, TablesIter, TablesMut},
|
||||||
call_fn_on_all_tables_or_early_return, BlockBlobs, BlockHeights, BlockInfos, KeyImages,
|
|
||||||
NumOutputs, Outputs, PrunableHashes, PrunableTxBlobs, PrunedTxBlobs, RctOutputs, Tables,
|
|
||||||
TablesIter, TablesMut, TxHeights, TxIds, TxUnlockTime,
|
|
||||||
},
|
|
||||||
transaction::{TxRo, TxRw},
|
transaction::{TxRo, TxRw},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -28,8 +24,16 @@ use crate::{
|
||||||
/// although, no invariant relies on this (yet).
|
/// although, no invariant relies on this (yet).
|
||||||
///
|
///
|
||||||
/// # Lifetimes
|
/// # Lifetimes
|
||||||
/// TODO: Explain the very sequential lifetime pipeline:
|
/// The lifetimes associated with `Env` have a sequential flow:
|
||||||
/// - `ConcreteEnv` -> `'env` -> `'tx` -> `impl DatabaseR{o,w}`
|
/// 1. `ConcreteEnv`
|
||||||
|
/// 2. `'env`
|
||||||
|
/// 3. `'tx`
|
||||||
|
/// 4. `'db`
|
||||||
|
///
|
||||||
|
/// As in:
|
||||||
|
/// - open database tables only live as long as...
|
||||||
|
/// - transactions which only live as long as the...
|
||||||
|
/// - environment ([`EnvInner`])
|
||||||
pub trait Env: Sized {
|
pub trait Env: Sized {
|
||||||
//------------------------------------------------ Constants
|
//------------------------------------------------ Constants
|
||||||
/// Does the database backend need to be manually
|
/// Does the database backend need to be manually
|
||||||
|
@ -37,7 +41,7 @@ pub trait Env: Sized {
|
||||||
///
|
///
|
||||||
/// # Invariant
|
/// # Invariant
|
||||||
/// If this is `false`, that means this [`Env`]
|
/// If this is `false`, that means this [`Env`]
|
||||||
/// can _never_ return a [`RuntimeError::ResizeNeeded`].
|
/// must _never_ return a [`RuntimeError::ResizeNeeded`].
|
||||||
///
|
///
|
||||||
/// If this is `true`, [`Env::resize_map`] & [`Env::current_map_size`]
|
/// If this is `true`, [`Env::resize_map`] & [`Env::current_map_size`]
|
||||||
/// _must_ be re-implemented, as it just panics by default.
|
/// _must_ be re-implemented, as it just panics by default.
|
||||||
|
@ -55,10 +59,10 @@ pub trait Env: Sized {
|
||||||
/// This is used as the `self` in [`EnvInner`] functions, so whatever
|
/// This is used as the `self` in [`EnvInner`] functions, so whatever
|
||||||
/// this type is, is what will be accessible from those functions.
|
/// this type is, is what will be accessible from those functions.
|
||||||
///
|
///
|
||||||
/// # Explanation (not needed for practical use)
|
// # HACK
|
||||||
/// For `heed`, this is just `heed::Env`, for `redb` this is
|
// For `heed`, this is just `heed::Env`, for `redb` this is
|
||||||
/// `(redb::Database, redb::Durability)` as each transaction
|
// `(redb::Database, redb::Durability)` as each transaction
|
||||||
/// needs the sync mode set during creation.
|
// needs the sync mode set during creation.
|
||||||
type EnvInner<'env>: EnvInner<'env, Self::TxRo<'env>, Self::TxRw<'env>>
|
type EnvInner<'env>: EnvInner<'env, Self::TxRo<'env>, Self::TxRw<'env>>
|
||||||
where
|
where
|
||||||
Self: 'env;
|
Self: 'env;
|
||||||
|
@ -100,11 +104,11 @@ pub trait Env: Sized {
|
||||||
/// I.e., after this function returns, there must be no doubts
|
/// I.e., after this function returns, there must be no doubts
|
||||||
/// that the data isn't synced yet, it _must_ be synced.
|
/// that the data isn't synced yet, it _must_ be synced.
|
||||||
///
|
///
|
||||||
/// TODO: either this invariant or `sync()` itself will most
|
// FIXME: either this invariant or `sync()` itself will most
|
||||||
/// likely be removed/changed after `SyncMode` is finalized.
|
// likely be removed/changed after `SyncMode` is finalized.
|
||||||
///
|
///
|
||||||
/// # Errors
|
/// # Errors
|
||||||
/// TODO
|
/// If there is a synchronization error, this should return an error.
|
||||||
fn sync(&self) -> Result<(), RuntimeError>;
|
fn sync(&self) -> Result<(), RuntimeError>;
|
||||||
|
|
||||||
/// Resize the database's memory map to a
|
/// Resize the database's memory map to a
|
||||||
|
@ -120,6 +124,7 @@ pub trait Env: Sized {
|
||||||
/// This function _must_ be re-implemented if [`Env::MANUAL_RESIZE`] is `true`.
|
/// This function _must_ be re-implemented if [`Env::MANUAL_RESIZE`] is `true`.
|
||||||
///
|
///
|
||||||
/// Otherwise, this function will panic with `unreachable!()`.
|
/// Otherwise, this function will panic with `unreachable!()`.
|
||||||
|
#[allow(unused_variables)]
|
||||||
fn resize_map(&self, resize_algorithm: Option<ResizeAlgorithm>) -> NonZeroUsize {
|
fn resize_map(&self, resize_algorithm: Option<ResizeAlgorithm>) -> NonZeroUsize {
|
||||||
unreachable!()
|
unreachable!()
|
||||||
}
|
}
|
||||||
|
@ -171,7 +176,26 @@ pub trait Env: Sized {
|
||||||
}
|
}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- DatabaseRo
|
//---------------------------------------------------------------------------------------------------- DatabaseRo
|
||||||
/// TODO
|
/// Document errors when opening tables in [`EnvInner`].
|
||||||
|
macro_rules! doc_table_error {
|
||||||
|
() => {
|
||||||
|
r"# Errors
|
||||||
|
This will only return [`RuntimeError::Io`] if it errors.
|
||||||
|
|
||||||
|
As all tables are created upon [`Env::open`],
|
||||||
|
this function will never error because a table doesn't exist."
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The inner [`Env`] type.
|
||||||
|
///
|
||||||
|
/// This type is created with [`Env::env_inner`] and represents
|
||||||
|
/// the type able to generate transactions and open tables.
|
||||||
|
///
|
||||||
|
/// # Locking behavior
|
||||||
|
/// As noted in `Env::env_inner`, this is a `RwLockReadGuard`
|
||||||
|
/// when using the `heed` backend, be aware of this and do
|
||||||
|
/// not hold onto an `EnvInner` for a long time.
|
||||||
pub trait EnvInner<'env, Ro, Rw>
|
pub trait EnvInner<'env, Ro, Rw>
|
||||||
where
|
where
|
||||||
Self: 'env,
|
Self: 'env,
|
||||||
|
@ -192,6 +216,9 @@ where
|
||||||
|
|
||||||
/// Open a database in read-only mode.
|
/// Open a database in read-only mode.
|
||||||
///
|
///
|
||||||
|
/// The returned value can have [`DatabaseRo`]
|
||||||
|
/// & [`DatabaseIter`] functions called on it.
|
||||||
|
///
|
||||||
/// This will open the database [`Table`]
|
/// This will open the database [`Table`]
|
||||||
/// passed as a generic to this function.
|
/// passed as a generic to this function.
|
||||||
///
|
///
|
||||||
|
@ -202,12 +229,7 @@ where
|
||||||
/// // (name, key/value type)
|
/// // (name, key/value type)
|
||||||
/// ```
|
/// ```
|
||||||
///
|
///
|
||||||
/// # Errors
|
#[doc = doc_table_error!()]
|
||||||
/// This function errors upon internal database/IO errors.
|
|
||||||
///
|
|
||||||
/// As [`Table`] is `Sealed`, and all tables are created
|
|
||||||
/// upon [`Env::open`], this function will never error because
|
|
||||||
/// a table doesn't exist.
|
|
||||||
fn open_db_ro<T: Table>(
|
fn open_db_ro<T: Table>(
|
||||||
&self,
|
&self,
|
||||||
tx_ro: &Ro,
|
tx_ro: &Ro,
|
||||||
|
@ -218,31 +240,33 @@ where
|
||||||
/// All [`DatabaseRo`] functions are also callable
|
/// All [`DatabaseRo`] functions are also callable
|
||||||
/// with the returned [`DatabaseRw`] structure.
|
/// with the returned [`DatabaseRw`] structure.
|
||||||
///
|
///
|
||||||
|
/// Note that [`DatabaseIter`] functions are _not_
|
||||||
|
/// available to [`DatabaseRw`] structures.
|
||||||
|
///
|
||||||
/// This will open the database [`Table`]
|
/// This will open the database [`Table`]
|
||||||
/// passed as a generic to this function.
|
/// passed as a generic to this function.
|
||||||
///
|
///
|
||||||
/// # Errors
|
#[doc = doc_table_error!()]
|
||||||
/// This function errors upon internal database/IO errors.
|
|
||||||
///
|
|
||||||
/// As [`Table`] is `Sealed`, and all tables are created
|
|
||||||
/// upon [`Env::open`], this function will never error because
|
|
||||||
/// a table doesn't exist.
|
|
||||||
fn open_db_rw<T: Table>(&self, tx_rw: &Rw) -> Result<impl DatabaseRw<T>, RuntimeError>;
|
fn open_db_rw<T: Table>(&self, tx_rw: &Rw) -> Result<impl DatabaseRw<T>, RuntimeError>;
|
||||||
|
|
||||||
/// TODO
|
/// Open all tables in read/iter mode.
|
||||||
///
|
///
|
||||||
/// # Errors
|
/// This calls [`EnvInner::open_db_ro`] on all database tables
|
||||||
/// TODO
|
/// and returns a structure that allows access to all tables.
|
||||||
|
///
|
||||||
|
#[doc = doc_table_error!()]
|
||||||
fn open_tables(&self, tx_ro: &Ro) -> Result<impl TablesIter, RuntimeError> {
|
fn open_tables(&self, tx_ro: &Ro) -> Result<impl TablesIter, RuntimeError> {
|
||||||
call_fn_on_all_tables_or_early_return! {
|
call_fn_on_all_tables_or_early_return! {
|
||||||
Self::open_db_ro(self, tx_ro)
|
Self::open_db_ro(self, tx_ro)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// TODO
|
/// Open all tables in read-write mode.
|
||||||
///
|
///
|
||||||
/// # Errors
|
/// This calls [`EnvInner::open_db_rw`] on all database tables
|
||||||
/// TODO
|
/// and returns a structure that allows access to all tables.
|
||||||
|
///
|
||||||
|
#[doc = doc_table_error!()]
|
||||||
fn open_tables_mut(&self, tx_rw: &Rw) -> Result<impl TablesMut, RuntimeError> {
|
fn open_tables_mut(&self, tx_rw: &Rw) -> Result<impl TablesMut, RuntimeError> {
|
||||||
call_fn_on_all_tables_or_early_return! {
|
call_fn_on_all_tables_or_early_return! {
|
||||||
Self::open_db_rw(self, tx_rw)
|
Self::open_db_rw(self, tx_rw)
|
||||||
|
@ -257,11 +281,6 @@ where
|
||||||
/// Note that this operation is tied to `tx_rw`, as such this
|
/// Note that this operation is tied to `tx_rw`, as such this
|
||||||
/// function's effects can be aborted using [`TxRw::abort`].
|
/// function's effects can be aborted using [`TxRw::abort`].
|
||||||
///
|
///
|
||||||
/// # Errors
|
#[doc = doc_table_error!()]
|
||||||
/// This function errors upon internal database/IO errors.
|
|
||||||
///
|
|
||||||
/// As [`Table`] is `Sealed`, and all tables are created
|
|
||||||
/// upon [`Env::open`], this function will never error because
|
|
||||||
/// a table doesn't exist.
|
|
||||||
fn clear_db<T: Table>(&self, tx_rw: &mut Rw) -> Result<(), RuntimeError>;
|
fn clear_db<T: Table>(&self, tx_rw: &mut Rw) -> Result<(), RuntimeError>;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,5 +1,4 @@
|
||||||
//! Database error types.
|
//! Database error types.
|
||||||
//! TODO: `InitError/RuntimeError` are maybe bad names.
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::fmt::Debug;
|
use std::fmt::Debug;
|
||||||
|
@ -42,8 +41,12 @@ pub enum InitError {
|
||||||
/// The database is currently in the process
|
/// The database is currently in the process
|
||||||
/// of shutting down and cannot respond.
|
/// of shutting down and cannot respond.
|
||||||
///
|
///
|
||||||
/// TODO: This might happen if we try to open
|
/// # Notes
|
||||||
/// while we are shutting down, `unreachable!()`?
|
/// This error can only occur with the `heed` backend when
|
||||||
|
/// the database environment is opened _right_ at the same time
|
||||||
|
/// another thread/process is closing it.
|
||||||
|
///
|
||||||
|
/// This will never occur with other backends.
|
||||||
#[error("database is shutting down")]
|
#[error("database is shutting down")]
|
||||||
ShuttingDown,
|
ShuttingDown,
|
||||||
|
|
||||||
|
|
|
@ -1,40 +1,22 @@
|
||||||
//! Database key abstraction; `trait Key`.
|
//! Database key abstraction; `trait Key`.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::{cmp::Ordering, fmt::Debug};
|
use std::cmp::Ordering;
|
||||||
|
|
||||||
use bytemuck::Pod;
|
use crate::storable::Storable;
|
||||||
|
|
||||||
use crate::storable::{self, Storable};
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Table
|
//---------------------------------------------------------------------------------------------------- Table
|
||||||
/// Database [`Table`](crate::table::Table) key metadata.
|
/// Database [`Table`](crate::table::Table) key metadata.
|
||||||
///
|
///
|
||||||
/// Purely compile time information for database table keys, supporting duplicate keys.
|
/// Purely compile time information for database table keys.
|
||||||
|
//
|
||||||
|
// FIXME: this doesn't need to exist right now but
|
||||||
|
// may be used if we implement getting values using ranges.
|
||||||
|
// <https://github.com/Cuprate/cuprate/pull/117#discussion_r1589378104>
|
||||||
pub trait Key: Storable + Sized {
|
pub trait Key: Storable + Sized {
|
||||||
/// Does this [`Key`] require multiple keys to reach a value?
|
|
||||||
///
|
|
||||||
/// # Invariant
|
|
||||||
/// - If [`Key::DUPLICATE`] is `true`, [`Key::primary_secondary`] MUST be re-implemented.
|
|
||||||
/// - If [`Key::DUPLICATE`] is `true`, [`Key::new_with_max_secondary`] MUST be re-implemented.
|
|
||||||
const DUPLICATE: bool;
|
|
||||||
|
|
||||||
/// Does this [`Key`] have a custom comparison function?
|
|
||||||
///
|
|
||||||
/// # Invariant
|
|
||||||
/// If [`Key::CUSTOM_COMPARE`] is `true`, [`Key::compare`] MUST be re-implemented.
|
|
||||||
const CUSTOM_COMPARE: bool;
|
|
||||||
|
|
||||||
/// The primary key type.
|
/// The primary key type.
|
||||||
type Primary: Storable;
|
type Primary: Storable;
|
||||||
|
|
||||||
/// Acquire [`Self::Primary`] and the secondary key.
|
|
||||||
///
|
|
||||||
/// # TODO: doc test
|
|
||||||
fn primary_secondary(self) -> (Self::Primary, u64) {
|
|
||||||
unreachable!()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Compare 2 [`Key`]'s against each other.
|
/// Compare 2 [`Key`]'s against each other.
|
||||||
///
|
///
|
||||||
/// By default, this does a straight _byte_ comparison,
|
/// By default, this does a straight _byte_ comparison,
|
||||||
|
@ -55,67 +37,17 @@ pub trait Key: Storable + Sized {
|
||||||
/// std::cmp::Ordering::Greater,
|
/// std::cmp::Ordering::Greater,
|
||||||
/// );
|
/// );
|
||||||
/// ```
|
/// ```
|
||||||
|
#[inline]
|
||||||
fn compare(left: &[u8], right: &[u8]) -> Ordering {
|
fn compare(left: &[u8], right: &[u8]) -> Ordering {
|
||||||
left.cmp(right)
|
left.cmp(right)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Create a new [`Key`] from the [`Key::Primary`] type,
|
|
||||||
/// with the secondary key type set to the maximum value.
|
|
||||||
///
|
|
||||||
/// # Invariant
|
|
||||||
/// Secondary key must be the max value of the type.
|
|
||||||
///
|
|
||||||
/// # TODO: doc test
|
|
||||||
fn new_with_max_secondary(primary: Self::Primary) -> Self {
|
|
||||||
unreachable!()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Impl
|
//---------------------------------------------------------------------------------------------------- Impl
|
||||||
/// TODO: remove after we finalize tables.
|
impl<T> Key for T
|
||||||
///
|
where
|
||||||
/// Implement `Key` on most primitive types.
|
T: Storable + Sized,
|
||||||
///
|
{
|
||||||
/// - `Key::DUPLICATE` is always `false`.
|
|
||||||
/// - `Key::CUSTOM_COMPARE` is always `false`.
|
|
||||||
macro_rules! impl_key {
|
|
||||||
(
|
|
||||||
$(
|
|
||||||
$t:ident // Key type.
|
|
||||||
),* $(,)?
|
|
||||||
) => {
|
|
||||||
$(
|
|
||||||
impl Key for $t {
|
|
||||||
const DUPLICATE: bool = false;
|
|
||||||
const CUSTOM_COMPARE: bool = false;
|
|
||||||
|
|
||||||
type Primary = $t;
|
|
||||||
}
|
|
||||||
)*
|
|
||||||
};
|
|
||||||
}
|
|
||||||
// Implement `Key` for primitives.
|
|
||||||
impl_key! {
|
|
||||||
u8,
|
|
||||||
u16,
|
|
||||||
u32,
|
|
||||||
u64,
|
|
||||||
i8,
|
|
||||||
i16,
|
|
||||||
i32,
|
|
||||||
i64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Key + Pod, const N: usize> Key for [T; N] {
|
|
||||||
const DUPLICATE: bool = false;
|
|
||||||
const CUSTOM_COMPARE: bool = false;
|
|
||||||
type Primary = Self;
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: temporary for now for `Key` bound, remove later.
|
|
||||||
impl Key for crate::types::PreRctOutputId {
|
|
||||||
const DUPLICATE: bool = false;
|
|
||||||
const CUSTOM_COMPARE: bool = false;
|
|
||||||
type Primary = Self;
|
type Primary = Self;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -13,8 +13,8 @@
|
||||||
//!
|
//!
|
||||||
//! Each layer builds on-top of the previous.
|
//! Each layer builds on-top of the previous.
|
||||||
//!
|
//!
|
||||||
//! As a user of `cuprate_database`, consider using the higher-level [`service`],
|
//! As a user of `cuprate_database`, consider using the higher-level [`service`] module,
|
||||||
//! or at the very least [`ops`] instead of interacting with the database traits directly.
|
//! or at the very least the [`ops`] module instead of interacting with the database traits directly.
|
||||||
//!
|
//!
|
||||||
//! With that said, many database traits and internals (like [`DatabaseRo::get`]) are exposed.
|
//! With that said, many database traits and internals (like [`DatabaseRo::get`]) are exposed.
|
||||||
//!
|
//!
|
||||||
|
@ -63,14 +63,10 @@
|
||||||
//! Note that `ConcreteEnv` itself is not a clonable type,
|
//! Note that `ConcreteEnv` itself is not a clonable type,
|
||||||
//! it should be wrapped in [`std::sync::Arc`].
|
//! it should be wrapped in [`std::sync::Arc`].
|
||||||
//!
|
//!
|
||||||
//! TODO: we could also expose `ConcreteDatabase` if we're
|
//! <!-- SOMEDAY: replace `ConcreteEnv` with `fn Env::open() -> impl Env`/
|
||||||
//! going to be storing any databases in structs, to lessen
|
|
||||||
//! the generic `<D: Database>` pain.
|
|
||||||
//!
|
|
||||||
//! TODO: we could replace `ConcreteEnv` with `fn Env::open() -> impl Env`/
|
|
||||||
//! and use `<E: Env>` everywhere it is stored instead. This would allow
|
//! and use `<E: Env>` everywhere it is stored instead. This would allow
|
||||||
//! generic-backed dynamic runtime selection of the database backend, i.e.
|
//! generic-backed dynamic runtime selection of the database backend, i.e.
|
||||||
//! the user can select which database backend they use.
|
//! the user can select which database backend they use. -->
|
||||||
//!
|
//!
|
||||||
//! # Feature flags
|
//! # Feature flags
|
||||||
//! The `service` module requires the `service` feature to be enabled.
|
//! The `service` module requires the `service` feature to be enabled.
|
||||||
|
@ -82,45 +78,66 @@
|
||||||
//!
|
//!
|
||||||
//! The default is `heed`.
|
//! The default is `heed`.
|
||||||
//!
|
//!
|
||||||
|
//! `tracing` is always enabled and cannot be disabled via feature-flag.
|
||||||
|
//! <!-- FIXME: tracing should be behind a feature flag -->
|
||||||
|
//!
|
||||||
//! # Invariants when not using `service`
|
//! # Invariants when not using `service`
|
||||||
//! `cuprate_database` can be used without the `service` feature enabled but
|
//! `cuprate_database` can be used without the `service` feature enabled but
|
||||||
//! there are some things that must be kept in mind when doing so:
|
//! there are some things that must be kept in mind when doing so.
|
||||||
//!
|
//!
|
||||||
//! TODO: make pretty. these will need to be updated
|
//! Failing to uphold these invariants may cause panics.
|
||||||
//! as things change and as more backends are added.
|
|
||||||
//!
|
//!
|
||||||
//! 1. Memory map resizing (must resize as needed)
|
//! 1. `LMDB` requires the user to resize the memory map resizing (see [`RuntimeError::ResizeNeeded`]
|
||||||
//! 1. Must not exceed `Config`'s maximum reader count
|
//! 1. `LMDB` has a maximum reader transaction count, currently it is set to `128`
|
||||||
//! 1. Avoid many nested transactions
|
//! 1. `LMDB` has [maximum key/value byte size](http://www.lmdb.tech/doc/group__internal.html#gac929399f5d93cef85f874b9e9b1d09e0) which must not be exceeded
|
||||||
//! 1. `heed::MdbError::BadValSize`
|
|
||||||
//! 1. `heed::Error::InvalidDatabaseTyping`
|
|
||||||
//! 1. `heed::Error::BadOpenOptions`
|
|
||||||
//! 1. Encoding/decoding into `[u8]`
|
|
||||||
//!
|
//!
|
||||||
//! # Example
|
//! # Examples
|
||||||
//! Simple usage of this crate.
|
//! The below is an example of using `cuprate_database`'s
|
||||||
|
//! lowest API, i.e. using the database directly.
|
||||||
|
//!
|
||||||
|
//! For examples of the higher-level APIs, see:
|
||||||
|
//! - [`ops`]
|
||||||
|
//! - [`service`]
|
||||||
//!
|
//!
|
||||||
//! ```rust
|
//! ```rust
|
||||||
//! use cuprate_database::{
|
//! use cuprate_database::{
|
||||||
//! config::Config,
|
|
||||||
//! ConcreteEnv,
|
//! ConcreteEnv,
|
||||||
//! Env, Key, TxRo, TxRw,
|
//! config::ConfigBuilder,
|
||||||
//! };
|
//! Env, EnvInner,
|
||||||
//! use cuprate_types::{
|
//! tables::{Tables, TablesMut},
|
||||||
//! service::{ReadRequest, WriteRequest, Response},
|
//! DatabaseRo, DatabaseRw, TxRo, TxRw,
|
||||||
//! };
|
//! };
|
||||||
//!
|
//!
|
||||||
|
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
//! // Create a configuration for the database environment.
|
//! // Create a configuration for the database environment.
|
||||||
//! let db_dir = tempfile::tempdir().unwrap();
|
//! let db_dir = tempfile::tempdir()?;
|
||||||
//! let config = Config::new(Some(db_dir.path().to_path_buf()));
|
//! let config = ConfigBuilder::new()
|
||||||
|
//! .db_directory(db_dir.path().to_path_buf())
|
||||||
|
//! .build();
|
||||||
//!
|
//!
|
||||||
//! // Initialize the database thread-pool.
|
//! // Initialize the database environment.
|
||||||
|
//! let env = ConcreteEnv::open(config)?;
|
||||||
//!
|
//!
|
||||||
//! // TODO:
|
//! // Open up a transaction + tables for writing.
|
||||||
//! // 1. let (read_handle, write_handle) = cuprate_database::service::init(config).unwrap();
|
//! let env_inner = env.env_inner();
|
||||||
//! // 2. Send write/read requests
|
//! let tx_rw = env_inner.tx_rw()?;
|
||||||
//! // 3. Use some other `Env` functions
|
//! let mut tables = env_inner.open_tables_mut(&tx_rw)?;
|
||||||
//! // 4. Shutdown
|
//!
|
||||||
|
//! // ⚠️ Write data to the tables directly.
|
||||||
|
//! // (not recommended, use `ops` or `service`).
|
||||||
|
//! const KEY_IMAGE: [u8; 32] = [88; 32];
|
||||||
|
//! tables.key_images_mut().put(&KEY_IMAGE, &())?;
|
||||||
|
//!
|
||||||
|
//! // Commit the data written.
|
||||||
|
//! drop(tables);
|
||||||
|
//! TxRw::commit(tx_rw)?;
|
||||||
|
//!
|
||||||
|
//! // Read the data, assert it is correct.
|
||||||
|
//! let tx_ro = env_inner.tx_ro()?;
|
||||||
|
//! let tables = env_inner.open_tables(&tx_ro)?;
|
||||||
|
//! let (key_image, _) = tables.key_images().first()?;
|
||||||
|
//! assert_eq!(key_image, KEY_IMAGE);
|
||||||
|
//! # Ok(()) }
|
||||||
//! ```
|
//! ```
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Lints
|
//---------------------------------------------------------------------------------------------------- Lints
|
||||||
|
@ -180,7 +197,6 @@
|
||||||
unused_comparisons,
|
unused_comparisons,
|
||||||
nonstandard_style
|
nonstandard_style
|
||||||
)]
|
)]
|
||||||
#![allow(unreachable_code, unused_variables, dead_code, unused_imports)] // TODO: remove
|
|
||||||
#![allow(
|
#![allow(
|
||||||
// FIXME: this lint affects crates outside of
|
// FIXME: this lint affects crates outside of
|
||||||
// `database/` for some reason, allow for now.
|
// `database/` for some reason, allow for now.
|
||||||
|
@ -195,8 +211,8 @@
|
||||||
// with our `Env` + `RwLock` setup.
|
// with our `Env` + `RwLock` setup.
|
||||||
clippy::significant_drop_tightening,
|
clippy::significant_drop_tightening,
|
||||||
|
|
||||||
// TODO: should be removed after all `todo!()`'s are gone.
|
// FIXME: good lint but is less clear in most cases.
|
||||||
clippy::diverging_sub_expression,
|
clippy::items_after_statements,
|
||||||
|
|
||||||
clippy::module_name_repetitions,
|
clippy::module_name_repetitions,
|
||||||
clippy::module_inception,
|
clippy::module_inception,
|
||||||
|
@ -205,7 +221,16 @@
|
||||||
)]
|
)]
|
||||||
// Allow some lints when running in debug mode.
|
// Allow some lints when running in debug mode.
|
||||||
#![cfg_attr(debug_assertions, allow(clippy::todo, clippy::multiple_crate_versions))]
|
#![cfg_attr(debug_assertions, allow(clippy::todo, clippy::multiple_crate_versions))]
|
||||||
|
// Allow some lints in tests.
|
||||||
|
#![cfg_attr(
|
||||||
|
test,
|
||||||
|
allow(
|
||||||
|
clippy::cognitive_complexity,
|
||||||
|
clippy::needless_pass_by_value,
|
||||||
|
clippy::cast_possible_truncation,
|
||||||
|
clippy::too_many_lines
|
||||||
|
)
|
||||||
|
)]
|
||||||
// Only allow building 64-bit targets.
|
// Only allow building 64-bit targets.
|
||||||
//
|
//
|
||||||
// This allows us to assume 64-bit
|
// This allows us to assume 64-bit
|
||||||
|
@ -249,8 +274,6 @@ pub mod resize;
|
||||||
mod key;
|
mod key;
|
||||||
pub use key::Key;
|
pub use key::Key;
|
||||||
|
|
||||||
mod macros;
|
|
||||||
|
|
||||||
mod storable;
|
mod storable;
|
||||||
pub use storable::{Storable, StorableBytes, StorableVec};
|
pub use storable::{Storable, StorableBytes, StorableVec};
|
||||||
|
|
||||||
|
|
|
@ -1,17 +0,0 @@
|
||||||
//! General macros used throughout `cuprate-database`.
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Constants
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- TYPE
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- IMPL
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Trait Impl
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Tests
|
|
||||||
#[cfg(test)]
|
|
||||||
mod test {
|
|
||||||
// use super::*;
|
|
||||||
}
|
|
|
@ -1,29 +0,0 @@
|
||||||
//! Alternative blocks.
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Free Functions
|
|
||||||
/// TODO
|
|
||||||
pub fn add_alt_block() {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// TODO
|
|
||||||
pub fn get_alt_block() {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// TODO
|
|
||||||
pub fn remove_alt_block() {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// TODO
|
|
||||||
pub fn get_alt_block_count() {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// TODO
|
|
||||||
pub fn drop_alt_blocks() {
|
|
||||||
todo!()
|
|
||||||
}
|
|
|
@ -1,41 +1,23 @@
|
||||||
//! Blocks.
|
//! Blocks functions.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use bytemuck::TransparentWrapper;
|
use bytemuck::TransparentWrapper;
|
||||||
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, Scalar};
|
use monero_serai::block::Block;
|
||||||
use monero_serai::{
|
|
||||||
block::Block,
|
|
||||||
transaction::{Input, Timelock, Transaction},
|
|
||||||
};
|
|
||||||
|
|
||||||
use cuprate_helper::map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits};
|
use cuprate_helper::map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits};
|
||||||
use cuprate_types::{ExtendedBlockHeader, TransactionVerificationData, VerifiedBlockInformation};
|
use cuprate_types::{ExtendedBlockHeader, VerifiedBlockInformation};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
database::{DatabaseRo, DatabaseRw},
|
database::{DatabaseRo, DatabaseRw},
|
||||||
env::EnvInner,
|
|
||||||
error::RuntimeError,
|
error::RuntimeError,
|
||||||
ops::{
|
ops::{
|
||||||
blockchain::{chain_height, cumulative_generated_coins},
|
blockchain::{chain_height, cumulative_generated_coins},
|
||||||
key_image::{add_key_image, remove_key_image},
|
|
||||||
macros::doc_error,
|
macros::doc_error,
|
||||||
output::{
|
output::get_rct_num_outputs,
|
||||||
add_output, add_rct_output, get_rct_num_outputs, remove_output, remove_rct_output,
|
tx::{add_tx, remove_tx},
|
||||||
},
|
|
||||||
tx::{add_tx, get_num_tx, remove_tx},
|
|
||||||
},
|
|
||||||
tables::{
|
|
||||||
BlockBlobs, BlockHeights, BlockInfos, KeyImages, NumOutputs, Outputs, PrunableHashes,
|
|
||||||
PrunableTxBlobs, PrunedTxBlobs, RctOutputs, Tables, TablesMut, TxHeights, TxIds,
|
|
||||||
TxUnlockTime,
|
|
||||||
},
|
|
||||||
transaction::{TxRo, TxRw},
|
|
||||||
types::{
|
|
||||||
AmountIndex, BlockHash, BlockHeight, BlockInfo, KeyImage, Output, OutputFlags,
|
|
||||||
PreRctOutputId, RctOutput, TxHash,
|
|
||||||
},
|
},
|
||||||
|
tables::{BlockHeights, BlockInfos, Tables, TablesMut},
|
||||||
|
types::{BlockHash, BlockHeight, BlockInfo},
|
||||||
StorableVec,
|
StorableVec,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -66,9 +48,11 @@ pub fn add_block(
|
||||||
// Cast height to `u32` for storage (handled at top of function).
|
// Cast height to `u32` for storage (handled at top of function).
|
||||||
// Panic (should never happen) instead of allowing DB corruption.
|
// Panic (should never happen) instead of allowing DB corruption.
|
||||||
// <https://github.com/Cuprate/cuprate/pull/102#discussion_r1560020991>
|
// <https://github.com/Cuprate/cuprate/pull/102#discussion_r1560020991>
|
||||||
let Ok(height) = u32::try_from(block.height) else {
|
assert!(
|
||||||
panic!("block.height ({}) > u32::MAX", block.height);
|
u32::try_from(block.height).is_ok(),
|
||||||
};
|
"block.height ({}) > u32::MAX",
|
||||||
|
block.height,
|
||||||
|
);
|
||||||
|
|
||||||
let chain_height = chain_height(tables.block_heights())?;
|
let chain_height = chain_height(tables.block_heights())?;
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
|
@ -144,8 +128,11 @@ pub fn add_block(
|
||||||
//---------------------------------------------------------------------------------------------------- `pop_block`
|
//---------------------------------------------------------------------------------------------------- `pop_block`
|
||||||
/// Remove the top/latest block from the database.
|
/// Remove the top/latest block from the database.
|
||||||
///
|
///
|
||||||
/// The removed block's height and hash are returned.
|
/// The removed block's data is returned.
|
||||||
#[doc = doc_error!()]
|
#[doc = doc_error!()]
|
||||||
|
///
|
||||||
|
/// In `pop_block()`'s case, [`RuntimeError::KeyNotFound`]
|
||||||
|
/// will be returned if there are no blocks left.
|
||||||
// no inline, too big
|
// no inline, too big
|
||||||
pub fn pop_block(
|
pub fn pop_block(
|
||||||
tables: &mut impl TablesMut,
|
tables: &mut impl TablesMut,
|
||||||
|
@ -254,7 +241,12 @@ pub fn get_block_height(
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Check if a block exists in the database.
|
/// Check if a block exists in the database.
|
||||||
#[doc = doc_error!()]
|
///
|
||||||
|
/// # Errors
|
||||||
|
/// Note that this will never return `Err(RuntimeError::KeyNotFound)`,
|
||||||
|
/// as in that case, `Ok(false)` will be returned.
|
||||||
|
///
|
||||||
|
/// Other errors may still occur.
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn block_exists(
|
pub fn block_exists(
|
||||||
block_hash: &BlockHash,
|
block_hash: &BlockHash,
|
||||||
|
@ -271,16 +263,16 @@ pub fn block_exists(
|
||||||
clippy::too_many_lines
|
clippy::too_many_lines
|
||||||
)]
|
)]
|
||||||
mod test {
|
mod test {
|
||||||
use hex_literal::hex;
|
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
|
|
||||||
use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3, tx_v2_rct3};
|
use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3};
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::{
|
use crate::{
|
||||||
ops::tx::{get_tx, tx_exists},
|
ops::tx::{get_tx, tx_exists},
|
||||||
tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
|
tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
|
||||||
Env,
|
transaction::TxRw,
|
||||||
|
Env, EnvInner,
|
||||||
};
|
};
|
||||||
|
|
||||||
/// Tests all above block functions.
|
/// Tests all above block functions.
|
||||||
|
@ -292,7 +284,7 @@ mod test {
|
||||||
/// stored and retrieved is the same.
|
/// stored and retrieved is the same.
|
||||||
#[test]
|
#[test]
|
||||||
fn all_block_functions() {
|
fn all_block_functions() {
|
||||||
let (env, tmp) = tmp_concrete_env();
|
let (env, _tmp) = tmp_concrete_env();
|
||||||
let env_inner = env.env_inner();
|
let env_inner = env.env_inner();
|
||||||
assert_all_tables_are_empty(&env);
|
assert_all_tables_are_empty(&env);
|
||||||
|
|
||||||
|
@ -417,7 +409,7 @@ mod test {
|
||||||
for block_hash in block_hashes.into_iter().rev() {
|
for block_hash in block_hashes.into_iter().rev() {
|
||||||
println!("pop_block(): block_hash: {}", hex::encode(block_hash));
|
println!("pop_block(): block_hash: {}", hex::encode(block_hash));
|
||||||
|
|
||||||
let (popped_height, popped_hash, popped_block) = pop_block(&mut tables).unwrap();
|
let (_popped_height, popped_hash, _popped_block) = pop_block(&mut tables).unwrap();
|
||||||
|
|
||||||
assert_eq!(block_hash, popped_hash);
|
assert_eq!(block_hash, popped_hash);
|
||||||
|
|
||||||
|
@ -438,7 +430,7 @@ mod test {
|
||||||
#[test]
|
#[test]
|
||||||
#[should_panic(expected = "block.height (4294967296) > u32::MAX")]
|
#[should_panic(expected = "block.height (4294967296) > u32::MAX")]
|
||||||
fn block_height_gt_u32_max() {
|
fn block_height_gt_u32_max() {
|
||||||
let (env, tmp) = tmp_concrete_env();
|
let (env, _tmp) = tmp_concrete_env();
|
||||||
let env_inner = env.env_inner();
|
let env_inner = env.env_inner();
|
||||||
assert_all_tables_are_empty(&env);
|
assert_all_tables_are_empty(&env);
|
||||||
|
|
||||||
|
@ -457,7 +449,7 @@ mod test {
|
||||||
expected = "assertion `left == right` failed: block.height (123) != chain_height (1)\n left: 123\n right: 1"
|
expected = "assertion `left == right` failed: block.height (123) != chain_height (1)\n left: 123\n right: 1"
|
||||||
)]
|
)]
|
||||||
fn block_height_not_chain_height() {
|
fn block_height_not_chain_height() {
|
||||||
let (env, tmp) = tmp_concrete_env();
|
let (env, _tmp) = tmp_concrete_env();
|
||||||
let env_inner = env.env_inner();
|
let env_inner = env.env_inner();
|
||||||
assert_all_tables_are_empty(&env);
|
assert_all_tables_are_empty(&env);
|
||||||
|
|
||||||
|
|
|
@ -1,22 +1,12 @@
|
||||||
//! Blockchain.
|
//! Blockchain functions - chain height, generated coins, etc.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use monero_serai::transaction::Timelock;
|
|
||||||
|
|
||||||
use cuprate_types::VerifiedBlockInformation;
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
database::{DatabaseRo, DatabaseRw},
|
database::DatabaseRo,
|
||||||
env::EnvInner,
|
|
||||||
error::RuntimeError,
|
error::RuntimeError,
|
||||||
ops::macros::doc_error,
|
ops::macros::doc_error,
|
||||||
tables::{
|
tables::{BlockHeights, BlockInfos},
|
||||||
BlockBlobs, BlockHeights, BlockInfos, KeyImages, NumOutputs, Outputs, PrunableHashes,
|
types::BlockHeight,
|
||||||
PrunableTxBlobs, PrunedTxBlobs, RctOutputs, Tables, TablesMut, TxHeights, TxIds,
|
|
||||||
TxUnlockTime,
|
|
||||||
},
|
|
||||||
transaction::{TxRo, TxRw},
|
|
||||||
types::{BlockHash, BlockHeight, BlockInfo, KeyImage, Output, PreRctOutputId, RctOutput},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Free Functions
|
//---------------------------------------------------------------------------------------------------- Free Functions
|
||||||
|
@ -88,21 +78,18 @@ pub fn cumulative_generated_coins(
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Tests
|
//---------------------------------------------------------------------------------------------------- Tests
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
#[allow(clippy::significant_drop_tightening)]
|
|
||||||
mod test {
|
mod test {
|
||||||
use hex_literal::hex;
|
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
|
|
||||||
use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3, tx_v2_rct3};
|
use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3};
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::{
|
use crate::{
|
||||||
ops::{
|
ops::block::add_block,
|
||||||
block::add_block,
|
tables::Tables,
|
||||||
tx::{get_tx, tx_exists},
|
|
||||||
},
|
|
||||||
tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
|
tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
|
||||||
Env,
|
transaction::TxRw,
|
||||||
|
Env, EnvInner,
|
||||||
};
|
};
|
||||||
|
|
||||||
/// Tests all above functions.
|
/// Tests all above functions.
|
||||||
|
@ -113,9 +100,8 @@ mod test {
|
||||||
/// It simply tests if the proper tables are mutated, and if the data
|
/// It simply tests if the proper tables are mutated, and if the data
|
||||||
/// stored and retrieved is the same.
|
/// stored and retrieved is the same.
|
||||||
#[test]
|
#[test]
|
||||||
#[allow(clippy::cognitive_complexity, clippy::cast_possible_truncation)]
|
|
||||||
fn all_blockchain_functions() {
|
fn all_blockchain_functions() {
|
||||||
let (env, tmp) = tmp_concrete_env();
|
let (env, _tmp) = tmp_concrete_env();
|
||||||
let env_inner = env.env_inner();
|
let env_inner = env.env_inner();
|
||||||
assert_all_tables_are_empty(&env);
|
assert_all_tables_are_empty(&env);
|
||||||
|
|
||||||
|
|
|
@ -1,24 +1,12 @@
|
||||||
//! Spent keys.
|
//! Key image functions.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use monero_serai::transaction::{Timelock, Transaction};
|
|
||||||
|
|
||||||
use cuprate_types::{OutputOnChain, VerifiedBlockInformation};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
database::{DatabaseIter, DatabaseRo, DatabaseRw},
|
database::{DatabaseRo, DatabaseRw},
|
||||||
env::EnvInner,
|
|
||||||
error::RuntimeError,
|
error::RuntimeError,
|
||||||
ops::macros::{doc_add_block_inner_invariant, doc_error},
|
ops::macros::{doc_add_block_inner_invariant, doc_error},
|
||||||
tables::{
|
tables::KeyImages,
|
||||||
BlockBlobs, BlockHeights, BlockInfos, KeyImages, NumOutputs, Outputs, PrunableHashes,
|
types::KeyImage,
|
||||||
PrunableTxBlobs, PrunedTxBlobs, RctOutputs, Tables, TablesMut, TxHeights, TxIds,
|
|
||||||
TxUnlockTime,
|
|
||||||
},
|
|
||||||
transaction::{TxRo, TxRw},
|
|
||||||
types::{
|
|
||||||
BlockHash, BlockHeight, BlockInfo, KeyImage, Output, PreRctOutputId, RctOutput, TxHash,
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Key image functions
|
//---------------------------------------------------------------------------------------------------- Key image functions
|
||||||
|
@ -56,16 +44,15 @@ pub fn key_image_exists(
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Tests
|
//---------------------------------------------------------------------------------------------------- Tests
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
#[allow(clippy::significant_drop_tightening, clippy::cognitive_complexity)]
|
|
||||||
mod test {
|
mod test {
|
||||||
use hex_literal::hex;
|
use hex_literal::hex;
|
||||||
use pretty_assertions::assert_eq;
|
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::{
|
use crate::{
|
||||||
ops::tx::{get_tx, tx_exists},
|
tables::{Tables, TablesMut},
|
||||||
tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
|
tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
|
||||||
Env,
|
transaction::TxRw,
|
||||||
|
Env, EnvInner,
|
||||||
};
|
};
|
||||||
|
|
||||||
/// Tests all above key-image functions.
|
/// Tests all above key-image functions.
|
||||||
|
@ -77,7 +64,7 @@ mod test {
|
||||||
/// stored and retrieved is the same.
|
/// stored and retrieved is the same.
|
||||||
#[test]
|
#[test]
|
||||||
fn all_key_image_functions() {
|
fn all_key_image_functions() {
|
||||||
let (env, tmp) = tmp_concrete_env();
|
let (env, _tmp) = tmp_concrete_env();
|
||||||
let env_inner = env.env_inner();
|
let env_inner = env.env_inner();
|
||||||
assert_all_tables_are_empty(&env);
|
assert_all_tables_are_empty(&env);
|
||||||
|
|
||||||
|
|
|
@ -8,7 +8,7 @@
|
||||||
macro_rules! doc_error {
|
macro_rules! doc_error {
|
||||||
() => {
|
() => {
|
||||||
r#"# Errors
|
r#"# Errors
|
||||||
This function returns [`RuntimeError::KeyNotFound`] if the input doesn't exist or other `RuntimeError`'s on database errors."#
|
This function returns [`RuntimeError::KeyNotFound`] if the input (if applicable) doesn't exist or other `RuntimeError`'s on database errors."#
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
pub(super) use doc_error;
|
pub(super) use doc_error;
|
||||||
|
|
|
@ -31,16 +31,75 @@
|
||||||
//! # Sub-functions
|
//! # Sub-functions
|
||||||
//! The main functions within this module are mostly within the [`block`] module.
|
//! The main functions within this module are mostly within the [`block`] module.
|
||||||
//!
|
//!
|
||||||
//! Practically speaking, you should only be using 2 functions:
|
//! Practically speaking, you should only be using 2 functions for mutation:
|
||||||
//! - [`add_block`](block::add_block)
|
//! - [`add_block`](block::add_block)
|
||||||
//! - [`pop_block`](block::pop_block)
|
//! - [`pop_block`](block::pop_block)
|
||||||
//!
|
//!
|
||||||
//! The `block` functions are "parent" functions, calling other
|
//! The `block` functions are "parent" functions, calling other
|
||||||
//! sub-functions such as [`add_output()`](output::add_output). `add_output()`
|
//! sub-functions such as [`add_output()`](output::add_output).
|
||||||
//! itself only modifies output-related tables, while the `block` "parent" functions
|
//!
|
||||||
//! (like `add_block` and `pop_block`) modify _everything_ that is required.
|
//! `add_output()` itself only modifies output-related tables, while the `block` "parent"
|
||||||
|
//! functions (like `add_block` and `pop_block`) modify all tables required.
|
||||||
|
//!
|
||||||
|
//! `add_block()` makes sure all data related to the input is mutated, while
|
||||||
|
//! this sub-function _do not_, it specifically mutates _particular_ tables.
|
||||||
|
//!
|
||||||
|
//! When calling this sub-functions, ensure that either:
|
||||||
|
//! 1. This effect (incomplete database mutation) is what is desired, or that...
|
||||||
|
//! 2. ...the other tables will also be mutated to a correct state
|
||||||
|
//!
|
||||||
|
//! # Example
|
||||||
|
//! Simple usage of `ops`.
|
||||||
|
//!
|
||||||
|
//! ```rust
|
||||||
|
//! use hex_literal::hex;
|
||||||
|
//!
|
||||||
|
//! use cuprate_test_utils::data::block_v16_tx0;
|
||||||
|
//!
|
||||||
|
//! use cuprate_database::{
|
||||||
|
//! ConcreteEnv,
|
||||||
|
//! config::ConfigBuilder,
|
||||||
|
//! Env, EnvInner,
|
||||||
|
//! tables::{Tables, TablesMut},
|
||||||
|
//! DatabaseRo, DatabaseRw, TxRo, TxRw,
|
||||||
|
//! ops::block::{add_block, pop_block},
|
||||||
|
//! };
|
||||||
|
//!
|
||||||
|
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
//! // Create a configuration for the database environment.
|
||||||
|
//! let db_dir = tempfile::tempdir()?;
|
||||||
|
//! let config = ConfigBuilder::new()
|
||||||
|
//! .db_directory(db_dir.path().to_path_buf())
|
||||||
|
//! .build();
|
||||||
|
//!
|
||||||
|
//! // Initialize the database environment.
|
||||||
|
//! let env = ConcreteEnv::open(config)?;
|
||||||
|
//!
|
||||||
|
//! // Open up a transaction + tables for writing.
|
||||||
|
//! let env_inner = env.env_inner();
|
||||||
|
//! let tx_rw = env_inner.tx_rw()?;
|
||||||
|
//! let mut tables = env_inner.open_tables_mut(&tx_rw)?;
|
||||||
|
//!
|
||||||
|
//! // Write a block to the database.
|
||||||
|
//! let mut block = block_v16_tx0().clone();
|
||||||
|
//! # block.height = 0;
|
||||||
|
//! add_block(&block, &mut tables)?;
|
||||||
|
//!
|
||||||
|
//! // Commit the data written.
|
||||||
|
//! drop(tables);
|
||||||
|
//! TxRw::commit(tx_rw)?;
|
||||||
|
//!
|
||||||
|
//! // Read the data, assert it is correct.
|
||||||
|
//! let tx_rw = env_inner.tx_rw()?;
|
||||||
|
//! let mut tables = env_inner.open_tables_mut(&tx_rw)?;
|
||||||
|
//! let (height, hash, serai_block) = pop_block(&mut tables)?;
|
||||||
|
//!
|
||||||
|
//! assert_eq!(height, 0);
|
||||||
|
//! assert_eq!(serai_block, block.block);
|
||||||
|
//! assert_eq!(hash, hex!("43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428"));
|
||||||
|
//! # Ok(()) }
|
||||||
|
//! ```
|
||||||
|
|
||||||
// pub mod alt_block; // TODO: is this needed?
|
|
||||||
pub mod block;
|
pub mod block;
|
||||||
pub mod blockchain;
|
pub mod blockchain;
|
||||||
pub mod key_image;
|
pub mod key_image;
|
||||||
|
|
|
@ -1,30 +1,18 @@
|
||||||
//! Outputs.
|
//! Output functions.
|
||||||
|
|
||||||
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
|
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, Scalar};
|
||||||
|
use monero_serai::{transaction::Timelock, H};
|
||||||
|
|
||||||
use cuprate_helper::map::u64_to_timelock;
|
use cuprate_helper::map::u64_to_timelock;
|
||||||
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, Scalar};
|
use cuprate_types::OutputOnChain;
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
|
||||||
use monero_serai::{
|
|
||||||
transaction::{Timelock, Transaction},
|
|
||||||
H,
|
|
||||||
};
|
|
||||||
|
|
||||||
use cuprate_types::{OutputOnChain, VerifiedBlockInformation};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
database::{DatabaseIter, DatabaseRo, DatabaseRw},
|
database::{DatabaseRo, DatabaseRw},
|
||||||
env::EnvInner,
|
|
||||||
error::RuntimeError,
|
error::RuntimeError,
|
||||||
ops::macros::{doc_add_block_inner_invariant, doc_error},
|
ops::macros::{doc_add_block_inner_invariant, doc_error},
|
||||||
tables::{
|
tables::{Outputs, RctOutputs, Tables, TablesMut, TxUnlockTime},
|
||||||
BlockBlobs, BlockHeights, BlockInfos, KeyImages, NumOutputs, Outputs, PrunableHashes,
|
types::{Amount, AmountIndex, Output, OutputFlags, PreRctOutputId, RctOutput},
|
||||||
PrunableTxBlobs, PrunedTxBlobs, RctOutputs, Tables, TablesMut, TxHeights, TxIds,
|
|
||||||
TxUnlockTime,
|
|
||||||
},
|
|
||||||
transaction::{TxRo, TxRw},
|
|
||||||
types::{
|
|
||||||
Amount, AmountIndex, BlockHash, BlockHeight, BlockInfo, KeyImage, Output, OutputFlags,
|
|
||||||
PreRctOutputId, RctOutput, TxHash,
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Pre-RCT Outputs
|
//---------------------------------------------------------------------------------------------------- Pre-RCT Outputs
|
||||||
|
@ -257,15 +245,15 @@ pub fn id_to_output_on_chain(
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Tests
|
//---------------------------------------------------------------------------------------------------- Tests
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
#[allow(clippy::significant_drop_tightening, clippy::cognitive_complexity)]
|
|
||||||
mod test {
|
mod test {
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::{
|
use crate::{
|
||||||
|
tables::{Tables, TablesMut},
|
||||||
tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
|
tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
|
||||||
types::OutputFlags,
|
types::OutputFlags,
|
||||||
Env,
|
Env, EnvInner,
|
||||||
};
|
};
|
||||||
use cuprate_test_utils::data::{tx_v1_sig2, tx_v2_rct3};
|
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
|
|
||||||
/// Dummy `Output`.
|
/// Dummy `Output`.
|
||||||
|
@ -297,7 +285,7 @@ mod test {
|
||||||
/// stored and retrieved is the same.
|
/// stored and retrieved is the same.
|
||||||
#[test]
|
#[test]
|
||||||
fn all_output_functions() {
|
fn all_output_functions() {
|
||||||
let (env, tmp) = tmp_concrete_env();
|
let (env, _tmp) = tmp_concrete_env();
|
||||||
let env_inner = env.env_inner();
|
let env_inner = env.env_inner();
|
||||||
assert_all_tables_are_empty(&env);
|
assert_all_tables_are_empty(&env);
|
||||||
|
|
||||||
|
|
|
@ -1,57 +1,39 @@
|
||||||
//! Properties.
|
//! Database properties functions - version, pruning, etc.
|
||||||
|
//!
|
||||||
|
//! SOMEDAY: the database `properties` table is not yet implemented.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use monero_pruning::PruningSeed;
|
use monero_pruning::PruningSeed;
|
||||||
use monero_serai::transaction::{Timelock, Transaction};
|
|
||||||
|
|
||||||
use cuprate_types::{OutputOnChain, VerifiedBlockInformation};
|
use crate::{error::RuntimeError, ops::macros::doc_error};
|
||||||
|
|
||||||
use crate::{
|
|
||||||
database::{DatabaseIter, DatabaseRo, DatabaseRw},
|
|
||||||
env::EnvInner,
|
|
||||||
error::RuntimeError,
|
|
||||||
ops::macros::{doc_add_block_inner_invariant, doc_error},
|
|
||||||
tables::{
|
|
||||||
BlockBlobs, BlockHeights, BlockInfos, KeyImages, NumOutputs, Outputs, PrunableHashes,
|
|
||||||
PrunableTxBlobs, PrunedTxBlobs, RctOutputs, Tables, TablesMut, TxHeights, TxIds,
|
|
||||||
TxUnlockTime,
|
|
||||||
},
|
|
||||||
transaction::{TxRo, TxRw},
|
|
||||||
types::{
|
|
||||||
BlockHash, BlockHeight, BlockInfo, KeyImage, Output, PreRctOutputId, RctOutput, TxHash,
|
|
||||||
TxId,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
//---------------------------------------------------------------------------------------------------- Free Functions
|
//---------------------------------------------------------------------------------------------------- Free Functions
|
||||||
/// TODO
|
/// SOMEDAY
|
||||||
///
|
///
|
||||||
#[doc = doc_add_block_inner_invariant!()]
|
|
||||||
#[doc = doc_error!()]
|
#[doc = doc_error!()]
|
||||||
///
|
///
|
||||||
/// # Example
|
/// # Example
|
||||||
/// ```rust
|
/// ```rust
|
||||||
/// # use cuprate_database::{*, tables::*, ops::block::*, ops::tx::*};
|
/// # use cuprate_database::{*, tables::*, ops::block::*, ops::tx::*};
|
||||||
/// // TODO
|
/// // SOMEDAY
|
||||||
/// ```
|
/// ```
|
||||||
#[inline]
|
#[inline]
|
||||||
pub const fn get_blockchain_pruning_seed() -> Result<PruningSeed, RuntimeError> {
|
pub const fn get_blockchain_pruning_seed() -> Result<PruningSeed, RuntimeError> {
|
||||||
// TODO: impl pruning.
|
// SOMEDAY: impl pruning.
|
||||||
// We need a DB properties table.
|
// We need a DB properties table.
|
||||||
Ok(PruningSeed::NotPruned)
|
Ok(PruningSeed::NotPruned)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// TODO
|
/// SOMEDAY
|
||||||
///
|
///
|
||||||
#[doc = doc_add_block_inner_invariant!()]
|
|
||||||
#[doc = doc_error!()]
|
#[doc = doc_error!()]
|
||||||
///
|
///
|
||||||
/// # Example
|
/// # Example
|
||||||
/// ```rust
|
/// ```rust
|
||||||
/// # use cuprate_database::{*, tables::*, ops::block::*, ops::tx::*};
|
/// # use cuprate_database::{*, tables::*, ops::block::*, ops::tx::*};
|
||||||
/// // TODO
|
/// // SOMEDAY
|
||||||
/// ```
|
/// ```
|
||||||
#[inline]
|
#[inline]
|
||||||
pub const fn db_version() -> Result<u64, RuntimeError> {
|
pub const fn db_version() -> Result<u64, RuntimeError> {
|
||||||
// TODO: We need a DB properties table.
|
// SOMEDAY: We need a DB properties table.
|
||||||
Ok(crate::constants::DATABASE_VERSION)
|
Ok(crate::constants::DATABASE_VERSION)
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,40 +1,25 @@
|
||||||
//! Transactions.
|
//! Transaction functions.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use bytemuck::TransparentWrapper;
|
use bytemuck::TransparentWrapper;
|
||||||
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, Scalar};
|
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, Scalar};
|
||||||
use monero_serai::transaction::{Input, Timelock, Transaction};
|
use monero_serai::transaction::{Input, Timelock, Transaction};
|
||||||
|
|
||||||
use cuprate_types::{OutputOnChain, TransactionVerificationData, VerifiedBlockInformation};
|
|
||||||
use monero_pruning::PruningSeed;
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
database::{DatabaseIter, DatabaseRo, DatabaseRw},
|
database::{DatabaseRo, DatabaseRw},
|
||||||
env::EnvInner,
|
|
||||||
error::RuntimeError,
|
error::RuntimeError,
|
||||||
ops::{
|
ops::{
|
||||||
blockchain::chain_height,
|
key_image::{add_key_image, remove_key_image},
|
||||||
macros::{doc_add_block_inner_invariant, doc_error},
|
macros::{doc_add_block_inner_invariant, doc_error},
|
||||||
property::get_blockchain_pruning_seed,
|
output::{
|
||||||
},
|
add_output, add_rct_output, get_rct_num_outputs, remove_output, remove_rct_output,
|
||||||
tables::{
|
},
|
||||||
BlockBlobs, BlockHeights, BlockInfos, KeyImages, NumOutputs, Outputs, PrunableHashes,
|
|
||||||
PrunableTxBlobs, PrunedTxBlobs, RctOutputs, Tables, TablesMut, TxBlobs, TxHeights, TxIds,
|
|
||||||
TxUnlockTime,
|
|
||||||
},
|
|
||||||
transaction::{TxRo, TxRw},
|
|
||||||
types::{
|
|
||||||
AmountIndices, BlockHash, BlockHeight, BlockInfo, KeyImage, Output, OutputFlags,
|
|
||||||
PreRctOutputId, RctOutput, TxBlob, TxHash, TxId,
|
|
||||||
},
|
},
|
||||||
|
tables::{TablesMut, TxBlobs, TxIds},
|
||||||
|
types::{BlockHeight, Output, OutputFlags, PreRctOutputId, RctOutput, TxHash, TxId},
|
||||||
StorableVec,
|
StorableVec,
|
||||||
};
|
};
|
||||||
|
|
||||||
use super::{
|
|
||||||
key_image::{add_key_image, remove_key_image},
|
|
||||||
output::{add_output, add_rct_output, get_rct_num_outputs, remove_output, remove_rct_output},
|
|
||||||
};
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Private
|
//---------------------------------------------------------------------------------------------------- Private
|
||||||
/// Add a [`Transaction`] (and related data) to the database.
|
/// Add a [`Transaction`] (and related data) to the database.
|
||||||
///
|
///
|
||||||
|
@ -196,7 +181,7 @@ pub fn add_tx(
|
||||||
|
|
||||||
/// Remove a transaction from the database with its [`TxHash`].
|
/// Remove a transaction from the database with its [`TxHash`].
|
||||||
///
|
///
|
||||||
/// This returns the [`TxId`] and [`TxBlob`] of the removed transaction.
|
/// This returns the [`TxId`] and [`TxBlob`](crate::types::TxBlob) of the removed transaction.
|
||||||
///
|
///
|
||||||
#[doc = doc_add_block_inner_invariant!()]
|
#[doc = doc_add_block_inner_invariant!()]
|
||||||
///
|
///
|
||||||
|
@ -256,7 +241,7 @@ pub fn remove_tx(
|
||||||
|
|
||||||
//------------------------------------------------------ Outputs
|
//------------------------------------------------------ Outputs
|
||||||
// Remove each output in the transaction.
|
// Remove each output in the transaction.
|
||||||
for (i, output) in tx.prefix.outputs.iter().enumerate() {
|
for output in &tx.prefix.outputs {
|
||||||
// Outputs with clear amounts.
|
// Outputs with clear amounts.
|
||||||
if let Some(amount) = output.amount {
|
if let Some(amount) = output.amount {
|
||||||
// RingCT miner outputs.
|
// RingCT miner outputs.
|
||||||
|
@ -338,12 +323,13 @@ pub fn tx_exists(
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Tests
|
//---------------------------------------------------------------------------------------------------- Tests
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
#[allow(clippy::significant_drop_tightening)]
|
|
||||||
mod test {
|
mod test {
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::{
|
use crate::{
|
||||||
|
tables::Tables,
|
||||||
tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
|
tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
|
||||||
Env,
|
transaction::TxRw,
|
||||||
|
Env, EnvInner,
|
||||||
};
|
};
|
||||||
use cuprate_test_utils::data::{tx_v1_sig0, tx_v1_sig2, tx_v2_rct3};
|
use cuprate_test_utils::data::{tx_v1_sig0, tx_v1_sig2, tx_v2_rct3};
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
|
@ -351,7 +337,7 @@ mod test {
|
||||||
/// Tests all above tx functions when only inputting `Transaction` data (no Block).
|
/// Tests all above tx functions when only inputting `Transaction` data (no Block).
|
||||||
#[test]
|
#[test]
|
||||||
fn all_tx_functions() {
|
fn all_tx_functions() {
|
||||||
let (env, tmp) = tmp_concrete_env();
|
let (env, _tmp) = tmp_concrete_env();
|
||||||
let env_inner = env.env_inner();
|
let env_inner = env.env_inner();
|
||||||
assert_all_tables_are_empty(&env);
|
assert_all_tables_are_empty(&env);
|
||||||
|
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
//! Database memory map resizing algorithms.
|
//! Database memory map resizing algorithms.
|
||||||
//!
|
//!
|
||||||
//! This modules contains [`ResizeAlgorithm`] which determines how the
|
//! This modules contains [`ResizeAlgorithm`] which determines how the
|
||||||
//! [`ConcreteEnv`](crate::ConcreteEnv) resizes it's memory map when needing more space.
|
//! [`ConcreteEnv`](crate::ConcreteEnv) resizes its memory map when needing more space.
|
||||||
//! This value is in [`Config`](crate::config::Config) and can be selected at runtime.
|
//! This value is in [`Config`](crate::config::Config) and can be selected at runtime.
|
||||||
//!
|
//!
|
||||||
//! Although, it is only used by `ConcreteEnv` if [`Env::MANUAL_RESIZE`](crate::env::Env::MANUAL_RESIZE) is `true`.
|
//! Although, it is only used by `ConcreteEnv` if [`Env::MANUAL_RESIZE`](crate::env::Env::MANUAL_RESIZE) is `true`.
|
||||||
|
@ -27,12 +27,12 @@ use std::{num::NonZeroUsize, sync::OnceLock};
|
||||||
/// The function/algorithm used by the
|
/// The function/algorithm used by the
|
||||||
/// database when resizing the memory map.
|
/// database when resizing the memory map.
|
||||||
///
|
///
|
||||||
/// # TODO
|
// # SOMEDAY
|
||||||
/// We could test around with different algorithms.
|
// We could test around with different algorithms.
|
||||||
/// Calling `heed::Env::resize` is surprisingly fast,
|
// Calling `heed::Env::resize` is surprisingly fast,
|
||||||
/// around `0.0000082s` on my machine. We could probably
|
// around `0.0000082s` on my machine. We could probably
|
||||||
/// get away with smaller and more frequent resizes.
|
// get away with smaller and more frequent resizes.
|
||||||
/// **With the caveat being we are taking a `WriteGuard` to a `RwLock`.**
|
// **With the caveat being we are taking a `WriteGuard` to a `RwLock`.**
|
||||||
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd)]
|
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd)]
|
||||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||||
pub enum ResizeAlgorithm {
|
pub enum ResizeAlgorithm {
|
||||||
|
|
|
@ -6,7 +6,7 @@ use std::sync::Arc;
|
||||||
use crate::{
|
use crate::{
|
||||||
config::Config,
|
config::Config,
|
||||||
error::InitError,
|
error::InitError,
|
||||||
service::{write::DatabaseWriter, DatabaseReadHandle, DatabaseWriteHandle},
|
service::{DatabaseReadHandle, DatabaseWriteHandle},
|
||||||
ConcreteEnv, Env,
|
ConcreteEnv, Env,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -20,21 +20,11 @@ use crate::{
|
||||||
///
|
///
|
||||||
/// # Errors
|
/// # Errors
|
||||||
/// This will forward the error if [`Env::open`] failed.
|
/// This will forward the error if [`Env::open`] failed.
|
||||||
//
|
|
||||||
// INVARIANT:
|
|
||||||
// `cuprate_database` depends on the fact that this is the only
|
|
||||||
// function that hands out the handles. After that, they can be
|
|
||||||
// cloned, however they must eventually be dropped and shouldn't
|
|
||||||
// be leaked.
|
|
||||||
//
|
|
||||||
// As the reader thread-pool and writer thread both rely on the
|
|
||||||
// disconnection (drop) of these channels for shutdown behavior,
|
|
||||||
// leaking these handles could cause data to not get flushed to disk.
|
|
||||||
pub fn init(config: Config) -> Result<(DatabaseReadHandle, DatabaseWriteHandle), InitError> {
|
pub fn init(config: Config) -> Result<(DatabaseReadHandle, DatabaseWriteHandle), InitError> {
|
||||||
let reader_threads = config.reader_threads;
|
let reader_threads = config.reader_threads;
|
||||||
|
|
||||||
// Initialize the database itself.
|
// Initialize the database itself.
|
||||||
let db: Arc<ConcreteEnv> = Arc::new(ConcreteEnv::open(config)?);
|
let db = Arc::new(ConcreteEnv::open(config)?);
|
||||||
|
|
||||||
// Spawn the Reader thread pool and Writer.
|
// Spawn the Reader thread pool and Writer.
|
||||||
let readers = DatabaseReadHandle::init(&db, reader_threads);
|
let readers = DatabaseReadHandle::init(&db, reader_threads);
|
||||||
|
|
|
@ -50,13 +50,69 @@
|
||||||
//! This channel can be `.await`ed upon to (eventually) receive
|
//! This channel can be `.await`ed upon to (eventually) receive
|
||||||
//! the corresponding `Response` to your `Request`.
|
//! the corresponding `Response` to your `Request`.
|
||||||
//!
|
//!
|
||||||
//!
|
|
||||||
//!
|
|
||||||
//! [req_r]: cuprate_types::service::ReadRequest
|
//! [req_r]: cuprate_types::service::ReadRequest
|
||||||
//!
|
//!
|
||||||
//! [req_w]: cuprate_types::service::WriteRequest
|
//! [req_w]: cuprate_types::service::WriteRequest
|
||||||
//!
|
//!
|
||||||
//! [resp]: cuprate_types::service::Response
|
//! [resp]: cuprate_types::service::Response
|
||||||
|
//!
|
||||||
|
//! # Example
|
||||||
|
//! Simple usage of `service`.
|
||||||
|
//!
|
||||||
|
//! ```rust
|
||||||
|
//! use hex_literal::hex;
|
||||||
|
//! use tower::{Service, ServiceExt};
|
||||||
|
//!
|
||||||
|
//! use cuprate_types::service::{ReadRequest, WriteRequest, Response};
|
||||||
|
//! use cuprate_test_utils::data::block_v16_tx0;
|
||||||
|
//!
|
||||||
|
//! use cuprate_database::{ConcreteEnv, config::ConfigBuilder, Env};
|
||||||
|
//!
|
||||||
|
//! # #[tokio::main]
|
||||||
|
//! # async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
//! // Create a configuration for the database environment.
|
||||||
|
//! let db_dir = tempfile::tempdir()?;
|
||||||
|
//! let config = ConfigBuilder::new()
|
||||||
|
//! .db_directory(db_dir.path().to_path_buf())
|
||||||
|
//! .build();
|
||||||
|
//!
|
||||||
|
//! // Initialize the database thread-pool.
|
||||||
|
//! let (mut read_handle, mut write_handle) = cuprate_database::service::init(config)?;
|
||||||
|
//!
|
||||||
|
//! // Prepare a request to write block.
|
||||||
|
//! let mut block = block_v16_tx0().clone();
|
||||||
|
//! # block.height = 0 as u64; // must be 0th height or panic in `add_block()`
|
||||||
|
//! let request = WriteRequest::WriteBlock(block);
|
||||||
|
//!
|
||||||
|
//! // Send the request.
|
||||||
|
//! // We receive back an `async` channel that will
|
||||||
|
//! // eventually yield the result when `service`
|
||||||
|
//! // is done writing the block.
|
||||||
|
//! let response_channel = write_handle.ready().await?.call(request);
|
||||||
|
//!
|
||||||
|
//! // Block write was OK.
|
||||||
|
//! let response = response_channel.await?;
|
||||||
|
//! assert_eq!(response, Response::WriteBlockOk);
|
||||||
|
//!
|
||||||
|
//! // Now, let's try getting the block hash
|
||||||
|
//! // of the block we just wrote.
|
||||||
|
//! let request = ReadRequest::BlockHash(0);
|
||||||
|
//! let response_channel = read_handle.ready().await?.call(request);
|
||||||
|
//! let response = response_channel.await?;
|
||||||
|
//! assert_eq!(
|
||||||
|
//! response,
|
||||||
|
//! Response::BlockHash(
|
||||||
|
//! hex!("43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428")
|
||||||
|
//! )
|
||||||
|
//! );
|
||||||
|
//!
|
||||||
|
//! // This causes the writer thread on the
|
||||||
|
//! // other side of this handle to exit...
|
||||||
|
//! drop(write_handle);
|
||||||
|
//! // ...and this causes the reader thread-pool to exit.
|
||||||
|
//! drop(read_handle);
|
||||||
|
//! # Ok(()) }
|
||||||
|
//! ```
|
||||||
|
|
||||||
mod read;
|
mod read;
|
||||||
pub use read::DatabaseReadHandle;
|
pub use read::DatabaseReadHandle;
|
||||||
|
|
|
@ -3,18 +3,12 @@
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::{
|
use std::{
|
||||||
collections::{HashMap, HashSet},
|
collections::{HashMap, HashSet},
|
||||||
num::NonZeroUsize,
|
sync::Arc,
|
||||||
ops::Range,
|
|
||||||
sync::{Arc, RwLock},
|
|
||||||
task::{Context, Poll},
|
task::{Context, Poll},
|
||||||
};
|
};
|
||||||
|
|
||||||
use cfg_if::cfg_if;
|
|
||||||
use crossbeam::channel::Receiver;
|
|
||||||
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, Scalar};
|
|
||||||
use futures::{channel::oneshot, ready};
|
use futures::{channel::oneshot, ready};
|
||||||
use monero_serai::{transaction::Timelock, H};
|
use rayon::iter::{IntoParallelIterator, ParallelIterator};
|
||||||
use rayon::iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator};
|
|
||||||
use thread_local::ThreadLocal;
|
use thread_local::ThreadLocal;
|
||||||
use tokio::sync::{OwnedSemaphorePermit, Semaphore};
|
use tokio::sync::{OwnedSemaphorePermit, Semaphore};
|
||||||
use tokio_util::sync::PollSemaphore;
|
use tokio_util::sync::PollSemaphore;
|
||||||
|
@ -27,21 +21,16 @@ use cuprate_types::{
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
config::ReaderThreads,
|
config::ReaderThreads,
|
||||||
constants::DATABASE_CORRUPT_MSG,
|
|
||||||
error::RuntimeError,
|
error::RuntimeError,
|
||||||
ops::{
|
ops::{
|
||||||
block::{get_block_extended_header_from_height, get_block_info},
|
block::{get_block_extended_header_from_height, get_block_info},
|
||||||
blockchain::{cumulative_generated_coins, top_block_height},
|
blockchain::{cumulative_generated_coins, top_block_height},
|
||||||
key_image::key_image_exists,
|
key_image::key_image_exists,
|
||||||
output::{
|
output::id_to_output_on_chain,
|
||||||
get_output, get_rct_output, id_to_output_on_chain, output_to_output_on_chain,
|
|
||||||
rct_output_to_output_on_chain,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
service::types::{ResponseReceiver, ResponseResult, ResponseSender},
|
service::types::{ResponseReceiver, ResponseResult, ResponseSender},
|
||||||
tables::{BlockHeights, BlockInfos, KeyImages, NumOutputs, Outputs, Tables},
|
tables::{BlockHeights, BlockInfos, Tables},
|
||||||
types::{Amount, AmountIndex, BlockHeight, KeyImage, OutputFlags, PreRctOutputId},
|
types::{Amount, AmountIndex, BlockHeight, KeyImage, PreRctOutputId},
|
||||||
unsafe_sendable::UnsafeSendable,
|
|
||||||
ConcreteEnv, DatabaseRo, Env, EnvInner,
|
ConcreteEnv, DatabaseRo, Env, EnvInner,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -208,7 +197,7 @@ fn map_request(
|
||||||
) {
|
) {
|
||||||
use ReadRequest as R;
|
use ReadRequest as R;
|
||||||
|
|
||||||
/* TODO: pre-request handling, run some code for each request? */
|
/* SOMEDAY: pre-request handling, run some code for each request? */
|
||||||
|
|
||||||
let response = match request {
|
let response = match request {
|
||||||
R::BlockExtendedHeader(block) => block_extended_header(env, block),
|
R::BlockExtendedHeader(block) => block_extended_header(env, block),
|
||||||
|
@ -226,7 +215,7 @@ fn map_request(
|
||||||
println!("database reader failed to send response: {e:?}");
|
println!("database reader failed to send response: {e:?}");
|
||||||
}
|
}
|
||||||
|
|
||||||
/* TODO: post-request handling, run some code for each request? */
|
/* SOMEDAY: post-request handling, run some code for each request? */
|
||||||
}
|
}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Thread Local
|
//---------------------------------------------------------------------------------------------------- Thread Local
|
||||||
|
@ -294,7 +283,7 @@ macro_rules! get_tables {
|
||||||
// All functions below assume that this is the case, such that
|
// All functions below assume that this is the case, such that
|
||||||
// `par_*()` functions will not block the _global_ rayon thread-pool.
|
// `par_*()` functions will not block the _global_ rayon thread-pool.
|
||||||
|
|
||||||
// TODO: implement multi-transaction read atomicity.
|
// FIXME: implement multi-transaction read atomicity.
|
||||||
// <https://github.com/Cuprate/cuprate/pull/113#discussion_r1576874589>.
|
// <https://github.com/Cuprate/cuprate/pull/113#discussion_r1576874589>.
|
||||||
|
|
||||||
/// [`ReadRequest::BlockExtendedHeader`].
|
/// [`ReadRequest::BlockExtendedHeader`].
|
||||||
|
@ -481,7 +470,7 @@ fn check_k_is_not_spent(env: &ConcreteEnv, key_images: HashSet<KeyImage>) -> Res
|
||||||
key_image_exists(&key_image, tables.key_images())
|
key_image_exists(&key_image, tables.key_images())
|
||||||
};
|
};
|
||||||
|
|
||||||
// TODO:
|
// FIXME:
|
||||||
// Create/use `enum cuprate_types::Exist { Does, DoesNot }`
|
// Create/use `enum cuprate_types::Exist { Does, DoesNot }`
|
||||||
// or similar instead of `bool` for clarity.
|
// or similar instead of `bool` for clarity.
|
||||||
// <https://github.com/Cuprate/cuprate/pull/113#discussion_r1581536526>
|
// <https://github.com/Cuprate/cuprate/pull/113#discussion_r1581536526>
|
||||||
|
|
|
@ -1,25 +1,14 @@
|
||||||
//! `crate::service` tests.
|
//! `crate::service` tests.
|
||||||
//!
|
//!
|
||||||
//! This module contains general tests for the `service` implementation.
|
//! This module contains general tests for the `service` implementation.
|
||||||
//!
|
|
||||||
//! Testing a thread-pool is slightly more complicated,
|
|
||||||
//! so this file provides TODO.
|
|
||||||
|
|
||||||
// This is only imported on `#[cfg(test)]` in `mod.rs`.
|
// This is only imported on `#[cfg(test)]` in `mod.rs`.
|
||||||
|
#![allow(clippy::await_holding_lock, clippy::too_many_lines)]
|
||||||
#![allow(
|
|
||||||
clippy::significant_drop_tightening,
|
|
||||||
clippy::await_holding_lock,
|
|
||||||
clippy::too_many_lines
|
|
||||||
)]
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Use
|
//---------------------------------------------------------------------------------------------------- Use
|
||||||
use std::{
|
use std::{
|
||||||
collections::{hash_map::Entry, HashMap, HashSet},
|
collections::{HashMap, HashSet},
|
||||||
sync::{
|
sync::Arc,
|
||||||
atomic::{AtomicU64, Ordering},
|
|
||||||
Arc,
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
|
@ -28,20 +17,20 @@ use tower::{Service, ServiceExt};
|
||||||
use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3};
|
use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3};
|
||||||
use cuprate_types::{
|
use cuprate_types::{
|
||||||
service::{ReadRequest, Response, WriteRequest},
|
service::{ReadRequest, Response, WriteRequest},
|
||||||
ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation,
|
OutputOnChain, VerifiedBlockInformation,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
config::Config,
|
config::ConfigBuilder,
|
||||||
ops::{
|
ops::{
|
||||||
block::{get_block_extended_header_from_height, get_block_info},
|
block::{get_block_extended_header_from_height, get_block_info},
|
||||||
blockchain::{chain_height, top_block_height},
|
blockchain::chain_height,
|
||||||
output::{get_output, id_to_output_on_chain, output_to_output_on_chain},
|
output::id_to_output_on_chain,
|
||||||
},
|
},
|
||||||
service::{init, DatabaseReadHandle, DatabaseWriteHandle},
|
service::{init, DatabaseReadHandle, DatabaseWriteHandle},
|
||||||
tables::{KeyImages, Tables, TablesIter},
|
tables::{Tables, TablesIter},
|
||||||
tests::AssertTableLen,
|
tests::AssertTableLen,
|
||||||
types::{Amount, AmountIndex, KeyImage, PreRctOutputId},
|
types::{Amount, AmountIndex, PreRctOutputId},
|
||||||
ConcreteEnv, DatabaseIter, DatabaseRo, Env, EnvInner, RuntimeError,
|
ConcreteEnv, DatabaseIter, DatabaseRo, Env, EnvInner, RuntimeError,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -54,7 +43,10 @@ fn init_service() -> (
|
||||||
tempfile::TempDir,
|
tempfile::TempDir,
|
||||||
) {
|
) {
|
||||||
let tempdir = tempfile::tempdir().unwrap();
|
let tempdir = tempfile::tempdir().unwrap();
|
||||||
let config = Config::low_power(Some(tempdir.path().into()));
|
let config = ConfigBuilder::new()
|
||||||
|
.db_directory(tempdir.path().into())
|
||||||
|
.low_power()
|
||||||
|
.build();
|
||||||
let (reader, writer) = init(config).unwrap();
|
let (reader, writer) = init(config).unwrap();
|
||||||
let env = reader.env().clone();
|
let env = reader.env().clone();
|
||||||
(reader, writer, env, tempdir)
|
(reader, writer, env, tempdir)
|
||||||
|
@ -169,7 +161,7 @@ async fn test_template(
|
||||||
#[allow(clippy::cast_possible_truncation)]
|
#[allow(clippy::cast_possible_truncation)]
|
||||||
Ok(count) => (*amount, count as usize),
|
Ok(count) => (*amount, count as usize),
|
||||||
Err(RuntimeError::KeyNotFound) => (*amount, 0),
|
Err(RuntimeError::KeyNotFound) => (*amount, 0),
|
||||||
Err(e) => panic!(),
|
Err(e) => panic!("{e:?}"),
|
||||||
})
|
})
|
||||||
.collect::<HashMap<Amount, usize>>(),
|
.collect::<HashMap<Amount, usize>>(),
|
||||||
));
|
));
|
||||||
|
@ -196,7 +188,7 @@ async fn test_template(
|
||||||
println!("response: {response:#?}, expected_response: {expected_response:#?}");
|
println!("response: {response:#?}, expected_response: {expected_response:#?}");
|
||||||
match response {
|
match response {
|
||||||
Ok(resp) => assert_eq!(resp, expected_response.unwrap()),
|
Ok(resp) => assert_eq!(resp, expected_response.unwrap()),
|
||||||
Err(ref e) => assert!(matches!(response, expected_response)),
|
Err(_) => assert!(matches!(response, _expected_response)),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -303,7 +295,7 @@ async fn test_template(
|
||||||
/// If this test fails, something is very wrong.
|
/// If this test fails, something is very wrong.
|
||||||
#[test]
|
#[test]
|
||||||
fn init_drop() {
|
fn init_drop() {
|
||||||
let (reader, writer, env, _tempdir) = init_service();
|
let (_reader, _writer, _env, _tempdir) = init_service();
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Assert write/read correctness of [`block_v1_tx2`].
|
/// Assert write/read correctness of [`block_v1_tx2`].
|
||||||
|
|
|
@ -15,7 +15,6 @@ use cuprate_types::{
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
constants::DATABASE_CORRUPT_MSG,
|
|
||||||
env::{Env, EnvInner},
|
env::{Env, EnvInner},
|
||||||
error::RuntimeError,
|
error::RuntimeError,
|
||||||
service::types::{ResponseReceiver, ResponseResult, ResponseSender},
|
service::types::{ResponseReceiver, ResponseResult, ResponseSender},
|
||||||
|
@ -136,7 +135,6 @@ impl DatabaseWriter {
|
||||||
/// How many times should we retry handling the request on resize errors?
|
/// How many times should we retry handling the request on resize errors?
|
||||||
///
|
///
|
||||||
/// This is 1 on automatically resizing databases, meaning there is only 1 iteration.
|
/// This is 1 on automatically resizing databases, meaning there is only 1 iteration.
|
||||||
#[allow(clippy::items_after_statements)]
|
|
||||||
const REQUEST_RETRY_LIMIT: usize = if ConcreteEnv::MANUAL_RESIZE { 3 } else { 1 };
|
const REQUEST_RETRY_LIMIT: usize = if ConcreteEnv::MANUAL_RESIZE { 3 } else { 1 };
|
||||||
|
|
||||||
// Map [`Request`]'s to specific database functions.
|
// Map [`Request`]'s to specific database functions.
|
||||||
|
@ -152,7 +150,7 @@ impl DatabaseWriter {
|
||||||
// to represent this retry logic with recursive
|
// to represent this retry logic with recursive
|
||||||
// functions instead of a loop.
|
// functions instead of a loop.
|
||||||
'retry: for retry in 0..REQUEST_RETRY_LIMIT {
|
'retry: for retry in 0..REQUEST_RETRY_LIMIT {
|
||||||
// TODO: will there be more than 1 write request?
|
// FIXME: will there be more than 1 write request?
|
||||||
// this won't have to be an enum.
|
// this won't have to be an enum.
|
||||||
let response = match &request {
|
let response = match &request {
|
||||||
WriteRequest::WriteBlock(block) => write_block(&self.env, block),
|
WriteRequest::WriteBlock(block) => write_block(&self.env, block),
|
||||||
|
@ -208,12 +206,6 @@ impl DatabaseWriter {
|
||||||
// - ...retry until panic
|
// - ...retry until panic
|
||||||
unreachable!();
|
unreachable!();
|
||||||
}
|
}
|
||||||
|
|
||||||
// The only case the ['main] loop breaks should be a:
|
|
||||||
// - direct function return
|
|
||||||
// - panic
|
|
||||||
// anything below should be unreachable.
|
|
||||||
unreachable!();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -239,14 +231,13 @@ fn write_block(env: &ConcreteEnv, block: &VerifiedBlockInformation) -> ResponseR
|
||||||
|
|
||||||
match result {
|
match result {
|
||||||
Ok(()) => {
|
Ok(()) => {
|
||||||
tx_rw.commit()?;
|
TxRw::commit(tx_rw)?;
|
||||||
Ok(Response::WriteBlockOk)
|
Ok(Response::WriteBlockOk)
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
// INVARIANT: ensure database atomicity by aborting
|
// INVARIANT: ensure database atomicity by aborting
|
||||||
// the transaction on `add_block()` failures.
|
// the transaction on `add_block()` failures.
|
||||||
tx_rw
|
TxRw::abort(tx_rw)
|
||||||
.abort()
|
|
||||||
.expect("could not maintain database atomicity by aborting write transaction");
|
.expect("could not maintain database atomicity by aborting write transaction");
|
||||||
Err(e)
|
Err(e)
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,15 +1,9 @@
|
||||||
//! (De)serialization for table keys & values.
|
//! (De)serialization for table keys & values.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::{
|
use std::{borrow::Borrow, fmt::Debug};
|
||||||
borrow::{Borrow, Cow},
|
|
||||||
char::ToLowercase,
|
|
||||||
fmt::Debug,
|
|
||||||
io::{Read, Write},
|
|
||||||
sync::Arc,
|
|
||||||
};
|
|
||||||
|
|
||||||
use bytemuck::{Pod, Zeroable};
|
use bytemuck::Pod;
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Storable
|
//---------------------------------------------------------------------------------------------------- Storable
|
||||||
|
@ -25,16 +19,14 @@ use bytes::Bytes;
|
||||||
/// Any type that implements:
|
/// Any type that implements:
|
||||||
/// - [`bytemuck::Pod`]
|
/// - [`bytemuck::Pod`]
|
||||||
/// - [`Debug`]
|
/// - [`Debug`]
|
||||||
/// - [`ToOwned`]
|
|
||||||
///
|
///
|
||||||
/// will automatically implement [`Storable`].
|
/// will automatically implement [`Storable`].
|
||||||
///
|
///
|
||||||
/// This includes:
|
/// This includes:
|
||||||
/// - Most primitive types
|
/// - Most primitive types
|
||||||
/// - All types in [`tables`](crate::tables)
|
/// - All types in [`tables`](crate::tables)
|
||||||
/// - Slices, e.g, `[T] where T: Storable`
|
|
||||||
///
|
///
|
||||||
/// See [`StorableVec`] for storing slices of `T: Storable`.
|
/// See [`StorableVec`] & [`StorableBytes`] for storing slices of `T: Storable`.
|
||||||
///
|
///
|
||||||
/// ```rust
|
/// ```rust
|
||||||
/// # use cuprate_database::*;
|
/// # use cuprate_database::*;
|
||||||
|
@ -142,6 +134,7 @@ where
|
||||||
///
|
///
|
||||||
/// This is needed as `impl Storable for Vec<T>` runs into impl conflicts.
|
/// This is needed as `impl Storable for Vec<T>` runs into impl conflicts.
|
||||||
///
|
///
|
||||||
|
/// # Example
|
||||||
/// ```rust
|
/// ```rust
|
||||||
/// # use cuprate_database::*;
|
/// # use cuprate_database::*;
|
||||||
/// //---------------------------------------------------- u8
|
/// //---------------------------------------------------- u8
|
||||||
|
@ -284,7 +277,7 @@ mod test {
|
||||||
println!("serialized: {se:?}, deserialized: {de:?}\n");
|
println!("serialized: {se:?}, deserialized: {de:?}\n");
|
||||||
|
|
||||||
// Assert we wrote correct amount of bytes.
|
// Assert we wrote correct amount of bytes.
|
||||||
if let Some(len) = T::BYTE_LENGTH {
|
if T::BYTE_LENGTH.is_some() {
|
||||||
assert_eq!(se.len(), expected_bytes.len());
|
assert_eq!(se.len(), expected_bytes.len());
|
||||||
}
|
}
|
||||||
// Assert the data is the same.
|
// Assert the data is the same.
|
||||||
|
|
|
@ -1,7 +1,6 @@
|
||||||
//! Database table abstraction; `trait Table`.
|
//! Database table abstraction; `trait Table`.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::fmt::Debug;
|
|
||||||
|
|
||||||
use crate::{key::Key, storable::Storable};
|
use crate::{key::Key, storable::Storable};
|
||||||
|
|
||||||
|
@ -13,7 +12,7 @@ use crate::{key::Key, storable::Storable};
|
||||||
/// ## Sealed
|
/// ## Sealed
|
||||||
/// This trait is [`Sealed`](https://rust-lang.github.io/api-guidelines/future-proofing.html#sealed-traits-protect-against-downstream-implementations-c-sealed).
|
/// This trait is [`Sealed`](https://rust-lang.github.io/api-guidelines/future-proofing.html#sealed-traits-protect-against-downstream-implementations-c-sealed).
|
||||||
///
|
///
|
||||||
/// It is, and can only be implemented on the types inside [`tables`][crate::tables].
|
/// It is only implemented on the types inside [`tables`][crate::tables].
|
||||||
pub trait Table: crate::tables::private::Sealed + 'static {
|
pub trait Table: crate::tables::private::Sealed + 'static {
|
||||||
/// Name of the database table.
|
/// Name of the database table.
|
||||||
const NAME: &'static str;
|
const NAME: &'static str;
|
||||||
|
|
|
@ -1,6 +1,21 @@
|
||||||
//! Database tables.
|
//! Database tables.
|
||||||
//!
|
//!
|
||||||
//! This module contains all the table definitions used by `cuprate-database`.
|
//! # Table marker structs
|
||||||
|
//! This module contains all the table definitions used by `cuprate_database`.
|
||||||
|
//!
|
||||||
|
//! The zero-sized structs here represents the table type;
|
||||||
|
//! they all are essentially marker types that implement [`Table`].
|
||||||
|
//!
|
||||||
|
//! Table structs are `CamelCase`, and their static string
|
||||||
|
//! names used by the actual database backend are `snake_case`.
|
||||||
|
//!
|
||||||
|
//! For example: [`BlockBlobs`] -> `block_blobs`.
|
||||||
|
//!
|
||||||
|
//! # Traits
|
||||||
|
//! This module also contains a set of traits for
|
||||||
|
//! accessing _all_ tables defined here at once.
|
||||||
|
//!
|
||||||
|
//! For example, this is the object returned by [`EnvInner::open_tables`](crate::EnvInner::open_tables).
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use crate::{
|
use crate::{
|
||||||
|
@ -25,6 +40,7 @@ pub(super) mod private {
|
||||||
//---------------------------------------------------------------------------------------------------- `trait Tables[Mut]`
|
//---------------------------------------------------------------------------------------------------- `trait Tables[Mut]`
|
||||||
/// Creates:
|
/// Creates:
|
||||||
/// - `pub trait Tables`
|
/// - `pub trait Tables`
|
||||||
|
/// - `pub trait TablesIter`
|
||||||
/// - `pub trait TablesMut`
|
/// - `pub trait TablesMut`
|
||||||
/// - Blanket implementation for `(tuples, containing, all, open, database, tables, ...)`
|
/// - Blanket implementation for `(tuples, containing, all, open, database, tables, ...)`
|
||||||
///
|
///
|
||||||
|
@ -54,10 +70,14 @@ macro_rules! define_trait_tables {
|
||||||
/// ```rust,ignore
|
/// ```rust,ignore
|
||||||
/// let tables = open_tables();
|
/// let tables = open_tables();
|
||||||
///
|
///
|
||||||
/// // The accessor function `block_info_v1s()` returns the field
|
/// // The accessor function `block_infos()` returns the field
|
||||||
/// // containing an open database table for `BlockInfoV1s`.
|
/// // containing an open database table for `BlockInfos`.
|
||||||
/// let _ = tables.block_info_v1s();
|
/// let _ = tables.block_infos();
|
||||||
/// ```
|
/// ```
|
||||||
|
///
|
||||||
|
/// See also:
|
||||||
|
/// - [`TablesMut`]
|
||||||
|
/// - [`TablesIter`]
|
||||||
pub trait Tables: private::Sealed {
|
pub trait Tables: private::Sealed {
|
||||||
// This expands to creating `fn field_accessor_functions()`
|
// This expands to creating `fn field_accessor_functions()`
|
||||||
// for each passed `$table` type.
|
// for each passed `$table` type.
|
||||||
|
@ -85,6 +105,9 @@ macro_rules! define_trait_tables {
|
||||||
///
|
///
|
||||||
/// This is the same as [`Tables`] but includes `_iter()` variants.
|
/// This is the same as [`Tables`] but includes `_iter()` variants.
|
||||||
///
|
///
|
||||||
|
/// Note that this trait is a supertrait of `Tables`,
|
||||||
|
/// as in it can use all of its functions as well.
|
||||||
|
///
|
||||||
/// See [`Tables`] for documentation - this trait exists for the same reasons.
|
/// See [`Tables`] for documentation - this trait exists for the same reasons.
|
||||||
pub trait TablesIter: private::Sealed + Tables {
|
pub trait TablesIter: private::Sealed + Tables {
|
||||||
$(
|
$(
|
||||||
|
@ -99,6 +122,9 @@ macro_rules! define_trait_tables {
|
||||||
///
|
///
|
||||||
/// This is the same as [`Tables`] but for mutable accesses.
|
/// This is the same as [`Tables`] but for mutable accesses.
|
||||||
///
|
///
|
||||||
|
/// Note that this trait is a supertrait of `Tables`,
|
||||||
|
/// as in it can use all of its functions as well.
|
||||||
|
///
|
||||||
/// See [`Tables`] for documentation - this trait exists for the same reasons.
|
/// See [`Tables`] for documentation - this trait exists for the same reasons.
|
||||||
pub trait TablesMut: private::Sealed + Tables {
|
pub trait TablesMut: private::Sealed + Tables {
|
||||||
$(
|
$(
|
||||||
|
@ -207,14 +233,20 @@ macro_rules! define_trait_tables {
|
||||||
}};
|
}};
|
||||||
}
|
}
|
||||||
|
|
||||||
// Format: $table_type => $index
|
// Input format: $table_type => $index
|
||||||
//
|
//
|
||||||
// The $index:
|
// The $index:
|
||||||
// - Simply increments by 1 for each table
|
// - Simply increments by 1 for each table
|
||||||
// - Must be 0..
|
// - Must be 0..
|
||||||
// - Must end at the total amount of table types
|
// - Must end at the total amount of table types - 1
|
||||||
//
|
//
|
||||||
// Compile errors will occur if these aren't satisfied.
|
// Compile errors will occur if these aren't satisfied.
|
||||||
|
//
|
||||||
|
// $index is just the `tuple.$index`, as the above [`define_trait_tables`]
|
||||||
|
// macro has a blanket impl for `(all, table, types, ...)` and we must map
|
||||||
|
// each type to a tuple index explicitly.
|
||||||
|
//
|
||||||
|
// FIXME: there's definitely an automatic way to this :)
|
||||||
define_trait_tables! {
|
define_trait_tables! {
|
||||||
BlockInfos => 0,
|
BlockInfos => 0,
|
||||||
BlockBlobs => 1,
|
BlockBlobs => 1,
|
||||||
|
@ -294,6 +326,9 @@ macro_rules! tables {
|
||||||
// Table struct.
|
// Table struct.
|
||||||
$(#[$attr])*
|
$(#[$attr])*
|
||||||
// The below test show the `snake_case` table name in cargo docs.
|
// The below test show the `snake_case` table name in cargo docs.
|
||||||
|
#[doc = concat!("- Key: [`", stringify!($key), "`]")]
|
||||||
|
#[doc = concat!("- Value: [`", stringify!($value), "`]")]
|
||||||
|
///
|
||||||
/// ## Table Name
|
/// ## Table Name
|
||||||
/// ```rust
|
/// ```rust
|
||||||
/// # use cuprate_database::{*,tables::*};
|
/// # use cuprate_database::{*,tables::*};
|
||||||
|
@ -332,19 +367,30 @@ macro_rules! tables {
|
||||||
// b) `Env::open` to make sure it creates the table (for all backends)
|
// b) `Env::open` to make sure it creates the table (for all backends)
|
||||||
// c) `call_fn_on_all_tables_or_early_return!()` macro defined in this file
|
// c) `call_fn_on_all_tables_or_early_return!()` macro defined in this file
|
||||||
tables! {
|
tables! {
|
||||||
/// TODO
|
/// Serialized block blobs (bytes).
|
||||||
|
///
|
||||||
|
/// Contains the serialized version of all blocks.
|
||||||
BlockBlobs,
|
BlockBlobs,
|
||||||
BlockHeight => BlockBlob,
|
BlockHeight => BlockBlob,
|
||||||
|
|
||||||
/// TODO
|
/// Block heights.
|
||||||
|
///
|
||||||
|
/// Contains the height of all blocks.
|
||||||
BlockHeights,
|
BlockHeights,
|
||||||
BlockHash => BlockHeight,
|
BlockHash => BlockHeight,
|
||||||
|
|
||||||
/// TODO
|
/// Block information.
|
||||||
|
///
|
||||||
|
/// Contains metadata of all blocks.
|
||||||
BlockInfos,
|
BlockInfos,
|
||||||
BlockHeight => BlockInfo,
|
BlockHeight => BlockInfo,
|
||||||
|
|
||||||
/// TODO
|
/// Set of key images.
|
||||||
|
///
|
||||||
|
/// Contains all the key images known to be spent.
|
||||||
|
///
|
||||||
|
/// This table has `()` as the value type, as in,
|
||||||
|
/// it is a set of key images.
|
||||||
KeyImages,
|
KeyImages,
|
||||||
KeyImage => (),
|
KeyImage => (),
|
||||||
|
|
||||||
|
@ -355,18 +401,26 @@ tables! {
|
||||||
NumOutputs,
|
NumOutputs,
|
||||||
Amount => u64,
|
Amount => u64,
|
||||||
|
|
||||||
/// TODO
|
/// Pruned transaction blobs (bytes).
|
||||||
|
///
|
||||||
|
/// Contains the pruned portion of serialized transaction data.
|
||||||
PrunedTxBlobs,
|
PrunedTxBlobs,
|
||||||
TxId => PrunedBlob,
|
TxId => PrunedBlob,
|
||||||
|
|
||||||
/// TODO
|
/// Pre-RCT output data.
|
||||||
Outputs,
|
Outputs,
|
||||||
PreRctOutputId => Output,
|
PreRctOutputId => Output,
|
||||||
|
|
||||||
|
/// Prunable transaction blobs (bytes).
|
||||||
|
///
|
||||||
|
/// Contains the prunable portion of serialized transaction data.
|
||||||
// SOMEDAY: impl when `monero-serai` supports pruning
|
// SOMEDAY: impl when `monero-serai` supports pruning
|
||||||
PrunableTxBlobs,
|
PrunableTxBlobs,
|
||||||
TxId => PrunableBlob,
|
TxId => PrunableBlob,
|
||||||
|
|
||||||
|
/// Prunable transaction hashes.
|
||||||
|
///
|
||||||
|
/// Contains the prunable portion of transaction hashes.
|
||||||
// SOMEDAY: impl when `monero-serai` supports pruning
|
// SOMEDAY: impl when `monero-serai` supports pruning
|
||||||
PrunableHashes,
|
PrunableHashes,
|
||||||
TxId => PrunableHash,
|
TxId => PrunableHash,
|
||||||
|
@ -377,27 +431,40 @@ tables! {
|
||||||
// Properties,
|
// Properties,
|
||||||
// StorableString => StorableVec,
|
// StorableString => StorableVec,
|
||||||
|
|
||||||
/// TODO
|
/// RCT output data.
|
||||||
RctOutputs,
|
RctOutputs,
|
||||||
AmountIndex => RctOutput,
|
AmountIndex => RctOutput,
|
||||||
|
|
||||||
/// SOMEDAY: remove when `monero-serai` supports pruning
|
/// Transaction blobs (bytes).
|
||||||
|
///
|
||||||
|
/// Contains the serialized version of all transactions.
|
||||||
|
// SOMEDAY: remove when `monero-serai` supports pruning
|
||||||
TxBlobs,
|
TxBlobs,
|
||||||
TxId => TxBlob,
|
TxId => TxBlob,
|
||||||
|
|
||||||
/// TODO
|
/// Transaction indices.
|
||||||
|
///
|
||||||
|
/// Contains the indices all transactions.
|
||||||
TxIds,
|
TxIds,
|
||||||
TxHash => TxId,
|
TxHash => TxId,
|
||||||
|
|
||||||
/// TODO
|
/// Transaction heights.
|
||||||
|
///
|
||||||
|
/// Contains the block height associated with all transactions.
|
||||||
TxHeights,
|
TxHeights,
|
||||||
TxId => BlockHeight,
|
TxId => BlockHeight,
|
||||||
|
|
||||||
/// TODO
|
/// Transaction outputs.
|
||||||
|
///
|
||||||
|
/// Contains the list of `AmountIndex`'s of the
|
||||||
|
/// outputs associated with all transactions.
|
||||||
TxOutputs,
|
TxOutputs,
|
||||||
TxId => AmountIndices,
|
TxId => AmountIndices,
|
||||||
|
|
||||||
/// TODO
|
/// Transaction unlock time.
|
||||||
|
///
|
||||||
|
/// Contains the unlock time of transactions IF they have one.
|
||||||
|
/// Transactions without unlock times will not exist in this table.
|
||||||
TxUnlockTime,
|
TxUnlockTime,
|
||||||
TxId => UnlockTime,
|
TxId => UnlockTime,
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,24 +4,12 @@
|
||||||
//! - enabled on #[cfg(test)]
|
//! - enabled on #[cfg(test)]
|
||||||
//! - only used internally
|
//! - only used internally
|
||||||
|
|
||||||
#![allow(clippy::significant_drop_tightening)]
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::{
|
use std::fmt::Debug;
|
||||||
fmt::Debug,
|
|
||||||
sync::{Arc, OnceLock},
|
|
||||||
};
|
|
||||||
|
|
||||||
use monero_serai::{
|
|
||||||
ringct::{RctPrunable, RctSignatures},
|
|
||||||
transaction::{Timelock, Transaction, TransactionPrefix},
|
|
||||||
};
|
|
||||||
use pretty_assertions::assert_eq;
|
use pretty_assertions::assert_eq;
|
||||||
|
|
||||||
use crate::{
|
use crate::{config::ConfigBuilder, tables::Tables, ConcreteEnv, DatabaseRo, Env, EnvInner};
|
||||||
config::Config, key::Key, storable::Storable, tables::Tables, transaction::TxRo, ConcreteEnv,
|
|
||||||
DatabaseRo, Env, EnvInner,
|
|
||||||
};
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Struct
|
//---------------------------------------------------------------------------------------------------- Struct
|
||||||
/// Named struct to assert the length of all tables.
|
/// Named struct to assert the length of all tables.
|
||||||
|
@ -78,7 +66,10 @@ impl AssertTableLen {
|
||||||
/// FIXME: changing this to `-> impl Env` causes lifetime errors...
|
/// FIXME: changing this to `-> impl Env` causes lifetime errors...
|
||||||
pub(crate) fn tmp_concrete_env() -> (ConcreteEnv, tempfile::TempDir) {
|
pub(crate) fn tmp_concrete_env() -> (ConcreteEnv, tempfile::TempDir) {
|
||||||
let tempdir = tempfile::tempdir().unwrap();
|
let tempdir = tempfile::tempdir().unwrap();
|
||||||
let config = Config::low_power(Some(tempdir.path().into()));
|
let config = ConfigBuilder::new()
|
||||||
|
.db_directory(tempdir.path().into())
|
||||||
|
.low_power()
|
||||||
|
.build();
|
||||||
let env = ConcreteEnv::open(config).unwrap();
|
let env = ConcreteEnv::open(config).unwrap();
|
||||||
|
|
||||||
(env, tempdir)
|
(env, tempdir)
|
||||||
|
|
|
@ -1,21 +1,21 @@
|
||||||
//! Database transaction abstraction; `trait TxRo`, `trait TxRw`.
|
//! Database transaction abstraction; `trait TxRo`, `trait TxRw`.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use crate::{config::SyncMode, env::Env, error::RuntimeError};
|
use crate::error::RuntimeError;
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- TxRo
|
//---------------------------------------------------------------------------------------------------- TxRo
|
||||||
/// Read-only database transaction.
|
/// Read-only database transaction.
|
||||||
///
|
///
|
||||||
/// Returned from [`EnvInner::tx_ro`](crate::EnvInner::tx_ro).
|
/// Returned from [`EnvInner::tx_ro`](crate::EnvInner::tx_ro).
|
||||||
///
|
///
|
||||||
/// # TODO
|
/// # Commit
|
||||||
/// I don't think we need this, we can just drop the `tx_ro`?
|
/// It's recommended but may not be necessary to call [`TxRo::commit`] in certain cases:
|
||||||
/// <https://docs.rs/heed/0.20.0-alpha.9/heed/struct.RoTxn.html#method.commit>
|
/// - <https://docs.rs/heed/0.20.0-alpha.9/heed/struct.RoTxn.html#method.commit>
|
||||||
pub trait TxRo<'env> {
|
pub trait TxRo<'env> {
|
||||||
/// Commit the read-only transaction.
|
/// Commit the read-only transaction.
|
||||||
///
|
///
|
||||||
/// # Errors
|
/// # Errors
|
||||||
/// This operation is infallible (will always return `Ok(())`) with the `redb` backend.
|
/// This operation will always return `Ok(())` with the `redb` backend.
|
||||||
fn commit(self) -> Result<(), RuntimeError>;
|
fn commit(self) -> Result<(), RuntimeError>;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -29,20 +29,15 @@ pub trait TxRw<'env> {
|
||||||
/// Note that this doesn't necessarily sync the database caches to disk.
|
/// Note that this doesn't necessarily sync the database caches to disk.
|
||||||
///
|
///
|
||||||
/// # Errors
|
/// # Errors
|
||||||
/// This operation is infallible (will always return `Ok(())`) with the `redb` backend.
|
/// This operation will always return `Ok(())` with the `redb` backend.
|
||||||
///
|
///
|
||||||
/// Else, this will only return:
|
/// If `Env::MANUAL_RESIZE == true`,
|
||||||
/// - [`RuntimeError::ResizeNeeded`] (if `Env::MANUAL_RESIZE == true`)
|
/// [`RuntimeError::ResizeNeeded`] may be returned.
|
||||||
/// - [`RuntimeError::Io`]
|
|
||||||
fn commit(self) -> Result<(), RuntimeError>;
|
fn commit(self) -> Result<(), RuntimeError>;
|
||||||
|
|
||||||
/// Abort the transaction, erasing any writes that have occurred.
|
/// Abort the transaction, erasing any writes that have occurred.
|
||||||
///
|
///
|
||||||
/// # Errors
|
/// # Errors
|
||||||
/// This operation is infallible (will always return `Ok(())`) with the `heed` backend.
|
/// This operation will always return `Ok(())` with the `heed` backend.
|
||||||
///
|
|
||||||
/// Else, this will only return:
|
|
||||||
/// - [`RuntimeError::ResizeNeeded`] (if `Env::MANUAL_RESIZE == true`)
|
|
||||||
/// - [`RuntimeError::Io`]
|
|
||||||
fn abort(self) -> Result<(), RuntimeError>;
|
fn abort(self) -> Result<(), RuntimeError>;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,8 +1,10 @@
|
||||||
//! Database [table](crate::tables) types.
|
//! Database [table](crate::tables) types.
|
||||||
//!
|
//!
|
||||||
//! This module contains all types used by the database tables.
|
//! This module contains all types used by the database tables,
|
||||||
|
//! and aliases for common Monero-related types that use the
|
||||||
|
//! same underlying primitive type.
|
||||||
//!
|
//!
|
||||||
//! TODO: Add schema here or a link to it.
|
//! <!-- FIXME: Add schema here or a link to it when complete -->
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* <============================================> VERY BIG SCARY SAFETY MESSAGE <============================================>
|
* <============================================> VERY BIG SCARY SAFETY MESSAGE <============================================>
|
||||||
|
@ -39,7 +41,7 @@
|
||||||
#![forbid(unsafe_code)] // if you remove this line i will steal your monero
|
#![forbid(unsafe_code)] // if you remove this line i will steal your monero
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use bytemuck::{AnyBitPattern, NoUninit, Pod, Zeroable};
|
use bytemuck::{Pod, Zeroable};
|
||||||
|
|
||||||
#[cfg(feature = "serde")]
|
#[cfg(feature = "serde")]
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
@ -47,55 +49,59 @@ use serde::{Deserialize, Serialize};
|
||||||
use crate::storable::StorableVec;
|
use crate::storable::StorableVec;
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Aliases
|
//---------------------------------------------------------------------------------------------------- Aliases
|
||||||
// TODO: document these, why they exist, and their purpose.
|
// These type aliases exist as many Monero-related types are the exact same.
|
||||||
//
|
// For clarity, they're given type aliases as to not confuse them.
|
||||||
// Notes:
|
|
||||||
// - Keep this sorted A-Z
|
|
||||||
|
|
||||||
/// TODO
|
/// An output's amount.
|
||||||
pub type Amount = u64;
|
pub type Amount = u64;
|
||||||
|
|
||||||
/// TODO
|
/// The index of an [`Amount`] in a list of duplicate `Amount`s.
|
||||||
pub type AmountIndex = u64;
|
pub type AmountIndex = u64;
|
||||||
|
|
||||||
/// TODO
|
/// A list of [`AmountIndex`]s.
|
||||||
pub type AmountIndices = StorableVec<AmountIndex>;
|
pub type AmountIndices = StorableVec<AmountIndex>;
|
||||||
|
|
||||||
/// TODO
|
/// A serialized block.
|
||||||
pub type BlockBlob = StorableVec<u8>;
|
pub type BlockBlob = StorableVec<u8>;
|
||||||
|
|
||||||
/// TODO
|
/// A block's hash.
|
||||||
pub type BlockHash = [u8; 32];
|
pub type BlockHash = [u8; 32];
|
||||||
|
|
||||||
/// TODO
|
/// A block's height.
|
||||||
pub type BlockHeight = u64;
|
pub type BlockHeight = u64;
|
||||||
|
|
||||||
/// TODO
|
/// A key image.
|
||||||
pub type KeyImage = [u8; 32];
|
pub type KeyImage = [u8; 32];
|
||||||
|
|
||||||
/// TODO
|
/// Pruned serialized bytes.
|
||||||
pub type PrunedBlob = StorableVec<u8>;
|
pub type PrunedBlob = StorableVec<u8>;
|
||||||
|
|
||||||
/// TODO
|
/// A prunable serialized bytes.
|
||||||
pub type PrunableBlob = StorableVec<u8>;
|
pub type PrunableBlob = StorableVec<u8>;
|
||||||
|
|
||||||
/// TODO
|
/// A prunable hash.
|
||||||
pub type PrunableHash = [u8; 32];
|
pub type PrunableHash = [u8; 32];
|
||||||
|
|
||||||
/// TODO
|
/// A serialized transaction.
|
||||||
pub type TxBlob = StorableVec<u8>;
|
pub type TxBlob = StorableVec<u8>;
|
||||||
|
|
||||||
/// TODO
|
/// A transaction's global index, or ID.
|
||||||
pub type TxId = u64;
|
pub type TxId = u64;
|
||||||
|
|
||||||
/// TODO
|
/// A transaction's hash.
|
||||||
pub type TxHash = [u8; 32];
|
pub type TxHash = [u8; 32];
|
||||||
|
|
||||||
/// TODO
|
/// The unlock time value of an output.
|
||||||
pub type UnlockTime = u64;
|
pub type UnlockTime = u64;
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- BlockInfoV1
|
//---------------------------------------------------------------------------------------------------- BlockInfoV1
|
||||||
/// TODO
|
/// A identifier for a pre-RCT [`Output`].
|
||||||
|
///
|
||||||
|
/// This can also serve as an identifier for [`RctOutput`]'s
|
||||||
|
/// when [`PreRctOutputId::amount`] is set to `0`, although,
|
||||||
|
/// in that case, only [`AmountIndex`] needs to be known.
|
||||||
|
///
|
||||||
|
/// This is the key to the [`Outputs`](crate::tables::Outputs) table.
|
||||||
///
|
///
|
||||||
/// ```rust
|
/// ```rust
|
||||||
/// # use std::borrow::*;
|
/// # use std::borrow::*;
|
||||||
|
@ -121,14 +127,24 @@ pub type UnlockTime = u64;
|
||||||
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
|
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
pub struct PreRctOutputId {
|
pub struct PreRctOutputId {
|
||||||
/// TODO
|
/// Amount of the output.
|
||||||
|
///
|
||||||
|
/// This should be `0` if the output is an [`RctOutput`].
|
||||||
pub amount: Amount,
|
pub amount: Amount,
|
||||||
/// TODO
|
/// The index of the output with the same `amount`.
|
||||||
|
///
|
||||||
|
/// In the case of [`Output`]'s, this is the index of the list
|
||||||
|
/// of outputs with the same clear amount.
|
||||||
|
///
|
||||||
|
/// In the case of [`RctOutput`]'s, this is the
|
||||||
|
/// global index of _all_ `RctOutput`s
|
||||||
pub amount_index: AmountIndex,
|
pub amount_index: AmountIndex,
|
||||||
}
|
}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- BlockInfoV3
|
//---------------------------------------------------------------------------------------------------- BlockInfoV3
|
||||||
/// TODO
|
/// Block information.
|
||||||
|
///
|
||||||
|
/// This is the value in the [`BlockInfos`](crate::tables::BlockInfos) table.
|
||||||
///
|
///
|
||||||
/// ```rust
|
/// ```rust
|
||||||
/// # use std::borrow::*;
|
/// # use std::borrow::*;
|
||||||
|
@ -160,27 +176,34 @@ pub struct PreRctOutputId {
|
||||||
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
|
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
pub struct BlockInfo {
|
pub struct BlockInfo {
|
||||||
/// TODO
|
/// The UNIX time at which the block was mined.
|
||||||
pub timestamp: u64,
|
pub timestamp: u64,
|
||||||
/// TODO
|
/// The total amount of coins mined in all blocks so far, including this block's.
|
||||||
pub cumulative_generated_coins: u64,
|
pub cumulative_generated_coins: u64,
|
||||||
/// TODO
|
/// The adjusted block size, in bytes.
|
||||||
|
///
|
||||||
|
/// See [`block_weight`](https://monero-book.cuprate.org/consensus_rules/blocks/weights.html#blocks-weight).
|
||||||
pub weight: u64,
|
pub weight: u64,
|
||||||
/// Least-significant 64 bits of the 128-bit cumulative difficulty.
|
/// Least-significant 64 bits of the 128-bit cumulative difficulty.
|
||||||
pub cumulative_difficulty_low: u64,
|
pub cumulative_difficulty_low: u64,
|
||||||
/// Most-significant 64 bits of the 128-bit cumulative difficulty.
|
/// Most-significant 64 bits of the 128-bit cumulative difficulty.
|
||||||
pub cumulative_difficulty_high: u64,
|
pub cumulative_difficulty_high: u64,
|
||||||
/// TODO
|
/// The block's hash.
|
||||||
pub block_hash: [u8; 32],
|
pub block_hash: [u8; 32],
|
||||||
/// TODO
|
/// The total amount of RCT outputs so far, including this block's.
|
||||||
pub cumulative_rct_outs: u64,
|
pub cumulative_rct_outs: u64,
|
||||||
/// TODO
|
/// The long term block weight, based on the median weight of the preceding `100_000` blocks.
|
||||||
|
///
|
||||||
|
/// See [`long_term_weight`](https://monero-book.cuprate.org/consensus_rules/blocks/weights.html#long-term-block-weight).
|
||||||
pub long_term_weight: u64,
|
pub long_term_weight: u64,
|
||||||
}
|
}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- OutputFlags
|
//---------------------------------------------------------------------------------------------------- OutputFlags
|
||||||
bitflags::bitflags! {
|
bitflags::bitflags! {
|
||||||
/// TODO
|
/// Bit flags for [`Output`]s and [`RctOutput`]s,
|
||||||
|
///
|
||||||
|
/// Currently only the first bit is used and, if set,
|
||||||
|
/// it means this output has a non-zero unlock time.
|
||||||
///
|
///
|
||||||
/// ```rust
|
/// ```rust
|
||||||
/// # use std::borrow::*;
|
/// # use std::borrow::*;
|
||||||
|
@ -209,7 +232,7 @@ bitflags::bitflags! {
|
||||||
}
|
}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Output
|
//---------------------------------------------------------------------------------------------------- Output
|
||||||
/// TODO
|
/// A pre-RCT (v1) output's data.
|
||||||
///
|
///
|
||||||
/// ```rust
|
/// ```rust
|
||||||
/// # use std::borrow::*;
|
/// # use std::borrow::*;
|
||||||
|
@ -237,18 +260,20 @@ bitflags::bitflags! {
|
||||||
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
|
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
pub struct Output {
|
pub struct Output {
|
||||||
/// TODO
|
/// The public key of the output.
|
||||||
pub key: [u8; 32],
|
pub key: [u8; 32],
|
||||||
/// We could get this from the tx_idx with the Tx Heights table but that would require another look up per out.
|
/// The block height this output belongs to.
|
||||||
|
// PERF: We could get this from the tx_idx with the `TxHeights`
|
||||||
|
// table but that would require another look up per out.
|
||||||
pub height: u32,
|
pub height: u32,
|
||||||
/// Bit flags for this output, currently only the first bit is used and, if set, it means this output has a non-zero unlock time.
|
/// Bit flags for this output.
|
||||||
pub output_flags: OutputFlags,
|
pub output_flags: OutputFlags,
|
||||||
/// TODO
|
/// The index of the transaction this output belongs to.
|
||||||
pub tx_idx: u64,
|
pub tx_idx: u64,
|
||||||
}
|
}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- RctOutput
|
//---------------------------------------------------------------------------------------------------- RctOutput
|
||||||
/// TODO
|
/// An RCT (v2+) output's data.
|
||||||
///
|
///
|
||||||
/// ```rust
|
/// ```rust
|
||||||
/// # use std::borrow::*;
|
/// # use std::borrow::*;
|
||||||
|
@ -277,13 +302,15 @@ pub struct Output {
|
||||||
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
|
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
pub struct RctOutput {
|
pub struct RctOutput {
|
||||||
/// TODO
|
/// The public key of the output.
|
||||||
pub key: [u8; 32],
|
pub key: [u8; 32],
|
||||||
/// We could get this from the tx_idx with the Tx Heights table but that would require another look up per out.
|
/// The block height this output belongs to.
|
||||||
|
// PERF: We could get this from the tx_idx with the `TxHeights`
|
||||||
|
// table but that would require another look up per out.
|
||||||
pub height: u32,
|
pub height: u32,
|
||||||
/// Bit flags for this output, currently only the first bit is used and, if set, it means this output has a non-zero unlock time.
|
/// Bit flags for this output, currently only the first bit is used and, if set, it means this output has a non-zero unlock time.
|
||||||
pub output_flags: OutputFlags,
|
pub output_flags: OutputFlags,
|
||||||
/// TODO
|
/// The index of the transaction this output belongs to.
|
||||||
pub tx_idx: u64,
|
pub tx_idx: u64,
|
||||||
/// The amount commitment of this output.
|
/// The amount commitment of this output.
|
||||||
pub commitment: [u8; 32],
|
pub commitment: [u8; 32],
|
||||||
|
|
|
@ -8,8 +8,6 @@ use std::{
|
||||||
|
|
||||||
use bytemuck::TransparentWrapper;
|
use bytemuck::TransparentWrapper;
|
||||||
|
|
||||||
use crate::storable::StorableVec;
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Aliases
|
//---------------------------------------------------------------------------------------------------- Aliases
|
||||||
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, TransparentWrapper)]
|
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, TransparentWrapper)]
|
||||||
#[repr(transparent)]
|
#[repr(transparent)]
|
||||||
|
@ -43,6 +41,7 @@ impl<T> UnsafeSendable<T> {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Extract the inner `T`.
|
/// Extract the inner `T`.
|
||||||
|
#[allow(dead_code)]
|
||||||
pub(crate) fn into_inner(self) -> T {
|
pub(crate) fn into_inner(self) -> T {
|
||||||
self.0
|
self.0
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,11 +1,31 @@
|
||||||
//! Cuprate directories and filenames.
|
//! Cuprate directories and filenames.
|
||||||
//!
|
//!
|
||||||
//! # TODO
|
//! # Environment variables on Linux
|
||||||
//! Document how environment variables can change these.
|
//! Note that this module's functions uses [`dirs`],
|
||||||
|
//! which adheres to the XDG standard on Linux.
|
||||||
//!
|
//!
|
||||||
//! # Reference
|
//! This means that the values returned by these functions
|
||||||
//! <https://github.com/Cuprate/cuprate/issues/46>
|
//! may change at runtime depending on environment variables,
|
||||||
//! <https://docs.rs/dirs>
|
//! for example:
|
||||||
|
//!
|
||||||
|
//! By default the config directory is `~/.config`, however
|
||||||
|
//! if `$XDG_CONFIG_HOME` is set to something, that will be
|
||||||
|
//! used instead.
|
||||||
|
//!
|
||||||
|
//! ```rust
|
||||||
|
//! # use cuprate_helper::fs::*;
|
||||||
|
//! # if cfg!(target_os = "linux") {
|
||||||
|
//! std::env::set_var("XDG_CONFIG_HOME", "/custom/path");
|
||||||
|
//! assert_eq!(
|
||||||
|
//! cuprate_config_dir().to_string_lossy(),
|
||||||
|
//! "/custom/path/cuprate"
|
||||||
|
//! );
|
||||||
|
//! # }
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! Reference:
|
||||||
|
//! - <https://github.com/Cuprate/cuprate/issues/46>
|
||||||
|
//! - <https://docs.rs/dirs>
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Use
|
//---------------------------------------------------------------------------------------------------- Use
|
||||||
use std::{
|
use std::{
|
||||||
|
|
27
p2p/dandelion/Cargo.toml
Normal file
27
p2p/dandelion/Cargo.toml
Normal file
|
@ -0,0 +1,27 @@
|
||||||
|
[package]
|
||||||
|
name = "dandelion_tower"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2021"
|
||||||
|
license = "MIT"
|
||||||
|
authors = ["Boog900"]
|
||||||
|
|
||||||
|
[features]
|
||||||
|
default = ["txpool"]
|
||||||
|
txpool = ["dep:rand_distr", "dep:tokio-util", "dep:tokio"]
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
tower = { workspace = true, features = ["discover", "util"] }
|
||||||
|
tracing = { workspace = true, features = ["std"] }
|
||||||
|
|
||||||
|
futures = { workspace = true, features = ["std"] }
|
||||||
|
tokio = { workspace = true, features = ["rt", "sync", "macros"], optional = true}
|
||||||
|
tokio-util = { workspace = true, features = ["time"], optional = true }
|
||||||
|
|
||||||
|
rand = { workspace = true, features = ["std", "std_rng"] }
|
||||||
|
rand_distr = { workspace = true, features = ["std"], optional = true }
|
||||||
|
|
||||||
|
thiserror = { workspace = true }
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
tokio = { workspace = true, features = ["rt-multi-thread", "macros", "sync"] }
|
||||||
|
proptest = { workspace = true, features = ["default"] }
|
149
p2p/dandelion/src/config.rs
Normal file
149
p2p/dandelion/src/config.rs
Normal file
|
@ -0,0 +1,149 @@
|
||||||
|
use std::{
|
||||||
|
ops::{Mul, Neg},
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// When calculating the embargo timeout using the formula: `(-k*(k-1)*hop)/(2*log(1-ep))`
|
||||||
|
///
|
||||||
|
/// (1 - ep) is the probability that a transaction travels for `k` hops before a nodes embargo timeout fires, this constant is (1 - ep).
|
||||||
|
const EMBARGO_FULL_TRAVEL_PROBABILITY: f64 = 0.90;
|
||||||
|
|
||||||
|
/// The graph type to use for dandelion routing, the dandelion paper recommends [Graph::FourRegular].
|
||||||
|
///
|
||||||
|
/// The decision between line graphs and 4-regular graphs depend on the priorities of the system, if
|
||||||
|
/// linkability of transactions is a first order concern then line graphs may be better, however 4-regular graphs
|
||||||
|
/// can give constant-order privacy benefits against adversaries with knowledge of the graph.
|
||||||
|
///
|
||||||
|
/// See appendix C of the dandelion++ paper.
|
||||||
|
#[derive(Default, Debug, Copy, Clone)]
|
||||||
|
pub enum Graph {
|
||||||
|
/// Line graph.
|
||||||
|
///
|
||||||
|
/// When this is selected one peer will be chosen from the outbound peers each epoch to route transactions
|
||||||
|
/// to.
|
||||||
|
///
|
||||||
|
/// In general this is not recommend over [`Graph::FourRegular`] but may be better for certain systems.
|
||||||
|
Line,
|
||||||
|
/// Quasi-4-Regular.
|
||||||
|
///
|
||||||
|
/// When this is selected two peers will be chosen from the outbound peers each epoch, each stem transaction
|
||||||
|
/// received will then be sent to one of these two peers. Transactions from the same node will always go to the
|
||||||
|
/// same peer.
|
||||||
|
#[default]
|
||||||
|
FourRegular,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The config used to initialize dandelion.
|
||||||
|
///
|
||||||
|
/// One notable missing item from the config is `Tbase` AKA the timeout parameter to prevent black hole
|
||||||
|
/// attacks. This is removed from the config for simplicity, `Tbase` is calculated using the formula provided
|
||||||
|
/// in the D++ paper:
|
||||||
|
///
|
||||||
|
/// `(-k*(k-1)*hop)/(2*log(1-ep))`
|
||||||
|
///
|
||||||
|
/// Where `k` is calculated from the fluff probability, `hop` is `time_between_hop` and `ep` is fixed at `0.1`.
|
||||||
|
///
|
||||||
|
#[derive(Debug, Clone, Copy)]
|
||||||
|
pub struct DandelionConfig {
|
||||||
|
/// The time it takes for a stem transaction to pass through a node, including network latency.
|
||||||
|
///
|
||||||
|
/// It's better to be safe and put a slightly higher value than lower.
|
||||||
|
pub time_between_hop: Duration,
|
||||||
|
/// The duration of an epoch.
|
||||||
|
pub epoch_duration: Duration,
|
||||||
|
/// `q` in the dandelion paper, this is the probability that a node will be in the fluff state for
|
||||||
|
/// a certain epoch.
|
||||||
|
///
|
||||||
|
/// The dandelion paper recommends to make this value small, but the smaller this value, the higher
|
||||||
|
/// the broadcast latency.
|
||||||
|
///
|
||||||
|
/// It is recommended for this value to be <= `0.2`, this value *MUST* be in range `0.0..=1.0`.
|
||||||
|
pub fluff_probability: f64,
|
||||||
|
/// The graph type.
|
||||||
|
pub graph: Graph,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DandelionConfig {
|
||||||
|
/// Returns the number of outbound peers to use to stem transactions.
|
||||||
|
///
|
||||||
|
/// This value depends on the [`Graph`] chosen.
|
||||||
|
pub fn number_of_stems(&self) -> usize {
|
||||||
|
match self.graph {
|
||||||
|
Graph::Line => 1,
|
||||||
|
Graph::FourRegular => 2,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the average embargo timeout, `Tbase` in the dandelion++ paper.
|
||||||
|
///
|
||||||
|
/// This is the average embargo timeout _only including this node_ with `k` nodes also putting an embargo timeout
|
||||||
|
/// using the exponential distribution, the average until one of them fluffs is `Tbase / k`.
|
||||||
|
pub fn average_embargo_timeout(&self) -> Duration {
|
||||||
|
// we set k equal to the expected stem length with this fluff probability.
|
||||||
|
let k = self.expected_stem_length();
|
||||||
|
let time_between_hop = self.time_between_hop.as_secs_f64();
|
||||||
|
|
||||||
|
Duration::from_secs_f64(
|
||||||
|
// (-k*(k-1)*hop)/(2*ln(1-ep))
|
||||||
|
((k.neg() * (k - 1.0) * time_between_hop)
|
||||||
|
/ EMBARGO_FULL_TRAVEL_PROBABILITY.ln().mul(2.0))
|
||||||
|
.ceil(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the expected length of a stem.
|
||||||
|
pub fn expected_stem_length(&self) -> f64 {
|
||||||
|
self.fluff_probability.recip()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use std::{
|
||||||
|
f64::consts::E,
|
||||||
|
ops::{Mul, Neg},
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
|
||||||
|
use proptest::{prop_assert, proptest};
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn monerod_average_embargo_timeout() {
|
||||||
|
let cfg = DandelionConfig {
|
||||||
|
time_between_hop: Duration::from_millis(175),
|
||||||
|
epoch_duration: Default::default(),
|
||||||
|
fluff_probability: 0.125,
|
||||||
|
graph: Default::default(),
|
||||||
|
};
|
||||||
|
|
||||||
|
assert_eq!(cfg.average_embargo_timeout(), Duration::from_secs(47));
|
||||||
|
}
|
||||||
|
|
||||||
|
proptest! {
|
||||||
|
#[test]
|
||||||
|
fn embargo_full_travel_probablity_correct(time_between_hop in 1_u64..1_000_000, fluff_probability in 0.000001..1.0) {
|
||||||
|
let cfg = DandelionConfig {
|
||||||
|
time_between_hop: Duration::from_millis(time_between_hop),
|
||||||
|
epoch_duration: Default::default(),
|
||||||
|
fluff_probability,
|
||||||
|
graph: Default::default(),
|
||||||
|
};
|
||||||
|
|
||||||
|
// assert that the `average_embargo_timeout` is high enough that the probability of `k` nodes
|
||||||
|
// not diffusing before expected diffusion is greater than or equal to `EMBARGO_FULL_TRAVEL_PROBABLY`
|
||||||
|
//
|
||||||
|
// using the formula from in appendix B.5
|
||||||
|
let k = cfg.expected_stem_length();
|
||||||
|
let time_between_hop = cfg.time_between_hop.as_secs_f64();
|
||||||
|
|
||||||
|
let average_embargo_timeout = cfg.average_embargo_timeout().as_secs_f64();
|
||||||
|
|
||||||
|
let probability =
|
||||||
|
E.powf((k.neg() * (k - 1.0) * time_between_hop) / average_embargo_timeout.mul(2.0));
|
||||||
|
|
||||||
|
prop_assert!(probability >= EMBARGO_FULL_TRAVEL_PROBABILITY, "probability = {probability}, average_embargo_timeout = {average_embargo_timeout}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
70
p2p/dandelion/src/lib.rs
Normal file
70
p2p/dandelion/src/lib.rs
Normal file
|
@ -0,0 +1,70 @@
|
||||||
|
//! # Dandelion Tower
|
||||||
|
//!
|
||||||
|
//! This crate implements [dandelion++](https://arxiv.org/pdf/1805.11060.pdf), using [`tower`].
|
||||||
|
//!
|
||||||
|
//! This crate provides 2 [`tower::Service`]s, a [`DandelionRouter`] and a [`DandelionPool`](pool::DandelionPool).
|
||||||
|
//! The router is pretty minimal and only handles the absolute necessary data to route transactions, whereas the
|
||||||
|
//! pool keeps track of all data necessary for dandelion++ but requires you to provide a backing tx-pool.
|
||||||
|
//!
|
||||||
|
//! This split was done not because the [`DandelionPool`](pool::DandelionPool) is unnecessary but because it is hard
|
||||||
|
//! to cover a wide range of projects when abstracting over the tx-pool. Not using the [`DandelionPool`](pool::DandelionPool)
|
||||||
|
//! requires you to implement part of the paper yourself.
|
||||||
|
//!
|
||||||
|
//! # Features
|
||||||
|
//!
|
||||||
|
//! This crate only has one feature `txpool` which enables [`DandelionPool`](pool::DandelionPool).
|
||||||
|
//!
|
||||||
|
//! # Needed Services
|
||||||
|
//!
|
||||||
|
//! To use this crate you need to provide a few types.
|
||||||
|
//!
|
||||||
|
//! ## Diffuse Service
|
||||||
|
//!
|
||||||
|
//! This service should implement diffusion, which is sending the transaction to every peer, with each peer
|
||||||
|
//! having a timer using the exponential distribution and batch sending all txs that were queued in that time.
|
||||||
|
//!
|
||||||
|
//! The diffuse service should have a request of [`DiffuseRequest`](traits::DiffuseRequest) and it's error
|
||||||
|
//! should be [`tower::BoxError`].
|
||||||
|
//!
|
||||||
|
//! ## Outbound Peer Discoverer
|
||||||
|
//!
|
||||||
|
//! The outbound peer [`Discover`](tower::discover::Discover) should provide a stream of randomly selected outbound
|
||||||
|
//! peers, these peers will then be used to route stem txs to.
|
||||||
|
//!
|
||||||
|
//! The peers will not be returned anywhere, so it is recommended to wrap them in some sort of drop guard that returns
|
||||||
|
//! them back to a peer set.
|
||||||
|
//!
|
||||||
|
//! ## Peer Service
|
||||||
|
//!
|
||||||
|
//! This service represents a connection to an individual peer, this should be returned from the Outbound Peer
|
||||||
|
//! Discover. This should immediately send the transaction to the peer when requested, i.e. it should _not_ set
|
||||||
|
//! a timer.
|
||||||
|
//!
|
||||||
|
//! The diffuse service should have a request of [`StemRequest`](traits::StemRequest) and it's error
|
||||||
|
//! should be [`tower::BoxError`].
|
||||||
|
//!
|
||||||
|
//! ## Backing Pool
|
||||||
|
//!
|
||||||
|
//! ([`DandelionPool`](pool::DandelionPool) only)
|
||||||
|
//!
|
||||||
|
//! This service is a backing tx-pool, in memory or on disk.
|
||||||
|
//! The backing pool should have a request of [`TxStoreRequest`](traits::TxStoreRequest) and a response of
|
||||||
|
//! [`TxStoreResponse`](traits::TxStoreResponse), with an error of [`tower::BoxError`].
|
||||||
|
//!
|
||||||
|
//! Users should keep a handle to the backing pool to request data from it, when requesting data you _must_
|
||||||
|
//! make sure you only look in the public pool if you are going to be giving data to peers, as stem transactions
|
||||||
|
//! must stay private.
|
||||||
|
//!
|
||||||
|
//! When removing data, for example because of a new block, you can remove from both pools provided it doesn't leak
|
||||||
|
//! any data about stem transactions. You will probably want to set up a task that monitors the tx pool for stuck transactions,
|
||||||
|
//! transactions that slipped in just as one was removed etc, this crate does not handle that.
|
||||||
|
mod config;
|
||||||
|
#[cfg(feature = "txpool")]
|
||||||
|
pub mod pool;
|
||||||
|
mod router;
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests;
|
||||||
|
pub mod traits;
|
||||||
|
|
||||||
|
pub use config::*;
|
||||||
|
pub use router::*;
|
510
p2p/dandelion/src/pool.rs
Normal file
510
p2p/dandelion/src/pool.rs
Normal file
|
@ -0,0 +1,510 @@
|
||||||
|
//! # Dandelion++ Pool
|
||||||
|
//!
|
||||||
|
//! This module contains [`DandelionPool`] which is a thin wrapper around a backing transaction store,
|
||||||
|
//! which fully implements the dandelion++ protocol.
|
||||||
|
//!
|
||||||
|
//! ### How To Get Txs From [`DandelionPool`].
|
||||||
|
//!
|
||||||
|
//! [`DandelionPool`] does not provide a full tx-pool API. You cannot retrieve transactions from it or
|
||||||
|
//! check what transactions are in it, to do this you must keep a handle to the backing transaction store
|
||||||
|
//! yourself.
|
||||||
|
//!
|
||||||
|
//! The reason for this is, the [`DandelionPool`] will only itself be passing these requests onto the backing
|
||||||
|
//! pool, so it makes sense to remove the "middle man".
|
||||||
|
//!
|
||||||
|
//! ### Keep Stem Transactions Hidden
|
||||||
|
//!
|
||||||
|
//! When using your handle to the backing store it must be remembered to keep transactions in the stem pool hidden.
|
||||||
|
//! So handle any requests to the tx-pool like the stem side of the pool does not exist.
|
||||||
|
//!
|
||||||
|
use std::{
|
||||||
|
collections::{HashMap, HashSet},
|
||||||
|
future::Future,
|
||||||
|
hash::Hash,
|
||||||
|
marker::PhantomData,
|
||||||
|
pin::Pin,
|
||||||
|
task::{Context, Poll},
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
|
||||||
|
use futures::{FutureExt, StreamExt};
|
||||||
|
use rand::prelude::*;
|
||||||
|
use rand_distr::Exp;
|
||||||
|
use tokio::{
|
||||||
|
sync::{mpsc, oneshot},
|
||||||
|
task::JoinSet,
|
||||||
|
};
|
||||||
|
use tokio_util::{sync::PollSender, time::DelayQueue};
|
||||||
|
use tower::{Service, ServiceExt};
|
||||||
|
use tracing::Instrument;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
traits::{TxStoreRequest, TxStoreResponse},
|
||||||
|
DandelionConfig, DandelionRouteReq, DandelionRouterError, State, TxState,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Start the [`DandelionPool`].
|
||||||
|
///
|
||||||
|
/// This function spawns the [`DandelionPool`] and returns [`DandelionPoolService`] which can be used to send
|
||||||
|
/// requests to the pool.
|
||||||
|
///
|
||||||
|
/// ### Args
|
||||||
|
///
|
||||||
|
/// - `buffer_size` is the size of the channel's buffer between the [`DandelionPoolService`] and [`DandelionPool`].
|
||||||
|
/// - `dandelion_router` is the router service, kept generic instead of [`DandelionRouter`](crate::DandelionRouter) to allow
|
||||||
|
/// user to customise routing functionality.
|
||||||
|
/// - `backing_pool` is the backing transaction storage service
|
||||||
|
/// - `config` is [`DandelionConfig`].
|
||||||
|
pub fn start_dandelion_pool<P, R, Tx, TxID, PID>(
|
||||||
|
buffer_size: usize,
|
||||||
|
dandelion_router: R,
|
||||||
|
backing_pool: P,
|
||||||
|
config: DandelionConfig,
|
||||||
|
) -> DandelionPoolService<Tx, TxID, PID>
|
||||||
|
where
|
||||||
|
Tx: Clone + Send + 'static,
|
||||||
|
TxID: Hash + Eq + Clone + Send + 'static,
|
||||||
|
PID: Hash + Eq + Clone + Send + 'static,
|
||||||
|
P: Service<
|
||||||
|
TxStoreRequest<Tx, TxID>,
|
||||||
|
Response = TxStoreResponse<Tx, TxID>,
|
||||||
|
Error = tower::BoxError,
|
||||||
|
> + Send
|
||||||
|
+ 'static,
|
||||||
|
P::Future: Send + 'static,
|
||||||
|
R: Service<DandelionRouteReq<Tx, PID>, Response = State, Error = DandelionRouterError>
|
||||||
|
+ Send
|
||||||
|
+ 'static,
|
||||||
|
R::Future: Send + 'static,
|
||||||
|
{
|
||||||
|
let (tx, rx) = mpsc::channel(buffer_size);
|
||||||
|
|
||||||
|
let pool = DandelionPool {
|
||||||
|
dandelion_router,
|
||||||
|
backing_pool,
|
||||||
|
routing_set: JoinSet::new(),
|
||||||
|
stem_origins: HashMap::new(),
|
||||||
|
embargo_timers: DelayQueue::new(),
|
||||||
|
embargo_dist: Exp::new(1.0 / config.average_embargo_timeout().as_secs_f64()).unwrap(),
|
||||||
|
config,
|
||||||
|
_tx: PhantomData,
|
||||||
|
};
|
||||||
|
|
||||||
|
let span = tracing::debug_span!("dandelion_pool");
|
||||||
|
|
||||||
|
tokio::spawn(pool.run(rx).instrument(span));
|
||||||
|
|
||||||
|
DandelionPoolService {
|
||||||
|
tx: PollSender::new(tx),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Copy, Clone, Debug, thiserror::Error)]
|
||||||
|
#[error("The dandelion pool was shutdown")]
|
||||||
|
pub struct DandelionPoolShutDown;
|
||||||
|
|
||||||
|
/// An incoming transaction for the [`DandelionPool`] to handle.
|
||||||
|
///
|
||||||
|
/// Users may notice there is no way to check if the dandelion-pool wants a tx according to an inventory message like seen
|
||||||
|
/// in Bitcoin, only having a request for a full tx. Users should look in the *public* backing pool to handle inv messages,
|
||||||
|
/// and request txs even if they are in the stem pool.
|
||||||
|
pub struct IncomingTx<Tx, TxID, PID> {
|
||||||
|
/// The transaction.
|
||||||
|
///
|
||||||
|
/// It is recommended to put this in an [`Arc`](std::sync::Arc) as it needs to be cloned to send to the backing
|
||||||
|
/// tx pool and [`DandelionRouter`](crate::DandelionRouter)
|
||||||
|
pub tx: Tx,
|
||||||
|
/// The transaction ID.
|
||||||
|
pub tx_id: TxID,
|
||||||
|
/// The routing state of this transaction.
|
||||||
|
pub tx_state: TxState<PID>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The dandelion tx pool service.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct DandelionPoolService<Tx, TxID, PID> {
|
||||||
|
/// The channel to [`DandelionPool`].
|
||||||
|
tx: PollSender<(IncomingTx<Tx, TxID, PID>, oneshot::Sender<()>)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Tx, TxID, PID> Service<IncomingTx<Tx, TxID, PID>> for DandelionPoolService<Tx, TxID, PID>
|
||||||
|
where
|
||||||
|
Tx: Clone + Send,
|
||||||
|
TxID: Hash + Eq + Clone + Send + 'static,
|
||||||
|
PID: Hash + Eq + Clone + Send + 'static,
|
||||||
|
{
|
||||||
|
type Response = ();
|
||||||
|
type Error = DandelionPoolShutDown;
|
||||||
|
type Future =
|
||||||
|
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||||
|
|
||||||
|
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||||
|
self.tx.poll_reserve(cx).map_err(|_| DandelionPoolShutDown)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn call(&mut self, req: IncomingTx<Tx, TxID, PID>) -> Self::Future {
|
||||||
|
// although the channel isn't sending anything we want to wait for the request to be handled before continuing.
|
||||||
|
let (tx, rx) = oneshot::channel();
|
||||||
|
|
||||||
|
let res = self
|
||||||
|
.tx
|
||||||
|
.send_item((req, tx))
|
||||||
|
.map_err(|_| DandelionPoolShutDown);
|
||||||
|
|
||||||
|
async move {
|
||||||
|
res?;
|
||||||
|
rx.await.expect("Oneshot dropped before response!");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
.boxed()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The dandelion++ tx pool.
|
||||||
|
///
|
||||||
|
/// See the [module docs](self) for more.
|
||||||
|
pub struct DandelionPool<P, R, Tx, TxID, PID> {
|
||||||
|
/// The dandelion++ router
|
||||||
|
dandelion_router: R,
|
||||||
|
/// The backing tx storage.
|
||||||
|
backing_pool: P,
|
||||||
|
/// The set of tasks that are running the future returned from `dandelion_router`.
|
||||||
|
routing_set: JoinSet<(TxID, Result<State, TxState<PID>>)>,
|
||||||
|
|
||||||
|
/// The origin of stem transactions.
|
||||||
|
stem_origins: HashMap<TxID, HashSet<PID>>,
|
||||||
|
|
||||||
|
/// Current stem pool embargo timers.
|
||||||
|
embargo_timers: DelayQueue<TxID>,
|
||||||
|
/// The distrobution to sample to get embargo timers.
|
||||||
|
embargo_dist: Exp<f64>,
|
||||||
|
|
||||||
|
/// The d++ config.
|
||||||
|
config: DandelionConfig,
|
||||||
|
|
||||||
|
_tx: PhantomData<Tx>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<P, R, Tx, TxID, PID> DandelionPool<P, R, Tx, TxID, PID>
|
||||||
|
where
|
||||||
|
Tx: Clone + Send,
|
||||||
|
TxID: Hash + Eq + Clone + Send + 'static,
|
||||||
|
PID: Hash + Eq + Clone + Send + 'static,
|
||||||
|
P: Service<
|
||||||
|
TxStoreRequest<Tx, TxID>,
|
||||||
|
Response = TxStoreResponse<Tx, TxID>,
|
||||||
|
Error = tower::BoxError,
|
||||||
|
>,
|
||||||
|
P::Future: Send + 'static,
|
||||||
|
R: Service<DandelionRouteReq<Tx, PID>, Response = State, Error = DandelionRouterError>,
|
||||||
|
R::Future: Send + 'static,
|
||||||
|
{
|
||||||
|
/// Stores the tx in the backing pools stem pool, setting the embargo timer, stem origin and steming the tx.
|
||||||
|
async fn store_tx_and_stem(
|
||||||
|
&mut self,
|
||||||
|
tx: Tx,
|
||||||
|
tx_id: TxID,
|
||||||
|
from: Option<PID>,
|
||||||
|
) -> Result<(), tower::BoxError> {
|
||||||
|
self.backing_pool
|
||||||
|
.ready()
|
||||||
|
.await?
|
||||||
|
.call(TxStoreRequest::Store(
|
||||||
|
tx.clone(),
|
||||||
|
tx_id.clone(),
|
||||||
|
State::Stem,
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let embargo_timer = self.embargo_dist.sample(&mut thread_rng());
|
||||||
|
tracing::debug!(
|
||||||
|
"Setting embargo timer for stem tx: {} seconds.",
|
||||||
|
embargo_timer
|
||||||
|
);
|
||||||
|
self.embargo_timers
|
||||||
|
.insert(tx_id.clone(), Duration::from_secs_f64(embargo_timer));
|
||||||
|
|
||||||
|
self.stem_tx(tx, tx_id, from).await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Stems the tx, setting the stem origin, if it wasn't already set.
|
||||||
|
///
|
||||||
|
/// This function does not add the tx to the backing pool.
|
||||||
|
async fn stem_tx(
|
||||||
|
&mut self,
|
||||||
|
tx: Tx,
|
||||||
|
tx_id: TxID,
|
||||||
|
from: Option<PID>,
|
||||||
|
) -> Result<(), tower::BoxError> {
|
||||||
|
if let Some(peer) = &from {
|
||||||
|
self.stem_origins
|
||||||
|
.entry(tx_id.clone())
|
||||||
|
.or_default()
|
||||||
|
.insert(peer.clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
let state = from
|
||||||
|
.map(|from| TxState::Stem { from })
|
||||||
|
.unwrap_or(TxState::Local);
|
||||||
|
|
||||||
|
let fut = self
|
||||||
|
.dandelion_router
|
||||||
|
.ready()
|
||||||
|
.await?
|
||||||
|
.call(DandelionRouteReq {
|
||||||
|
tx,
|
||||||
|
state: state.clone(),
|
||||||
|
});
|
||||||
|
|
||||||
|
self.routing_set
|
||||||
|
.spawn(fut.map(|res| (tx_id, res.map_err(|_| state))));
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Stores the tx in the backing pool and fluffs the tx, removing the stem data for this tx.
|
||||||
|
async fn store_and_fluff_tx(&mut self, tx: Tx, tx_id: TxID) -> Result<(), tower::BoxError> {
|
||||||
|
// fluffs the tx first to prevent timing attacks where we could fluff at different average times
|
||||||
|
// depending on if the tx was in the stem pool already or not.
|
||||||
|
// Massively overkill but this is a minimal change.
|
||||||
|
self.fluff_tx(tx.clone(), tx_id.clone()).await?;
|
||||||
|
|
||||||
|
// Remove the tx from the maps used during the stem phase.
|
||||||
|
self.stem_origins.remove(&tx_id);
|
||||||
|
|
||||||
|
self.backing_pool
|
||||||
|
.ready()
|
||||||
|
.await?
|
||||||
|
.call(TxStoreRequest::Store(tx, tx_id, State::Fluff))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// The key for this is *Not* the tx_id, it is given on insert, so just keep the timer in the
|
||||||
|
// map. These timers should be relatively short, so it shouldn't be a problem.
|
||||||
|
//self.embargo_timers.try_remove(&tx_id);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fluffs a tx, does not add the tx to the tx pool.
|
||||||
|
async fn fluff_tx(&mut self, tx: Tx, tx_id: TxID) -> Result<(), tower::BoxError> {
|
||||||
|
let fut = self
|
||||||
|
.dandelion_router
|
||||||
|
.ready()
|
||||||
|
.await?
|
||||||
|
.call(DandelionRouteReq {
|
||||||
|
tx,
|
||||||
|
state: TxState::Fluff,
|
||||||
|
});
|
||||||
|
|
||||||
|
self.routing_set
|
||||||
|
.spawn(fut.map(|res| (tx_id, res.map_err(|_| TxState::Fluff))));
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Function to handle an incoming [`DandelionPoolRequest::IncomingTx`].
|
||||||
|
async fn handle_incoming_tx(
|
||||||
|
&mut self,
|
||||||
|
tx: Tx,
|
||||||
|
tx_state: TxState<PID>,
|
||||||
|
tx_id: TxID,
|
||||||
|
) -> Result<(), tower::BoxError> {
|
||||||
|
let TxStoreResponse::Contains(have_tx) = self
|
||||||
|
.backing_pool
|
||||||
|
.ready()
|
||||||
|
.await?
|
||||||
|
.call(TxStoreRequest::Contains(tx_id.clone()))
|
||||||
|
.await?
|
||||||
|
else {
|
||||||
|
panic!("Backing tx pool responded with wrong response for request.");
|
||||||
|
};
|
||||||
|
// If we have already fluffed this tx then we don't need to do anything.
|
||||||
|
if have_tx == Some(State::Fluff) {
|
||||||
|
tracing::debug!("Already fluffed incoming tx, ignoring.");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
match tx_state {
|
||||||
|
TxState::Stem { from } => {
|
||||||
|
if self
|
||||||
|
.stem_origins
|
||||||
|
.get(&tx_id)
|
||||||
|
.is_some_and(|peers| peers.contains(&from))
|
||||||
|
{
|
||||||
|
tracing::debug!("Received stem tx twice from same peer, fluffing it");
|
||||||
|
// The same peer sent us a tx twice, fluff it.
|
||||||
|
self.promote_and_fluff_tx(tx_id).await
|
||||||
|
} else {
|
||||||
|
// This could be a new tx or it could have already been stemed, but we still stem it again
|
||||||
|
// unless the same peer sends us a tx twice.
|
||||||
|
tracing::debug!("Steming incoming tx");
|
||||||
|
self.store_tx_and_stem(tx, tx_id, Some(from)).await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
TxState::Fluff => {
|
||||||
|
tracing::debug!("Fluffing incoming tx");
|
||||||
|
self.store_and_fluff_tx(tx, tx_id).await
|
||||||
|
}
|
||||||
|
TxState::Local => {
|
||||||
|
// If we have already stemed this tx then nothing to do.
|
||||||
|
if have_tx.is_some() {
|
||||||
|
tracing::debug!("Received a local tx that we already have, skipping");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
tracing::debug!("Steming local transaction");
|
||||||
|
self.store_tx_and_stem(tx, tx_id, None).await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Promotes a tx to the clear pool.
|
||||||
|
async fn promote_tx(&mut self, tx_id: TxID) -> Result<(), tower::BoxError> {
|
||||||
|
// Remove the tx from the maps used during the stem phase.
|
||||||
|
self.stem_origins.remove(&tx_id);
|
||||||
|
|
||||||
|
// The key for this is *Not* the tx_id, it is given on insert, so just keep the timer in the
|
||||||
|
// map. These timers should be relatively short, so it shouldn't be a problem.
|
||||||
|
//self.embargo_timers.try_remove(&tx_id);
|
||||||
|
|
||||||
|
self.backing_pool
|
||||||
|
.ready()
|
||||||
|
.await?
|
||||||
|
.call(TxStoreRequest::Promote(tx_id))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Promotes a tx to the public fluff pool and fluffs the tx.
|
||||||
|
async fn promote_and_fluff_tx(&mut self, tx_id: TxID) -> Result<(), tower::BoxError> {
|
||||||
|
tracing::debug!("Promoting transaction to public pool and fluffing it.");
|
||||||
|
|
||||||
|
let TxStoreResponse::Transaction(tx) = self
|
||||||
|
.backing_pool
|
||||||
|
.ready()
|
||||||
|
.await?
|
||||||
|
.call(TxStoreRequest::Get(tx_id.clone()))
|
||||||
|
.await?
|
||||||
|
else {
|
||||||
|
panic!("Backing tx pool responded with wrong response for request.");
|
||||||
|
};
|
||||||
|
|
||||||
|
let Some((tx, state)) = tx else {
|
||||||
|
tracing::debug!("Could not find tx, skipping.");
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
|
||||||
|
if state == State::Fluff {
|
||||||
|
tracing::debug!("Transaction already fluffed, skipping.");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
self.promote_tx(tx_id.clone()).await?;
|
||||||
|
self.fluff_tx(tx, tx_id).await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a tx stored in the fluff _OR_ stem pool.
|
||||||
|
async fn get_tx_from_pool(&mut self, tx_id: TxID) -> Result<Option<Tx>, tower::BoxError> {
|
||||||
|
let TxStoreResponse::Transaction(tx) = self
|
||||||
|
.backing_pool
|
||||||
|
.ready()
|
||||||
|
.await?
|
||||||
|
.call(TxStoreRequest::Get(tx_id))
|
||||||
|
.await?
|
||||||
|
else {
|
||||||
|
panic!("Backing tx pool responded with wrong response for request.");
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(tx.map(|tx| tx.0))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Starts the [`DandelionPool`].
|
||||||
|
async fn run(
|
||||||
|
mut self,
|
||||||
|
mut rx: mpsc::Receiver<(IncomingTx<Tx, TxID, PID>, oneshot::Sender<()>)>,
|
||||||
|
) {
|
||||||
|
tracing::debug!("Starting dandelion++ tx-pool, config: {:?}", self.config);
|
||||||
|
|
||||||
|
// On start up we just fluff all txs left in the stem pool.
|
||||||
|
let Ok(TxStoreResponse::IDs(ids)) = (&mut self.backing_pool)
|
||||||
|
.oneshot(TxStoreRequest::IDsInStemPool)
|
||||||
|
.await
|
||||||
|
else {
|
||||||
|
tracing::error!("Failed to get transactions in stem pool.");
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
|
||||||
|
tracing::debug!(
|
||||||
|
"Fluffing {} txs that are currently in the stem pool",
|
||||||
|
ids.len()
|
||||||
|
);
|
||||||
|
|
||||||
|
for id in ids {
|
||||||
|
if let Err(e) = self.promote_and_fluff_tx(id).await {
|
||||||
|
tracing::error!("Failed to fluff tx in the stem pool at start up, {e}.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
loop {
|
||||||
|
tracing::trace!("Waiting for next event.");
|
||||||
|
tokio::select! {
|
||||||
|
// biased to handle current txs before routing new ones.
|
||||||
|
biased;
|
||||||
|
Some(fired) = self.embargo_timers.next() => {
|
||||||
|
tracing::debug!("Embargo timer fired, did not see stem tx in time.");
|
||||||
|
|
||||||
|
let tx_id = fired.into_inner();
|
||||||
|
if let Err(e) = self.promote_and_fluff_tx(tx_id).await {
|
||||||
|
tracing::error!("Error handling fired embargo timer: {e}");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Some(Ok((tx_id, res))) = self.routing_set.join_next() => {
|
||||||
|
tracing::trace!("Received d++ routing result.");
|
||||||
|
|
||||||
|
let res = match res {
|
||||||
|
Ok(State::Fluff) => {
|
||||||
|
tracing::debug!("Transaction was fluffed upgrading it to the public pool.");
|
||||||
|
self.promote_tx(tx_id).await
|
||||||
|
}
|
||||||
|
Err(tx_state) => {
|
||||||
|
tracing::debug!("Error routing transaction, trying again.");
|
||||||
|
|
||||||
|
match self.get_tx_from_pool(tx_id.clone()).await {
|
||||||
|
Ok(Some(tx)) => match tx_state {
|
||||||
|
TxState::Fluff => self.fluff_tx(tx, tx_id).await,
|
||||||
|
TxState::Stem { from } => self.stem_tx(tx, tx_id, Some(from)).await,
|
||||||
|
TxState::Local => self.stem_tx(tx, tx_id, None).await,
|
||||||
|
}
|
||||||
|
Err(e) => Err(e),
|
||||||
|
_ => continue,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(State::Stem) => continue,
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Err(e) = res {
|
||||||
|
tracing::error!("Error handling transaction routing return: {e}");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
req = rx.recv() => {
|
||||||
|
tracing::debug!("Received new tx to route.");
|
||||||
|
|
||||||
|
let Some((IncomingTx { tx, tx_state, tx_id }, res_tx)) = req else {
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Err(e) = self.handle_incoming_tx(tx, tx_state, tx_id).await {
|
||||||
|
let _ = res_tx.send(());
|
||||||
|
|
||||||
|
tracing::error!("Error handling transaction in dandelion pool: {e}");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
let _ = res_tx.send(());
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
348
p2p/dandelion/src/router.rs
Normal file
348
p2p/dandelion/src/router.rs
Normal file
|
@ -0,0 +1,348 @@
|
||||||
|
//! # Dandelion++ Router
|
||||||
|
//!
|
||||||
|
//! This module contains [`DandelionRouter`] which is a [`Service`]. It that handles keeping the
|
||||||
|
//! current dandelion++ [`State`] and deciding where to send transactions based on their [`TxState`].
|
||||||
|
//!
|
||||||
|
//! ### What The Router Does Not Do
|
||||||
|
//!
|
||||||
|
//! It does not handle anything to do with keeping transactions long term, i.e. embargo timers and handling
|
||||||
|
//! loops in the stem. It is up to implementers to do this if they decide not top use [`DandelionPool`](crate::pool::DandelionPool)
|
||||||
|
//!
|
||||||
|
use std::{
|
||||||
|
collections::HashMap,
|
||||||
|
future::Future,
|
||||||
|
hash::Hash,
|
||||||
|
marker::PhantomData,
|
||||||
|
pin::Pin,
|
||||||
|
task::{ready, Context, Poll},
|
||||||
|
time::Instant,
|
||||||
|
};
|
||||||
|
|
||||||
|
use futures::TryFutureExt;
|
||||||
|
use rand::{distributions::Bernoulli, prelude::*, thread_rng};
|
||||||
|
use tower::{
|
||||||
|
discover::{Change, Discover},
|
||||||
|
Service,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
traits::{DiffuseRequest, StemRequest},
|
||||||
|
DandelionConfig,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// An error returned from the [`DandelionRouter`]
|
||||||
|
#[derive(thiserror::Error, Debug)]
|
||||||
|
pub enum DandelionRouterError {
|
||||||
|
/// This error is probably recoverable so the request should be retried.
|
||||||
|
#[error("Peer chosen to route stem txs to had an err: {0}.")]
|
||||||
|
PeerError(tower::BoxError),
|
||||||
|
/// The broadcast service returned an error.
|
||||||
|
#[error("Broadcast service returned an err: {0}.")]
|
||||||
|
BroadcastError(tower::BoxError),
|
||||||
|
/// The outbound peer discoverer returned an error, this is critical.
|
||||||
|
#[error("The outbound peer discoverer returned an err: {0}.")]
|
||||||
|
OutboundPeerDiscoverError(tower::BoxError),
|
||||||
|
/// The outbound peer discoverer returned [`None`].
|
||||||
|
#[error("The outbound peer discoverer exited.")]
|
||||||
|
OutboundPeerDiscoverExited,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The dandelion++ state.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum State {
    /// Fluff: stem transactions are diffused to every connected peer.
    Fluff,
    /// Stem: stem transactions are relayed to a single outbound peer.
    Stem,
}
|
||||||
|
|
||||||
|
/// The routing state of a transaction.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum TxState<ID> {
    /// The transaction should be diffused (fluff phase).
    Fluff,
    /// The transaction should be stemmed onward.
    Stem {
        /// The peer who sent us this transaction's ID.
        from: ID,
    },
    /// Local - the transaction originated from our node.
    Local,
}
|
||||||
|
|
||||||
|
/// A request to route a transaction.
|
||||||
|
pub struct DandelionRouteReq<Tx, ID> {
|
||||||
|
/// The transaction.
|
||||||
|
pub tx: Tx,
|
||||||
|
/// The transaction state.
|
||||||
|
pub state: TxState<ID>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The dandelion router service.
|
||||||
|
pub struct DandelionRouter<P, B, ID, S, Tx> {
|
||||||
|
// pub(crate) is for tests
|
||||||
|
/// A [`Discover`] where we can get outbound peers from.
|
||||||
|
outbound_peer_discover: Pin<Box<P>>,
|
||||||
|
/// A [`Service`] which handle broadcasting (diffusing) transactions.
|
||||||
|
broadcast_svc: B,
|
||||||
|
|
||||||
|
/// The current state.
|
||||||
|
current_state: State,
|
||||||
|
/// The time at which this epoch started.
|
||||||
|
epoch_start: Instant,
|
||||||
|
|
||||||
|
/// The stem our local transactions will be sent to.
|
||||||
|
local_route: Option<ID>,
|
||||||
|
/// A [`HashMap`] linking peer's IDs to IDs in `stem_peers`.
|
||||||
|
stem_routes: HashMap<ID, ID>,
|
||||||
|
/// Peers we are using for stemming.
|
||||||
|
///
|
||||||
|
/// This will contain peers, even in [`State::Fluff`] to allow us to stem [`TxState::Local`]
|
||||||
|
/// transactions.
|
||||||
|
pub(crate) stem_peers: HashMap<ID, S>,
|
||||||
|
|
||||||
|
/// The distribution to sample to get the [`State`], true is [`State::Fluff`].
|
||||||
|
state_dist: Bernoulli,
|
||||||
|
|
||||||
|
/// The config.
|
||||||
|
config: DandelionConfig,
|
||||||
|
|
||||||
|
/// The routers tracing span.
|
||||||
|
span: tracing::Span,
|
||||||
|
|
||||||
|
_tx: PhantomData<Tx>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Tx, ID, P, B, S> DandelionRouter<P, B, ID, S, Tx>
|
||||||
|
where
|
||||||
|
ID: Hash + Eq + Clone,
|
||||||
|
P: Discover<Key = ID, Service = S, Error = tower::BoxError>,
|
||||||
|
B: Service<DiffuseRequest<Tx>, Error = tower::BoxError>,
|
||||||
|
S: Service<StemRequest<Tx>, Error = tower::BoxError>,
|
||||||
|
{
|
||||||
|
/// Creates a new [`DandelionRouter`], with the provided services and config.
|
||||||
|
///
|
||||||
|
/// # Panics
|
||||||
|
/// This function panics if [`DandelionConfig::fluff_probability`] is not `0.0..=1.0`.
|
||||||
|
pub fn new(broadcast_svc: B, outbound_peer_discover: P, config: DandelionConfig) -> Self {
|
||||||
|
// get the current state
|
||||||
|
let state_dist = Bernoulli::new(config.fluff_probability)
|
||||||
|
.expect("Fluff probability was not between 0 and 1");
|
||||||
|
|
||||||
|
let current_state = if state_dist.sample(&mut thread_rng()) {
|
||||||
|
State::Fluff
|
||||||
|
} else {
|
||||||
|
State::Stem
|
||||||
|
};
|
||||||
|
|
||||||
|
DandelionRouter {
|
||||||
|
outbound_peer_discover: Box::pin(outbound_peer_discover),
|
||||||
|
broadcast_svc,
|
||||||
|
current_state,
|
||||||
|
epoch_start: Instant::now(),
|
||||||
|
local_route: None,
|
||||||
|
stem_routes: HashMap::new(),
|
||||||
|
stem_peers: HashMap::new(),
|
||||||
|
state_dist,
|
||||||
|
config,
|
||||||
|
span: tracing::debug_span!("dandelion_router", state = ?current_state),
|
||||||
|
_tx: PhantomData,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This function gets the number of outbound peers from the [`Discover`] required for the selected [`Graph`](crate::Graph).
|
||||||
|
fn poll_prepare_graph(
|
||||||
|
&mut self,
|
||||||
|
cx: &mut Context<'_>,
|
||||||
|
) -> Poll<Result<(), DandelionRouterError>> {
|
||||||
|
let peers_needed = match self.current_state {
|
||||||
|
State::Stem => self.config.number_of_stems(),
|
||||||
|
// When in the fluff state we only need one peer, the one for our txs.
|
||||||
|
State::Fluff => 1,
|
||||||
|
};
|
||||||
|
|
||||||
|
while self.stem_peers.len() < peers_needed {
|
||||||
|
match ready!(self
|
||||||
|
.outbound_peer_discover
|
||||||
|
.as_mut()
|
||||||
|
.poll_discover(cx)
|
||||||
|
.map_err(DandelionRouterError::OutboundPeerDiscoverError))
|
||||||
|
.ok_or(DandelionRouterError::OutboundPeerDiscoverExited)??
|
||||||
|
{
|
||||||
|
Change::Insert(key, svc) => {
|
||||||
|
self.stem_peers.insert(key, svc);
|
||||||
|
}
|
||||||
|
Change::Remove(key) => {
|
||||||
|
self.stem_peers.remove(&key);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Poll::Ready(Ok(()))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn fluff_tx(&mut self, tx: Tx) -> B::Future {
|
||||||
|
self.broadcast_svc.call(DiffuseRequest(tx))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn stem_tx(&mut self, tx: Tx, from: ID) -> S::Future {
|
||||||
|
loop {
|
||||||
|
let stem_route = self.stem_routes.entry(from.clone()).or_insert_with(|| {
|
||||||
|
self.stem_peers
|
||||||
|
.iter()
|
||||||
|
.choose(&mut thread_rng())
|
||||||
|
.expect("No peers in `stem_peers` was poll_ready called?")
|
||||||
|
.0
|
||||||
|
.clone()
|
||||||
|
});
|
||||||
|
|
||||||
|
let Some(peer) = self.stem_peers.get_mut(stem_route) else {
|
||||||
|
self.stem_routes.remove(&from);
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
|
||||||
|
return peer.call(StemRequest(tx));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn stem_local_tx(&mut self, tx: Tx) -> S::Future {
|
||||||
|
loop {
|
||||||
|
let stem_route = self.local_route.get_or_insert_with(|| {
|
||||||
|
self.stem_peers
|
||||||
|
.iter()
|
||||||
|
.choose(&mut thread_rng())
|
||||||
|
.expect("No peers in `stem_peers` was poll_ready called?")
|
||||||
|
.0
|
||||||
|
.clone()
|
||||||
|
});
|
||||||
|
|
||||||
|
let Some(peer) = self.stem_peers.get_mut(stem_route) else {
|
||||||
|
self.local_route.take();
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
|
||||||
|
return peer.call(StemRequest(tx));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
## Generics ##
|
||||||
|
|
||||||
|
Tx: The tx type
|
||||||
|
ID: Peer Id type - unique identifier for nodes.
|
||||||
|
P: Peer Set discover - where we can get outbound peers from
|
||||||
|
B: Broadcast service - where we send txs to get diffused.
|
||||||
|
S: The Peer service - handles routing messages to a single node.
|
||||||
|
*/
|
||||||
|
impl<Tx, ID, P, B, S> Service<DandelionRouteReq<Tx, ID>> for DandelionRouter<P, B, ID, S, Tx>
|
||||||
|
where
|
||||||
|
ID: Hash + Eq + Clone,
|
||||||
|
P: Discover<Key = ID, Service = S, Error = tower::BoxError>,
|
||||||
|
B: Service<DiffuseRequest<Tx>, Error = tower::BoxError>,
|
||||||
|
B::Future: Send + 'static,
|
||||||
|
S: Service<StemRequest<Tx>, Error = tower::BoxError>,
|
||||||
|
S::Future: Send + 'static,
|
||||||
|
{
|
||||||
|
type Response = State;
|
||||||
|
type Error = DandelionRouterError;
|
||||||
|
type Future =
|
||||||
|
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||||
|
|
||||||
|
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||||
|
if self.epoch_start.elapsed() > self.config.epoch_duration {
|
||||||
|
// clear all the stem routing data.
|
||||||
|
self.stem_peers.clear();
|
||||||
|
self.stem_routes.clear();
|
||||||
|
self.local_route.take();
|
||||||
|
|
||||||
|
self.current_state = if self.state_dist.sample(&mut thread_rng()) {
|
||||||
|
State::Fluff
|
||||||
|
} else {
|
||||||
|
State::Stem
|
||||||
|
};
|
||||||
|
|
||||||
|
self.span
|
||||||
|
.record("state", format!("{:?}", self.current_state));
|
||||||
|
tracing::debug!(parent: &self.span, "Starting new d++ epoch",);
|
||||||
|
|
||||||
|
self.epoch_start = Instant::now();
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut peers_pending = false;
|
||||||
|
|
||||||
|
let span = &self.span;
|
||||||
|
|
||||||
|
self.stem_peers
|
||||||
|
.retain(|_, peer_svc| match peer_svc.poll_ready(cx) {
|
||||||
|
Poll::Ready(res) => res
|
||||||
|
.inspect_err(|e| {
|
||||||
|
tracing::debug!(
|
||||||
|
parent: span,
|
||||||
|
"Peer returned an error on `poll_ready`: {e}, removing from router.",
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.is_ok(),
|
||||||
|
Poll::Pending => {
|
||||||
|
// Pending peers should be kept - they have not errored yet.
|
||||||
|
peers_pending = true;
|
||||||
|
true
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
if peers_pending {
|
||||||
|
return Poll::Pending;
|
||||||
|
}
|
||||||
|
|
||||||
|
// now we have removed the failed peers check if we still have enough for the graph chosen.
|
||||||
|
ready!(self.poll_prepare_graph(cx)?);
|
||||||
|
|
||||||
|
ready!(self
|
||||||
|
.broadcast_svc
|
||||||
|
.poll_ready(cx)
|
||||||
|
.map_err(DandelionRouterError::BroadcastError)?);
|
||||||
|
|
||||||
|
Poll::Ready(Ok(()))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn call(&mut self, req: DandelionRouteReq<Tx, ID>) -> Self::Future {
|
||||||
|
tracing::trace!(parent: &self.span, "Handling route request.");
|
||||||
|
|
||||||
|
match req.state {
|
||||||
|
TxState::Fluff => Box::pin(
|
||||||
|
self.fluff_tx(req.tx)
|
||||||
|
.map_ok(|_| State::Fluff)
|
||||||
|
.map_err(DandelionRouterError::BroadcastError),
|
||||||
|
),
|
||||||
|
TxState::Stem { from } => match self.current_state {
|
||||||
|
State::Fluff => {
|
||||||
|
tracing::debug!(parent: &self.span, "Fluffing stem tx.");
|
||||||
|
|
||||||
|
Box::pin(
|
||||||
|
self.fluff_tx(req.tx)
|
||||||
|
.map_ok(|_| State::Fluff)
|
||||||
|
.map_err(DandelionRouterError::BroadcastError),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
State::Stem => {
|
||||||
|
tracing::trace!(parent: &self.span, "Steming transaction");
|
||||||
|
|
||||||
|
Box::pin(
|
||||||
|
self.stem_tx(req.tx, from)
|
||||||
|
.map_ok(|_| State::Stem)
|
||||||
|
.map_err(DandelionRouterError::PeerError),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
TxState::Local => {
|
||||||
|
tracing::debug!(parent: &self.span, "Steming local tx.");
|
||||||
|
|
||||||
|
Box::pin(
|
||||||
|
self.stem_local_tx(req.tx)
|
||||||
|
.map_ok(|_| State::Stem)
|
||||||
|
.map_err(DandelionRouterError::PeerError),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
130
p2p/dandelion/src/tests/mod.rs
Normal file
130
p2p/dandelion/src/tests/mod.rs
Normal file
|
@ -0,0 +1,130 @@
|
||||||
|
mod pool;
|
||||||
|
mod router;
|
||||||
|
|
||||||
|
use std::{collections::HashMap, future::Future, hash::Hash, sync::Arc};
|
||||||
|
|
||||||
|
use futures::TryStreamExt;
|
||||||
|
use tokio::sync::mpsc::{self, UnboundedReceiver};
|
||||||
|
use tower::{
|
||||||
|
discover::{Discover, ServiceList},
|
||||||
|
util::service_fn,
|
||||||
|
Service, ServiceExt,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
traits::{TxStoreRequest, TxStoreResponse},
|
||||||
|
State,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub fn mock_discover_svc<Req: Send + 'static>() -> (
|
||||||
|
impl Discover<
|
||||||
|
Key = usize,
|
||||||
|
Service = impl Service<
|
||||||
|
Req,
|
||||||
|
Future = impl Future<Output = Result<(), tower::BoxError>> + Send + 'static,
|
||||||
|
Error = tower::BoxError,
|
||||||
|
> + Send
|
||||||
|
+ 'static,
|
||||||
|
Error = tower::BoxError,
|
||||||
|
>,
|
||||||
|
UnboundedReceiver<(u64, Req)>,
|
||||||
|
) {
|
||||||
|
let (tx, rx) = mpsc::unbounded_channel();
|
||||||
|
|
||||||
|
let discover = ServiceList::new((0..).map(move |i| {
|
||||||
|
let tx_2 = tx.clone();
|
||||||
|
|
||||||
|
service_fn(move |req| {
|
||||||
|
tx_2.send((i, req)).unwrap();
|
||||||
|
|
||||||
|
async move { Ok::<(), tower::BoxError>(()) }
|
||||||
|
})
|
||||||
|
}))
|
||||||
|
.map_err(Into::into);
|
||||||
|
|
||||||
|
(discover, rx)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn mock_broadcast_svc<Req: Send + 'static>() -> (
|
||||||
|
impl Service<
|
||||||
|
Req,
|
||||||
|
Future = impl Future<Output = Result<(), tower::BoxError>> + Send + 'static,
|
||||||
|
Error = tower::BoxError,
|
||||||
|
> + Send
|
||||||
|
+ 'static,
|
||||||
|
UnboundedReceiver<Req>,
|
||||||
|
) {
|
||||||
|
let (tx, rx) = mpsc::unbounded_channel();
|
||||||
|
|
||||||
|
(
|
||||||
|
service_fn(move |req| {
|
||||||
|
tx.send(req).unwrap();
|
||||||
|
|
||||||
|
async move { Ok::<(), tower::BoxError>(()) }
|
||||||
|
}),
|
||||||
|
rx,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::type_complexity)] // just test code.
|
||||||
|
pub fn mock_in_memory_backing_pool<
|
||||||
|
Tx: Clone + Send + 'static,
|
||||||
|
TxID: Clone + Hash + Eq + Send + 'static,
|
||||||
|
>() -> (
|
||||||
|
impl Service<
|
||||||
|
TxStoreRequest<Tx, TxID>,
|
||||||
|
Response = TxStoreResponse<Tx, TxID>,
|
||||||
|
Future = impl Future<Output = Result<TxStoreResponse<Tx, TxID>, tower::BoxError>>
|
||||||
|
+ Send
|
||||||
|
+ 'static,
|
||||||
|
Error = tower::BoxError,
|
||||||
|
> + Send
|
||||||
|
+ 'static,
|
||||||
|
Arc<std::sync::Mutex<HashMap<TxID, (Tx, State)>>>,
|
||||||
|
) {
|
||||||
|
let txs = Arc::new(std::sync::Mutex::new(HashMap::new()));
|
||||||
|
let txs_2 = txs.clone();
|
||||||
|
|
||||||
|
(
|
||||||
|
service_fn(move |req: TxStoreRequest<Tx, TxID>| {
|
||||||
|
let txs = txs.clone();
|
||||||
|
async move {
|
||||||
|
match req {
|
||||||
|
TxStoreRequest::Store(tx, tx_id, state) => {
|
||||||
|
txs.lock().unwrap().insert(tx_id, (tx, state));
|
||||||
|
Ok(TxStoreResponse::Ok)
|
||||||
|
}
|
||||||
|
TxStoreRequest::Get(tx_id) => {
|
||||||
|
let tx_state = txs.lock().unwrap().get(&tx_id).cloned();
|
||||||
|
Ok(TxStoreResponse::Transaction(tx_state))
|
||||||
|
}
|
||||||
|
TxStoreRequest::Contains(tx_id) => Ok(TxStoreResponse::Contains(
|
||||||
|
txs.lock().unwrap().get(&tx_id).map(|res| res.1),
|
||||||
|
)),
|
||||||
|
TxStoreRequest::IDsInStemPool => {
|
||||||
|
// horribly inefficient, but it's test code :)
|
||||||
|
let ids = txs
|
||||||
|
.lock()
|
||||||
|
.unwrap()
|
||||||
|
.iter()
|
||||||
|
.filter(|(_, (_, state))| matches!(state, State::Stem))
|
||||||
|
.map(|tx| tx.0.clone())
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
Ok(TxStoreResponse::IDs(ids))
|
||||||
|
}
|
||||||
|
TxStoreRequest::Promote(tx_id) => {
|
||||||
|
let _ = txs
|
||||||
|
.lock()
|
||||||
|
.unwrap()
|
||||||
|
.get_mut(&tx_id)
|
||||||
|
.map(|tx| tx.1 = State::Fluff);
|
||||||
|
|
||||||
|
Ok(TxStoreResponse::Ok)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
txs_2,
|
||||||
|
)
|
||||||
|
}
|
42
p2p/dandelion/src/tests/pool.rs
Normal file
42
p2p/dandelion/src/tests/pool.rs
Normal file
|
@ -0,0 +1,42 @@
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
pool::{start_dandelion_pool, IncomingTx},
|
||||||
|
DandelionConfig, DandelionRouter, Graph, TxState,
|
||||||
|
};
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn basic_functionality() {
|
||||||
|
let config = DandelionConfig {
|
||||||
|
time_between_hop: Duration::from_millis(175),
|
||||||
|
epoch_duration: Duration::from_secs(0), // make every poll ready change state
|
||||||
|
fluff_probability: 0.2,
|
||||||
|
graph: Graph::FourRegular,
|
||||||
|
};
|
||||||
|
|
||||||
|
let (broadcast_svc, mut broadcast_rx) = mock_broadcast_svc();
|
||||||
|
let (outbound_peer_svc, _outbound_rx) = mock_discover_svc();
|
||||||
|
|
||||||
|
let router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
|
||||||
|
|
||||||
|
let (pool_svc, pool) = mock_in_memory_backing_pool();
|
||||||
|
|
||||||
|
let mut pool_svc = start_dandelion_pool(15, router, pool_svc, config);
|
||||||
|
|
||||||
|
pool_svc
|
||||||
|
.ready()
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.call(IncomingTx {
|
||||||
|
tx: 0_usize,
|
||||||
|
tx_id: 1_usize,
|
||||||
|
tx_state: TxState::Fluff,
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert!(pool.lock().unwrap().contains_key(&1));
|
||||||
|
assert!(broadcast_rx.try_recv().is_ok())
|
||||||
|
}
|
237
p2p/dandelion/src/tests/router.rs
Normal file
237
p2p/dandelion/src/tests/router.rs
Normal file
|
@ -0,0 +1,237 @@
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use tower::{Service, ServiceExt};
|
||||||
|
|
||||||
|
use crate::{DandelionConfig, DandelionRouteReq, DandelionRouter, Graph, TxState};
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
/// make sure the number of stemm peers is correct.
|
||||||
|
#[tokio::test]
|
||||||
|
async fn number_stems_correct() {
|
||||||
|
let mut config = DandelionConfig {
|
||||||
|
time_between_hop: Duration::from_millis(175),
|
||||||
|
epoch_duration: Duration::from_secs(60_000),
|
||||||
|
fluff_probability: 0.0, // we want to be in stem state
|
||||||
|
graph: Graph::FourRegular,
|
||||||
|
};
|
||||||
|
|
||||||
|
let (broadcast_svc, _broadcast_rx) = mock_broadcast_svc();
|
||||||
|
let (outbound_peer_svc, _outbound_rx) = mock_discover_svc();
|
||||||
|
|
||||||
|
let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
|
||||||
|
|
||||||
|
const FROM_PEER: usize = 20;
|
||||||
|
|
||||||
|
// send a request to make the generic bound inference work, without specifying types.
|
||||||
|
router
|
||||||
|
.ready()
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.call(DandelionRouteReq {
|
||||||
|
tx: 0_usize,
|
||||||
|
state: TxState::Stem { from: FROM_PEER },
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert_eq!(router.stem_peers.len(), 2); // Graph::FourRegular
|
||||||
|
|
||||||
|
config.graph = Graph::Line;
|
||||||
|
|
||||||
|
let (broadcast_svc, _broadcast_rx) = mock_broadcast_svc();
|
||||||
|
let (outbound_peer_svc, _outbound_rx) = mock_discover_svc();
|
||||||
|
|
||||||
|
let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
|
||||||
|
|
||||||
|
// send a request to make the generic bound inference work, without specifying types.
|
||||||
|
router
|
||||||
|
.ready()
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.call(DandelionRouteReq {
|
||||||
|
tx: 0_usize,
|
||||||
|
state: TxState::Stem { from: FROM_PEER },
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert_eq!(router.stem_peers.len(), 1); // Graph::Line
|
||||||
|
}
|
||||||
|
|
||||||
|
/// make sure a tx from the same peer goes to the same peer.
|
||||||
|
#[tokio::test]
|
||||||
|
async fn routes_consistent() {
|
||||||
|
let config = DandelionConfig {
|
||||||
|
time_between_hop: Duration::from_millis(175),
|
||||||
|
epoch_duration: Duration::from_secs(60_000),
|
||||||
|
fluff_probability: 0.0, // we want this test to always stem
|
||||||
|
graph: Graph::FourRegular,
|
||||||
|
};
|
||||||
|
|
||||||
|
let (broadcast_svc, mut broadcast_rx) = mock_broadcast_svc();
|
||||||
|
let (outbound_peer_svc, mut outbound_rx) = mock_discover_svc();
|
||||||
|
|
||||||
|
let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
|
||||||
|
|
||||||
|
const FROM_PEER: usize = 20;
|
||||||
|
|
||||||
|
// The router will panic if it attempts to flush.
|
||||||
|
broadcast_rx.close();
|
||||||
|
|
||||||
|
for _ in 0..30 {
|
||||||
|
router
|
||||||
|
.ready()
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.call(DandelionRouteReq {
|
||||||
|
tx: 0_usize,
|
||||||
|
state: TxState::Stem { from: FROM_PEER },
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut stem_peer = None;
|
||||||
|
let mut total_txs = 0;
|
||||||
|
|
||||||
|
while let Ok((peer_id, _)) = outbound_rx.try_recv() {
|
||||||
|
let stem_peer = stem_peer.get_or_insert(peer_id);
|
||||||
|
// make sure all peer ids are the same (so the same svc got all txs).
|
||||||
|
assert_eq!(*stem_peer, peer_id);
|
||||||
|
|
||||||
|
total_txs += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
assert_eq!(total_txs, 30);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// make sure local txs always stem - even in fluff state.
|
||||||
|
#[tokio::test]
|
||||||
|
async fn local_always_stem() {
|
||||||
|
let config = DandelionConfig {
|
||||||
|
time_between_hop: Duration::from_millis(175),
|
||||||
|
epoch_duration: Duration::from_secs(60_000),
|
||||||
|
fluff_probability: 1.0, // we want this test to always fluff
|
||||||
|
graph: Graph::FourRegular,
|
||||||
|
};
|
||||||
|
|
||||||
|
let (broadcast_svc, mut broadcast_rx) = mock_broadcast_svc();
|
||||||
|
let (outbound_peer_svc, mut outbound_rx) = mock_discover_svc();
|
||||||
|
|
||||||
|
let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
|
||||||
|
|
||||||
|
// The router will panic if it attempts to flush.
|
||||||
|
broadcast_rx.close();
|
||||||
|
|
||||||
|
for _ in 0..30 {
|
||||||
|
router
|
||||||
|
.ready()
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.call(DandelionRouteReq {
|
||||||
|
tx: 0_usize,
|
||||||
|
state: TxState::Local,
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut stem_peer = None;
|
||||||
|
let mut total_txs = 0;
|
||||||
|
|
||||||
|
while let Ok((peer_id, _)) = outbound_rx.try_recv() {
|
||||||
|
let stem_peer = stem_peer.get_or_insert(peer_id);
|
||||||
|
// make sure all peer ids are the same (so the same svc got all txs).
|
||||||
|
assert_eq!(*stem_peer, peer_id);
|
||||||
|
|
||||||
|
total_txs += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
assert_eq!(total_txs, 30);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// make sure local txs always stem - even in fluff state.
|
||||||
|
#[tokio::test]
|
||||||
|
async fn stem_txs_fluff_in_state_fluff() {
|
||||||
|
let config = DandelionConfig {
|
||||||
|
time_between_hop: Duration::from_millis(175),
|
||||||
|
epoch_duration: Duration::from_secs(60_000),
|
||||||
|
fluff_probability: 1.0, // we want this test to always fluff
|
||||||
|
graph: Graph::FourRegular,
|
||||||
|
};
|
||||||
|
|
||||||
|
let (broadcast_svc, mut broadcast_rx) = mock_broadcast_svc();
|
||||||
|
let (outbound_peer_svc, mut outbound_rx) = mock_discover_svc();
|
||||||
|
|
||||||
|
let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
|
||||||
|
|
||||||
|
const FROM_PEER: usize = 20;
|
||||||
|
|
||||||
|
// The router will panic if it attempts to stem.
|
||||||
|
outbound_rx.close();
|
||||||
|
|
||||||
|
for _ in 0..30 {
|
||||||
|
router
|
||||||
|
.ready()
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.call(DandelionRouteReq {
|
||||||
|
tx: 0_usize,
|
||||||
|
state: TxState::Stem { from: FROM_PEER },
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut total_txs = 0;
|
||||||
|
|
||||||
|
while broadcast_rx.try_recv().is_ok() {
|
||||||
|
total_txs += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
assert_eq!(total_txs, 30);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// make sure we get all txs sent to the router out in a stem or a fluff.
|
||||||
|
#[tokio::test]
|
||||||
|
async fn random_routing() {
|
||||||
|
let config = DandelionConfig {
|
||||||
|
time_between_hop: Duration::from_millis(175),
|
||||||
|
epoch_duration: Duration::from_secs(0), // make every poll ready change state
|
||||||
|
fluff_probability: 0.2,
|
||||||
|
graph: Graph::FourRegular,
|
||||||
|
};
|
||||||
|
|
||||||
|
let (broadcast_svc, mut broadcast_rx) = mock_broadcast_svc();
|
||||||
|
let (outbound_peer_svc, mut outbound_rx) = mock_discover_svc();
|
||||||
|
|
||||||
|
let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
|
||||||
|
|
||||||
|
for _ in 0..3000 {
|
||||||
|
router
|
||||||
|
.ready()
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.call(DandelionRouteReq {
|
||||||
|
tx: 0_usize,
|
||||||
|
state: TxState::Stem {
|
||||||
|
from: rand::random(),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut total_txs = 0;
|
||||||
|
|
||||||
|
while broadcast_rx.try_recv().is_ok() {
|
||||||
|
total_txs += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
while outbound_rx.try_recv().is_ok() {
|
||||||
|
total_txs += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
assert_eq!(total_txs, 3000);
|
||||||
|
}
|
49
p2p/dandelion/src/traits.rs
Normal file
49
p2p/dandelion/src/traits.rs
Normal file
|
@ -0,0 +1,49 @@
|
||||||
|
/// A request to diffuse a transaction to all connected peers.
///
/// This crate does not handle diffusion it is left to implementers.
pub struct DiffuseRequest<Tx>(pub Tx);

/// A request sent to a single peer to stem this transaction.
pub struct StemRequest<Tx>(pub Tx);
|
||||||
|
|
||||||
|
#[cfg(feature = "txpool")]
/// A request sent to the backing transaction pool storage.
pub enum TxStoreRequest<Tx, TxID> {
    /// A request to store a transaction with the ID to store it under and the pool to store it in.
    ///
    /// If the tx is already in the pool then do nothing, unless the tx is in the stem pool then move it
    /// to the fluff pool, _if this request state is fluff_.
    Store(Tx, TxID, crate::State),
    /// A request to retrieve a `Tx` with the given ID from the pool, should not remove that tx from the pool.
    ///
    /// Must return [`TxStoreResponse::Transaction`]
    Get(TxID),
    /// Promote a transaction from the stem pool to the public pool.
    ///
    /// If the tx is already in the fluff pool do nothing.
    ///
    /// This should not error if the tx isn't in the pool at all.
    Promote(TxID),
    /// A request to check if a transaction is in the pool.
    ///
    /// Must return [`TxStoreResponse::Contains`]
    Contains(TxID),
    /// Returns the IDs of all the transaction in the stem pool.
    ///
    /// Must return [`TxStoreResponse::IDs`]
    IDsInStemPool,
}
|
||||||
|
|
||||||
|
#[cfg(feature = "txpool")]
/// A response sent back from the backing transaction pool.
pub enum TxStoreResponse<Tx, TxID> {
    /// A generic ok response.
    Ok,
    /// A response containing a [`Option`] for if the transaction is in the pool (Some) or not (None) and in which pool
    /// the tx is in.
    Contains(Option<crate::State>),
    /// A response containing a requested transaction.
    Transaction(Option<(Tx, crate::State)>),
    /// A list of transaction IDs.
    IDs(Vec<TxID>),
}
|
|
@ -1,21 +1,20 @@
|
||||||
# `cuprate-types`
|
# `cuprate-types`
|
||||||
Various data types shared by Cuprate.
|
Various data types shared by Cuprate.
|
||||||
|
|
||||||
<!-- Did you know markdown automatically increments number lists, even if they are all 1...? -->
|
- [1. File Structure](#1-file-structure)
|
||||||
1. [File Structure](#file-structure)
|
- [1.1 `src/`](#11-src)
|
||||||
- [`src/`](#src)
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
# File Structure
|
## 1. File Structure
|
||||||
A quick reference of the structure of the folders & files in `cuprate-types`.
|
A quick reference of the structure of the folders & files in `cuprate-types`.
|
||||||
|
|
||||||
Note that `lib.rs/mod.rs` files are purely for re-exporting/visibility/lints, and contain no code. Each sub-directory has a corresponding `mod.rs`.
|
Note that `lib.rs/mod.rs` files are purely for re-exporting/visibility/lints, and contain no code. Each sub-directory has a corresponding `mod.rs`.
|
||||||
|
|
||||||
## `src/`
|
### 1.1 `src/`
|
||||||
The top-level `src/` files.
|
The top-level `src/` files.
|
||||||
|
|
||||||
| File | Purpose |
|
| File | Purpose |
|
||||||
|---------------------|---------|
|
|---------------------|---------|
|
||||||
| `service.rs` | Types used in database requests; `enum {Request,Response}`
|
| `service.rs` | Types used in database requests; `enum {Request,Response}`
|
||||||
| `types.rs` | Various general types used by Cuprate
|
| `types.rs` | Various general types used by Cuprate
|
|
@ -1,6 +1,10 @@
|
||||||
//! Cuprate shared data types.
|
//! Cuprate shared data types.
|
||||||
//!
|
//!
|
||||||
//! TODO
|
//! This crate is a kitchen-sink for data types that are shared across `Cuprate`.
|
||||||
|
//!
|
||||||
|
//! # Features flags
|
||||||
|
//! The `service` module, containing `cuprate_database` request/response
|
||||||
|
//! types, must be enabled with the `service` feature (on by default).
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Lints
|
//---------------------------------------------------------------------------------------------------- Lints
|
||||||
// Forbid lints.
|
// Forbid lints.
|
||||||
|
@ -59,7 +63,6 @@
|
||||||
unused_comparisons,
|
unused_comparisons,
|
||||||
nonstandard_style
|
nonstandard_style
|
||||||
)]
|
)]
|
||||||
#![allow(unreachable_code, unused_variables, dead_code, unused_imports)] // TODO: remove
|
|
||||||
#![allow(
|
#![allow(
|
||||||
// FIXME: this lint affects crates outside of
|
// FIXME: this lint affects crates outside of
|
||||||
// `database/` for some reason, allow for now.
|
// `database/` for some reason, allow for now.
|
||||||
|
@ -70,9 +73,6 @@
|
||||||
// although it is sometimes nice.
|
// although it is sometimes nice.
|
||||||
clippy::must_use_candidate,
|
clippy::must_use_candidate,
|
||||||
|
|
||||||
// TODO: should be removed after all `todo!()`'s are gone.
|
|
||||||
clippy::diverging_sub_expression,
|
|
||||||
|
|
||||||
clippy::module_name_repetitions,
|
clippy::module_name_repetitions,
|
||||||
clippy::module_inception,
|
clippy::module_inception,
|
||||||
clippy::redundant_pub_crate,
|
clippy::redundant_pub_crate,
|
||||||
|
|
|
@ -1,4 +1,10 @@
|
||||||
//! Database [`ReadRequest`]s, [`WriteRequest`]s, and [`Response`]s.
|
//! Database [`ReadRequest`]s, [`WriteRequest`]s, and [`Response`]s.
|
||||||
|
//!
|
||||||
|
//! See [`cuprate_database`](https://github.com/Cuprate/cuprate/blob/00c3692eac6b2669e74cfd8c9b41c7e704c779ad/database/src/service/mod.rs#L1-L59)'s
|
||||||
|
//! `service` module for more usage/documentation.
|
||||||
|
//!
|
||||||
|
//! Tests that assert particular requests lead to particular
|
||||||
|
//! responses are also tested in `cuprate_database`.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::{
|
use std::{
|
||||||
|
@ -6,8 +12,6 @@ use std::{
|
||||||
ops::Range,
|
ops::Range,
|
||||||
};
|
};
|
||||||
|
|
||||||
use monero_serai::{block::Block, transaction::Transaction};
|
|
||||||
|
|
||||||
#[cfg(feature = "borsh")]
|
#[cfg(feature = "borsh")]
|
||||||
use borsh::{BorshDeserialize, BorshSerialize};
|
use borsh::{BorshDeserialize, BorshSerialize};
|
||||||
#[cfg(feature = "serde")]
|
#[cfg(feature = "serde")]
|
||||||
|
@ -17,63 +21,151 @@ use crate::types::{ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- ReadRequest
|
//---------------------------------------------------------------------------------------------------- ReadRequest
|
||||||
/// A read request to the database.
|
/// A read request to the database.
|
||||||
|
///
|
||||||
|
/// This pairs with [`Response`], where each variant here
|
||||||
|
/// matches in name with a `Response` variant. For example,
|
||||||
|
/// the proper response for a [`ReadRequest::BlockHash`]
|
||||||
|
/// would be a [`Response::BlockHash`].
|
||||||
|
///
|
||||||
|
/// See `Response` for the expected responses per `Request`.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||||
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
|
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
|
||||||
pub enum ReadRequest {
|
pub enum ReadRequest {
|
||||||
/// TODO
|
/// Request a block's extended header.
|
||||||
|
///
|
||||||
|
/// The input is the block's height.
|
||||||
BlockExtendedHeader(u64),
|
BlockExtendedHeader(u64),
|
||||||
/// TODO
|
|
||||||
|
/// Request a block's hash.
|
||||||
|
///
|
||||||
|
/// The input is the block's height.
|
||||||
BlockHash(u64),
|
BlockHash(u64),
|
||||||
/// TODO
|
|
||||||
|
/// Request a range of block extended headers.
|
||||||
|
///
|
||||||
|
/// The input is a range of block heights.
|
||||||
BlockExtendedHeaderInRange(Range<u64>),
|
BlockExtendedHeaderInRange(Range<u64>),
|
||||||
/// TODO
|
|
||||||
|
/// Request the current chain height.
|
||||||
|
///
|
||||||
|
/// Note that this is not the top-block height.
|
||||||
ChainHeight,
|
ChainHeight,
|
||||||
/// TODO
|
|
||||||
|
/// Request the total amount of generated coins (atomic units) so far.
|
||||||
GeneratedCoins,
|
GeneratedCoins,
|
||||||
/// TODO
|
|
||||||
|
/// Request data for multiple outputs.
|
||||||
|
///
|
||||||
|
/// The input is a `HashMap` where:
|
||||||
|
/// - Key = output amount
|
||||||
|
/// - Value = set of amount indices
|
||||||
|
///
|
||||||
|
/// For pre-RCT outputs, the amount is non-zero,
|
||||||
|
/// and the amount indices represent the wanted
|
||||||
|
/// indices of duplicate amount outputs, i.e.:
|
||||||
|
///
|
||||||
|
/// ```ignore
|
||||||
|
/// // list of outputs with amount 10
|
||||||
|
/// [0, 1, 2, 3, 4, 5]
|
||||||
|
/// // ^ ^
|
||||||
|
/// // we only want these two, so we would provide
|
||||||
|
/// // `amount: 10, amount_index: {1, 3}`
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// For RCT outputs, the amounts would be `0` and
|
||||||
|
/// the amount indices would represent the global
|
||||||
|
/// RCT output indices.
|
||||||
Outputs(HashMap<u64, HashSet<u64>>),
|
Outputs(HashMap<u64, HashSet<u64>>),
|
||||||
/// TODO
|
|
||||||
|
/// Request the amount of outputs with a certain amount.
|
||||||
|
///
|
||||||
|
/// The input is a list of output amounts.
|
||||||
NumberOutputsWithAmount(Vec<u64>),
|
NumberOutputsWithAmount(Vec<u64>),
|
||||||
/// TODO
|
|
||||||
|
/// Check that all key images within a set arer not spent.
|
||||||
|
///
|
||||||
|
/// Input is a set of key images.
|
||||||
CheckKIsNotSpent(HashSet<[u8; 32]>),
|
CheckKIsNotSpent(HashSet<[u8; 32]>),
|
||||||
}
|
}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- WriteRequest
|
//---------------------------------------------------------------------------------------------------- WriteRequest
|
||||||
/// A write request to the database.
|
/// A write request to the database.
|
||||||
|
///
|
||||||
|
/// There is currently only 1 write request to the database,
|
||||||
|
/// as such, the only valid [`Response`] to this request is
|
||||||
|
/// the proper response for a [`Response::WriteBlockOk`].
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
|
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
|
||||||
pub enum WriteRequest {
|
pub enum WriteRequest {
|
||||||
/// TODO
|
/// Request that a block be written to the database.
|
||||||
|
///
|
||||||
|
/// Input is an already verified block.
|
||||||
WriteBlock(VerifiedBlockInformation),
|
WriteBlock(VerifiedBlockInformation),
|
||||||
}
|
}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Response
|
//---------------------------------------------------------------------------------------------------- Response
|
||||||
/// A response from the database.
|
/// A response from the database.
|
||||||
|
///
|
||||||
|
/// These are the data types returned when using sending a `Request`.
|
||||||
|
///
|
||||||
|
/// This pairs with [`ReadRequest`] and [`WriteRequest`],
|
||||||
|
/// see those two for more info.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
|
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
|
||||||
pub enum Response {
|
pub enum Response {
|
||||||
//------------------------------------------------------ Reads
|
//------------------------------------------------------ Reads
|
||||||
/// TODO
|
/// Response to [`ReadRequest::BlockExtendedHeader`].
|
||||||
|
///
|
||||||
|
/// Inner value is the extended headed of the requested block.
|
||||||
BlockExtendedHeader(ExtendedBlockHeader),
|
BlockExtendedHeader(ExtendedBlockHeader),
|
||||||
/// TODO
|
|
||||||
|
/// Response to [`ReadRequest::BlockHash`].
|
||||||
|
///
|
||||||
|
/// Inner value is the hash of the requested block.
|
||||||
BlockHash([u8; 32]),
|
BlockHash([u8; 32]),
|
||||||
/// TODO
|
|
||||||
|
/// Response to [`ReadRequest::BlockExtendedHeaderInRange`].
|
||||||
|
///
|
||||||
|
/// Inner value is the list of extended header(s) of the requested block(s).
|
||||||
BlockExtendedHeaderInRange(Vec<ExtendedBlockHeader>),
|
BlockExtendedHeaderInRange(Vec<ExtendedBlockHeader>),
|
||||||
/// TODO
|
|
||||||
|
/// Response to [`ReadRequest::ChainHeight`].
|
||||||
|
///
|
||||||
|
/// Inner value is the chain height, and the top block's hash.
|
||||||
ChainHeight(u64, [u8; 32]),
|
ChainHeight(u64, [u8; 32]),
|
||||||
/// TODO
|
|
||||||
|
/// Response to [`ReadRequest::GeneratedCoins`].
|
||||||
|
///
|
||||||
|
/// Inner value is the total amount of generated coins so far, in atomic units.
|
||||||
GeneratedCoins(u64),
|
GeneratedCoins(u64),
|
||||||
/// TODO
|
|
||||||
|
/// Response to [`ReadRequest::Outputs`].
|
||||||
|
///
|
||||||
|
/// Inner value is all the outputs requested,
|
||||||
|
/// associated with their amount and amount index.
|
||||||
Outputs(HashMap<u64, HashMap<u64, OutputOnChain>>),
|
Outputs(HashMap<u64, HashMap<u64, OutputOnChain>>),
|
||||||
/// TODO
|
|
||||||
|
/// Response to [`ReadRequest::NumberOutputsWithAmount`].
|
||||||
|
///
|
||||||
|
/// Inner value is a `HashMap` of all the outputs requested where:
|
||||||
|
/// - Key = output amount
|
||||||
|
/// - Value = count of outputs with the same amount
|
||||||
NumberOutputsWithAmount(HashMap<u64, usize>),
|
NumberOutputsWithAmount(HashMap<u64, usize>),
|
||||||
/// TODO
|
|
||||||
/// returns true if key images are spent
|
/// Response to [`ReadRequest::CheckKIsNotSpent`].
|
||||||
|
///
|
||||||
|
/// The inner value is `true` if _any_ of the key images
|
||||||
|
/// were spent (exited in the database already).
|
||||||
|
///
|
||||||
|
/// The inner value is `false` if _none_ of the key images were spent.
|
||||||
CheckKIsNotSpent(bool),
|
CheckKIsNotSpent(bool),
|
||||||
|
|
||||||
//------------------------------------------------------ Writes
|
//------------------------------------------------------ Writes
|
||||||
/// TODO
|
/// Response to [`WriteRequest::WriteBlock`].
|
||||||
|
///
|
||||||
|
/// This response indicates that the requested block has
|
||||||
|
/// successfully been written to the database without error.
|
||||||
WriteBlockOk,
|
WriteBlockOk,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
//! TODO
|
//! Various shared data types in Cuprate.
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- Import
|
//---------------------------------------------------------------------------------------------------- Import
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
@ -15,88 +15,113 @@ use borsh::{BorshDeserialize, BorshSerialize};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- ExtendedBlockHeader
|
//---------------------------------------------------------------------------------------------------- ExtendedBlockHeader
|
||||||
/// TODO
|
/// Extended header data of a block.
|
||||||
|
///
|
||||||
|
/// This contains various metadata of a block, but not the block blob itself.
|
||||||
|
///
|
||||||
|
/// For more definitions, see also: <https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_last_block_header>.
|
||||||
#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||||
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
|
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
|
||||||
pub struct ExtendedBlockHeader {
|
pub struct ExtendedBlockHeader {
|
||||||
/// TODO
|
/// The block's major version.
|
||||||
/// This is a `cuprate_consensus::HardFork`.
|
///
|
||||||
|
/// This can also be represented with `cuprate_consensus::HardFork`.
|
||||||
|
///
|
||||||
|
/// This is the same value as [`monero_serai::block::BlockHeader::major_version`].
|
||||||
pub version: u8,
|
pub version: u8,
|
||||||
/// TODO
|
/// The block's hard-fork vote.
|
||||||
/// This is a `cuprate_consensus::HardFork`.
|
///
|
||||||
|
/// This can also be represented with `cuprate_consensus::HardFork`.
|
||||||
|
///
|
||||||
|
/// This is the same value as [`monero_serai::block::BlockHeader::minor_version`].
|
||||||
pub vote: u8,
|
pub vote: u8,
|
||||||
/// TODO
|
/// The UNIX time at which the block was mined.
|
||||||
pub timestamp: u64,
|
pub timestamp: u64,
|
||||||
/// TODO
|
/// The total amount of coins mined in all blocks so far, including this block's.
|
||||||
pub cumulative_difficulty: u128,
|
pub cumulative_difficulty: u128,
|
||||||
/// TODO
|
/// The adjusted block size, in bytes.
|
||||||
pub block_weight: usize,
|
pub block_weight: usize,
|
||||||
/// TODO
|
/// The long term block weight, based on the median weight of the preceding `100_000` blocks.
|
||||||
pub long_term_weight: usize,
|
pub long_term_weight: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- TransactionVerificationData
|
//---------------------------------------------------------------------------------------------------- TransactionVerificationData
|
||||||
/// TODO
|
/// Data needed to verify a transaction.
|
||||||
|
///
|
||||||
|
/// This represents data that allows verification of a transaction,
|
||||||
|
/// although it doesn't mean it _has_ been verified.
|
||||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||||
// #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] // FIXME: monero_serai
|
// #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] // FIXME: monero_serai
|
||||||
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
|
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
|
||||||
pub struct TransactionVerificationData {
|
pub struct TransactionVerificationData {
|
||||||
/// TODO
|
/// The transaction itself.
|
||||||
pub tx: Transaction,
|
pub tx: Transaction,
|
||||||
/// TODO
|
/// The serialized byte form of [`Self::tx`].
|
||||||
|
///
|
||||||
|
/// [`Transaction::serialize`].
|
||||||
pub tx_blob: Vec<u8>,
|
pub tx_blob: Vec<u8>,
|
||||||
/// TODO
|
/// The transaction's weight.
|
||||||
|
///
|
||||||
|
/// [`Transaction::weight`].
|
||||||
pub tx_weight: usize,
|
pub tx_weight: usize,
|
||||||
/// TODO
|
/// The transaction's total fees.
|
||||||
pub fee: u64,
|
pub fee: u64,
|
||||||
/// TODO
|
/// The transaction's hash.
|
||||||
|
///
|
||||||
|
/// [`Transaction::hash`].
|
||||||
pub tx_hash: [u8; 32],
|
pub tx_hash: [u8; 32],
|
||||||
}
|
}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- VerifiedBlockInformation
|
//---------------------------------------------------------------------------------------------------- VerifiedBlockInformation
|
||||||
/// TODO
|
/// Verified information of a block.
|
||||||
|
///
|
||||||
|
/// This represents a block that has already been verified to be correct.
|
||||||
|
///
|
||||||
|
/// For more definitions, see also: <https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_block>.
|
||||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||||
// #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] // FIXME: monero_serai
|
// #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] // FIXME: monero_serai
|
||||||
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
|
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
|
||||||
pub struct VerifiedBlockInformation {
|
pub struct VerifiedBlockInformation {
|
||||||
/// TODO
|
/// The block itself.
|
||||||
pub block: Block,
|
pub block: Block,
|
||||||
/// TODO
|
/// The serialized byte form of [`Self::block`].
|
||||||
pub txs: Vec<Arc<TransactionVerificationData>>,
|
///
|
||||||
/// TODO
|
/// [`Block::serialize`].
|
||||||
pub block_hash: [u8; 32],
|
|
||||||
/// TODO
|
|
||||||
pub pow_hash: [u8; 32],
|
|
||||||
/// TODO
|
|
||||||
pub height: u64,
|
|
||||||
/// TODO
|
|
||||||
pub generated_coins: u64,
|
|
||||||
/// TODO
|
|
||||||
pub weight: usize,
|
|
||||||
/// TODO
|
|
||||||
pub long_term_weight: usize,
|
|
||||||
/// TODO
|
|
||||||
pub cumulative_difficulty: u128,
|
|
||||||
/// TODO
|
|
||||||
/// <https://github.com/Cuprate/cuprate/pull/102#discussion_r1556694072>
|
|
||||||
/// <https://github.com/serai-dex/serai/blob/93be7a30674ecedfb325b6d09dc22d550d7c13f8/coins/monero/src/block.rs#L110>
|
|
||||||
pub block_blob: Vec<u8>,
|
pub block_blob: Vec<u8>,
|
||||||
|
/// All the transactions in the block, excluding the [`Block::miner_tx`].
|
||||||
|
pub txs: Vec<Arc<TransactionVerificationData>>,
|
||||||
|
/// The block's hash.
|
||||||
|
///
|
||||||
|
/// [`Block::hash`].
|
||||||
|
pub block_hash: [u8; 32],
|
||||||
|
/// The block's proof-of-work hash.
|
||||||
|
pub pow_hash: [u8; 32],
|
||||||
|
/// The block's height.
|
||||||
|
pub height: u64,
|
||||||
|
/// The amount of generated coins (atomic units) in this block.
|
||||||
|
pub generated_coins: u64,
|
||||||
|
/// The adjusted block size, in bytes.
|
||||||
|
pub weight: usize,
|
||||||
|
/// The long term block weight, which is the weight factored in with previous block weights.
|
||||||
|
pub long_term_weight: usize,
|
||||||
|
/// The cumulative difficulty of all blocks up until and including this block.
|
||||||
|
pub cumulative_difficulty: u128,
|
||||||
}
|
}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------------------------------- OutputOnChain
|
//---------------------------------------------------------------------------------------------------- OutputOnChain
|
||||||
/// An already approved previous transaction output.
|
/// An already existing transaction output.
|
||||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||||
// #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] // FIXME: monero_serai
|
// #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] // FIXME: monero_serai
|
||||||
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
|
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
|
||||||
pub struct OutputOnChain {
|
pub struct OutputOnChain {
|
||||||
/// TODO
|
/// The block height this output belongs to.
|
||||||
pub height: u64,
|
pub height: u64,
|
||||||
/// TODO
|
/// The timelock of this output, if any.
|
||||||
pub time_lock: Timelock,
|
pub time_lock: Timelock,
|
||||||
/// TODO
|
/// The public key of this output, if any.
|
||||||
pub key: Option<EdwardsPoint>,
|
pub key: Option<EdwardsPoint>,
|
||||||
/// TODO
|
/// The output's commitment.
|
||||||
pub commitment: EdwardsPoint,
|
pub commitment: EdwardsPoint,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue