Merge branch 'main' into async-buffer

Boog900 2024-05-22 22:03:28 +01:00
commit 73041b9fe1
No known key found for this signature in database
GPG key ID: 42AB1287CB0041C2
150 changed files with 10455 additions and 5623 deletions

View file

@ -0,0 +1,62 @@
# MIT License
#
# Copyright (c) 2022-2023 Luke Parker
# Copyright (c) Cuprate developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# Initially taken from Serai Dex: https://github.com/serai-dex/serai/blob/b823413c9b7ae6747b9af99e18379cfc49f4271a/.github/actions/monero/action.yml.
name: monerod-download
description: Downloads the core Monero daemon
inputs:
version:
description: "Version to download"
required: false
default: v0.18.3.3
runs:
using: "composite"
steps:
- name: Monero Daemon Cache
id: cache-monerod
uses: actions/cache@v3
with:
path: |
monerod
monerod.exe
key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
- name: Download the Monero Daemon
if: steps.cache-monerod.outputs.cache-hit != 'true'
shell: bash
run: |
OS=${{ runner.os }}
ARCH=${{ runner.arch }}
case "$OS $ARCH" in
"Windows X64") FILE=monero-win-x64-${{ inputs.version }}.zip ;;
"Windows X86") FILE=monero-win-x86-${{ inputs.version }}.zip ;;
"Linux X64") FILE=monero-linux-x64-${{ inputs.version }}.tar.bz2 ;;
"Linux X86") FILE=monero-linux-x86-${{ inputs.version }}.tar.bz2 ;;
"macOS X64") FILE=monero-mac-x64-${{ inputs.version }}.tar.bz2 ;;
"macOS ARM64") FILE=monero-mac-armv8-${{ inputs.version }}.tar.bz2 ;;
*) exit 1 ;;
esac
curl -O -L https://downloads.getmonero.org/cli/$FILE
if [[ ${{ runner.os }} == Windows ]]; then
unzip $FILE
mv */monerod.exe monerod.exe
else
tar -xvf $FILE
mv */monerod monerod
fi

View file

@ -8,6 +8,7 @@ on:
workflow_dispatch:
env:
# Show colored output in CI.
CARGO_TERM_COLOR: always
# Show full panics.
RUST_BACKTRACE: "full"
@ -15,6 +16,8 @@ env:
RUST_MIN_STACK: 8000000
# Fail on documentation warnings.
RUSTDOCFLAGS: '-D warnings'
# Enable debug information generation for build dependencies.
CARGO_PROFILE_DEV_BUILD_OVERRIDE_DEBUG: true
jobs:
# Run format separately.
@ -53,10 +56,15 @@ jobs:
include:
- os: windows-latest
shell: msys2 {0}
# GNU Windows is used as we need
# `unistd.h` and more in `cryptonight/`.
rust: stable-x86_64-pc-windows-gnu
- os: macos-latest
shell: bash
rust: stable
- os: ubuntu-latest
shell: bash
rust: stable
defaults:
run:
@ -68,15 +76,21 @@ jobs:
with:
submodules: recursive
- name: Install Rust
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ matrix.rust }}
components: clippy
- name: Cache
uses: actions/cache@v3
with:
path: |
target
~/.cargo
~/.rustup
path: target
key: ${{ matrix.os }}
- name: Download monerod
uses: ./.github/actions/monerod-download
# Packages other than `Boost` used by `Monero` are listed here.
# https://github.com/monero-project/monero/blob/c444a7e002036e834bfb4c68f04a121ce1af5825/.github/workflows/build.yml#L71
@ -96,8 +110,21 @@ jobs:
update: true
install: mingw-w64-x86_64-toolchain mingw-w64-x86_64-boost msys2-runtime-devel git mingw-w64-x86_64-cmake mingw-w64-x86_64-ninja
# HACK: 2024-05-14
# GCC 14+ fails to build `lmdb-master-sys` with no clear error message:
# <https://github.com/Cuprate/cuprate/pull/127>
#
# - MSYS2 repos carry older versions of packages
# - pacman lets us manually downgrade from package files
# - Note that `gcc` requires `gcc-libs`
- name: Downgrade to GCC 13.2 (Windows)
if: matrix.os == 'windows-latest'
run: |
wget https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-gcc-13.2.0-6-any.pkg.tar.zst https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-gcc-libs-13.2.0-6-any.pkg.tar.zst
pacman -U --noconfirm mingw-w64-x86_64-gcc-13.2.0-6-any.pkg.tar.zst mingw-w64-x86_64-gcc-libs-13.2.0-6-any.pkg.tar.zst
- name: Documentation
run: cargo doc --workspace --all-features
run: cargo doc --workspace --all-features --no-deps
- name: Clippy (fail on warnings)
run: cargo clippy --workspace --all-features --all-targets -- -D warnings

1
.gitignore vendored
View file

@ -1,2 +1,3 @@
target/
.vscode
monerod

1162
Cargo.lock generated

File diff suppressed because it is too large

View file

@ -11,7 +11,8 @@ members = [
"net/fixed-bytes",
"net/levin",
"net/monero-wire",
"p2p/async-buffer",
"p2p/cuprate-p2p",
"p2p/dandelion",
"p2p/monero-p2p",
"p2p/address-book",
"pruning",
@ -50,6 +51,7 @@ crypto-bigint = { version = "0.5.5", default-features = false }
crossbeam = { version = "0.8.4", default-features = false }
curve25519-dalek = { version = "4.1.1", default-features = false }
dalek-ff-group = { git = "https://github.com/Cuprate/serai.git", rev = "347d4cf", default-features = false }
dashmap = { version = "5.5.3", default-features = false }
dirs = { version = "5.0.1", default-features = false }
futures = { version = "0.3.29", default-features = false }
hex = { version = "0.4.3", default-features = false }
@ -61,6 +63,7 @@ paste = { version = "1.0.14", default-features = false }
pin-project = { version = "1.1.3", default-features = false }
randomx-rs = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "0028464", default-features = false }
rand = { version = "0.8.5", default-features = false }
rand_distr = { version = "0.4.3", default-features = false }
rayon = { version = "1.9.0", default-features = false }
serde_bytes = { version = "0.11.12", default-features = false }
serde_json = { version = "1.0.108", default-features = false }
@ -76,7 +79,7 @@ tracing = { version = "0.1.40", default-features = false }
## workspace.dev-dependencies
tempfile = { version = "3" }
reqwest = { version = "0.11.24" }
pretty_assertions = { version = "1.4.0" }
proptest = { version = "1" }
proptest-derive = { version = "0.4.0" }

View file

@ -32,7 +32,10 @@ fn main() {
// 29 | extern int ftime (struct timeb *__timebuf)
// | ^~~~~
// This flag doesn't work on MSVC and breaks CI.
.flag_if_supported("-Wno-deprecated-declarations");
.flag_if_supported("-Wno-deprecated-declarations")
// `#include <boost>` isn't found without this in macOS CI.
// <https://github.com/Cuprate/cuprate/pull/116>
.flag_if_supported("-I/opt/homebrew/include");
// Optimization flags are automatically added.
// https://docs.rs/cc/latest/cc/struct.Build.html#method.opt_level

View file

@ -9,41 +9,51 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/database"
keywords = ["cuprate", "database"]
[features]
# default = ["heed", "redb", "service"]
default = ["heed", "redb", "service"]
# default = ["redb", "service"]
default = ["redb-memory", "service"]
# default = ["redb-memory", "service"]
heed = ["dep:heed"]
redb = ["dep:redb"]
redb-memory = ["redb"]
service = ["dep:crossbeam", "dep:futures", "dep:tokio", "dep:tokio-util", "dep:tower", "dep:rayon"]
[dependencies]
bitflags = { workspace = true, features = ["serde", "bytemuck"] }
bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] }
bytes = { workspace = true }
cfg-if = { workspace = true }
# FIXME:
# We only need the `thread` feature if `service` is enabled.
# Figure out how to enable features of an already pulled in dependency conditionally.
cuprate-helper = { path = "../helper", features = ["fs", "thread"] }
paste = { workspace = true }
page_size = { version = "0.6.0" } # Needed for database resizes, they must be a multiple of the OS page size.
thiserror = { workspace = true }
cuprate-helper = { path = "../helper", features = ["fs", "thread", "map"] }
cuprate-types = { path = "../types", features = ["service"] }
curve25519-dalek = { workspace = true }
monero-pruning = { path = "../pruning" }
monero-serai = { workspace = true, features = ["std"] }
paste = { workspace = true }
page_size = { version = "0.6.0" } # Needed for database resizes, they must be a multiple of the OS page size.
thiserror = { workspace = true }
# `service` feature.
crossbeam = { workspace = true, features = ["std"], optional = true }
futures = { workspace = true, optional = true }
tokio = { workspace = true, features = ["full"], optional = true }
tokio-util = { workspace = true, features = ["full"], optional = true }
tower = { workspace = true, features = ["full"], optional = true }
rayon = { workspace = true, optional = true }
crossbeam = { workspace = true, features = ["std"], optional = true }
futures = { workspace = true, optional = true }
tokio = { workspace = true, features = ["full"], optional = true }
tokio-util = { workspace = true, features = ["full"], optional = true }
tower = { workspace = true, features = ["full"], optional = true }
thread_local = { workspace = true }
rayon = { workspace = true, optional = true }
# Optional features.
heed = { version = "0.20.0-alpha.9", optional = true }
redb = { version = "2.0.0", optional = true }
heed = { version = "0.20.0", features = ["read-txn-no-tls"], optional = true }
redb = { version = "2.1.0", optional = true }
serde = { workspace = true, optional = true }
[dev-dependencies]
bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] }
cuprate-helper = { path = "../helper", features = ["thread"] }
cuprate-test-utils = { path = "../test-utils" }
page_size = { version = "0.6.0" }
tempfile = { version = "3.10.0" }
pretty_assertions = { workspace = true }
hex = { workspace = true }
hex-literal = { workspace = true }

View file

@ -1,33 +1,48 @@
# Database
Cuprate's database implementation.
<!-- Did you know markdown automatically increments number lists, even if they are all 1...? -->
1. [Documentation](#documentation)
1. [File Structure](#file-structure)
- [`src/`](#src)
- [`src/ops`](#src-ops)
- [`src/service/`](#src-service)
- [`src/backend/`](#src-backend)
1. [Backends](#backends)
- [`heed`](#heed)
- [`redb`](#redb)
- [`redb-memory`](#redb-memory)
- [`sanakirja`](#sanakirja)
- [`MDBX`](#mdbx)
1. [Layers](#layers)
- [Database](#database)
- [Trait](#trait)
- [ConcreteEnv](#concreteenv)
- [Thread-pool](#thread-pool)
- [Service](#service)
1. [Resizing](#resizing)
1. [Flushing](#flushing)
1. [(De)serialization](#deserialization)
- [1. Documentation](#1-documentation)
- [2. File structure](#2-file-structure)
- [2.1 `src/`](#21-src)
- [2.2 `src/backend/`](#22-srcbackend)
- [2.3 `src/config/`](#23-srcconfig)
- [2.4 `src/ops/`](#24-srcops)
- [2.5 `src/service/`](#25-srcservice)
- [3. Backends](#3-backends)
- [3.1 heed](#31-heed)
- [3.2 redb](#32-redb)
- [3.3 redb-memory](#33-redb-memory)
- [3.4 sanakirja](#34-sanakirja)
- [3.5 MDBX](#35-mdbx)
- [4. Layers](#4-layers)
- [4.1 Backend](#41-backend)
- [4.2 Trait](#42-trait)
- [4.3 ConcreteEnv](#43-concreteenv)
- [4.4 ops](#44-ops)
- [4.5 service](#45-service)
- [5. The service](#5-the-service)
- [5.1 Initialization](#51-initialization)
  - [5.2 Requests](#52-requests)
  - [5.3 Responses](#53-responses)
  - [5.4 Thread model](#54-thread-model)
- [5.5 Shutdown](#55-shutdown)
- [6. Syncing](#6-syncing)
- [7. Resizing](#7-resizing)
- [8. (De)serialization](#8-deserialization)
- [9. Schema](#9-schema)
- [9.1 Tables](#91-tables)
- [9.2 Multimap tables](#92-multimap-tables)
- [10. Known issues and tradeoffs](#10-known-issues-and-tradeoffs)
- [10.1 Traits abstracting backends](#101-traits-abstracting-backends)
- [10.2 Hot-swappable backends](#102-hot-swappable-backends)
- [10.3 Copying unaligned bytes](#103-copying-unaligned-bytes)
- [10.4 Endianness](#104-endianness)
- [10.5 Extra table data](#105-extra-table-data)
---
# Documentation
In general, documentation for `database/` is split into 3:
## 1. Documentation
Documentation for `database/` is split into 3 locations:
| Documentation location | Purpose |
|---------------------------|---------|
@ -35,7 +50,7 @@ In general, documentation for `database/` is split into 3:
| `cuprate-database` | Practical usage documentation/warnings/notes/etc
| Source file `// comments` | Implementation-specific details (e.g., how many reader threads to spawn?)
This README serves as the overview/design document.
This README serves as the implementation design document.
For actual practical usage, `cuprate-database`'s types and general usage are documented via standard Rust tooling.
@ -59,66 +74,41 @@ The code within `src/` is also littered with some `grep`-able comments containin
| `TODO` | This must be implemented; There should be 0 of these in production code
| `SOMEDAY` | This should be implemented... someday
# File Structure
## 2. File structure
A quick reference of the structure of the folders & files in `cuprate-database`.
Note that `lib.rs/mod.rs` files are purely for re-exporting/visibility/lints, and contain no code. Each sub-directory has a corresponding `mod.rs`.
## `src/`
### 2.1 `src/`
The top-level `src/` files.
| File | Purpose |
|---------------------|---------|
| `config.rs` | Database `Env` configuration
| `constants.rs` | General constants used throughout `cuprate-database`
| `database.rs` | Abstracted database; `trait DatabaseR{o,w}`
| `env.rs` | Abstracted database environment; `trait Env`
| `error.rs` | Database error types
| `free.rs` | General free functions (related to the database)
| `key.rs` | Abstracted database keys; `trait Key`
| `resize.rs` | Database resizing algorithms
| `storable.rs` | Data (de)serialization; `trait Storable`
| `table.rs` | Database table abstraction; `trait Table`
| `tables.rs` | All the table definitions used by `cuprate-database`
| `transaction.rs` | Database transaction abstraction; `trait TxR{o,w}`
| `types.rs` | Database table schema types
| File | Purpose |
|------------------------|---------|
| `constants.rs` | General constants used throughout `cuprate-database`
| `database.rs` | Abstracted database; `trait DatabaseR{o,w}`
| `env.rs` | Abstracted database environment; `trait Env`
| `error.rs` | Database error types
| `free.rs` | General free functions (related to the database)
| `key.rs` | Abstracted database keys; `trait Key`
| `resize.rs` | Database resizing algorithms
| `storable.rs` | Data (de)serialization; `trait Storable`
| `table.rs` | Database table abstraction; `trait Table`
| `tables.rs` | All the table definitions used by `cuprate-database`
| `tests.rs` | Utilities for `cuprate_database` testing
| `transaction.rs` | Database transaction abstraction; `trait TxR{o,w}`
| `types.rs` | Database-specific types
| `unsafe_unsendable.rs` | Marker type to impl `Send` for objects not `Send`
## `src/ops/`
This folder contains the `cuprate_database::ops` module.
TODO: more detailed descriptions.
| File | Purpose |
|-----------------|---------|
| `alt_block.rs` | Alternative blocks
| `block.rs` | Blocks
| `blockchain.rs` | Blockchain-related
| `output.rs` | Outputs
| `property.rs` | Properties
| `spent_key.rs` | Spent keys
| `tx.rs` | Transactions
## `src/service/`
This folder contains the `cuprate_database::service` module.
| File | Purpose |
|----------------|---------|
| `free.rs` | General free functions used (related to `cuprate_database::service`)
| `read.rs` | Read thread-pool definitions and logic
| `request.rs` | Read/write `Request`s to the database
| `response.rs` | Read/write `Response`'s from the database
| `tests.rs` | Thread-pool tests and test helper functions
| `write.rs` | Write thread-pool definitions and logic
## `src/backend/`
This folder contains the actual database crates used as the backend for `cuprate-database`.
### 2.2 `src/backend/`
This folder contains the implementations of the actual databases used as backends for `cuprate-database`.
Each backend has its own folder.
| Folder | Purpose |
|--------------|---------|
| `heed/` | Backend using forked [`heed`](https://github.com/Cuprate/heed)
| `sanakirja/` | Backend using [`sanakirja`](https://docs.rs/sanakirja)
| Folder/File | Purpose |
|-------------|---------|
| `heed/` | Backend using [`heed`](https://github.com/meilisearch/heed) (LMDB)
| `redb/` | Backend using [`redb`](https://github.com/cberner/redb)
| `tests.rs` | Backend-agnostic tests
All backends follow the same file structure:
@ -128,19 +118,53 @@ All backends follow the same file structure:
| `env.rs` | Implementation of `trait Env`
| `error.rs` | Implementation of backend's errors to `cuprate_database`'s error types
| `storable.rs` | Compatibility layer between `cuprate_database::Storable` and backend-specific (de)serialization
| `tests.rs` | Tests for the specific backend
| `transaction.rs` | Implementation of `trait TxR{o,w}`
| `types.rs` | Type aliases for long backend-specific types
# Backends
`cuprate-database`'s `trait`s abstract over various actual databases.
### 2.3 `src/config/`
This folder contains the `cuprate_database::config` module; configuration options for the database.
Each database's implementation is located in its respective file in `src/backend/${DATABASE_NAME}.rs`.
| File | Purpose |
|---------------------|---------|
| `config.rs` | Main database `Config` struct
| `reader_threads.rs` | Reader thread configuration for `service` thread-pool
| `sync_mode.rs` | Disk sync configuration for backends
## `heed`
The default database used is [`heed`](https://github.com/meilisearch/heed) (LMDB).
### 2.4 `src/ops/`
This folder contains the `cuprate_database::ops` module.
`LMDB` should not need to be installed as `heed` has a build script that pulls it in automatically.
These are higher-level, Monero-related functions abstracted over the database.
| File | Purpose |
|-----------------|---------|
| `block.rs` | Block related (main functions)
| `blockchain.rs` | Blockchain related (height, cumulative values, etc)
| `key_image.rs` | Key image related
| `macros.rs` | Macros specific to `ops/`
| `output.rs` | Output related
| `property.rs` | Database properties (pruned, version, etc)
| `tx.rs` | Transaction related
### 2.5 `src/service/`
This folder contains the `cuprate_database::service` module.
This is the `async`hronous request/response API that other Cuprate crates use instead of managing the database directly themselves.
| File | Purpose |
|----------------|---------|
| `free.rs` | General free functions used (related to `cuprate_database::service`)
| `read.rs` | Read thread-pool definitions and logic
| `tests.rs` | Thread-pool tests and test helper functions
| `types.rs` | `cuprate_database::service`-related type aliases
| `write.rs` | Writer thread definitions and logic
## 3. Backends
`cuprate-database`'s `trait`s allow abstracting over the actual database, such that any particular backend can be used.
Each database's implementations of those `trait`s are located in its respective folder in `src/backend/${DATABASE_NAME}/`.
### 3.1 heed
The default database used is [`heed`](https://github.com/meilisearch/heed) (LMDB). The upstream versions from [`crates.io`](https://crates.io/crates/heed) are used. `LMDB` should not need to be installed as `heed` has a build script that pulls it in automatically.
`heed`'s filenames inside Cuprate's database folder (`~/.local/share/cuprate/database/`) are:
@ -149,11 +173,11 @@ The default database used is [`heed`](https://github.com/meilisearch/heed) (LMDB
| `data.mdb` | Main data file
| `lock.mdb` | Database lock file
TODO: document max readers limit: https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1372. Other potential processes (e.g. `xmrblocks`) that are also reading the `data.mdb` file need to be accounted for.
`heed`-specific notes:
- [There is a maximum reader limit](https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1372). Other potential processes (e.g. `xmrblocks`) that are also reading the `data.mdb` file need to be accounted for
- [LMDB does not work on remote filesystem](https://github.com/LMDB/lmdb/blob/b8e54b4c31378932b69f1298972de54a565185b1/libraries/liblmdb/lmdb.h#L129)
TODO: document DB on remote filesystem: https://github.com/LMDB/lmdb/blob/b8e54b4c31378932b69f1298972de54a565185b1/libraries/liblmdb/lmdb.h#L129.
## `redb`
### 3.2 redb
The 2nd database backend is the 100% Rust [`redb`](https://github.com/cberner/redb).
The upstream versions from [`crates.io`](https://crates.io/crates/redb) are used.
@ -164,45 +188,411 @@ The upstream versions from [`crates.io`](https://crates.io/crates/redb) are used
|-------------|---------|
| `data.redb` | Main data file
TODO: document DB on remote filesystem (does redb allow this?)
<!-- TODO: document DB on remote filesystem (does redb allow this?) -->
## `redb-memory`
This backend is 100% the same as `redb`, although it uses `redb::backend::InMemoryBackend` which is a key-value store that completely resides in memory instead of a file.
### 3.3 redb-memory
This backend is 100% the same as `redb`, although it uses `redb::backend::InMemoryBackend` which is a database that completely resides in memory instead of a file.
All other details about this should be the same as the normal `redb` backend.
## `sanakirja`
### 3.4 sanakirja
[`sanakirja`](https://docs.rs/sanakirja) was a candidate as a backend, however there were problems with maximum value sizes.
The default maximum value size is [1012 bytes](https://docs.rs/sanakirja/1.4.1/sanakirja/trait.Storable.html), which was too small for our requirements. Using [`sanakirja::Slice`](https://docs.rs/sanakirja/1.4.1/sanakirja/union.Slice.html) and [`sanakirja::UnsizedStorable`](https://docs.rs/sanakirja/1.4.1/sanakirja/trait.UnsizedStorable.html) was attempted, but bugs were found when inserting values between `512..=4096` bytes.
As such, it is not implemented.
## `MDBX`
[`MDBX`](https://erthink.github.io/libmdbx) was a candidate as a backend; however, MDBX deprecated custom key/value comparison functions, which makes it a bit trickier to implement duplicate tables. It is also quite similar to the main backend LMDB (of which it was originally a fork).
### 3.5 MDBX
[`MDBX`](https://erthink.github.io/libmdbx) was a candidate as a backend; however, MDBX deprecated custom key/value comparison functions, which makes it a bit trickier to implement [`9.2 Multimap tables`](#92-multimap-tables). It is also quite similar to the main backend LMDB (of which it was originally a fork).
As such, it is not implemented (yet).
# Layers
TODO: update with accurate information when ready, update image.
## 4. Layers
`cuprate_database` is logically abstracted into 5 layers, with each layer being built upon the last.
## Database
## Trait
## ConcreteEnv
## Thread
## Service
Starting from the lowest:
1. Backend
2. Trait
3. ConcreteEnv
4. `ops`
5. `service`
# Resizing
TODO: document resize algorithm:
- Exactly when it occurs
- How many bytes are added
<!-- TODO: insert image here after database/ split -->
All backends follow the same algorithm.
### 4.1 Backend
This is the actual database backend implementation (or a Rust shim over one).
# Flushing
TODO: document disk flushing behavior.
- Config options
- Backend-specific behavior
Examples:
- `heed` (LMDB)
- `redb`
# (De)serialization
TODO: document `Storable` and how databases (de)serialize types when storing/fetching.
`cuprate_database` itself just uses a backend; it does not implement one.
All backends have the following attributes:
- [Embedded](https://en.wikipedia.org/wiki/Embedded_database)
- [Multiversion concurrency control](https://en.wikipedia.org/wiki/Multiversion_concurrency_control)
- [ACID](https://en.wikipedia.org/wiki/ACID)
- Are `(key, value)` oriented and have the expected API (`get()`, `insert()`, `delete()`)
- Are table oriented (`"table_name" -> (key, value)`)
- Allow concurrent readers
### 4.2 Trait
`cuprate_database` provides a set of `trait`s that abstract over the various database backends.
This allows function signatures and behavior to stay the same while making it easier to swap out the underlying database.
All common backend behavior is encapsulated in these `trait`s and used instead of calling the backend directly.
Examples:
- [`trait Env`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/env.rs)
- [`trait {TxRo, TxRw}`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/transaction.rs)
- [`trait {DatabaseRo, DatabaseRw}`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/database.rs)
For example, instead of calling `LMDB` or `redb`'s `get()` function directly, `DatabaseRo::get()` is called.
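To make the pattern concrete, below is a minimal, self-contained sketch of the same idea; the trait and backends here are simplified stand-ins, not `cuprate_database`'s real definitions:

```rust
// A toy version of the abstraction: one trait, two "backends".
trait DatabaseRo {
    fn get(&self, key: &str) -> Option<String>;
}

struct BackendA(std::collections::HashMap<String, String>);
struct BackendB(std::collections::BTreeMap<String, String>);

impl DatabaseRo for BackendA {
    fn get(&self, key: &str) -> Option<String> {
        self.0.get(key).cloned()
    }
}

impl DatabaseRo for BackendB {
    fn get(&self, key: &str) -> Option<String> {
        self.0.get(key).cloned()
    }
}

// Callers are written against the trait, so swapping
// the backend does not change any call-site.
fn lookup(db: &impl DatabaseRo, key: &str) -> Option<String> {
    db.get(key)
}
```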
### 4.3 ConcreteEnv
This is the non-generic, concrete `struct` provided by `cuprate_database` that contains all the data necessary to operate the database. The actual database backend `ConcreteEnv` will use internally depends on which backend feature is used.
`ConcreteEnv` implements `trait Env`, which opens the door to all the other traits.
The equivalent objects in the backends themselves are:
- [`heed::Env`](https://docs.rs/heed/0.20.0/heed/struct.Env.html)
- [`redb::Database`](https://docs.rs/redb/2.1.0/redb/struct.Database.html)
This is the main object used when handling the database directly, although a user does not need to do so if the [`4.5 service`](#45-service) layer is used.
### 4.4 ops
These are Monero-specific functions that use the abstracted `trait` forms of the database.
Instead of dealing with the database directly:
- `get()`
- `delete()`
the `ops` layer provides more abstract functions that deal with commonly used Monero operations:
- `add_block()`
- `pop_block()`
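A sketch of the difference in abstraction level; everything below is a hypothetical stand-in (plain `HashMap`s instead of real tables), purely for illustration:

```rust
use std::collections::HashMap;

struct Block {
    height: u64,
    bytes: Vec<u8>,
    tx_hashes: Vec<[u8; 32]>,
}

// Database level: one key, one value, one table at a time;
// the caller must remember to update every related table itself.
fn database_level(block_blobs: &mut HashMap<u64, Vec<u8>>, block: &Block) {
    block_blobs.insert(block.height, block.bytes.clone());
    // ...and the other tables? That's the caller's problem.
}

// `ops` level: one Monero-semantic call updates all related tables.
fn add_block(
    block_blobs: &mut HashMap<u64, Vec<u8>>,
    tx_heights: &mut HashMap<[u8; 32], u64>,
    block: &Block,
) {
    block_blobs.insert(block.height, block.bytes.clone());
    for tx_hash in &block.tx_hashes {
        tx_heights.insert(*tx_hash, block.height);
    }
}
```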
### 4.5 service
The final layer abstracts the database completely into a [Monero-specific `async` request/response API](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/types/src/service.rs#L18-L78) using [`tower::Service`](https://docs.rs/tower/latest/tower/trait.Service.html).
For more information on this layer, see the next section: [`5. The service`](#5-the-service).
## 5. The service
The main API `cuprate_database` exposes for other crates to use is the `cuprate_database::service` module.
This module exposes an `async` request/response API with `tower::Service`, backed by a threadpool, that allows reading/writing Monero-related data from/to the database.
`cuprate_database::service` itself manages the database using a separate writer thread & reader thread-pool, and uses the previously mentioned [`4.4 ops`](#44-ops) functions when responding to requests.
### 5.1 Initialization
The service is started simply by calling: [`cuprate_database::service::init()`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/service/free.rs#L23).
This function initializes the database, spawns threads, and returns a:
- Read handle to the database (cloneable)
- Write handle to the database (not cloneable)
These "handles" implement the `tower::Service` trait, which allows sending requests and receiving responses `async`hronously.
### 5.2 Requests
Along with the 2 handles, there are 2 types of requests:
- [`ReadRequest`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/types/src/service.rs#L23-L90)
- [`WriteRequest`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/types/src/service.rs#L93-L105)
`ReadRequest` is for retrieving various types of information from the database.
`WriteRequest` currently only has 1 variant: to write a block to the database.
### 5.3 Responses
After sending one of the above requests using the read/write handle, the value returned is _not_ the response, but an `async`hronous channel that will eventually return the response:
```rust,ignore
// Send a request.
// tower::Service::call()
// V
let response_channel: Channel = read_handle.call(ReadRequest::ChainHeight)?;
// Await the response.
let response: Response = response_channel.await?;
// Assert the response is what we expected.
assert!(matches!(response, Response::ChainHeight(_)));
```
After `await`ing the returned channel, a `Response` will eventually be returned when the `service` threadpool has fetched the value from the database and sent it off.
Both read/write request variants match in name with `Response` variants, i.e.
- `ReadRequest::ChainHeight` leads to `Response::ChainHeight`
- `WriteRequest::WriteBlock` leads to `Response::WriteBlockOk`
### 5.4 Thread model
As mentioned in the [`4. Layers`](#4-layers) section, the base database abstractions themselves are not concerned with parallelism; they are mostly functions to be called from a single thread.
However, the `cuprate_database::service` API _does_ have a thread model backing it.
When [`cuprate_database::service`'s initialization function](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/free.rs#L33-L44) is called, threads will be spawned and maintained until the user drops (disconnects) the returned handles.
The current behavior for thread count is:
- [1 writer thread](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/write.rs#L52-L66)
- [As many reader threads as there are system threads](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/read.rs#L104-L126)
For example, on a system with 32-threads, `cuprate_database` will spawn:
- 1 writer thread
- 32 reader threads
whose sole responsibility is to listen for database requests, access the database (potentially in parallel), and return a response.
Note that the `1 system thread = 1 reader thread` model is only the default setting; the reader thread count can be configured by the user to be any number between `1 .. amount_of_system_threads`.
The reader threads are managed by [`rayon`](https://docs.rs/rayon).
For an example of where multiple reader threads are used: given a request that asks if any key-image within a set already exists, `cuprate_database` will [split that work between the threads with `rayon`](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/read.rs#L490-L503).
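A sketch of that fan-out using `rayon` directly; the `HashSet` stands in for the real key-image table:

```rust
use rayon::prelude::*;
use std::collections::HashSet;

/// "Do any of these key images already exist in the database?"
fn any_key_image_exists(db: &HashSet<[u8; 32]>, key_images: &[[u8; 32]]) -> bool {
    // `par_iter()` divides the slice between `rayon`'s worker threads;
    // `any()` short-circuits as soon as one thread finds a match.
    key_images.par_iter().any(|key_image| db.contains(key_image))
}
```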
### 5.5 Shutdown
Once the read/write handles are `Drop`ed, the backing thread(pool) will automatically and gracefully exit.
Note that the writer thread and reader threadpool aren't connected whatsoever; dropping the write handle will make the writer thread exit, however, the read handle can still be held onto and read from - and vice-versa for the write handle.
## 6. Syncing
`cuprate_database` has 5 disk syncing modes.
1. FastThenSafe
1. Safe
1. Async
1. Threshold
1. Fast
The default mode is `Safe`.
This means that upon each transaction commit, all the data that was written will be fully synced to disk. This is the slowest, but safest mode of operation.
Note that upon any database `Drop`, whether via `service` or dropping the database directly, the current implementation will sync to disk regardless of any configuration.
For more information on the other modes, read the documentation [here](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/config/sync_mode.rs#L63-L144).
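An illustrative sketch of what each mode implies at commit time; the comments paraphrase the linked documentation and are not exact behavior:

```rust
enum SyncMode {
    FastThenSafe,
    Safe,
    Async,
    Threshold(usize),
    Fast,
}

// Conceptually, on each write transaction commit:
fn on_commit(mode: &SyncMode) {
    match mode {
        // Fully sync all data to disk (the default).
        SyncMode::Safe => { /* fsync now */ }
        // Let the OS flush asynchronously; faster, less durable.
        SyncMode::Async => { /* queue an async flush */ }
        // Only fully sync after `n` bytes have been written.
        SyncMode::Threshold(_n) => { /* fsync every `n` bytes */ }
        // Run fast at first, switch to `Safe` later (not yet implemented).
        SyncMode::FastThenSafe => { /* unimplemented */ }
        // Never sync during normal operation; fastest, least safe.
        SyncMode::Fast => { /* leave it to the OS page cache */ }
    }
}
```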
## 7. Resizing
Database backends that require manual resizing will, by default, use a similar algorithm to `monerod`'s.
Note that this only relates to the `service` module, where the database is handled by `cuprate_database` itself, not the user. In the case of a user directly using `cuprate_database`, it is up to them how and when to resize.
Within `service`, the resizing logic defined [here](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/service/write.rs#L139-L201) does the following:
- If there's not enough space to fit a write request's data, start a resize
- Each resize adds around [`1_073_745_920`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L104-L160) bytes to the current map size
- A resize will be attempted `3` times before failing
There are other [resizing algorithms](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L38-L47) that define how the database's memory map grows, although currently the behavior of [`monerod`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L104-L160) is closely followed.
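A sketch of the fixed-increment arithmetic described above; the exact increment and rounding live in `resize.rs`, this is only the shape of it:

```rust
/// `monerod`-style resize step: grow the memory map by ~1 GiB,
/// rounded up to a multiple of the OS page size (memory maps
/// must be page-size aligned).
fn monero_resize(current_size_bytes: usize, page_size: usize) -> usize {
    const ADD_BYTES: usize = 1_073_741_824; // 1 GiB
    let new_size = current_size_bytes + ADD_BYTES;
    // Round up to the next page-size multiple.
    new_size.div_ceil(page_size) * page_size
}
```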
## 8. (De)serialization
All types stored inside the database are either bytes already, or are perfectly bitcast-able.
As such, they do not incur heavy (de)serialization costs when storing/fetching them from the database. The main (de)serialization used is [`bytemuck`](https://docs.rs/bytemuck)'s traits and casting functions.
The size & layout of types is stable across compiler versions, as they are set and determined with [`#[repr(C)]`](https://doc.rust-lang.org/nomicon/other-reprs.html#reprc) and `bytemuck`'s derive macros such as [`bytemuck::Pod`](https://docs.rs/bytemuck/latest/bytemuck/derive.Pod.html).
Note that the data stored in the tables are still type-safe; we still refer to the key and values within our tables by the type.
The main deserialization `trait` for database storage is: [`cuprate_database::Storable`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L16-L115).
- Before storage, the type is [simply cast into bytes](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L125)
- When fetching, the bytes are [simply cast into the type](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L130)
When a type is cast into bytes, [the reference is cast](https://docs.rs/bytemuck/latest/bytemuck/fn.bytes_of.html), i.e. this is zero-cost serialization.
However, it is worth noting that when bytes are cast into the type, [they are copied](https://docs.rs/bytemuck/latest/bytemuck/fn.pod_read_unaligned.html). This is due to byte alignment guarantee issues with both backends, see:
- https://github.com/AltSysrq/lmdb-zero/issues/8
- https://github.com/cberner/redb/issues/360
Without this, `bytemuck` will panic with [`TargetAlignmentGreaterAndInputNotAligned`](https://docs.rs/bytemuck/latest/bytemuck/enum.PodCastError.html#variant.TargetAlignmentGreaterAndInputNotAligned) when casting.
Copying the bytes fixes this problem, although it is more costly than necessary. However, in the main use-case for `cuprate_database` (the `service` module) the bytes would need to be owned regardless as the `Request/Response` API uses owned data types (`T`, `Vec<T>`, `HashMap<K, V>`, etc).
Practically speaking, this means lower-level database functions that normally look like this:
```rust
fn get(key: &Key) -> &Value;
```
end up looking like this in `cuprate_database`:
```rust
fn get(key: &Key) -> Value;
```
Since each backend has its own (de)serialization methods, our types are wrapped in compatibility types that map our `Storable` functions into whatever is required for the backend, e.g:
- [`StorableHeed<T>`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/backend/heed/storable.rs#L11-L45)
- [`StorableRedb<T>`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/backend/redb/storable.rs#L11-L30)
Compatibility structs also exist for any `Storable` containers:
- [`StorableVec<T>`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L135-L191)
- [`StorableBytes`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L208-L241)
Again, it's unfortunate that these must be owned, although in `service`'s use-case, they would have to be owned anyway.
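As a self-contained illustration of the zero-cost write and copied read described above, using `bytemuck` directly (the struct mirrors `PreRctOutputId` from [`9.2 Multimap tables`](#92-multimap-tables); the values are arbitrary):

```rust
use bytemuck::{bytes_of, pod_read_unaligned, Pod, Zeroable};

// Two `u64`s: fixed layout, no padding, so `Pod` can be derived.
#[derive(Copy, Clone, Debug, PartialEq, Pod, Zeroable)]
#[repr(C)]
struct PreRctOutputId {
    amount: u64,
    amount_index: u64,
}

fn main() {
    let id = PreRctOutputId { amount: 1, amount_index: 7 };

    // Storing: a zero-cost reference cast into `&[u8]`.
    let bytes: &[u8] = bytes_of(&id);

    // Fetching: the backend may hand back unaligned bytes,
    // so they are *copied* into a properly aligned value.
    let fetched: PreRctOutputId = pod_read_unaligned(bytes);
    assert_eq!(id, fetched);
}
```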
## 9. Schema
The following section contains Cuprate's database schema. It may change throughout the development of Cuprate; as such, nothing here is final.
### 9.1 Tables
The `CamelCase` names of the table headers documented here (e.g. `TxIds`) are the actual type name of the table within `cuprate_database`.
Note that words written within `code blocks` are real types defined and usable within `cuprate_database`. Standard types like u64 and type aliases (TxId) are written normally.
Within `cuprate_database::tables`, the below table is essentially defined as-is with [a macro](https://github.com/Cuprate/cuprate/blob/31ce89412aa174fc33754f22c9a6d9ef5ddeda28/database/src/tables.rs#L369-L470).
Many of the stored data types share the same underlying type but differ semantically; as such, a map of the aliases used and their real data types is provided below.
| Alias | Real Type |
|----------------------------------------------------|-----------|
| BlockHeight, Amount, AmountIndex, TxId, UnlockTime | u64
| BlockHash, KeyImage, TxHash, PrunableHash | [u8; 32]
| Table | Key | Value | Description |
|-------------------|----------------------|--------------------|-------------|
| `BlockBlobs` | BlockHeight | `StorableVec<u8>` | Maps a block's height to a serialized byte form of a block
| `BlockHeights` | BlockHash | BlockHeight | Maps a block's hash to its height
| `BlockInfos` | BlockHeight | `BlockInfo` | Contains metadata of all blocks
| `KeyImages` | KeyImage | () | This table is a set with no value; it stores transaction key images
| `NumOutputs` | Amount | u64 | Maps an output's amount to the number of outputs with that amount
| `Outputs` | `PreRctOutputId` | `Output` | This table contains legacy CryptoNote outputs which have clear amounts. This table will not contain an output with 0 amount.
| `PrunedTxBlobs` | TxId | `StorableVec<u8>` | Contains pruned transaction blobs (even if the database is not pruned)
| `PrunableTxBlobs` | TxId | `StorableVec<u8>` | Contains the prunable part of a transaction
| `PrunableHashes` | TxId | PrunableHash | Contains the hash of the prunable part of a transaction
| `RctOutputs` | AmountIndex | `RctOutput` | Contains RingCT outputs mapped from their global RCT index
| `TxBlobs` | TxId | `StorableVec<u8>` | Serialized transaction blobs (bytes)
| `TxIds` | TxHash | TxId | Maps a transaction's hash to its index/ID
| `TxHeights` | TxId | BlockHeight | Maps a transaction's ID to the height of the block it comes from
| `TxOutputs` | TxId | `StorableVec<u64>` | Gives the amount indices of a transaction's outputs
| `TxUnlockTime` | TxId | UnlockTime | Stores the unlock time of a transaction (only if it has a non-zero lock time)
The definitions for aliases and types (e.g. `RctOutput`) are within the [`cuprate_database::types`](https://github.com/Cuprate/cuprate/blob/31ce89412aa174fc33754f22c9a6d9ef5ddeda28/database/src/types.rs#L51) module.
<!-- TODO(Boog900): We could split this table again into `RingCT (non-miner) Outputs` and `RingCT (miner) Outputs` as for miner outputs we can store the amount instead of commitment saving 24 bytes per miner output. -->
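As a sketch of what one row of the table above corresponds to in code, here is a simplified `Table` definition for `TxIds` (the trait is reduced to its essentials and the string name is illustrative):

```rust
// Simplified; the real trait in `table.rs` has more bounds.
trait Table {
    const NAME: &'static str;
    type Key;
    type Value;
}

/// Maps a transaction's hash to its index/ID.
struct TxIds;

impl Table for TxIds {
    const NAME: &'static str = "tx_ids";
    type Key = [u8; 32]; // TxHash
    type Value = u64;    // TxId
}
```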
### 9.2 Multimap tables
When referencing outputs, Monero will [use the amount and the amount index](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/blockchain_db/lmdb/db_lmdb.cpp#L3447-L3449). This means 2 keys are needed to reach an output.
With LMDB you can set the `DUP_SORT` flag on a table and then set the key/value to:
```rust
Key = KEY_PART_1
```
```rust
Value = {
KEY_PART_2,
VALUE // The actual value we are storing.
}
```
Then you can set a custom value sorting function that only takes `KEY_PART_2` into account; this is how `monerod` does it.
This requires that the underlying database supports:
- multimap tables
- custom sort functions on values
- setting a cursor on a specific key/value
---
Another way to implement this is as follows:
```rust
Key = { KEY_PART_1, KEY_PART_2 }
```
```rust
Value = VALUE
```
Then the key type is simply used to look up the value; this is how `cuprate_database` does it.
For example, the key/value pair for outputs is:
```rust
PreRctOutputId => Output
```
where `PreRctOutputId` looks like this:
```rust
struct PreRctOutputId {
amount: u64,
amount_index: u64,
}
```
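A self-contained demonstration of the composite-key approach, using a `BTreeMap` and a tuple as stand-ins for the backend and `PreRctOutputId`:

```rust
use std::collections::BTreeMap;

// `Ord` on the tuple sorts by `amount` first, then `amount_index`:
// the "2 keys to reach an output" lookup, flattened into 1 key.
type PreRctOutputId = (u64, u64); // (amount, amount_index)

fn main() {
    let mut outputs: BTreeMap<PreRctOutputId, &str> = BTreeMap::new();
    outputs.insert((1, 0), "1st output with amount 1");
    outputs.insert((1, 1), "2nd output with amount 1");
    outputs.insert((2, 0), "1st output with amount 2");

    // Point lookup using both key parts.
    assert_eq!(outputs.get(&(1, 1)), Some(&"2nd output with amount 1"));

    // Range over every output with `amount == 1`.
    assert_eq!(outputs.range((1, 0)..(2, 0)).count(), 2);
}
```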
## 10. Known issues and tradeoffs
`cuprate_database` takes many tradeoffs, whether due to:
- Prioritizing certain values over others
- Not having a better solution
- Being "good enough"
This is a list of the larger ones, along with issues that don't have answers yet.
### 10.1 Traits abstracting backends
Although all database backends used are very similar, they have some crucial differences in small implementation details that must be worked around when conforming them to `cuprate_database`'s traits.
Put simply: using `cuprate_database`'s traits is less efficient and more awkward than using the backend directly.
For example:
- [Data types must be wrapped in compatibility layers when they otherwise wouldn't be](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/backend/heed/env.rs#L101-L116)
- [There are types that only apply to a specific backend, but are visible to all](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/error.rs#L86-L89)
- [There are extra layers of abstraction to smoothen the differences between all backends](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/env.rs#L62-L68)
- [Existing functionality of backends must be taken away, as it isn't supported in the others](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/database.rs#L27-L34)
This is a _tradeoff_ that `cuprate_database` takes, as:
- The backend itself is usually not the source of bottlenecks in the greater system, as such, small inefficiencies are OK
- None of the lost functionality is crucial for operation
- The ability to use, test, and swap between multiple database backends is [worth it](https://github.com/Cuprate/cuprate/pull/35#issuecomment-1952804393)
### 10.2 Hot-swappable backends
Using a different backend is really as simple as re-building `cuprate_database` with a different feature flag:
```bash
# Use LMDB.
cargo build --package cuprate-database --features heed
# Use redb.
cargo build --package cuprate-database --features redb
```
This is "good enough" for now, however ideally, this hot-swapping of backends would be able to be done at _runtime_.
As it is now, `cuprate_database` cannot compile both backends and swap based on user input at runtime; it must be compiled with a certain backend, which will produce a binary with only that backend.
This also means things like [CI testing of multiple backends](https://github.com/Cuprate/cuprate/blob/main/.github/workflows/ci.yml#L132-L136) are awkward, as we must re-compile with different feature flags instead.
### 10.3 Copying unaligned bytes
As mentioned in [`8. (De)serialization`](#8-deserialization), bytes are _copied_ when they are turned into a type `T` due to unaligned bytes being returned from database backends.
Using a regular reference cast results in an improperly aligned type `T`; [such a type even existing causes undefined behavior](https://doc.rust-lang.org/reference/behavior-considered-undefined.html). In our case, `bytemuck` saves us by panicking before this occurs.
Thus, when using `cuprate_database`'s database traits, an _owned_ `T` is returned.
This is doubly unfortunate for `&[u8]`, as it does not even need deserialization.
For example, `StorableBytes` could have been this:
```rust
enum StorableBytes<'a, T: Storable> {
Owned(T),
Ref(&'a T),
}
```
but this would require supporting types that must be copied regardless, alongside the occasional `&[u8]` that can be returned without casting. This was hard to do in a generic way; thus, all `[u8]`s are copied and returned as owned `StorableVec`s.
This is a _tradeoff_ `cuprate_database` takes as:
- `bytemuck::pod_read_unaligned` is cheap enough
- The main API, `service`, needs to return owned values anyway
- Having no references removes a lot of lifetime complexity
The alternative is either:
- Using proper (de)serialization instead of casting (which comes with its own costs)
- Somehow fixing the alignment issues in the backends mentioned previously
### 10.4 Endianness
`cuprate_database`'s (de)serialization and storage of bytes are native-endian, i.e. byte storage order will depend on the machine the database is running on.
As Cuprate's build-targets are all little-endian ([big-endian by default machines barely exist](https://en.wikipedia.org/wiki/Endianness#Hardware)), this doesn't matter much and the byte ordering can be seen as a constant.
Practically, this means `cuprated`'s database files can be transferred across computers, as can `monerod`'s.
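A small demonstration of what native-endian storage means for the raw bytes (values arbitrary):

```rust
fn main() {
    let height: u64 = 1;

    // What effectively gets stored: native byte order.
    let stored = height.to_ne_bytes();

    // On a little-endian machine (all of Cuprate's build targets),
    // this holds; on a big-endian machine it would not, and the
    // database file would not be portable between the two.
    assert_eq!(stored, height.to_le_bytes());
}
```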
### 10.5 Extra table data
Some of `cuprate_database`'s tables differ from `monerod`'s tables. For example, the way [`9.2 Multimap tables`](#92-multimap-tables) are implemented requires that the primary key is stored _for every_ entry, whereas `monerod` only needs to store it once.
For example:
```rust
// `monerod` only stores `amount: 1` once,
// `cuprated` stores it each time it appears.
struct PreRctOutputId { amount: 1, amount_index: 0 }
struct PreRctOutputId { amount: 1, amount_index: 1 }
```
This means `cuprated`'s database will be slightly larger than `monerod`'s.
The current method `cuprate_database` uses will be "good enough" until usage shows that it must be optimized, as multimap tables are tricky to implement across all backends.

View file

@ -1,16 +1,10 @@
//! Implementation of `trait Database` for `heed`.
//---------------------------------------------------------------------------------------------------- Import
use std::{
borrow::{Borrow, Cow},
cell::RefCell,
fmt::Debug,
ops::RangeBounds,
sync::RwLockReadGuard,
};
use std::{cell::RefCell, ops::RangeBounds};
use crate::{
backend::heed::{storable::StorableHeed, types::HeedDb},
backend::heed::types::HeedDb,
database::{DatabaseIter, DatabaseRo, DatabaseRw},
error::RuntimeError,
table::Table,
@ -137,7 +131,8 @@ impl<T: Table> DatabaseIter<T> for HeedTableRo<'_, T> {
}
//---------------------------------------------------------------------------------------------------- DatabaseRo Impl
impl<T: Table> DatabaseRo<T> for HeedTableRo<'_, T> {
// SAFETY: `HeedTableRo: !Send` as it holds a reference to `heed::RoTxn: Send + !Sync`.
unsafe impl<T: Table> DatabaseRo<T> for HeedTableRo<'_, T> {
#[inline]
fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> {
get::<T>(&self.db, self.tx_ro, key)
@ -165,7 +160,9 @@ impl<T: Table> DatabaseRo<T> for HeedTableRo<'_, T> {
}
//---------------------------------------------------------------------------------------------------- DatabaseRw Impl
impl<T: Table> DatabaseRo<T> for HeedTableRw<'_, '_, T> {
// SAFETY: The `Send` bound only applies to `HeedTableRo`.
// `HeedTableRw`'s write transaction is `!Send`.
unsafe impl<T: Table> DatabaseRo<T> for HeedTableRw<'_, '_, T> {
#[inline]
fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> {
get::<T>(&self.db, &self.tx_rw.borrow(), key)
@ -204,55 +201,56 @@ impl<T: Table> DatabaseRw<T> for HeedTableRw<'_, '_, T> {
Ok(())
}
#[inline]
fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError> {
// LMDB/heed does not return the value on deletion.
// So, fetch it first - then delete.
let value = get::<T>(&self.db, &self.tx_rw.borrow(), key)?;
match self.db.delete(&mut self.tx_rw.borrow_mut(), key) {
Ok(true) => Ok(value),
Err(e) => Err(e.into()),
// We just `get()`'ed the value - it is
// incorrect for it to suddenly not exist.
Ok(false) => unreachable!(),
}
}
#[inline]
fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError> {
let tx_rw = &mut self.tx_rw.borrow_mut();
// Get the first value first...
let Some(first) = self.db.first(tx_rw)? else {
// Get the value first...
let Some((key, value)) = self.db.first(tx_rw)? else {
return Err(RuntimeError::KeyNotFound);
};
// ...then remove it.
//
// We use an iterator because we want to semantically
// remove the _first_ and only the first `(key, value)`.
// `delete()` removes all keys including duplicates which
// is slightly different behavior.
let mut iter = self.db.iter_mut(tx_rw)?;
// SAFETY:
// It is undefined behavior to keep a reference of
// a value from this database while modifying it.
// We are deleting the value and never accessing
// the iterator again so this should be safe.
unsafe {
iter.del_current()?;
match self.db.delete(tx_rw, &key) {
Ok(true) => Ok((key, value)),
Err(e) => Err(e.into()),
// We just `get()`'ed the value - it is
// incorrect for it to suddenly not exist.
Ok(false) => unreachable!(),
}
Ok(first)
}
#[inline]
fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError> {
let tx_rw = &mut self.tx_rw.borrow_mut();
let Some(first) = self.db.last(tx_rw)? else {
// Get the value first...
let Some((key, value)) = self.db.last(tx_rw)? else {
return Err(RuntimeError::KeyNotFound);
};
let mut iter = self.db.rev_iter_mut(tx_rw)?;
// SAFETY:
// It is undefined behavior to keep a reference of
// a value from this database while modifying it.
// We are deleting the value and never accessing
// the iterator again so this should be safe.
unsafe {
iter.del_current()?;
// ...then remove it.
match self.db.delete(tx_rw, &key) {
Ok(true) => Ok((key, value)),
Err(e) => Err(e.into()),
// We just `get()`'ed the value - it is
// incorrect for it to suddenly not exist.
Ok(false) => unreachable!(),
}
Ok(first)
}
}

View file

@ -3,9 +3,8 @@
//---------------------------------------------------------------------------------------------------- Import
use std::{
cell::RefCell,
fmt::Debug,
ops::Deref,
sync::{RwLock, RwLockReadGuard, RwLockWriteGuard},
num::NonZeroUsize,
sync::{RwLock, RwLockReadGuard},
};
use heed::{DatabaseOpenOptions, EnvFlags, EnvOpenOptions};
@ -22,10 +21,11 @@ use crate::{
error::{InitError, RuntimeError},
resize::ResizeAlgorithm,
table::Table,
tables::call_fn_on_all_tables_or_early_return,
};
//---------------------------------------------------------------------------------------------------- Consts
/// TODO
/// Panic message when there's a table missing.
const PANIC_MSG_MISSING_TABLE: &str =
"cuprate_database::Env should uphold the invariant that all tables are already created";
@ -48,7 +48,7 @@ pub struct ConcreteEnv {
/// `reader_count` would be spun on until 0, at which point
/// we are safe to resize.
///
/// Although, 3 atomic operations (check atomic bool, reader_count++, reader_count--)
/// Although, 3 atomic operations (check atomic bool, `reader_count++`, `reader_count--`)
/// turns out to be roughly as expensive as acquiring a non-contended `RwLock`,
/// the CPU sleeping instead of spinning is much better too.
///
@ -67,7 +67,7 @@ impl Drop for ConcreteEnv {
fn drop(&mut self) {
// INVARIANT: drop(ConcreteEnv) must sync.
//
// TODO:
// SOMEDAY:
// "if the environment has the MDB_NOSYNC flag set the flushes will be omitted,
// and with MDB_MAPASYNC they will be asynchronous."
// <http://www.lmdb.tech/doc/group__mdb.html#ga85e61f05aa68b520cc6c3b981dba5037>
@ -75,7 +75,7 @@ impl Drop for ConcreteEnv {
// We need to do `mdb_env_set_flags(&env, MDB_NOSYNC|MDB_ASYNCMAP, 0)`
// to clear the no sync and async flags such that the below `self.sync()`
// _actually_ synchronously syncs.
if let Err(e) = crate::Env::sync(self) {
if let Err(_e) = crate::Env::sync(self) {
// TODO: log error?
}
@ -117,10 +117,11 @@ impl Env for ConcreteEnv {
#[cold]
#[inline(never)] // called once.
#[allow(clippy::items_after_statements)]
fn open(config: Config) -> Result<Self, InitError> {
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
let mut env_open_options = EnvOpenOptions::new();
// Map our `Config` sync mode to the LMDB environment flags.
//
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
@ -128,11 +129,21 @@ impl Env for ConcreteEnv {
SyncMode::Safe => EnvFlags::empty(),
SyncMode::Async => EnvFlags::MAP_ASYNC,
SyncMode::Fast => EnvFlags::NO_SYNC | EnvFlags::WRITE_MAP | EnvFlags::MAP_ASYNC,
// TODO: dynamic syncs are not implemented.
// SOMEDAY: dynamic syncs are not implemented.
SyncMode::FastThenSafe | SyncMode::Threshold(_) => unimplemented!(),
};
let mut env_open_options = EnvOpenOptions::new();
// SAFETY: the flags we're setting are 'unsafe'
// from a data durability perspective, although,
// the user config wanted this.
//
// MAYBE: We may need to open/create tables with certain flags
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
// MAYBE: Set comparison functions for certain tables
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
unsafe {
env_open_options.flags(flags);
}
// Set the memory map size to
// (current disk size) + (a bit of leeway)
@ -151,7 +162,7 @@ impl Env for ConcreteEnv {
// Set the max amount of database tables.
// We know at compile time how many tables there are.
// TODO: ...how many?
// SOMEDAY: ...how many?
env_open_options.max_dbs(32);
// LMDB documentation:
@ -166,38 +177,33 @@ impl Env for ConcreteEnv {
// - Use at least 126 reader threads
// - Add 16 extra reader threads if <126
//
// TODO: This behavior is from `monerod`:
// FIXME: This behavior is from `monerod`:
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
// I believe this could be adjusted percentage-wise so very high
// thread PCs can benefit from something like (cuprated + anything that uses the DB in the future).
// For now:
// - No other program using our DB exists
// - Almost no-one has a 126+ thread CPU
#[allow(clippy::cast_possible_truncation)] // no-one has `u32::MAX`+ threads
let reader_threads = config.reader_threads.as_threads().get() as u32;
let reader_threads =
u32::try_from(config.reader_threads.as_threads().get()).unwrap_or(u32::MAX);
env_open_options.max_readers(if reader_threads < 110 {
126
} else {
reader_threads + 16
reader_threads.saturating_add(16)
});
// Create the database directory if it doesn't exist.
std::fs::create_dir_all(config.db_directory())?;
// Open the environment in the user's PATH.
let env = env_open_options.open(config.db_directory())?;
// TODO: Open/create tables with certain flags
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
// `heed` creates the database if it didn't exist.
// <https://docs.rs/heed/0.20.0-alpha.9/src/heed/env.rs.html#223-229>
// SAFETY: LMDB uses a memory-map backed file.
// <https://docs.rs/heed/0.20.0/heed/struct.EnvOpenOptions.html#method.open>
let env = unsafe { env_open_options.open(config.db_directory())? };
/// Function that creates the tables based off the passed `T: Table`.
fn create_table<T: Table>(
env: &heed::Env,
tx_rw: &mut heed::RwTxn<'_>,
) -> Result<(), InitError> {
println!("create_table(): {}", T::NAME); // TODO: use tracing.
DatabaseOpenOptions::new(env)
.name(<T as Table>::NAME)
.types::<StorableHeed<<T as Table>::Key>, StorableHeed<<T as Table>::Value>>()
@ -205,31 +211,17 @@ impl Env for ConcreteEnv {
Ok(())
}
use crate::tables::{
BlockBlobs, BlockHeights, BlockInfoV1s, BlockInfoV2s, BlockInfoV3s, KeyImages,
NumOutputs, Outputs, PrunableHashes, PrunableTxBlobs, PrunedTxBlobs, RctOutputs,
TxHeights, TxIds, TxUnlockTime,
};
let mut tx_rw = env.write_txn()?;
create_table::<BlockBlobs>(&env, &mut tx_rw)?;
create_table::<BlockHeights>(&env, &mut tx_rw)?;
create_table::<BlockInfoV1s>(&env, &mut tx_rw)?;
create_table::<BlockInfoV2s>(&env, &mut tx_rw)?;
create_table::<BlockInfoV3s>(&env, &mut tx_rw)?;
create_table::<KeyImages>(&env, &mut tx_rw)?;
create_table::<NumOutputs>(&env, &mut tx_rw)?;
create_table::<Outputs>(&env, &mut tx_rw)?;
create_table::<PrunableHashes>(&env, &mut tx_rw)?;
create_table::<PrunableTxBlobs>(&env, &mut tx_rw)?;
create_table::<PrunedTxBlobs>(&env, &mut tx_rw)?;
create_table::<RctOutputs>(&env, &mut tx_rw)?;
create_table::<TxHeights>(&env, &mut tx_rw)?;
create_table::<TxIds>(&env, &mut tx_rw)?;
create_table::<TxUnlockTime>(&env, &mut tx_rw)?;
// TODO: Set dupsort and comparison functions for certain tables
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
// Create all tables.
// FIXME: this macro is kinda awkward.
{
let env = &env;
let tx_rw = &mut tx_rw;
match call_fn_on_all_tables_or_early_return!(create_table(env, tx_rw)) {
Ok(_) => (),
Err(e) => return Err(e),
}
}
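// For reference, a hedged sketch of what the macro call above roughly
// expands to (assumed expansion, not the actual macro output):
//
// create_table::<BlockBlobs>(env, tx_rw)?;
// create_table::<BlockHeights>(env, tx_rw)?;
// /* ...one `create_table::<T>()?` call per table in `crate::tables`... */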
// INVARIANT: this should never return `ResizeNeeded` due to adding
// some tables since we added some leeway to the memory map above.
@ -249,11 +241,11 @@ impl Env for ConcreteEnv {
Ok(self.env.read().unwrap().force_sync()?)
}
fn resize_map(&self, resize_algorithm: Option<ResizeAlgorithm>) {
fn resize_map(&self, resize_algorithm: Option<ResizeAlgorithm>) -> NonZeroUsize {
let resize_algorithm = resize_algorithm.unwrap_or_else(|| self.config().resize_algorithm);
let current_size_bytes = self.current_map_size();
let new_size_bytes = resize_algorithm.resize(current_size_bytes).get();
let new_size_bytes = resize_algorithm.resize(current_size_bytes);
// SAFETY:
// Resizing requires that we have
@ -264,8 +256,14 @@ impl Env for ConcreteEnv {
// <http://www.lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5>
unsafe {
// INVARIANT: `resize()` returns a valid `usize` to resize to.
self.env.write().unwrap().resize(new_size_bytes).unwrap();
self.env
.write()
.unwrap()
.resize(new_size_bytes.get())
.unwrap();
}
new_size_bytes
}
#[inline]

View file

@ -20,7 +20,6 @@ impl From<heed::Error> for crate::InitError {
E1::Mdb(mdb_error) => match mdb_error {
E2::Invalid => Self::Invalid,
E2::VersionMismatch => Self::InvalidVersion,
E2::Other(c_int) => Self::Unknown(Box::new(mdb_error)),
// "Located page was wrong type".
// <https://docs.rs/heed/latest/heed/enum.MdbError.html#variant.Corrupted>
@ -31,6 +30,7 @@ impl From<heed::Error> for crate::InitError {
// These errors shouldn't be returned on database init.
E2::Incompatible
| E2::Other(_)
| E2::BadTxn
| E2::Problem
| E2::KeyExist
@ -49,10 +49,9 @@ impl From<heed::Error> for crate::InitError {
| E2::Panic => Self::Unknown(Box::new(mdb_error)),
},
E1::InvalidDatabaseTyping
| E1::BadOpenOptions { .. }
| E1::Encoding(_)
| E1::Decoding(_) => Self::Unknown(Box::new(error)),
E1::BadOpenOptions { .. } | E1::Encoding(_) | E1::Decoding(_) => {
Self::Unknown(Box::new(error))
}
}
}
}
@ -109,7 +108,7 @@ impl From<heed::Error> for crate::RuntimeError {
// occurring indicates we did _not_ do that, which is a bug
// and we should panic.
//
// TODO: This can also mean _another_ process wrote to our
// FIXME: This can also mean _another_ process wrote to our
// LMDB file and increased the size. I don't think we need to accommodate this.
// <http://www.lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5>
// Although `monerod` reacts to that instead of `MDB_MAP_FULL`
@ -139,11 +138,9 @@ impl From<heed::Error> for crate::RuntimeError {
},
// Only if we write incorrect code.
E1::InvalidDatabaseTyping
| E1::DatabaseClosing
| E1::BadOpenOptions { .. }
| E1::Encoding(_)
| E1::Decoding(_) => panic!("fix the database code! {error:#?}"),
E1::DatabaseClosing | E1::BadOpenOptions { .. } | E1::Encoding(_) | E1::Decoding(_) => {
panic!("fix the database code! {error:#?}")
}
}
}
}

View file

@ -1,11 +1,11 @@
//! `cuprate_database::Storable` <-> `heed` serde trait compatibility layer.
//---------------------------------------------------------------------------------------------------- Use
use std::{borrow::Cow, fmt::Debug, marker::PhantomData};
use std::{borrow::Cow, marker::PhantomData};
use heed::{types::Bytes, BoxedError, BytesDecode, BytesEncode, Database};
use heed::{BoxedError, BytesDecode, BytesEncode};
use crate::{storable::Storable, storable::StorableVec};
use crate::storable::Storable;
//---------------------------------------------------------------------------------------------------- StorableHeed
/// The glue struct that implements `heed`'s (de)serialization
@ -47,6 +47,8 @@ where
//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
mod test {
use std::fmt::Debug;
use super::*;
use crate::{StorableBytes, StorableVec};

View file

@ -1,6 +1,6 @@
//! Implementation of `trait TxRo/TxRw` for `heed`.
use std::{cell::RefCell, ops::Deref, sync::RwLockReadGuard};
use std::cell::RefCell;
//---------------------------------------------------------------------------------------------------- Import
use crate::{

View file

@ -1,13 +1,4 @@
//! Database backends.
//!
//! TODO:
//! Create a test backend backed by `std::collections::HashMap`.
//!
//! The full type could be something like `HashMap<&'static str, HashMap<K, V>>`.
//! where the `str` is the table name, and the contained hashmap holds the
//! keys and values.
//!
//! Not sure how duplicate keys will work.
cfg_if::cfg_if! {
// If both backends are enabled, fallback to `heed`.

View file

@ -1,12 +1,7 @@
//! Implementation of `trait DatabaseR{o,w}` for `redb`.
//---------------------------------------------------------------------------------------------------- Import
use std::{
borrow::{Borrow, Cow},
fmt::Debug,
marker::PhantomData,
ops::{Bound, Deref, RangeBounds},
};
use std::ops::RangeBounds;
use redb::ReadableTable;
@ -17,7 +12,6 @@ use crate::{
},
database::{DatabaseIter, DatabaseRo, DatabaseRw},
error::RuntimeError,
storable::Storable,
table::Table,
};
@ -118,7 +112,8 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
}
//---------------------------------------------------------------------------------------------------- DatabaseRo
impl<T: Table + 'static> DatabaseRo<T> for RedbTableRo<T::Key, T::Value> {
// SAFETY: Both `redb`'s transaction and table types are `Send + Sync`.
unsafe impl<T: Table + 'static> DatabaseRo<T> for RedbTableRo<T::Key, T::Value> {
#[inline]
fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> {
get::<T>(self, key)
@ -146,7 +141,8 @@ impl<T: Table + 'static> DatabaseRo<T> for RedbTableRo<T::Key, T::Value> {
}
//---------------------------------------------------------------------------------------------------- DatabaseRw
impl<T: Table + 'static> DatabaseRo<T> for RedbTableRw<'_, T::Key, T::Value> {
// SAFETY: Both `redb`'s transaction and table types are `Send + Sync`.
unsafe impl<T: Table + 'static> DatabaseRo<T> for RedbTableRw<'_, T::Key, T::Value> {
#[inline]
fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> {
get::<T>(self, key)
@ -188,6 +184,15 @@ impl<T: Table + 'static> DatabaseRw<T> for RedbTableRw<'_, T::Key, T::Value> {
Ok(())
}
#[inline]
fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError> {
if let Some(value) = redb::Table::remove(self, key)? {
Ok(value.value())
} else {
Err(RuntimeError::KeyNotFound)
}
}
#[inline]
fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError> {
let (key, value) = redb::Table::pop_first(self)?.ok_or(RuntimeError::KeyNotFound)?;

View file

@ -1,18 +1,14 @@
//! Implementation of `trait Env` for `redb`.
//---------------------------------------------------------------------------------------------------- Import
use std::{fmt::Debug, ops::Deref, path::Path, sync::Arc};
use crate::{
backend::redb::{
storable::StorableRedb,
types::{RedbTableRo, RedbTableRw},
},
backend::redb::storable::StorableRedb,
config::{Config, SyncMode},
database::{DatabaseIter, DatabaseRo, DatabaseRw},
env::{Env, EnvInner},
error::{InitError, RuntimeError},
table::Table,
tables::call_fn_on_all_tables_or_early_return,
TxRw,
};
@ -36,7 +32,8 @@ impl Drop for ConcreteEnv {
fn drop(&mut self) {
// INVARIANT: drop(ConcreteEnv) must sync.
if let Err(e) = self.sync() {
// TODO: log error?
// TODO: use tracing
println!("{e:#?}");
}
// TODO: log that we are dropping the database.
@ -53,23 +50,22 @@ impl Env for ConcreteEnv {
#[cold]
#[inline(never)] // called once.
#[allow(clippy::items_after_statements)]
fn open(config: Config) -> Result<Self, InitError> {
// TODO: dynamic syncs are not implemented.
// SOMEDAY: dynamic syncs are not implemented.
let durability = match config.sync_mode {
// TODO: There's also `redb::Durability::Paranoid`:
// FIXME: There's also `redb::Durability::Paranoid`:
// <https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Paranoid>
// should we use that instead of Immediate?
SyncMode::Safe => redb::Durability::Immediate,
SyncMode::Async => redb::Durability::Eventual,
SyncMode::Fast => redb::Durability::None,
// TODO: dynamic syncs are not implemented.
// SOMEDAY: dynamic syncs are not implemented.
SyncMode::FastThenSafe | SyncMode::Threshold(_) => unimplemented!(),
};
let env_builder = redb::Builder::new();
// TODO: we can set cache sizes with:
// FIXME: we can set cache sizes with:
// env_builder.set_cache(bytes);
// Use the in-memory backend if the feature is enabled.
@ -84,6 +80,7 @@ impl Env for ConcreteEnv {
.read(true)
.write(true)
.create(true)
.truncate(false)
.open(config.db_file())?;
env_builder.create_file(db_file)?
@ -95,8 +92,6 @@ impl Env for ConcreteEnv {
/// Function that creates the tables based off the passed `T: Table`.
fn create_table<T: Table>(tx_rw: &redb::WriteTransaction) -> Result<(), InitError> {
println!("create_table(): {}", T::NAME); // TODO: use tracing.
let table: redb::TableDefinition<
'static,
StorableRedb<<T as Table>::Key>,
@ -108,32 +103,20 @@ impl Env for ConcreteEnv {
Ok(())
}
use crate::tables::{
BlockBlobs, BlockHeights, BlockInfoV1s, BlockInfoV2s, BlockInfoV3s, KeyImages,
NumOutputs, Outputs, PrunableHashes, PrunableTxBlobs, PrunedTxBlobs, RctOutputs,
TxHeights, TxIds, TxUnlockTime,
};
let tx_rw = env.begin_write()?;
create_table::<BlockBlobs>(&tx_rw)?;
create_table::<BlockHeights>(&tx_rw)?;
create_table::<BlockInfoV1s>(&tx_rw)?;
create_table::<BlockInfoV2s>(&tx_rw)?;
create_table::<BlockInfoV3s>(&tx_rw)?;
create_table::<KeyImages>(&tx_rw)?;
create_table::<NumOutputs>(&tx_rw)?;
create_table::<Outputs>(&tx_rw)?;
create_table::<PrunableHashes>(&tx_rw)?;
create_table::<PrunableTxBlobs>(&tx_rw)?;
create_table::<PrunedTxBlobs>(&tx_rw)?;
create_table::<RctOutputs>(&tx_rw)?;
create_table::<TxHeights>(&tx_rw)?;
create_table::<TxIds>(&tx_rw)?;
create_table::<TxUnlockTime>(&tx_rw)?;
// Create all tables.
// FIXME: this macro is kinda awkward.
let mut tx_rw = env.begin_write()?;
{
let tx_rw = &mut tx_rw;
match call_fn_on_all_tables_or_early_return!(create_table(tx_rw)) {
Ok(_) => (),
Err(e) => return Err(e),
}
}
tx_rw.commit()?;
// Check for file integrity.
// TODO: should we do this? is it slow?
// FIXME: should we do this? is it slow?
env.check_integrity()?;
Ok(Self {

View file

@ -45,7 +45,7 @@ impl From<redb::StorageError> for InitError {
match error {
E::Io(e) => Self::Io(e),
E::Corrupted(s) => Self::Corrupt,
E::Corrupted(_) => Self::Corrupt,
// HACK: Handle new errors as `redb` adds them.
_ => Self::Unknown(Box::new(error)),
}
@ -56,8 +56,6 @@ impl From<redb::TransactionError> for InitError {
/// Created by `redb` in:
/// - [`redb::Database::begin_write`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.begin_write)
fn from(error: redb::TransactionError) -> Self {
use redb::StorageError as E;
match error {
redb::TransactionError::Storage(error) => error.into(),
// HACK: Handle new errors as `redb` adds them.
@ -70,7 +68,6 @@ impl From<redb::TableError> for InitError {
/// Created by `redb` in:
/// - [`redb::WriteTransaction::open_table`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.open_table)
fn from(error: redb::TableError) -> Self {
use redb::StorageError as E2;
use redb::TableError as E;
match error {
@ -85,8 +82,6 @@ impl From<redb::CommitError> for InitError {
/// Created by `redb` in:
/// - [`redb::WriteTransaction::commit`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.commit)
fn from(error: redb::CommitError) -> Self {
use redb::StorageError as E;
match error {
redb::CommitError::Storage(error) => error.into(),
// HACK: Handle new errors as `redb` adds them.
@ -102,8 +97,6 @@ impl From<redb::TransactionError> for RuntimeError {
/// - [`redb::Database::begin_write`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.begin_write)
/// - [`redb::Database::begin_read`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.begin_read)
fn from(error: redb::TransactionError) -> Self {
use redb::StorageError as E;
match error {
redb::TransactionError::Storage(error) => error.into(),
@ -118,8 +111,6 @@ impl From<redb::CommitError> for RuntimeError {
/// Created by `redb` in:
/// - [`redb::WriteTransaction::commit`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.commit)
fn from(error: redb::CommitError) -> Self {
use redb::StorageError as E;
match error {
redb::CommitError::Storage(error) => error.into(),
@ -135,7 +126,6 @@ impl From<redb::TableError> for RuntimeError {
/// - [`redb::WriteTransaction::open_table`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.open_table)
/// - [`redb::ReadTransaction::open_table`](https://docs.rs/redb/1.5.0/redb/struct.ReadTransaction.html#method.open_table)
fn from(error: redb::TableError) -> Self {
use redb::StorageError as E2;
use redb::TableError as E;
match error {

View file

@ -1,7 +1,7 @@
//! `cuprate_database::Storable` <-> `redb` serde trait compatibility layer.
//---------------------------------------------------------------------------------------------------- Use
use std::{any::Any, borrow::Cow, cmp::Ordering, fmt::Debug, marker::PhantomData};
use std::{cmp::Ordering, fmt::Debug, marker::PhantomData};
use redb::TypeName;

View file

@ -2,8 +2,6 @@
//---------------------------------------------------------------------------------------------------- Import
use crate::{
config::SyncMode,
env::Env,
error::RuntimeError,
transaction::{TxRo, TxRw},
};

View file

@ -1,7 +1,7 @@
//! `redb` type aliases.
//---------------------------------------------------------------------------------------------------- Types
use crate::{backend::redb::storable::StorableRedb, table::Table};
use crate::backend::redb::storable::StorableRedb;
//---------------------------------------------------------------------------------------------------- Types
/// The concrete type for readable `redb` tables.

View file

@ -13,50 +13,31 @@
//!
//! `redb`, and only it must be enabled for it to be tested.
#![allow(
clippy::items_after_statements,
clippy::significant_drop_tightening,
clippy::cast_possible_truncation
)]
//---------------------------------------------------------------------------------------------------- Import
use std::borrow::{Borrow, Cow};
use crate::{
config::{Config, SyncMode},
database::{DatabaseIter, DatabaseRo, DatabaseRw},
env::{Env, EnvInner},
error::{InitError, RuntimeError},
error::RuntimeError,
resize::ResizeAlgorithm,
storable::StorableVec,
table::Table,
tables::{
BlockBlobs, BlockHeights, BlockInfoV1s, BlockInfoV2s, BlockInfoV3s, KeyImages, NumOutputs,
Outputs, PrunableHashes, PrunableTxBlobs, PrunedTxBlobs, RctOutputs, TxHeights, TxIds,
BlockBlobs, BlockHeights, BlockInfos, KeyImages, NumOutputs, Outputs, PrunableHashes,
PrunableTxBlobs, PrunedTxBlobs, RctOutputs, TxBlobs, TxHeights, TxIds, TxOutputs,
TxUnlockTime,
},
tables::{TablesIter, TablesMut},
tests::tmp_concrete_env,
transaction::{TxRo, TxRw},
types::{
Amount, AmountIndex, AmountIndices, BlockBlob, BlockHash, BlockHeight, BlockInfoV1,
BlockInfoV2, BlockInfoV3, KeyImage, Output, PreRctOutputId, PrunableBlob, PrunableHash,
PrunedBlob, RctOutput, TxHash, TxId, UnlockTime,
Amount, AmountIndex, AmountIndices, BlockBlob, BlockHash, BlockHeight, BlockInfo, KeyImage,
Output, OutputFlags, PreRctOutputId, PrunableBlob, PrunableHash, PrunedBlob, RctOutput,
TxBlob, TxHash, TxId, UnlockTime,
},
ConcreteEnv,
};
//---------------------------------------------------------------------------------------------------- Tests
/// Create an `Env` in a temporary directory.
/// The directory is automatically removed after the `TempDir` is dropped.
///
/// TODO: changing this to `-> impl Env` causes lifetime errors...
fn tmp_concrete_env() -> (ConcreteEnv, tempfile::TempDir) {
let tempdir = tempfile::tempdir().unwrap();
let config = Config::low_power(Some(tempdir.path().into()));
let env = ConcreteEnv::open(config).unwrap();
(env, tempdir)
}
/// Simply call [`Env::open`]. If this fails, something is really wrong.
#[test]
fn open() {
@ -87,9 +68,7 @@ fn open_db() {
// This should be updated when tables are modified.
env_inner.open_db_ro::<BlockBlobs>(&tx_ro).unwrap();
env_inner.open_db_ro::<BlockHeights>(&tx_ro).unwrap();
env_inner.open_db_ro::<BlockInfoV1s>(&tx_ro).unwrap();
env_inner.open_db_ro::<BlockInfoV2s>(&tx_ro).unwrap();
env_inner.open_db_ro::<BlockInfoV3s>(&tx_ro).unwrap();
env_inner.open_db_ro::<BlockInfos>(&tx_ro).unwrap();
env_inner.open_db_ro::<KeyImages>(&tx_ro).unwrap();
env_inner.open_db_ro::<NumOutputs>(&tx_ro).unwrap();
env_inner.open_db_ro::<Outputs>(&tx_ro).unwrap();
@ -97,17 +76,17 @@ fn open_db() {
env_inner.open_db_ro::<PrunableTxBlobs>(&tx_ro).unwrap();
env_inner.open_db_ro::<PrunedTxBlobs>(&tx_ro).unwrap();
env_inner.open_db_ro::<RctOutputs>(&tx_ro).unwrap();
env_inner.open_db_ro::<TxBlobs>(&tx_ro).unwrap();
env_inner.open_db_ro::<TxHeights>(&tx_ro).unwrap();
env_inner.open_db_ro::<TxIds>(&tx_ro).unwrap();
env_inner.open_db_ro::<TxOutputs>(&tx_ro).unwrap();
env_inner.open_db_ro::<TxUnlockTime>(&tx_ro).unwrap();
TxRo::commit(tx_ro).unwrap();
// Open all tables in read/write mode.
env_inner.open_db_rw::<BlockBlobs>(&tx_rw).unwrap();
env_inner.open_db_rw::<BlockHeights>(&tx_rw).unwrap();
env_inner.open_db_rw::<BlockInfoV1s>(&tx_rw).unwrap();
env_inner.open_db_rw::<BlockInfoV2s>(&tx_rw).unwrap();
env_inner.open_db_rw::<BlockInfoV3s>(&tx_rw).unwrap();
env_inner.open_db_rw::<BlockInfos>(&tx_rw).unwrap();
env_inner.open_db_rw::<KeyImages>(&tx_rw).unwrap();
env_inner.open_db_rw::<NumOutputs>(&tx_rw).unwrap();
env_inner.open_db_rw::<Outputs>(&tx_rw).unwrap();
@ -115,8 +94,10 @@ fn open_db() {
env_inner.open_db_rw::<PrunableTxBlobs>(&tx_rw).unwrap();
env_inner.open_db_rw::<PrunedTxBlobs>(&tx_rw).unwrap();
env_inner.open_db_rw::<RctOutputs>(&tx_rw).unwrap();
env_inner.open_db_rw::<TxBlobs>(&tx_rw).unwrap();
env_inner.open_db_rw::<TxHeights>(&tx_rw).unwrap();
env_inner.open_db_rw::<TxIds>(&tx_rw).unwrap();
env_inner.open_db_rw::<TxOutputs>(&tx_rw).unwrap();
env_inner.open_db_rw::<TxUnlockTime>(&tx_rw).unwrap();
TxRw::commit(tx_rw).unwrap();
}
@ -166,7 +147,6 @@ fn non_manual_resize_2() {
/// Test all `DatabaseR{o,w}` operations.
#[test]
#[allow(clippy::too_many_lines)]
fn db_read_write() {
let (env, _tempdir) = tmp_concrete_env();
let env_inner = env.env_inner();
@ -182,7 +162,7 @@ fn db_read_write() {
const VALUE: Output = Output {
key: [35; 32],
height: 45_761_798,
output_flags: 0,
output_flags: OutputFlags::empty(),
tx_idx: 2_353_487,
};
/// How many `(key, value)` pairs will be inserted.
@ -202,7 +182,7 @@ fn db_read_write() {
// Insert keys.
let mut key = KEY;
for i in 0..N {
for _ in 0..N {
table.put(&key, &VALUE).unwrap();
key.amount += 1;
}
@ -271,6 +251,22 @@ fn db_read_write() {
}
}
// Assert `update()` works.
{
const HEIGHT: u32 = 999;
assert_ne!(table.get(&KEY).unwrap().height, HEIGHT);
table
.update(&KEY, |mut value| {
value.height = HEIGHT;
Some(value)
})
.unwrap();
assert_eq!(table.get(&KEY).unwrap().height, HEIGHT);
}
// Assert deleting works.
{
table.delete(&KEY).unwrap();
@ -284,6 +280,23 @@ fn db_read_write() {
assert_same(value);
}
// Assert `take()` works.
{
let mut key = KEY;
key.amount += 1;
let value = table.take(&key).unwrap();
assert_eq!(value, VALUE);
let get = table.get(&KEY);
assert!(!table.contains(&key).unwrap());
assert!(matches!(get, Err(RuntimeError::KeyNotFound)));
// Assert the other `(key, value)` pairs are still there.
key.amount += 1;
let value = table.get(&key).unwrap();
assert_same(value);
}
drop(table);
TxRw::commit(tx_rw).unwrap();
@ -309,6 +322,60 @@ fn db_read_write() {
}
}
/// Assert that keys in database tables are sorted in
/// an ordered B-Tree fashion, i.e. `min_value -> max_value`.
#[test]
fn tables_are_sorted() {
let (env, _tmp) = tmp_concrete_env();
let env_inner = env.env_inner();
let tx_rw = env_inner.tx_rw().unwrap();
let mut tables_mut = env_inner.open_tables_mut(&tx_rw).unwrap();
// Insert `{5, 4, 3, 2, 1, 0}`, assert each new
// number inserted is the minimum `first()` value.
for key in (0..6).rev() {
tables_mut.num_outputs_mut().put(&key, &123).unwrap();
let (first, _) = tables_mut.num_outputs_mut().first().unwrap();
assert_eq!(first, key);
}
drop(tables_mut);
TxRw::commit(tx_rw).unwrap();
let tx_rw = env_inner.tx_rw().unwrap();
// Assert iterators are ordered.
{
let tx_ro = env_inner.tx_ro().unwrap();
let tables = env_inner.open_tables(&tx_ro).unwrap();
let t = tables.num_outputs_iter();
let iter = t.iter().unwrap();
let keys = t.keys().unwrap();
for ((i, iter), key) in (0..6).zip(iter).zip(keys) {
let (iter, _) = iter.unwrap();
let key = key.unwrap();
assert_eq!(i, iter);
assert_eq!(iter, key);
}
}
let mut tables_mut = env_inner.open_tables_mut(&tx_rw).unwrap();
let t = tables_mut.num_outputs_mut();
// Assert the `first()` values are the minimum, i.e. `{0, 1, 2}`
for key in 0..3 {
let (first, _) = t.first().unwrap();
assert_eq!(first, key);
t.delete(&key).unwrap();
}
// Assert the `last()` values are the maximum, i.e. `{5, 4, 3}`
for key in (3..6).rev() {
let (last, _) = tables_mut.num_outputs_mut().last().unwrap();
assert_eq!(last, key);
tables_mut.num_outputs_mut().delete(&key).unwrap();
}
}
//---------------------------------------------------------------------------------------------------- Table Tests
/// Test multiple tables and their key + values.
///
@ -406,35 +473,14 @@ test_tables! {
BlockHash => BlockHeight,
[32; 32] => 123,
BlockInfoV1s,
BlockHeight => BlockInfoV1,
123 => BlockInfoV1 {
BlockInfos,
BlockHeight => BlockInfo,
123 => BlockInfo {
timestamp: 1,
total_generated_coins: 123,
weight: 321,
cumulative_difficulty: 111,
block_hash: [54; 32],
},
BlockInfoV2s,
BlockHeight => BlockInfoV2,
123 => BlockInfoV2 {
timestamp: 1,
total_generated_coins: 123,
weight: 321,
cumulative_difficulty: 111,
cumulative_rct_outs: 2389,
block_hash: [54; 32],
},
BlockInfoV3s,
BlockHeight => BlockInfoV3,
123 => BlockInfoV3 {
timestamp: 1,
total_generated_coins: 123,
cumulative_generated_coins: 123,
weight: 321,
cumulative_difficulty_low: 111,
cumulative_difficulty_high: 112,
cumulative_difficulty_high: 111,
block_hash: [54; 32],
cumulative_rct_outs: 2389,
long_term_weight: 2389,
@ -448,6 +494,10 @@ test_tables! {
Amount => AmountIndex,
123 => 123,
TxBlobs,
TxId => TxBlob,
123 => StorableVec(vec![1,2,3,4,5,6,7,8]),
TxIds,
TxHash => TxId,
[32; 32] => 123,
@ -456,6 +506,10 @@ test_tables! {
TxId => BlockHeight,
123 => 123,
TxOutputs,
TxId => AmountIndices,
123 => StorableVec(vec![1,2,3,4,5,6,7,8]),
TxUnlockTime,
TxId => UnlockTime,
123 => 123,
@ -468,7 +522,7 @@ test_tables! {
} => Output {
key: [1; 32],
height: 1,
output_flags: 0,
output_flags: OutputFlags::empty(),
tx_idx: 3,
},
@ -489,7 +543,7 @@ test_tables! {
123 => RctOutput {
key: [1; 32],
height: 1,
output_flags: 0,
output_flags: OutputFlags::empty(),
tx_idx: 3,
commitment: [3; 32],
},

View file

@ -1,4 +1,4 @@
//! TODO
//! SOMEDAY
//---------------------------------------------------------------------------------------------------- Import
use std::{
@ -19,13 +19,13 @@ use crate::{
};
//---------------------------------------------------------------------------------------------------- Backend
/// TODO
/// SOMEDAY: allow runtime hot-swappable backends.
#[derive(Copy, Clone, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum Backend {
#[default]
/// TODO
/// SOMEDAY
Heed,
/// TODO
/// SOMEDAY
Redb,
}

View file

@ -1,17 +1,8 @@
//! Database [`Env`](crate::Env) configuration.
//!
//! This module contains the main [`Config`]uration struct
//! for the database [`Env`](crate::Env)ironment, and data
//! structures related to any configuration setting.
//!
//! These configurations are processed at runtime, meaning
//! the `Env` can/will dynamically adjust its behavior
//! based on these values.
//! The main [`Config`] struct, holding all configurable values.
//---------------------------------------------------------------------------------------------------- Import
use std::{
borrow::Cow,
num::NonZeroUsize,
path::{Path, PathBuf},
};
@ -26,13 +17,143 @@ use crate::{
resize::ResizeAlgorithm,
};
//---------------------------------------------------------------------------------------------------- ConfigBuilder
/// Builder for [`Config`].
///
// SOMEDAY: there are many more options to add in the future.
#[derive(Debug, Clone, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ConfigBuilder {
/// [`Config::db_directory`].
db_directory: Option<Cow<'static, Path>>,
/// [`Config::sync_mode`].
sync_mode: Option<SyncMode>,
/// [`Config::reader_threads`].
reader_threads: Option<ReaderThreads>,
/// [`Config::resize_algorithm`].
resize_algorithm: Option<ResizeAlgorithm>,
}
impl ConfigBuilder {
/// Create a new [`ConfigBuilder`].
///
/// [`ConfigBuilder::build`] can be called immediately
/// after this function to use default values.
pub const fn new() -> Self {
Self {
db_directory: None,
sync_mode: None,
reader_threads: None,
resize_algorithm: None,
}
}
/// Build into a [`Config`].
///
/// # Default values
/// If [`ConfigBuilder::db_directory`] was not called,
/// the default [`cuprate_database_dir`] will be used.
///
/// For all other values, [`Default::default`] is used.
pub fn build(self) -> Config {
// INVARIANT: all PATH safety checks are done
// in `helper::fs`. No need to do them here.
let db_directory = self
.db_directory
.unwrap_or_else(|| Cow::Borrowed(cuprate_database_dir()));
// Add the database filename to the directory.
let db_file = {
let mut db_file = db_directory.to_path_buf();
db_file.push(DATABASE_DATA_FILENAME);
Cow::Owned(db_file)
};
Config {
db_directory,
db_file,
sync_mode: self.sync_mode.unwrap_or_default(),
reader_threads: self.reader_threads.unwrap_or_default(),
resize_algorithm: self.resize_algorithm.unwrap_or_default(),
}
}
/// Set a custom database directory (and file) [`Path`].
#[must_use]
pub fn db_directory(mut self, db_directory: PathBuf) -> Self {
self.db_directory = Some(Cow::Owned(db_directory));
self
}
/// Tune the [`ConfigBuilder`] for the highest-performing,
/// but also most resource-intensive & maybe risky settings.
///
/// Good default for testing, and resource-available machines.
#[must_use]
pub fn fast(mut self) -> Self {
self.sync_mode = Some(SyncMode::Fast);
self.reader_threads = Some(ReaderThreads::OnePerThread);
self.resize_algorithm = Some(ResizeAlgorithm::default());
self
}
/// Tune the [`ConfigBuilder`] for the lowest-performing,
/// but also least resource-intensive settings.
///
/// Good default for resource-limited machines, e.g. a cheap VPS.
#[must_use]
pub fn low_power(mut self) -> Self {
self.sync_mode = Some(SyncMode::default());
self.reader_threads = Some(ReaderThreads::One);
self.resize_algorithm = Some(ResizeAlgorithm::default());
self
}
/// Set a custom [`SyncMode`].
#[must_use]
pub const fn sync_mode(mut self, sync_mode: SyncMode) -> Self {
self.sync_mode = Some(sync_mode);
self
}
/// Set a custom [`ReaderThreads`].
#[must_use]
pub const fn reader_threads(mut self, reader_threads: ReaderThreads) -> Self {
self.reader_threads = Some(reader_threads);
self
}
/// Set a custom [`ResizeAlgorithm`].
#[must_use]
pub const fn resize_algorithm(mut self, resize_algorithm: ResizeAlgorithm) -> Self {
self.resize_algorithm = Some(resize_algorithm);
self
}
}
impl Default for ConfigBuilder {
fn default() -> Self {
Self {
db_directory: Some(Cow::Borrowed(cuprate_database_dir())),
sync_mode: Some(SyncMode::default()),
reader_threads: Some(ReaderThreads::default()),
resize_algorithm: Some(ResizeAlgorithm::default()),
}
}
}
//---------------------------------------------------------------------------------------------------- Config
/// Database [`Env`](crate::Env) configuration.
///
/// This is the struct passed to [`Env::open`](crate::Env::open) that
/// allows the database to be configured in various ways.
///
/// TODO: there's probably more options to add.
/// For construction, either use [`ConfigBuilder`] or [`Config::default`].
///
// SOMEDAY: there are many more options to add in the future.
#[derive(Debug, Clone, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Config {
@ -44,8 +165,8 @@ pub struct Config {
/// By default, if no value is provided in the [`Config`]
/// constructor functions, this will be [`cuprate_database_dir`].
///
/// TODO: we should also support `/etc/cuprated.conf`.
/// This could be represented with an `enum DbPath { Default, Custom, Etc, }`
// SOMEDAY: we should also support `/etc/cuprated.conf`.
// This could be represented with an `enum DbPath { Default, Custom, Etc, }`
pub(crate) db_directory: Cow<'static, Path>,
/// The actual database data file.
///
@ -67,111 +188,50 @@ pub struct Config {
}
impl Config {
/// Private function to acquire [`Config::db_file`]
/// from the user provided (or default) [`Config::db_directory`].
///
/// As the database data file PATH is just the directory + the filename,
/// we only need the directory from the user/Config, and can add it here.
fn return_db_dir_and_file(
db_directory: Option<PathBuf>,
) -> (Cow<'static, Path>, Cow<'static, Path>) {
// INVARIANT: all PATH safety checks are done
// in `helper::fs`. No need to do them here.
let db_directory =
db_directory.map_or_else(|| Cow::Borrowed(cuprate_database_dir()), Cow::Owned);
// Add the database filename to the directory.
let mut db_file = db_directory.to_path_buf();
db_file.push(DATABASE_DATA_FILENAME);
(db_directory, Cow::Owned(db_file))
}
/// Create a new [`Config`] with sane default settings.
///
/// # `db_directory`
/// If this is `Some`, it will be used as the
/// directory that contains all database files.
/// The [`Config::db_directory`] will be [`cuprate_database_dir`].
///
/// If `None`, it will use the default directory [`cuprate_database_dir`].
pub fn new(db_directory: Option<PathBuf>) -> Self {
let (db_directory, db_file) = Self::return_db_dir_and_file(db_directory);
Self {
db_directory,
db_file,
sync_mode: SyncMode::default(),
reader_threads: ReaderThreads::OnePerThread,
resize_algorithm: ResizeAlgorithm::default(),
}
}
/// Create a [`Config`] with the highest performing,
/// but also most resource-intensive & maybe risky settings.
/// All other values will be [`Default::default`].
///
/// Good default for testing, and resource-available machines.
/// Same as [`Config::default`].
///
/// # `db_directory`
/// If this is `Some`, it will be used as the
/// directory that contains all database files.
/// ```rust
/// use cuprate_database::{config::*, resize::*, DATABASE_DATA_FILENAME};
/// use cuprate_helper::fs::*;
///
/// If `None`, it will use the default directory [`cuprate_database_dir`].
pub fn fast(db_directory: Option<PathBuf>) -> Self {
let (db_directory, db_file) = Self::return_db_dir_and_file(db_directory);
Self {
db_directory,
db_file,
sync_mode: SyncMode::Fast,
reader_threads: ReaderThreads::OnePerThread,
resize_algorithm: ResizeAlgorithm::default(),
}
}
/// Create a [`Config`] with the lowest performing,
/// but also least resource-intensive settings.
/// let config = Config::new();
///
/// Good default for resource-limited machines, e.g. a cheap VPS.
///
/// # `db_directory`
/// If this is `Some`, it will be used as the
/// directory that contains all database files.
///
/// If `None`, it will use the default directory [`cuprate_database_dir`].
pub fn low_power(db_directory: Option<PathBuf>) -> Self {
let (db_directory, db_file) = Self::return_db_dir_and_file(db_directory);
Self {
db_directory,
db_file,
sync_mode: SyncMode::default(),
reader_threads: ReaderThreads::One,
resize_algorithm: ResizeAlgorithm::default(),
}
/// assert_eq!(config.db_directory(), cuprate_database_dir());
/// assert!(config.db_file().starts_with(cuprate_database_dir()));
/// assert!(config.db_file().ends_with(DATABASE_DATA_FILENAME));
/// assert_eq!(config.sync_mode, SyncMode::default());
/// assert_eq!(config.reader_threads, ReaderThreads::default());
/// assert_eq!(config.resize_algorithm, ResizeAlgorithm::default());
/// ```
pub fn new() -> Self {
ConfigBuilder::default().build()
}
/// Return the absolute [`Path`] to the database directory.
///
/// This will be the `db_directory` given
/// (or default) during [`Config`] construction.
pub const fn db_directory(&self) -> &Cow<'_, Path> {
&self.db_directory
}
/// Return the absolute [`Path`] to the database data file.
///
/// This will be based off the `db_directory` given
/// (or default) during [`Config`] construction.
pub const fn db_file(&self) -> &Cow<'_, Path> {
&self.db_file
}
}
impl Default for Config {
/// Same as `Self::new(None)`.
/// Same as [`Config::new`].
///
/// ```rust
/// # use cuprate_database::config::*;
/// assert_eq!(Config::default(), Config::new(None));
/// assert_eq!(Config::default(), Config::new());
/// ```
fn default() -> Self {
Self::new(None)
Self::new()
}
}

View file

@ -1,7 +1,44 @@
//! TODO
//! Database [`Env`](crate::Env) configuration.
//!
//! This module contains the main [`Config`]uration struct
//! for the database [`Env`](crate::Env)ironment, and types
//! related to configuration settings.
//!
//! The main constructor is the [`ConfigBuilder`].
//!
//! These configurations are processed at runtime, meaning
//! the `Env` can/will dynamically adjust its behavior
//! based on these values.
//!
//! # Example
//! ```rust
//! use cuprate_database::{
//! Env,
//! config::{ConfigBuilder, ReaderThreads, SyncMode}
//! };
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let db_dir = tempfile::tempdir()?;
//!
//! let config = ConfigBuilder::new()
//! // Use a custom database directory.
//! .db_directory(db_dir.path().to_path_buf())
//! // Use as many reader threads as possible (when using `service`).
//! .reader_threads(ReaderThreads::OnePerThread)
//! // Use the fastest sync mode.
//! .sync_mode(SyncMode::Fast)
//! // Build into `Config`
//! .build();
//!
//! // Start a database `service` using this configuration.
//! let (reader_handle, _) = cuprate_database::service::init(config.clone())?;
//! // It's using the config we provided.
//! assert_eq!(reader_handle.env().config(), &config);
//! # Ok(()) }
//! ```
mod config;
pub use config::Config;
pub use config::{Config, ConfigBuilder};
mod reader_threads;
pub use reader_threads::ReaderThreads;

View file

@ -9,25 +9,19 @@
//! based on these values.
//---------------------------------------------------------------------------------------------------- Import
use std::{
borrow::Cow,
num::NonZeroUsize,
path::{Path, PathBuf},
};
use std::num::NonZeroUsize;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use cuprate_helper::fs::cuprate_database_dir;
use crate::{constants::DATABASE_DATA_FILENAME, resize::ResizeAlgorithm};
//---------------------------------------------------------------------------------------------------- ReaderThreads
/// Amount of database reader threads to spawn.
/// Amount of database reader threads to spawn when using [`service`](crate::service).
///
/// This controls how many reader thread [`crate::service`]'s
/// This controls how many reader thread `service`'s
/// thread-pool will spawn to receive and send requests/responses.
///
/// It does nothing outside of `service`.
///
/// It will always be at least 1 and at most the number of threads on the machine.
///
/// The main function used to extract an actual
@ -38,8 +32,8 @@ pub enum ReaderThreads {
#[default]
/// Spawn 1 reader thread per available thread on the machine.
///
/// For example, a `16-core, 32-thread` Ryzen 5950x will
/// spawn `32` reader threads using this setting.
/// For example, a `32-thread` system will spawn
/// `32` reader threads using this setting.
OnePerThread,
/// Only spawn 1 reader thread.

View file

@ -9,19 +9,10 @@
//! based on these values.
//---------------------------------------------------------------------------------------------------- Import
use std::{
borrow::Cow,
num::NonZeroUsize,
path::{Path, PathBuf},
};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use cuprate_helper::fs::cuprate_database_dir;
use crate::{constants::DATABASE_DATA_FILENAME, resize::ResizeAlgorithm};
//---------------------------------------------------------------------------------------------------- SyncMode
/// Disk synchronization mode.
///
@ -48,7 +39,7 @@ use crate::{constants::DATABASE_DATA_FILENAME, resize::ResizeAlgorithm};
/// ```
/// will be fine, most likely pulling from memory instead of disk.
///
/// # TODO
/// # SOMEDAY
/// Dynamic sync's are not yet supported.
///
/// Only:
@ -64,24 +55,24 @@ pub enum SyncMode {
/// Use [`SyncMode::Fast`] until fully synced,
/// then use [`SyncMode::Safe`].
///
/// # TODO: how to implement this?
/// ref: <https://github.com/monero-project/monero/issues/1463>
/// monerod-solution: <https://github.com/monero-project/monero/pull/1506>
/// cuprate-issue: <https://github.com/Cuprate/cuprate/issues/78>
///
/// We could:
/// ```rust,ignore
/// if current_db_block <= top_block.saturating_sub(N) {
/// // don't sync()
/// } else {
/// // sync()
/// }
/// ```
/// where N is some threshold we pick that is _close_ enough
/// to being synced where we want to start being safer.
///
/// Essentially, when we are in a certain % range of being finished,
/// switch to safe mode, until then, go fast.
// # SOMEDAY: how to implement this?
// ref: <https://github.com/monero-project/monero/issues/1463>
// monerod-solution: <https://github.com/monero-project/monero/pull/1506>
// cuprate-issue: <https://github.com/Cuprate/cuprate/issues/78>
//
// We could:
// ```rust,ignore
// if current_db_block <= top_block.saturating_sub(N) {
// // don't sync()
// } else {
// // sync()
// }
// ```
// where N is some threshold we pick that is _close_ enough
// to being synced where we want to start being safer.
//
// Essentially, when we are in a certain % range of being finished,
// switch to safe mode, until then, go fast.
FastThenSafe,
#[default]
@ -136,7 +127,7 @@ pub enum SyncMode {
/// In the case of a system crash, the database
/// may become corrupted when using this option.
//
// TODO: we could call this `unsafe`
// FIXME: we could call this `unsafe`
// and use that terminology in the config file
// so users know exactly what they are getting
// themselves into.

View file

@ -3,6 +3,18 @@
//---------------------------------------------------------------------------------------------------- Import
use cfg_if::cfg_if;
//---------------------------------------------------------------------------------------------------- Version
/// Current major version of the database.
///
/// Returned by [`crate::ops::property::db_version`].
///
/// This is incremented by 1 when `cuprate_database`'s
/// structure/schema/tables change.
///
/// This is akin to `VERSION` in `monerod`:
/// <https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/blockchain_db/lmdb/db_lmdb.cpp#L57>
pub const DATABASE_VERSION: u64 = 0;
//---------------------------------------------------------------------------------------------------- Error Messages
/// Corrupt database error message.
///
@ -23,8 +35,8 @@ TODO: instructions on:
///
/// | Backend | Value |
/// |---------|-------|
/// | `heed` | "heed"
/// | `redb` | "redb"
/// | `heed` | `"heed"`
/// | `redb` | `"redb"`
pub const DATABASE_BACKEND: &str = {
cfg_if! {
if #[cfg(all(feature = "redb", not(feature = "heed")))] {
@ -41,8 +53,8 @@ pub const DATABASE_BACKEND: &str = {
///
/// | Backend | Value |
/// |---------|-------|
/// | `heed` | "data.mdb"
/// | `redb` | "data.redb"
/// | `heed` | `"data.mdb"`
/// | `redb` | `"data.redb"`
pub const DATABASE_DATA_FILENAME: &str = {
cfg_if! {
if #[cfg(all(feature = "redb", not(feature = "heed")))] {
@ -57,8 +69,8 @@ pub const DATABASE_DATA_FILENAME: &str = {
///
/// | Backend | Value |
/// |---------|-------|
/// | `heed` | Some("lock.mdb")
/// | `redb` | None (redb doesn't use a file lock)
/// | `heed` | `Some("lock.mdb")`
/// | `redb` | `None` (redb doesn't use a file lock)
pub const DATABASE_LOCK_FILENAME: Option<&str> = {
cfg_if! {
if #[cfg(all(feature = "redb", not(feature = "heed")))] {

View file

@ -1,33 +1,38 @@
//! Abstracted database; `trait DatabaseRo` & `trait DatabaseRw`.
//! Abstracted database table operations; `trait DatabaseRo` & `trait DatabaseRw`.
//---------------------------------------------------------------------------------------------------- Import
use std::{
borrow::{Borrow, Cow},
fmt::Debug,
ops::{Deref, RangeBounds},
};
use std::ops::RangeBounds;
use crate::{
error::RuntimeError,
table::Table,
transaction::{TxRo, TxRw},
};
use crate::{error::RuntimeError, table::Table};
//---------------------------------------------------------------------------------------------------- DatabaseIter
/// Generic post-fix documentation for `DatabaseIter` methods.
macro_rules! doc_iter {
() => {
r"Although the returned iterator itself is tied to the lifetime
of `&self`, the returned values from the iterator are _owned_.
# Errors
The construction of the iterator itself may error.
Each iteration of the iterator has the potential to error as well."
};
}
//---------------------------------------------------------------------------------------------------- DatabaseRoIter
/// Database (key-value store) read-only iteration abstraction.
///
/// These are read-only iteration-related operations that
/// can only be called from [`DatabaseRo`] objects.
///
/// # Hack
/// This is a HACK to get around the fact our read/write tables
/// This is a HACK to get around the fact [`DatabaseRw`] tables
/// cannot safely return values returning lifetimes, as such,
/// only read-only tables implement this trait.
///
/// - <https://github.com/Cuprate/cuprate/pull/102#discussion_r1548695610>
/// - <https://github.com/Cuprate/cuprate/pull/104>
pub trait DatabaseIter<T: Table> {
/// Get an iterator of value's corresponding to a range of keys.
/// Get an [`Iterator`] of values corresponding to a range of keys.
///
/// For example:
/// ```rust,ignore
@ -39,12 +44,7 @@ pub trait DatabaseIter<T: Table> {
/// Although the returned iterator itself is tied to the lifetime
/// of `&'a self`, the returned values from the iterator are _owned_.
///
/// # Errors
/// Each key in the `range` has the potential to error, for example,
/// if a particular key in the `range` does not exist,
/// [`RuntimeError::KeyNotFound`] wrapped in [`Err`] will be returned
/// from the iterator.
#[allow(clippy::iter_not_returning_iterator)]
#[doc = doc_iter!()]
fn get_range<'a, Range>(
&'a self,
range: Range,
@ -52,51 +52,74 @@ pub trait DatabaseIter<T: Table> {
where
Range: RangeBounds<T::Key> + 'a;
/// TODO
///
/// # Errors
/// TODO
/// Get an [`Iterator`] that returns the `(key, value)` types for this database.
#[doc = doc_iter!()]
#[allow(clippy::iter_not_returning_iterator)]
fn iter(
&self,
) -> Result<impl Iterator<Item = Result<(T::Key, T::Value), RuntimeError>> + '_, RuntimeError>;
/// TODO
///
/// # Errors
/// TODO
/// Get an [`Iterator`] that returns _only_ the `key` type for this database.
#[doc = doc_iter!()]
fn keys(&self)
-> Result<impl Iterator<Item = Result<T::Key, RuntimeError>> + '_, RuntimeError>;
/// TODO
///
/// # Errors
/// TODO
/// Get an [`Iterator`] that returns _only_ the `value` type for this database.
#[doc = doc_iter!()]
fn values(
&self,
) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + '_, RuntimeError>;
}
//---------------------------------------------------------------------------------------------------- DatabaseRo
/// Generic post-fix documentation for `DatabaseR{o,w}` methods.
macro_rules! doc_database {
() => {
r"# Errors
This will return [`RuntimeError::KeyNotFound`] if:
- Input does not exist OR
- Database is empty"
};
}
/// Database (key-value store) read abstraction.
///
/// This is a read-only database table,
/// write operations are defined in [`DatabaseRw`].
pub trait DatabaseRo<T: Table> {
///
/// # Safety
/// The table type that implements this MUST be `Send`.
///
/// However if the table holds a reference to a transaction:
/// - only the transaction only has to be `Send`
/// - the table cannot implement `Send`
///
/// For example:
///
/// `heed`'s transactions are `Send` but `HeedTableRo` contains a `&`
/// to the transaction, as such, if `Send` were implemented on `HeedTableRo`
/// then 1 transaction could be used to open multiple tables, then sent to
/// other threads - this would be a soundness hole against `HeedTableRo`.
///
/// `&T` is only `Send` if `T: Sync`.
///
/// `heed::RoTxn: !Sync`, therefore our table
/// holding `&heed::RoTxn` must NOT be `Send`.
///
/// - <https://doc.rust-lang.org/std/marker/trait.Sync.html>
/// - <https://doc.rust-lang.org/nomicon/send-and-sync.html>
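///
/// A hedged sketch (not from the original docs; `Outputs` and the
/// variable names are illustrative) of the unsound pattern this rule
/// prevents:
///
/// ```rust,ignore
/// // `table` borrows `tx_ro` (a `heed::RoTxn`, which is `!Sync`).
/// let table = env_inner.open_db_ro::<Outputs>(&tx_ro)?;
///
/// // If `table` were `Send`, it could be moved to another thread while
/// // other tables opened from the same `tx_ro` are still used here -
/// // two threads would then share one `RoTxn`.
/// std::thread::spawn(move || table.get(&key));
/// ```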
pub unsafe trait DatabaseRo<T: Table> {
/// Get the value corresponding to a key.
///
/// The returned value is _owned_.
///
/// # Errors
/// This will return [`RuntimeError::KeyNotFound`] wrapped in [`Err`] if `key` does not exist.
///
/// It will return other [`RuntimeError`]'s on things like IO errors as well.
#[doc = doc_database!()]
fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError>;
/// TODO
/// Returns `true` if the database contains a value for the specified key.
///
/// # Errors
/// TODO
/// Note that this will _never_ return `Err(RuntimeError::KeyNotFound)`,
/// as in that case, `Ok(false)` will be returned.
///
/// Other errors may still occur.
fn contains(&self, key: &T::Key) -> Result<bool, RuntimeError> {
match self.get(key) {
Ok(_) => Ok(true),
@ -105,28 +128,24 @@ pub trait DatabaseRo<T: Table> {
}
}
/// TODO
/// Returns the number of `(key, value)` pairs in the database.
///
/// # Errors
/// TODO
/// This will never return [`RuntimeError::KeyNotFound`].
fn len(&self) -> Result<u64, RuntimeError>;
/// TODO
///
/// # Errors
/// TODO
/// Returns the first `(key, value)` pair in the database.
#[doc = doc_database!()]
fn first(&self) -> Result<(T::Key, T::Value), RuntimeError>;
/// TODO
///
/// # Errors
/// TODO
/// Returns the last `(key, value)` pair in the database.
#[doc = doc_database!()]
fn last(&self) -> Result<(T::Key, T::Value), RuntimeError>;
/// TODO
/// Returns `true` if the database contains no `(key, value)` pairs.
///
/// # Errors
/// TODO
/// This can only return [`RuntimeError::Io`] on errors.
fn is_empty(&self) -> Result<bool, RuntimeError>;
}
@ -139,25 +158,59 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
///
/// This will overwrite any existing key-value pairs.
///
/// # Errors
/// This will not return [`RuntimeError::KeyExists`].
#[doc = doc_database!()]
///
/// This will never return [`RuntimeError::KeyExists`].
fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError>;
/// Delete a key-value pair in the database.
///
/// # Errors
/// This will return [`RuntimeError::KeyNotFound`] wrapped in [`Err`] if `key` does not exist.
/// This will return `Ok(())` if the key does not exist.
///
#[doc = doc_database!()]
///
/// This will never return [`RuntimeError::KeyExists`].
fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError>;
/// TODO
/// Delete and return a key-value pair in the database.
///
/// # Errors
/// TODO
/// This is the same as [`DatabaseRw::delete`], however,
/// it will deserialize the `T::Value` and return it.
///
#[doc = doc_database!()]
fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError>;
/// Fetch the value, and apply a function to it - or delete the entry.
///
/// This will call [`DatabaseRo::get`] and call your provided function `f` on it.
///
/// The [`Option`] `f` returns will dictate whether `update()`:
/// - Updates the current value OR
/// - Deletes the `(key, value)` pair
///
/// - If `f` returns `Some(value)`, that will be [`DatabaseRw::put`] as the new value
/// - If `f` returns `None`, the entry will be [`DatabaseRw::delete`]d
///
#[doc = doc_database!()]
fn update<F>(&mut self, key: &T::Key, mut f: F) -> Result<(), RuntimeError>
where
F: FnMut(T::Value) -> Option<T::Value>,
{
let value = DatabaseRo::get(self, key)?;
match f(value) {
Some(value) => DatabaseRw::put(self, key, &value),
None => DatabaseRw::delete(self, key),
}
}
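// Hedged usage sketch for `update` (illustrative; `table`, `key`, and
// the value's `height` field are assumed, mirroring the tests):
//
// table.update(&key, |mut value| {
//     value.height += 1; // Mutate the stored value...
//     Some(value)        // ...and write it back (`None` would delete it).
// })?;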
/// Removes and returns the first `(key, value)` pair in the database.
///
#[doc = doc_database!()]
fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError>;
/// TODO
/// Removes and returns the last `(key, value)` pair in the database.
///
/// # Errors
/// TODO
#[doc = doc_database!()]
fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError>;
}

View file

@ -1,7 +1,7 @@
//! Abstracted database environment; `trait Env`.
//---------------------------------------------------------------------------------------------------- Import
use std::{fmt::Debug, ops::Deref};
use std::num::NonZeroUsize;
use crate::{
config::Config,
@ -9,6 +9,7 @@ use crate::{
error::{InitError, RuntimeError},
resize::ResizeAlgorithm,
table::Table,
tables::{call_fn_on_all_tables_or_early_return, TablesIter, TablesMut},
transaction::{TxRo, TxRw},
};
@ -23,8 +24,16 @@ use crate::{
/// although, no invariant relies on this (yet).
///
/// # Lifetimes
/// TODO: Explain the very sequential lifetime pipeline:
/// - `ConcreteEnv` -> `'env` -> `'tx` -> `impl DatabaseR{o,w}`
/// The lifetimes associated with `Env` have a sequential flow:
/// 1. `ConcreteEnv`
/// 2. `'env`
/// 3. `'tx`
/// 4. `'db`
///
/// As in:
/// - open database tables only live as long as...
/// - transactions which only live as long as the...
/// - environment ([`EnvInner`])
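///
/// A hedged sketch of that flow (names illustrative, mirroring the tests):
///
/// ```rust,ignore
/// let env = ConcreteEnv::open(config)?;           // lives the longest
/// let env_inner = env.env_inner();                // borrows `env`
/// let tx_ro = env_inner.tx_ro()?;                 // borrows `env_inner`
/// let table = env_inner.open_db_ro::<T>(&tx_ro)?; // borrows `tx_ro`
/// ```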
pub trait Env: Sized {
//------------------------------------------------ Constants
/// Does the database backend need to be manually
@ -32,7 +41,7 @@ pub trait Env: Sized {
///
/// # Invariant
/// If this is `false`, that means this [`Env`]
/// can _never_ return a [`RuntimeError::ResizeNeeded`].
/// must _never_ return a [`RuntimeError::ResizeNeeded`].
///
/// If this is `true`, [`Env::resize_map`] & [`Env::current_map_size`]
/// _must_ be re-implemented, as it just panics by default.
@ -50,10 +59,10 @@ pub trait Env: Sized {
/// This is used as the `self` in [`EnvInner`] functions, so whatever
/// this type is, is what will be accessible from those functions.
///
/// # Explanation (not needed for practical use)
/// For `heed`, this is just `heed::Env`, for `redb` this is
/// `(redb::Database, redb::Durability)` as each transaction
/// needs the sync mode set during creation.
// # HACK
// For `heed`, this is just `heed::Env`, for `redb` this is
// `(redb::Database, redb::Durability)` as each transaction
// needs the sync mode set during creation.
type EnvInner<'env>: EnvInner<'env, Self::TxRo<'env>, Self::TxRw<'env>>
where
Self: 'env;
@ -95,11 +104,11 @@ pub trait Env: Sized {
/// I.e., after this function returns, there must be no doubts
/// that the data isn't synced yet, it _must_ be synced.
///
/// TODO: either this invariant or `sync()` itself will most
/// likely be removed/changed after `SyncMode` is finalized.
// FIXME: either this invariant or `sync()` itself will most
// likely be removed/changed after `SyncMode` is finalized.
///
/// # Errors
/// TODO
/// If there is a synchronization error, this should return an error.
fn sync(&self) -> Result<(), RuntimeError>;
/// Resize the database's memory map to a
@ -109,11 +118,14 @@ pub trait Env: Sized {
///
/// If `resize_algorithm` is `Some`, that will be used instead.
///
/// This function returns the _new_ memory map size in bytes.
///
/// # Invariant
/// This function _must_ be re-implemented if [`Env::MANUAL_RESIZE`] is `true`.
///
/// Otherwise, this function will panic with `unreachable!()`.
fn resize_map(&self, resize_algorithm: Option<ResizeAlgorithm>) {
#[allow(unused_variables)]
fn resize_map(&self, resize_algorithm: Option<ResizeAlgorithm>) -> NonZeroUsize {
unreachable!()
}
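// Hedged usage sketch (illustrative caller, not from this crate): react
// to `RuntimeError::ResizeNeeded` by resizing with the default algorithm,
// using the returned new size for logging only.
//
// if matches!(result, Err(RuntimeError::ResizeNeeded)) {
//     let new_size = env.resize_map(None);
//     println!("resized memory map to {new_size} bytes"); // TODO: use tracing.
// }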
@ -164,7 +176,26 @@ pub trait Env: Sized {
}
//---------------------------------------------------------------------------------------------------- DatabaseRo
/// TODO
/// Document errors when opening tables in [`EnvInner`].
macro_rules! doc_table_error {
() => {
r"# Errors
This will only return [`RuntimeError::Io`] if it errors.
As all tables are created upon [`Env::open`],
this function will never error because a table doesn't exist."
};
}
/// The inner [`Env`] type.
///
/// This type is created with [`Env::env_inner`] and represents
/// the type able to generate transactions and open tables.
///
/// # Locking behavior
/// As noted in `Env::env_inner`, this is a `RwLockReadGuard`
/// when using the `heed` backend; be aware of this and do
/// not hold onto an `EnvInner` for a long time.
pub trait EnvInner<'env, Ro, Rw>
where
Self: 'env,
@ -185,6 +216,9 @@ where
/// Open a database in read-only mode.
///
/// The returned value can have [`DatabaseRo`]
/// & [`DatabaseIter`] functions called on it.
///
/// This will open the database [`Table`]
/// passed as a generic to this function.
///
@ -195,12 +229,7 @@ where
/// // (name, key/value type)
/// ```
///
/// # Errors
/// This function errors upon internal database/IO errors.
///
/// As [`Table`] is `Sealed`, and all tables are created
/// upon [`Env::open`], this function will never error because
/// a table doesn't exist.
#[doc = doc_table_error!()]
fn open_db_ro<T: Table>(
&self,
tx_ro: &Ro,
@ -211,17 +240,39 @@ where
/// All [`DatabaseRo`] functions are also callable
/// with the returned [`DatabaseRw`] structure.
///
/// Note that [`DatabaseIter`] functions are _not_
/// available to [`DatabaseRw`] structures.
///
/// This will open the database [`Table`]
/// passed as a generic to this function.
///
/// # Errors
/// This function errors upon internal database/IO errors.
///
/// As [`Table`] is `Sealed`, and all tables are created
/// upon [`Env::open`], this function will never error because
/// a table doesn't exist.
#[doc = doc_table_error!()]
fn open_db_rw<T: Table>(&self, tx_rw: &Rw) -> Result<impl DatabaseRw<T>, RuntimeError>;
/// Open all tables in read/iter mode.
///
/// This calls [`EnvInner::open_db_ro`] on all database tables
/// and returns a structure that allows access to all tables.
///
#[doc = doc_table_error!()]
fn open_tables(&self, tx_ro: &Ro) -> Result<impl TablesIter, RuntimeError> {
call_fn_on_all_tables_or_early_return! {
Self::open_db_ro(self, tx_ro)
}
}
/// Open all tables in read-write mode.
///
/// This calls [`EnvInner::open_db_rw`] on all database tables
/// and returns a structure that allows access to all tables.
///
#[doc = doc_table_error!()]
fn open_tables_mut(&self, tx_rw: &Rw) -> Result<impl TablesMut, RuntimeError> {
call_fn_on_all_tables_or_early_return! {
Self::open_db_rw(self, tx_rw)
}
}
/// Clear all `(key, value)`'s from a database table.
///
/// This will delete all key and values in the passed
@ -230,11 +281,6 @@ where
/// Note that this operation is tied to `tx_rw`, as such this
/// function's effects can be aborted using [`TxRw::abort`].
///
/// # Errors
/// This function errors upon internal database/IO errors.
///
/// As [`Table`] is `Sealed`, and all tables are created
/// upon [`Env::open`], this function will never error because
/// a table doesn't exist.
#[doc = doc_table_error!()]
fn clear_db<T: Table>(&self, tx_rw: &mut Rw) -> Result<(), RuntimeError>;
}

View file

@ -1,5 +1,4 @@
//! Database error types.
//! TODO: `InitError/RuntimeError` are maybe bad names.
//---------------------------------------------------------------------------------------------------- Import
use std::fmt::Debug;
@ -42,8 +41,12 @@ pub enum InitError {
/// The database is currently in the process
/// of shutting down and cannot respond.
///
/// TODO: This might happen if we try to open
/// while we are shutting down, `unreachable!()`?
/// # Notes
/// This error can only occur with the `heed` backend when
/// the database environment is opened _right_ at the same time
/// another thread/process is closing it.
///
/// This will never occur with other backends.
#[error("database is shutting down")]
ShuttingDown,

View file

@ -1,6 +1,4 @@
//! General free functions (related to the database).
//!
//! TODO.
//---------------------------------------------------------------------------------------------------- Import

View file

@ -1,40 +1,22 @@
//! Database key abstraction; `trait Key`.
//---------------------------------------------------------------------------------------------------- Import
use std::{cmp::Ordering, fmt::Debug};
use std::cmp::Ordering;
use bytemuck::Pod;
use crate::storable::{self, Storable};
use crate::storable::Storable;
//---------------------------------------------------------------------------------------------------- Table
/// Database [`Table`](crate::table::Table) key metadata.
///
/// Purely compile time information for database table keys, supporting duplicate keys.
/// Purely compile time information for database table keys.
//
// FIXME: this doesn't need to exist right now but
// may be used if we implement getting values using ranges.
// <https://github.com/Cuprate/cuprate/pull/117#discussion_r1589378104>
pub trait Key: Storable + Sized {
/// Does this [`Key`] require multiple keys to reach a value?
///
/// # Invariant
/// - If [`Key::DUPLICATE`] is `true`, [`Key::primary_secondary`] MUST be re-implemented.
/// - If [`Key::DUPLICATE`] is `true`, [`Key::new_with_max_secondary`] MUST be re-implemented.
const DUPLICATE: bool;
/// Does this [`Key`] have a custom comparison function?
///
/// # Invariant
/// If [`Key::CUSTOM_COMPARE`] is `true`, [`Key::compare`] MUST be re-implemented.
const CUSTOM_COMPARE: bool;
/// The primary key type.
type Primary: Storable;
/// Acquire [`Self::Primary`] and the secondary key.
///
/// # TODO: doc test
fn primary_secondary(self) -> (Self::Primary, u64) {
unreachable!()
}
/// Compare 2 [`Key`]'s against each other.
///
/// By default, this does a straight _byte_ comparison,
@ -55,67 +37,17 @@ pub trait Key: Storable + Sized {
/// std::cmp::Ordering::Greater,
/// );
/// ```
#[inline]
fn compare(left: &[u8], right: &[u8]) -> Ordering {
left.cmp(right)
}
/// Create a new [`Key`] from the [`Key::Primary`] type,
/// with the secondary key type set to the maximum value.
///
/// # Invariant
/// Secondary key must be the max value of the type.
///
/// # TODO: doc test
fn new_with_max_secondary(primary: Self::Primary) -> Self {
unreachable!()
}
}
//---------------------------------------------------------------------------------------------------- Impl
/// TODO: remove after we finalize tables.
///
/// Implement `Key` on most primitive types.
///
/// - `Key::DUPLICATE` is always `false`.
/// - `Key::CUSTOM_COMPARE` is always `false`.
macro_rules! impl_key {
(
$(
$t:ident // Key type.
),* $(,)?
) => {
$(
impl Key for $t {
const DUPLICATE: bool = false;
const CUSTOM_COMPARE: bool = false;
type Primary = $t;
}
)*
};
}
// Implement `Key` for primitives.
impl_key! {
u8,
u16,
u32,
u64,
i8,
i16,
i32,
i64,
}
impl<T: Key + Pod, const N: usize> Key for [T; N] {
const DUPLICATE: bool = false;
const CUSTOM_COMPARE: bool = false;
type Primary = Self;
}
// TODO: temporary for now for `Key` bound, remove later.
impl Key for crate::types::PreRctOutputId {
const DUPLICATE: bool = false;
const CUSTOM_COMPARE: bool = false;
impl<T> Key for T
where
T: Storable + Sized,
{
type Primary = Self;
}

View file

@ -1,4 +1,4 @@
//! Database abstraction and utilities.
//! Cuprate's database abstraction.
//!
//! This documentation is mostly for practical usage of `cuprate_database`.
//!
@ -8,28 +8,33 @@
//! # Purpose
//! This crate does 3 things:
//! 1. Abstracts various database backends with traits
//! 2. Implements various `Monero` related [functions](ops) & [tables] & [types]
//! 2. Implements various `Monero` related [operations](ops), [tables], and [types]
//! 3. Exposes a [`tower::Service`] backed by a thread-pool
//!
//! Each layer builds on top of the previous.
//!
//! As a user of `cuprate_database`, consider using the higher-level [`service`] module,
//! or at the very least the [`ops`] module instead of interacting with the database traits directly.
//!
//! With that said, many database traits and internals (like [`DatabaseRo::get`]) are exposed.
//!
//! # Terminology
//! To be more clear on some terms used in this crate:
//!
//! | Term | Meaning |
//! |---------------|--------------------------------------|
//! | `Env` | The 1 database environment, the "whole" thing
//! | `DatabaseRo` | A read-only `key/value` store
//! | `DatabaseRw` | A readable/writable `key/value` store
//! | `Table` | Solely the metadata of a `Database` (the `key` and `value` types, and the name)
//! | `TxRo` | Read only transaction
//! | `TxRw` | Read/write transaction
//! | `Storable`    | A data type that can be stored in the database
//! | Term | Meaning |
//! |------------------|--------------------------------------|
//! | `Env` | The 1 database environment, the "whole" thing
//! | `DatabaseR{o,w}` | An _actively open_ readable/writable `key/value` store
//! | `Table` | Solely the metadata of a `Database` (the `key` and `value` types, and the name)
//! | `TxR{o,w}` | A read/write transaction
//! | `Storable`       | A data type that can be stored in the database
//!
//! The dataflow is `Env` -> `Tx` -> `Database`
//!
//! Which reads as:
//! 1. You have a database `Environment`
//! 1. You open up a `Transaction`
//! 1. You get a particular `Database` from that `Environment`
//! 1. You open a particular `Table` from that `Environment`, getting a `Database`
//! 1. You can now read/write data from/to that `Database`
//!
//! # `ConcreteEnv`
@ -58,14 +63,10 @@
//! Note that `ConcreteEnv` itself is not a clonable type;
//! it should be wrapped in [`std::sync::Arc`].
//!
//! TODO: we could also expose `ConcreteDatabase` if we're
//! going to be storing any databases in structs, to lessen
//! the generic `<D: Database>` pain.
//!
//! TODO: we could replace `ConcreteEnv` with `fn Env::open() -> impl Env`/
//! <!-- SOMEDAY: replace `ConcreteEnv` with `fn Env::open() -> impl Env`/
//! and use `<E: Env>` everywhere it is stored instead. This would allow
//! generic-backed dynamic runtime selection of the database backend, i.e.
//! the user can select which database backend they use.
//! the user can select which database backend they use. -->
//!
//! # Feature flags
//! The `service` module requires the `service` feature to be enabled.
@ -77,43 +78,66 @@
//!
//! The default is `heed`.
//!
//! `tracing` is always enabled and cannot be disabled via feature-flag.
//! <!-- FIXME: tracing should be behind a feature flag -->
//!
//! # Invariants when not using `service`
//! `cuprate_database` can be used without the `service` feature enabled but
//! there are some things that must be kept in mind when doing so.
//!
//! TODO: make pretty. these will need to be updated
//! as things change and as more backends are added.
//! Failing to uphold these invariants may cause panics.
//!
//! 1. Memory map resizing (must resize as needed)
//! 1. Must not exceed `Config`'s maximum reader count
//! 1. Avoid many nested transactions
//! 1. `heed::MdbError::BadValSize`
//! 1. `heed::Error::InvalidDatabaseTyping`
//! 1. `heed::Error::BadOpenOptions`
//! 1. Encoding/decoding into `[u8]`
//! 1. `LMDB` requires the user to resize the memory map as needed (see [`RuntimeError::ResizeNeeded`] and the sketch below)
//! 1. `LMDB` has a maximum reader transaction count; currently it is set to `128`
//! 1. `LMDB` has [maximum key/value byte size](http://www.lmdb.tech/doc/group__internal.html#gac929399f5d93cef85f874b9e9b1d09e0) which must not be exceeded
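//!
//! Below is a sketch of upholding the first invariant; `write` stands in for
//! any hypothetical function that may return [`RuntimeError::ResizeNeeded`]:
//!
//! ```rust,ignore
//! loop {
//!     match write(&env) {
//!         // `LMDB`'s memory map is full; grow it and retry the write.
//!         Err(RuntimeError::ResizeNeeded) => {
//!             env.resize_map(None);
//!         }
//!         Err(e) => return Err(e.into()),
//!         Ok(()) => break,
//!     }
//! }
//! ```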
//!
//! # Example
//! Simple usage of this crate.
//! # Examples
//! The below is an example of using `cuprate_database`'s
//! lowest-level API, i.e. using the database directly.
//!
//! For examples of the higher-level APIs, see:
//! - [`ops`]
//! - [`service`]
//!
//! ```rust
//! use cuprate_database::{
//! config::Config,
//! ConcreteEnv,
//! Env, Key, TxRo, TxRw,
//! service::{ReadRequest, WriteRequest, Response},
//! config::ConfigBuilder,
//! Env, EnvInner,
//! tables::{Tables, TablesMut},
//! DatabaseRo, DatabaseRw, TxRo, TxRw,
//! };
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Create a configuration for the database environment.
//! let db_dir = tempfile::tempdir().unwrap();
//! let config = Config::new(Some(db_dir.path().to_path_buf()));
//! let db_dir = tempfile::tempdir()?;
//! let config = ConfigBuilder::new()
//! .db_directory(db_dir.path().to_path_buf())
//! .build();
//!
//! // Initialize the database thread-pool.
//! // Initialize the database environment.
//! let env = ConcreteEnv::open(config)?;
//!
//! // TODO:
//! // 1. let (read_handle, write_handle) = cuprate_database::service::init(config).unwrap();
//! // 2. Send write/read requests
//! // 3. Use some other `Env` functions
//! // 4. Shutdown
//! // Open up a transaction + tables for writing.
//! let env_inner = env.env_inner();
//! let tx_rw = env_inner.tx_rw()?;
//! let mut tables = env_inner.open_tables_mut(&tx_rw)?;
//!
//! // ⚠️ Write data to the tables directly.
//! // (not recommended, use `ops` or `service`).
//! const KEY_IMAGE: [u8; 32] = [88; 32];
//! tables.key_images_mut().put(&KEY_IMAGE, &())?;
//!
//! // Commit the data written.
//! drop(tables);
//! TxRw::commit(tx_rw)?;
//!
//! // Read the data, assert it is correct.
//! let tx_ro = env_inner.tx_ro()?;
//! let tables = env_inner.open_tables(&tx_ro)?;
//! let (key_image, _) = tables.key_images().first()?;
//! assert_eq!(key_image, KEY_IMAGE);
//! # Ok(()) }
//! ```
//---------------------------------------------------------------------------------------------------- Lints
@ -136,7 +160,6 @@
unconditional_recursion,
for_loops_over_fallibles,
unused_braces,
unused_doc_comments,
unused_labels,
keyword_idents,
non_ascii_idents,
@ -167,13 +190,13 @@
clippy::pedantic,
clippy::nursery,
clippy::cargo,
unused_doc_comments,
unused_mut,
missing_docs,
deprecated,
unused_comparisons,
nonstandard_style
)]
#![allow(unreachable_code, unused_variables, dead_code, unused_imports)] // TODO: remove
#![allow(
// FIXME: this lint affects crates outside of
// `database/` for some reason, allow for now.
@ -184,8 +207,12 @@
// although it is sometimes nice.
clippy::must_use_candidate,
// TODO: should be removed after all `todo!()`'s are gone.
clippy::diverging_sub_expression,
// FIXME: good lint but too many false positives
// with our `Env` + `RwLock` setup.
clippy::significant_drop_tightening,
// FIXME: good lint but is less clear in most cases.
clippy::items_after_statements,
clippy::module_name_repetitions,
clippy::module_inception,
@ -194,7 +221,16 @@
)]
// Allow some lints when running in debug mode.
#![cfg_attr(debug_assertions, allow(clippy::todo, clippy::multiple_crate_versions))]
// Allow some lints in tests.
#![cfg_attr(
test,
allow(
clippy::cognitive_complexity,
clippy::needless_pass_by_value,
clippy::cast_possible_truncation,
clippy::too_many_lines
)
)]
// Only allow building 64-bit targets.
//
// This allows us to assume 64-bit
@ -219,6 +255,7 @@ pub mod config;
mod constants;
pub use constants::{
DATABASE_BACKEND, DATABASE_CORRUPT_MSG, DATABASE_DATA_FILENAME, DATABASE_LOCK_FILENAME,
DATABASE_VERSION,
};
mod database;
@ -230,15 +267,13 @@ pub use env::{Env, EnvInner};
mod error;
pub use error::{InitError, RuntimeError};
mod free;
pub(crate) mod free;
pub mod resize;
mod key;
pub use key::Key;
mod macros;
mod storable;
pub use storable::{Storable, StorableBytes, StorableVec};
@ -259,3 +294,8 @@ pub use transaction::{TxRo, TxRw};
pub mod service;
//---------------------------------------------------------------------------------------------------- Private
#[cfg(test)]
pub(crate) mod tests;
#[cfg(feature = "service")] // only needed in `service` for now
pub(crate) mod unsafe_sendable;

View file

@ -1,17 +0,0 @@
//! General macros used throughout `cuprate-database`.
//---------------------------------------------------------------------------------------------------- Import
//---------------------------------------------------------------------------------------------------- Constants
//---------------------------------------------------------------------------------------------------- TYPE
//---------------------------------------------------------------------------------------------------- IMPL
//---------------------------------------------------------------------------------------------------- Trait Impl
//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
mod test {
// use super::*;
}

View file

@ -1,29 +0,0 @@
//! Alternative blocks.
//---------------------------------------------------------------------------------------------------- Import
//---------------------------------------------------------------------------------------------------- Free Functions
/// TODO
pub fn add_alt_block() {
todo!()
}
/// TODO
pub fn get_alt_block() {
todo!()
}
/// TODO
pub fn remove_alt_block() {
todo!()
}
/// TODO
pub fn get_alt_block_count() {
todo!()
}
/// TODO
pub fn drop_alt_blocks() {
todo!()
}

View file

@ -1,89 +1,472 @@
//! Blocks.
//! Block functions.
//---------------------------------------------------------------------------------------------------- Import
use bytemuck::TransparentWrapper;
use monero_serai::block::Block;
//---------------------------------------------------------------------------------------------------- Free Functions
/// TODO
pub fn add_block() {
todo!()
use cuprate_helper::map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits};
use cuprate_types::{ExtendedBlockHeader, VerifiedBlockInformation};
use crate::{
database::{DatabaseRo, DatabaseRw},
error::RuntimeError,
ops::{
blockchain::{chain_height, cumulative_generated_coins},
macros::doc_error,
output::get_rct_num_outputs,
tx::{add_tx, remove_tx},
},
tables::{BlockHeights, BlockInfos, Tables, TablesMut},
types::{BlockHash, BlockHeight, BlockInfo},
StorableVec,
};
//---------------------------------------------------------------------------------------------------- `add_block_*`
/// Add a [`VerifiedBlockInformation`] to the database.
///
/// This extracts all the data from the input block and
/// maps/adds them to the appropriate database tables.
///
#[doc = doc_error!()]
///
/// # Panics
/// This function will panic if:
/// - `block.height > u32::MAX` (not normally possible)
/// - `block.height` != [`chain_height`]
///
/// # Already exists
/// This function will operate normally even if `block` already
/// exists, i.e., this function will not return `Err` even if you
/// call this function infinitely with the same block.
// no inline, too big.
pub fn add_block(
block: &VerifiedBlockInformation,
tables: &mut impl TablesMut,
) -> Result<(), RuntimeError> {
//------------------------------------------------------ Check preconditions first
// Cast height to `u32` for storage (handled at top of function).
// Panic (should never happen) instead of allowing DB corruption.
// <https://github.com/Cuprate/cuprate/pull/102#discussion_r1560020991>
assert!(
u32::try_from(block.height).is_ok(),
"block.height ({}) > u32::MAX",
block.height,
);
let chain_height = chain_height(tables.block_heights())?;
assert_eq!(
block.height, chain_height,
"block.height ({}) != chain_height ({})",
block.height, chain_height,
);
// Expensive checks - debug only.
#[cfg(debug_assertions)]
{
assert_eq!(block.block.serialize(), block.block_blob);
assert_eq!(block.block.txs.len(), block.txs.len());
for (i, tx) in block.txs.iter().enumerate() {
assert_eq!(tx.tx_blob, tx.tx.serialize());
assert_eq!(tx.tx_hash, block.block.txs[i]);
}
}
//------------------------------------------------------ Transaction / Outputs / Key Images
// Add the miner transaction first.
{
let tx = &block.block.miner_tx;
add_tx(tx, &tx.serialize(), &tx.hash(), &chain_height, tables)?;
}
for tx in &block.txs {
add_tx(&tx.tx, &tx.tx_blob, &tx.tx_hash, &chain_height, tables)?;
}
//------------------------------------------------------ Block Info
// INVARIANT: must be below the above transaction loop since this
// RCT output count needs to account for _this_ block's outputs.
let cumulative_rct_outs = get_rct_num_outputs(tables.rct_outputs())?;
let cumulative_generated_coins =
cumulative_generated_coins(&block.height.saturating_sub(1), tables.block_infos())?
+ block.generated_coins;
let (cumulative_difficulty_low, cumulative_difficulty_high) =
split_u128_into_low_high_bits(block.cumulative_difficulty);
// Block Info.
tables.block_infos_mut().put(
&block.height,
&BlockInfo {
cumulative_difficulty_low,
cumulative_difficulty_high,
cumulative_generated_coins,
cumulative_rct_outs,
timestamp: block.block.header.timestamp,
block_hash: block.block_hash,
// INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`
weight: block.weight as u64,
long_term_weight: block.long_term_weight as u64,
},
)?;
// Block blobs.
tables
.block_blobs_mut()
.put(&block.height, StorableVec::wrap_ref(&block.block_blob))?;
// Block heights.
tables
.block_heights_mut()
.put(&block.block_hash, &block.height)?;
Ok(())
}
/// TODO
pub fn add_block_data() {
todo!()
//---------------------------------------------------------------------------------------------------- `pop_block`
/// Remove the top/latest block from the database.
///
/// The removed block's data is returned.
#[doc = doc_error!()]
///
/// In `pop_block()`'s case, [`RuntimeError::KeyNotFound`]
/// will be returned if there are no blocks left.
// no inline, too big
pub fn pop_block(
tables: &mut impl TablesMut,
) -> Result<(BlockHeight, BlockHash, Block), RuntimeError> {
//------------------------------------------------------ Block Info
// Remove block data from tables.
let (block_height, block_hash) = {
let (block_height, block_info) = tables.block_infos_mut().pop_last()?;
(block_height, block_info.block_hash)
};
// Block heights.
tables.block_heights_mut().delete(&block_hash)?;
// Block blobs.
// We deserialize the block blob into a `Block`, such
// that we can remove the associated transactions later.
let block_blob = tables.block_blobs_mut().take(&block_height)?.0;
let block = Block::read(&mut block_blob.as_slice())?;
//------------------------------------------------------ Transaction / Outputs / Key Images
remove_tx(&block.miner_tx.hash(), tables)?;
for tx_hash in &block.txs {
remove_tx(tx_hash, tables)?;
}
Ok((block_height, block_hash, block))
}
/// TODO
pub fn pop_block() {
todo!()
//---------------------------------------------------------------------------------------------------- `get_block_extended_header_*`
/// Retrieve a [`ExtendedBlockHeader`] from the database.
///
/// This extracts all the data from the database tables
/// needed to create a full `ExtendedBlockHeader`.
///
/// # Notes
/// This is slightly more expensive than [`get_block_extended_header_from_height`]
/// (1 more database lookup).
#[doc = doc_error!()]
#[inline]
pub fn get_block_extended_header(
block_hash: &BlockHash,
tables: &impl Tables,
) -> Result<ExtendedBlockHeader, RuntimeError> {
get_block_extended_header_from_height(&tables.block_heights().get(block_hash)?, tables)
}
/// TODO
pub fn block_exists() {
todo!()
/// Same as [`get_block_extended_header`] but with a [`BlockHeight`].
#[doc = doc_error!()]
#[inline]
pub fn get_block_extended_header_from_height(
block_height: &BlockHeight,
tables: &impl Tables,
) -> Result<ExtendedBlockHeader, RuntimeError> {
let block_info = tables.block_infos().get(block_height)?;
let block_blob = tables.block_blobs().get(block_height)?.0;
let block = Block::read(&mut block_blob.as_slice())?;
let cumulative_difficulty = combine_low_high_bits_to_u128(
block_info.cumulative_difficulty_low,
block_info.cumulative_difficulty_high,
);
// INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`
#[allow(clippy::cast_possible_truncation)]
Ok(ExtendedBlockHeader {
cumulative_difficulty,
version: block.header.major_version,
vote: block.header.minor_version,
timestamp: block.header.timestamp,
block_weight: block_info.weight as usize,
long_term_weight: block_info.long_term_weight as usize,
})
}
/// TODO
pub fn get_block_hash() {
todo!()
/// Return the top/latest [`ExtendedBlockHeader`] from the database.
#[doc = doc_error!()]
#[inline]
pub fn get_block_extended_header_top(
tables: &impl Tables,
) -> Result<(ExtendedBlockHeader, BlockHeight), RuntimeError> {
let height = chain_height(tables.block_heights())?.saturating_sub(1);
let header = get_block_extended_header_from_height(&height, tables)?;
Ok((header, height))
}
/// TODO
pub fn get_block_height() {
todo!()
//---------------------------------------------------------------------------------------------------- Misc
/// Retrieve a [`BlockInfo`] via its [`BlockHeight`].
#[doc = doc_error!()]
#[inline]
pub fn get_block_info(
block_height: &BlockHeight,
table_block_infos: &impl DatabaseRo<BlockInfos>,
) -> Result<BlockInfo, RuntimeError> {
table_block_infos.get(block_height)
}
/// TODO
pub fn get_block_weight() {
todo!()
/// Retrieve a [`BlockHeight`] via its [`BlockHash`].
#[doc = doc_error!()]
#[inline]
pub fn get_block_height(
block_hash: &BlockHash,
table_block_heights: &impl DatabaseRo<BlockHeights>,
) -> Result<BlockHeight, RuntimeError> {
table_block_heights.get(block_hash)
}
/// TODO
pub fn get_block_already_generated_coins() {
todo!()
/// Check if a block exists in the database.
///
/// # Errors
/// Note that this will never return `Err(RuntimeError::KeyNotFound)`,
/// as in that case, `Ok(false)` will be returned.
///
/// Other errors may still occur.
#[inline]
pub fn block_exists(
block_hash: &BlockHash,
table_block_heights: &impl DatabaseRo<BlockHeights>,
) -> Result<bool, RuntimeError> {
table_block_heights.contains(block_hash)
}
/// TODO
pub fn get_block_long_term_weight() {
todo!()
}
//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
#[allow(
clippy::significant_drop_tightening,
clippy::cognitive_complexity,
clippy::too_many_lines
)]
mod test {
use pretty_assertions::assert_eq;
/// TODO
pub fn get_block_timestamp() {
todo!()
}
use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3};
/// TODO
pub fn get_block_cumulative_rct_outputs() {
todo!()
}
use super::*;
use crate::{
ops::tx::{get_tx, tx_exists},
tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
transaction::TxRw,
Env, EnvInner,
};
/// TODO
pub fn get_block() {
todo!()
}
/// Tests all above block functions.
///
/// Note that this doesn't test the correctness of values added, as the
/// functions have a pre-condition that the caller handles this.
///
/// It simply tests if the proper tables are mutated, and if the data
/// stored and retrieved is the same.
#[test]
fn all_block_functions() {
let (env, _tmp) = tmp_concrete_env();
let env_inner = env.env_inner();
assert_all_tables_are_empty(&env);
/// TODO
pub fn get_block_from_height() {
todo!()
}
let mut blocks = [
block_v1_tx2().clone(),
block_v9_tx3().clone(),
block_v16_tx0().clone(),
];
// HACK: `add_block()` asserts blocks with non-sequential heights
// cannot be added; to get around this, manually edit the block height.
for (height, block) in blocks.iter_mut().enumerate() {
block.height = height as u64;
assert_eq!(block.block.serialize(), block.block_blob);
}
let generated_coins_sum = blocks
.iter()
.map(|block| block.generated_coins)
.sum::<u64>();
/// TODO
pub fn get_block_header() {
todo!()
}
// Add blocks.
{
let tx_rw = env_inner.tx_rw().unwrap();
let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
/// TODO
pub fn get_block_header_from_height() {
todo!()
}
for block in &blocks {
// println!("add_block: {block:#?}");
add_block(block, &mut tables).unwrap();
}
/// TODO
pub fn get_top_block() {
todo!()
}
drop(tables);
TxRw::commit(tx_rw).unwrap();
}
/// TODO
pub fn get_top_block_hash() {
todo!()
// Assert all reads are OK.
let block_hashes = {
let tx_ro = env_inner.tx_ro().unwrap();
let tables = env_inner.open_tables(&tx_ro).unwrap();
// Assert only the proper tables were added to.
AssertTableLen {
block_infos: 3,
block_blobs: 3,
block_heights: 3,
key_images: 69,
num_outputs: 41,
pruned_tx_blobs: 0,
prunable_hashes: 0,
outputs: 111,
prunable_tx_blobs: 0,
rct_outputs: 8,
tx_blobs: 8,
tx_ids: 8,
tx_heights: 8,
tx_unlock_time: 3,
}
.assert(&tables);
// Check `cumulative` functions work.
assert_eq!(
cumulative_generated_coins(&2, tables.block_infos()).unwrap(),
generated_coins_sum,
);
// Both height and hash should result in getting the same data.
let mut block_hashes = vec![];
for block in &blocks {
println!("blocks.iter(): hash: {}", hex::encode(block.block_hash));
let height = get_block_height(&block.block_hash, tables.block_heights()).unwrap();
println!("blocks.iter(): height: {height}");
assert!(block_exists(&block.block_hash, tables.block_heights()).unwrap());
let block_header_from_height =
get_block_extended_header_from_height(&height, &tables).unwrap();
let block_header_from_hash =
get_block_extended_header(&block.block_hash, &tables).unwrap();
// Just an alias, these names are long.
let b1 = block_header_from_hash;
let b2 = block;
assert_eq!(b1, block_header_from_height);
assert_eq!(b1.version, b2.block.header.major_version);
assert_eq!(b1.vote, b2.block.header.minor_version);
assert_eq!(b1.timestamp, b2.block.header.timestamp);
assert_eq!(b1.cumulative_difficulty, b2.cumulative_difficulty);
assert_eq!(b1.block_weight, b2.weight);
assert_eq!(b1.long_term_weight, b2.long_term_weight);
block_hashes.push(block.block_hash);
// Assert transaction reads are OK.
for (i, tx) in block.txs.iter().enumerate() {
println!("tx_hash: {:?}", hex::encode(tx.tx_hash));
assert!(tx_exists(&tx.tx_hash, tables.tx_ids()).unwrap());
let tx2 = get_tx(&tx.tx_hash, tables.tx_ids(), tables.tx_blobs()).unwrap();
assert_eq!(tx.tx_blob, tx2.serialize());
assert_eq!(tx.tx_weight, tx2.weight());
assert_eq!(tx.tx_hash, block.block.txs[i]);
assert_eq!(tx.tx_hash, tx2.hash());
}
}
block_hashes
};
{
let len = block_hashes.len();
let hashes: Vec<String> = block_hashes.iter().map(hex::encode).collect();
println!("block_hashes: len: {len}, hashes: {hashes:?}");
}
// Remove the blocks.
{
let tx_rw = env_inner.tx_rw().unwrap();
let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
for block_hash in block_hashes.into_iter().rev() {
println!("pop_block(): block_hash: {}", hex::encode(block_hash));
let (_popped_height, popped_hash, _popped_block) = pop_block(&mut tables).unwrap();
assert_eq!(block_hash, popped_hash);
assert!(matches!(
get_block_extended_header(&block_hash, &tables),
Err(RuntimeError::KeyNotFound)
));
}
drop(tables);
TxRw::commit(tx_rw).unwrap();
}
assert_all_tables_are_empty(&env);
}
/// We should panic if: `block.height` > `u32::MAX`
#[test]
#[should_panic(expected = "block.height (4294967296) > u32::MAX")]
fn block_height_gt_u32_max() {
let (env, _tmp) = tmp_concrete_env();
let env_inner = env.env_inner();
assert_all_tables_are_empty(&env);
let tx_rw = env_inner.tx_rw().unwrap();
let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
let mut block = block_v9_tx3().clone();
block.height = u64::from(u32::MAX) + 1;
add_block(&block, &mut tables).unwrap();
}
/// We should panic if: `block.height` != the chain height
#[test]
#[should_panic(
expected = "assertion `left == right` failed: block.height (123) != chain_height (1)\n left: 123\n right: 1"
)]
fn block_height_not_chain_height() {
let (env, _tmp) = tmp_concrete_env();
let env_inner = env.env_inner();
assert_all_tables_are_empty(&env);
let tx_rw = env_inner.tx_rw().unwrap();
let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
let mut block = block_v9_tx3().clone();
// HACK: `add_block()` asserts blocks with non-sequential heights
// cannot be added; to get around this, manually edit the block height.
block.height = 0;
// OK, `0 == 0`
assert_eq!(block.height, 0);
add_block(&block, &mut tables).unwrap();
// FAIL, `123 != 1`
block.height = 123;
add_block(&block, &mut tables).unwrap();
}
}

View file

@ -1,9 +1,182 @@
//! Blockchain.
//! Blockchain functions - chain height, generated coins, etc.
//---------------------------------------------------------------------------------------------------- Import
use crate::{
database::DatabaseRo,
error::RuntimeError,
ops::macros::doc_error,
tables::{BlockHeights, BlockInfos},
types::BlockHeight,
};
//---------------------------------------------------------------------------------------------------- Free Functions
/// TODO
pub fn height() {
todo!()
/// Retrieve the height of the chain.
///
/// This returns the chain-tip, not the [`top_block_height`].
///
/// For example:
/// - The blockchain has 0 blocks => this returns `0`
/// - The blockchain has 1 block (height 0) => this returns `1`
/// - The blockchain has 2 blocks (height 1) => this returns `2`
///
/// So the height of a new block would be `chain_height()`.
#[doc = doc_error!()]
#[inline]
pub fn chain_height(
table_block_heights: &impl DatabaseRo<BlockHeights>,
) -> Result<BlockHeight, RuntimeError> {
table_block_heights.len()
}
/// Retrieve the height of the top block.
///
/// This returns the height of the top block, not the [`chain_height`].
///
/// For example:
/// - The blockchain has 0 blocks => this returns `Err(RuntimeError::KeyNotFound)`
/// - The blockchain has 1 block (height 0) => this returns `Ok(0)`
/// - The blockchain has 2 blocks (height 1) => this returns `Ok(1)`
///
/// Note that in cases where no blocks have been written to the
/// database yet, an error is returned: `Err(RuntimeError::KeyNotFound)`.
///
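/// # Example
/// A sketch of the relationship with [`chain_height`], assuming open `tables`
/// holding `N > 0` blocks:
/// ```rust,ignore
/// let n = chain_height(tables.block_heights())?;
/// assert_eq!(top_block_height(tables.block_heights())?, n - 1);
/// ```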
#[doc = doc_error!()]
#[inline]
pub fn top_block_height(
table_block_heights: &impl DatabaseRo<BlockHeights>,
) -> Result<BlockHeight, RuntimeError> {
match table_block_heights.len()? {
0 => Err(RuntimeError::KeyNotFound),
height => Ok(height - 1),
}
}
/// Retrieve the cumulative amount of generated coins up to a certain [`BlockHeight`].
///
/// This returns the total amount of Monero generated up to `block_height`
/// (including the block itself) in atomic units.
///
/// For example:
/// - on the genesis block `0`, this returns the amount block `0` generated
/// - on the next block `1`, this returns the amount block `0` and `1` generated
///
/// If no blocks have been added and `block_height == 0`
/// (i.e., the cumulative generated coins before the genesis block is being calculated),
/// this returns `Ok(0)`.
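///
/// # Example
/// A sketch with hypothetical numbers, assuming two stored blocks that
/// generated `5` and `7` atomic units respectively:
/// ```rust,ignore
/// assert_eq!(cumulative_generated_coins(&0, tables.block_infos())?, 5);
/// assert_eq!(cumulative_generated_coins(&1, tables.block_infos())?, 12);
/// ```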
#[doc = doc_error!()]
#[inline]
pub fn cumulative_generated_coins(
block_height: &BlockHeight,
table_block_infos: &impl DatabaseRo<BlockInfos>,
) -> Result<u64, RuntimeError> {
match table_block_infos.get(block_height) {
Ok(block_info) => Ok(block_info.cumulative_generated_coins),
Err(RuntimeError::KeyNotFound) if block_height == &0 => Ok(0),
Err(e) => Err(e),
}
}
//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
mod test {
use pretty_assertions::assert_eq;
use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3};
use super::*;
use crate::{
ops::block::add_block,
tables::Tables,
tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
transaction::TxRw,
Env, EnvInner,
};
/// Tests all above functions.
///
/// Note that this doesn't test the correctness of values added, as the
/// functions have a pre-condition that the caller handles this.
///
/// It simply tests if the proper tables are mutated, and if the data
/// stored and retrieved is the same.
#[test]
fn all_blockchain_functions() {
let (env, _tmp) = tmp_concrete_env();
let env_inner = env.env_inner();
assert_all_tables_are_empty(&env);
let mut blocks = [
block_v1_tx2().clone(),
block_v9_tx3().clone(),
block_v16_tx0().clone(),
];
let blocks_len = u64::try_from(blocks.len()).unwrap();
// Add blocks.
{
let tx_rw = env_inner.tx_rw().unwrap();
let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
assert!(matches!(
top_block_height(tables.block_heights()),
Err(RuntimeError::KeyNotFound),
));
assert_eq!(
0,
cumulative_generated_coins(&0, tables.block_infos()).unwrap()
);
for (i, block) in blocks.iter_mut().enumerate() {
let i = u64::try_from(i).unwrap();
// HACK: `add_block()` asserts blocks with non-sequential heights
// cannot be added; to get around this, manually edit the block height.
block.height = i;
add_block(block, &mut tables).unwrap();
}
// Assert reads are correct.
AssertTableLen {
block_infos: 3,
block_blobs: 3,
block_heights: 3,
key_images: 69,
num_outputs: 41,
pruned_tx_blobs: 0,
prunable_hashes: 0,
outputs: 111,
prunable_tx_blobs: 0,
rct_outputs: 8,
tx_blobs: 8,
tx_ids: 8,
tx_heights: 8,
tx_unlock_time: 3,
}
.assert(&tables);
assert_eq!(blocks_len, chain_height(tables.block_heights()).unwrap());
assert_eq!(
blocks_len - 1,
top_block_height(tables.block_heights()).unwrap()
);
assert_eq!(
cumulative_generated_coins(&0, tables.block_infos()).unwrap(),
14_535_350_982_449,
);
assert_eq!(
cumulative_generated_coins(&1, tables.block_infos()).unwrap(),
17_939_125_004_612,
);
assert_eq!(
cumulative_generated_coins(&2, tables.block_infos()).unwrap(),
18_539_125_004_612,
);
assert!(matches!(
cumulative_generated_coins(&3, tables.block_infos()),
Err(RuntimeError::KeyNotFound),
));
drop(tables);
TxRw::commit(tx_rw).unwrap();
}
}
}

View file

@ -0,0 +1,127 @@
//! Key image functions.
//---------------------------------------------------------------------------------------------------- Import
use crate::{
database::{DatabaseRo, DatabaseRw},
error::RuntimeError,
ops::macros::{doc_add_block_inner_invariant, doc_error},
tables::KeyImages,
types::KeyImage,
};
//---------------------------------------------------------------------------------------------------- Key image functions
/// Add a [`KeyImage`] to the "spent" set in the database.
#[doc = doc_add_block_inner_invariant!()]
#[doc = doc_error!()]
#[inline]
pub fn add_key_image(
key_image: &KeyImage,
table_key_images: &mut impl DatabaseRw<KeyImages>,
) -> Result<(), RuntimeError> {
table_key_images.put(key_image, &())
}
/// Remove a [`KeyImage`] from the "spent" set in the database.
#[doc = doc_add_block_inner_invariant!()]
#[doc = doc_error!()]
#[inline]
pub fn remove_key_image(
key_image: &KeyImage,
table_key_images: &mut impl DatabaseRw<KeyImages>,
) -> Result<(), RuntimeError> {
table_key_images.delete(key_image)
}
/// Check if a [`KeyImage`] exists - i.e. if it is "spent".
#[doc = doc_error!()]
#[inline]
pub fn key_image_exists(
key_image: &KeyImage,
table_key_images: &impl DatabaseRo<KeyImages>,
) -> Result<bool, RuntimeError> {
table_key_images.contains(key_image)
}
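// A minimal usage sketch of the three functions above, assuming an open `env`
// (see the tests below for a full round-trip):
//
//     let env_inner = env.env_inner();
//     let tx_rw = env_inner.tx_rw()?;
//     let mut tables = env_inner.open_tables_mut(&tx_rw)?;
//     add_key_image(&[8; 32], tables.key_images_mut())?;
//     assert!(key_image_exists(&[8; 32], tables.key_images())?);
//     remove_key_image(&[8; 32], tables.key_images_mut())?;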
//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
mod test {
use hex_literal::hex;
use super::*;
use crate::{
tables::{Tables, TablesMut},
tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
transaction::TxRw,
Env, EnvInner,
};
/// Tests all above key-image functions.
///
/// Note that this doesn't test the correctness of values added, as the
/// functions have a pre-condition that the caller handles this.
///
/// It simply tests if the proper tables are mutated, and if the data
/// stored and retrieved is the same.
#[test]
fn all_key_image_functions() {
let (env, _tmp) = tmp_concrete_env();
let env_inner = env.env_inner();
assert_all_tables_are_empty(&env);
let key_images = [
hex!("be1c87fc8f958f68fbe346a18dfb314204dca7573f61aae14840b8037da5c286"),
hex!("c5e4a592c11f34a12e13516ab2883b7c580d47b286b8fe8b15d57d2a18ade275"),
hex!("93288b646f858edfb0997ae08d7c76f4599b04c127f108e8e69a0696ae7ba334"),
hex!("726e9e3d8f826d24811183f94ff53aeba766c9efe6274eb80806f69b06bfa3fc"),
];
// Add.
{
let tx_rw = env_inner.tx_rw().unwrap();
let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
for key_image in &key_images {
println!("add_key_image(): {}", hex::encode(key_image));
add_key_image(key_image, tables.key_images_mut()).unwrap();
}
drop(tables);
TxRw::commit(tx_rw).unwrap();
}
// Assert all reads are OK.
{
let tx_ro = env_inner.tx_ro().unwrap();
let tables = env_inner.open_tables(&tx_ro).unwrap();
// Assert only the proper tables were added to.
AssertTableLen {
key_images: tables.key_images().len().unwrap(),
..Default::default()
}
.assert(&tables);
for key_image in &key_images {
println!("key_image_exists(): {}", hex::encode(key_image));
key_image_exists(key_image, tables.key_images()).unwrap();
}
}
// Remove.
{
let tx_rw = env_inner.tx_rw().unwrap();
let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
for key_image in key_images {
println!("remove_key_image(): {}", hex::encode(key_image));
remove_key_image(&key_image, tables.key_images_mut()).unwrap();
assert!(!key_image_exists(&key_image, tables.key_images()).unwrap());
}
drop(tables);
TxRw::commit(tx_rw).unwrap();
}
assert_all_tables_are_empty(&env);
}
}

View file

@ -0,0 +1,33 @@
//! Macros.
//!
//! These generate repetitive documentation
//! for all the functions defined in `ops/`.
//---------------------------------------------------------------------------------------------------- Documentation macros
/// Generate documentation for the required `# Error` section.
macro_rules! doc_error {
() => {
r#"# Errors
This function returns [`RuntimeError::KeyNotFound`] if the input (if applicable) doesn't exist, or other `RuntimeError`s on database errors."#
};
}
pub(super) use doc_error;
/// Generate `# Invariant` documentation for internal `fn`'s
/// that should be called directly with caution.
macro_rules! doc_add_block_inner_invariant {
() => {
r#"# ⚠️ Invariant ⚠️
This function mainly exists to be used internally by the parent function [`crate::ops::block::add_block`].
`add_block()` makes sure all data related to the input is mutated, while
this function _does not_; it specifically mutates _particular_ tables.
This is usually undesired - although this function is still available to call directly.
When calling this function, ensure that either:
1. This effect (incomplete database mutation) is what is desired, or that...
2. ...the other tables will also be mutated to a correct state"#
};
}
pub(super) use doc_add_block_inner_invariant;

View file

@ -4,18 +4,107 @@
//! traits in this crate to generically call Monero-related
//! database operations.
//!
//! # TODO
//! TODO: These functions should pretty much map 1-1 to the `Request` enum.
//! # `impl Table`
//! `ops/` functions take [`Tables`](crate::tables::Tables) and
//! [`TablesMut`](crate::tables::TablesMut) directly - these are
//! _already opened_ database tables.
//!
//! TODO: These are function names from `old_database/` for now.
//! The actual underlying functions (e.g `get()`) aren't implemented.
//! As such, these functions put the responsibility
//! of transactions, tables, etc. on the caller.
//!
//! TODO: All of these functions need to take in generic
//! database trait parameters (and their actual inputs).
//! This does mean these functions are mostly as lean
//! as possible, so calling them in a loop should be okay.
//!
//! # Atomicity
//! As transactions are handled by the _caller_ of these functions,
//! it is up to the caller to decide what happens if one of them returns
//! an error.
//!
//! To maintain atomicity, transactions should be [`abort`](crate::transaction::TxRw::abort)ed
//! if one of the functions fails.
//!
//! For example, if [`add_block()`](block::add_block) is called and returns an [`Err`],
//! `abort`ing the transaction that opened the input `TablesMut` would reverse all tables
//! mutated by `add_block()` up until the error, leaving it in the state it was in before
//! `add_block()` was called.
//!
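//! A sketch of that pattern (assuming an open `env_inner` and a `block` to add):
//!
//! ```rust,ignore
//! let tx_rw = env_inner.tx_rw()?;
//! let mut tables = env_inner.open_tables_mut(&tx_rw)?;
//! match add_block(&block, &mut tables) {
//!     Ok(()) => {
//!         drop(tables);
//!         TxRw::commit(tx_rw)?;
//!     }
//!     Err(e) => {
//!         // Throw away the partial mutations.
//!         drop(tables);
//!         TxRw::abort(tx_rw)?;
//!         return Err(e.into());
//!     }
//! }
//! ```
//!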
//! # Sub-functions
//! The main functions within this module are mostly within the [`block`] module.
//!
//! Practically speaking, you should only be using 2 functions for mutation:
//! - [`add_block`](block::add_block)
//! - [`pop_block`](block::pop_block)
//!
//! The `block` functions are "parent" functions, calling other
//! sub-functions such as [`add_output()`](output::add_output).
//!
//! `add_output()` itself only modifies output-related tables, while the `block` "parent"
//! functions (like `add_block` and `pop_block`) modify all tables required.
//!
//! `add_block()` makes sure all data related to the input is mutated, while
//! these sub-functions _do not_; they specifically mutate _particular_ tables.
//!
//! When calling these sub-functions, ensure that either:
//! 1. This effect (incomplete database mutation) is what is desired, or that...
//! 2. ...the other tables will also be mutated to a correct state
//!
//! # Example
//! Simple usage of `ops`.
//!
//! ```rust
//! use hex_literal::hex;
//!
//! use cuprate_test_utils::data::block_v16_tx0;
//!
//! use cuprate_database::{
//! ConcreteEnv,
//! config::ConfigBuilder,
//! Env, EnvInner,
//! tables::{Tables, TablesMut},
//! DatabaseRo, DatabaseRw, TxRo, TxRw,
//! ops::block::{add_block, pop_block},
//! };
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Create a configuration for the database environment.
//! let db_dir = tempfile::tempdir()?;
//! let config = ConfigBuilder::new()
//! .db_directory(db_dir.path().to_path_buf())
//! .build();
//!
//! // Initialize the database environment.
//! let env = ConcreteEnv::open(config)?;
//!
//! // Open up a transaction + tables for writing.
//! let env_inner = env.env_inner();
//! let tx_rw = env_inner.tx_rw()?;
//! let mut tables = env_inner.open_tables_mut(&tx_rw)?;
//!
//! // Write a block to the database.
//! let mut block = block_v16_tx0().clone();
//! # block.height = 0;
//! add_block(&block, &mut tables)?;
//!
//! // Commit the data written.
//! drop(tables);
//! TxRw::commit(tx_rw)?;
//!
//! // Read the data, assert it is correct.
//! let tx_rw = env_inner.tx_rw()?;
//! let mut tables = env_inner.open_tables_mut(&tx_rw)?;
//! let (height, hash, serai_block) = pop_block(&mut tables)?;
//!
//! assert_eq!(height, 0);
//! assert_eq!(serai_block, block.block);
//! assert_eq!(hash, hex!("43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428"));
//! # Ok(()) }
//! ```
pub mod alt_block;
pub mod block;
pub mod blockchain;
pub mod key_image;
pub mod output;
pub mod property;
pub mod spent_key;
pub mod tx;
mod macros;

View file

@ -1,34 +1,371 @@
//! Outputs.
//! Output functions.
//---------------------------------------------------------------------------------------------------- Import
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, Scalar};
use monero_serai::{transaction::Timelock, H};
//---------------------------------------------------------------------------------------------------- Free Functions
/// TODO
pub fn add_output() {
todo!()
use cuprate_helper::map::u64_to_timelock;
use cuprate_types::OutputOnChain;
use crate::{
database::{DatabaseRo, DatabaseRw},
error::RuntimeError,
ops::macros::{doc_add_block_inner_invariant, doc_error},
tables::{Outputs, RctOutputs, Tables, TablesMut, TxUnlockTime},
types::{Amount, AmountIndex, Output, OutputFlags, PreRctOutputId, RctOutput},
};
//---------------------------------------------------------------------------------------------------- Pre-RCT Outputs
/// Add a Pre-RCT [`Output`] to the database.
///
/// Upon [`Ok`], this function returns the [`PreRctOutputId`] that
/// can be used to look up the `Output` in [`get_output()`].
///
#[doc = doc_add_block_inner_invariant!()]
#[doc = doc_error!()]
#[inline]
pub fn add_output(
amount: Amount,
output: &Output,
tables: &mut impl TablesMut,
) -> Result<PreRctOutputId, RuntimeError> {
// FIXME: this would be much better expressed with a
// `btree_map::Entry`-like API, fix `trait DatabaseRw`.
let num_outputs = match tables.num_outputs().get(&amount) {
// Entry with `amount` already exists.
Ok(num_outputs) => num_outputs,
// Entry with `amount` didn't exist, this is
// the 1st output with this amount.
Err(RuntimeError::KeyNotFound) => 0,
Err(e) => return Err(e),
};
// Update the amount of outputs.
tables.num_outputs_mut().put(&amount, &(num_outputs + 1))?;
let pre_rct_output_id = PreRctOutputId {
amount,
// The new `amount_index` is the current number of outputs with the same amount.
amount_index: num_outputs,
};
tables.outputs_mut().put(&pre_rct_output_id, output)?;
Ok(pre_rct_output_id)
}
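// A sketch of the `amount_index` bookkeeping above, assuming open `tables`
// and some hypothetical pre-RCT `output`:
//
//     let id_0 = add_output(22, &output, &mut tables)?; // 1st output of amount 22 => amount_index == 0
//     let id_1 = add_output(22, &output, &mut tables)?; // 2nd output of amount 22 => amount_index == 1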
/// TODO
pub fn remove_output() {
todo!()
/// Remove a Pre-RCT [`Output`] from the database.
#[doc = doc_add_block_inner_invariant!()]
#[doc = doc_error!()]
#[inline]
pub fn remove_output(
pre_rct_output_id: &PreRctOutputId,
tables: &mut impl TablesMut,
) -> Result<(), RuntimeError> {
// Decrement the number of outputs by 1, or delete the entry outright.
// FIXME: this would be much better expressed with a
// `btree_map::Entry`-like API, fix `trait DatabaseRw`.
tables
.num_outputs_mut()
.update(&pre_rct_output_id.amount, |num_outputs| {
// INVARIANT: Should never be 0.
if num_outputs == 1 {
None
} else {
Some(num_outputs - 1)
}
})?;
// Delete the output data itself.
tables.outputs_mut().delete(pre_rct_output_id)
}
/// TODO
pub fn get_output() {
todo!()
/// Retrieve a Pre-RCT [`Output`] from the database.
#[doc = doc_error!()]
#[inline]
pub fn get_output(
pre_rct_output_id: &PreRctOutputId,
table_outputs: &impl DatabaseRo<Outputs>,
) -> Result<Output, RuntimeError> {
table_outputs.get(pre_rct_output_id)
}
/// TODO
pub fn get_output_list() {
todo!()
/// How many pre-RCT [`Output`]s are there?
///
/// This returns the number of pre-RCT outputs currently stored.
#[doc = doc_error!()]
#[inline]
pub fn get_num_outputs(table_outputs: &impl DatabaseRo<Outputs>) -> Result<u64, RuntimeError> {
table_outputs.len()
}
/// TODO
pub fn get_rct_num_outputs() {
todo!()
//---------------------------------------------------------------------------------------------------- RCT Outputs
/// Add an [`RctOutput`] to the database.
///
/// Upon [`Ok`], this function returns the [`AmountIndex`] that
/// can be used to look up the `RctOutput` in [`get_rct_output()`].
#[doc = doc_add_block_inner_invariant!()]
#[doc = doc_error!()]
#[inline]
pub fn add_rct_output(
rct_output: &RctOutput,
table_rct_outputs: &mut impl DatabaseRw<RctOutputs>,
) -> Result<AmountIndex, RuntimeError> {
let amount_index = get_rct_num_outputs(table_rct_outputs)?;
table_rct_outputs.put(&amount_index, rct_output)?;
Ok(amount_index)
}
/// TODO
pub fn get_pre_rct_num_outputs() {
todo!()
/// Remove an [`RctOutput`] from the database.
#[doc = doc_add_block_inner_invariant!()]
#[doc = doc_error!()]
#[inline]
pub fn remove_rct_output(
amount_index: &AmountIndex,
table_rct_outputs: &mut impl DatabaseRw<RctOutputs>,
) -> Result<(), RuntimeError> {
table_rct_outputs.delete(amount_index)
}
/// Retrieve an [`RctOutput`] from the database.
#[doc = doc_error!()]
#[inline]
pub fn get_rct_output(
amount_index: &AmountIndex,
table_rct_outputs: &impl DatabaseRo<RctOutputs>,
) -> Result<RctOutput, RuntimeError> {
table_rct_outputs.get(amount_index)
}
/// How many [`RctOutput`]s are there?
///
/// This returns the number of RCT outputs currently stored.
#[doc = doc_error!()]
#[inline]
pub fn get_rct_num_outputs(
table_rct_outputs: &impl DatabaseRo<RctOutputs>,
) -> Result<u64, RuntimeError> {
table_rct_outputs.len()
}
//---------------------------------------------------------------------------------------------------- Mapping functions
/// Map an [`Output`] to a [`cuprate_types::OutputOnChain`].
#[doc = doc_error!()]
pub fn output_to_output_on_chain(
output: &Output,
amount: Amount,
table_tx_unlock_time: &impl DatabaseRo<TxUnlockTime>,
) -> Result<OutputOnChain, RuntimeError> {
// FIXME: implement lookup table for common values:
// <https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/ringct/rctOps.cpp#L322>
let commitment = ED25519_BASEPOINT_POINT + H() * Scalar::from(amount);
let time_lock = if output
.output_flags
.contains(OutputFlags::NON_ZERO_UNLOCK_TIME)
{
u64_to_timelock(table_tx_unlock_time.get(&output.tx_idx)?)
} else {
Timelock::None
};
let key = CompressedEdwardsY::from_slice(&output.key)
.map(|y| y.decompress())
.unwrap_or(None);
Ok(OutputOnChain {
height: u64::from(output.height),
time_lock,
key,
commitment,
})
}
/// Map an [`RctOutput`] to a [`cuprate_types::OutputOnChain`].
///
/// # Panics
/// This function will panic if `rct_output`'s `commitment` fails to decompress
/// into a valid [`EdwardsPoint`](curve25519_dalek::edwards::EdwardsPoint).
///
/// This should normally not happen as commitments that
/// are stored in the database should always be valid.
#[doc = doc_error!()]
pub fn rct_output_to_output_on_chain(
rct_output: &RctOutput,
table_tx_unlock_time: &impl DatabaseRo<TxUnlockTime>,
) -> Result<OutputOnChain, RuntimeError> {
// INVARIANT: Commitments stored are valid when stored by the database.
let commitment = CompressedEdwardsY::from_slice(&rct_output.commitment)
.unwrap()
.decompress()
.unwrap();
let time_lock = if rct_output
.output_flags
.contains(OutputFlags::NON_ZERO_UNLOCK_TIME)
{
u64_to_timelock(table_tx_unlock_time.get(&rct_output.tx_idx)?)
} else {
Timelock::None
};
let key = CompressedEdwardsY::from_slice(&rct_output.key)
.map(|y| y.decompress())
.unwrap_or(None);
Ok(OutputOnChain {
height: u64::from(rct_output.height),
time_lock,
key,
commitment,
})
}
/// Map a [`PreRctOutputId`] to an [`OutputOnChain`].
///
/// Note that this still supports RCT outputs; in that case, [`PreRctOutputId::amount`] should be `0`.
#[doc = doc_error!()]
pub fn id_to_output_on_chain(
id: &PreRctOutputId,
tables: &impl Tables,
) -> Result<OutputOnChain, RuntimeError> {
// v2 transactions.
if id.amount == 0 {
let rct_output = get_rct_output(&id.amount_index, tables.rct_outputs())?;
let output_on_chain = rct_output_to_output_on_chain(&rct_output, tables.tx_unlock_time())?;
Ok(output_on_chain)
} else {
// v1 transactions.
let output = get_output(id, tables.outputs())?;
let output_on_chain =
output_to_output_on_chain(&output, id.amount, tables.tx_unlock_time())?;
Ok(output_on_chain)
}
}
//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
mod test {
use super::*;
use crate::{
tables::{Tables, TablesMut},
tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
types::OutputFlags,
Env, EnvInner,
};
use pretty_assertions::assert_eq;
/// Dummy `Output`.
const OUTPUT: Output = Output {
key: [44; 32],
height: 0,
output_flags: OutputFlags::NON_ZERO_UNLOCK_TIME,
tx_idx: 0,
};
/// Dummy `RctOutput`.
const RCT_OUTPUT: RctOutput = RctOutput {
key: [88; 32],
height: 1,
output_flags: OutputFlags::empty(),
tx_idx: 1,
commitment: [100; 32],
};
/// Dummy `Amount`
const AMOUNT: Amount = 22;
/// Tests all above output functions when only inputting `Output` data (no Block).
///
/// Note that this doesn't test the correctness of values added, as the
/// functions have a pre-condition that the caller handles this.
///
/// It simply tests if the proper tables are mutated, and if the data
/// stored and retrieved is the same.
#[test]
fn all_output_functions() {
let (env, _tmp) = tmp_concrete_env();
let env_inner = env.env_inner();
assert_all_tables_are_empty(&env);
let tx_rw = env_inner.tx_rw().unwrap();
let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
// Assert length is correct.
assert_eq!(get_num_outputs(tables.outputs()).unwrap(), 0);
assert_eq!(get_rct_num_outputs(tables.rct_outputs()).unwrap(), 0);
// Add outputs.
let pre_rct_output_id = add_output(AMOUNT, &OUTPUT, &mut tables).unwrap();
let amount_index = add_rct_output(&RCT_OUTPUT, tables.rct_outputs_mut()).unwrap();
assert_eq!(
pre_rct_output_id,
PreRctOutputId {
amount: AMOUNT,
amount_index: 0,
}
);
// Assert all reads of the outputs are OK.
{
// Assert proper tables were added to.
AssertTableLen {
block_infos: 0,
block_blobs: 0,
block_heights: 0,
key_images: 0,
num_outputs: 1,
pruned_tx_blobs: 0,
prunable_hashes: 0,
outputs: 1,
prunable_tx_blobs: 0,
rct_outputs: 1,
tx_blobs: 0,
tx_ids: 0,
tx_heights: 0,
tx_unlock_time: 0,
}
.assert(&tables);
// Assert length is correct.
assert_eq!(get_num_outputs(tables.outputs()).unwrap(), 1);
assert_eq!(get_rct_num_outputs(tables.rct_outputs()).unwrap(), 1);
assert_eq!(1, tables.num_outputs().get(&AMOUNT).unwrap());
// Assert the value is the same after retrieval.
assert_eq!(
OUTPUT,
get_output(&pre_rct_output_id, tables.outputs()).unwrap(),
);
assert_eq!(
RCT_OUTPUT,
get_rct_output(&amount_index, tables.rct_outputs()).unwrap(),
);
}
// Remove the outputs.
{
remove_output(&pre_rct_output_id, &mut tables).unwrap();
remove_rct_output(&amount_index, tables.rct_outputs_mut()).unwrap();
// Assert value no longer exists.
assert!(matches!(
get_output(&pre_rct_output_id, tables.outputs()),
Err(RuntimeError::KeyNotFound)
));
assert!(matches!(
get_rct_output(&amount_index, tables.rct_outputs()),
Err(RuntimeError::KeyNotFound)
));
// Assert length is correct.
assert_eq!(get_num_outputs(tables.outputs()).unwrap(), 0);
assert_eq!(get_rct_num_outputs(tables.rct_outputs()).unwrap(), 0);
}
assert_all_tables_are_empty(&env);
}
}

View file

@ -1,9 +1,39 @@
//! Properties.
//! Database properties functions - version, pruning, etc.
//!
//! SOMEDAY: the database `properties` table is not yet implemented.
//---------------------------------------------------------------------------------------------------- Import
use monero_pruning::PruningSeed;
use crate::{error::RuntimeError, ops::macros::doc_error};
//---------------------------------------------------------------------------------------------------- Free Functions
/// TODO
pub fn get_blockchain_pruning_seed() {
todo!()
/// SOMEDAY
///
#[doc = doc_error!()]
///
/// # Example
/// ```rust
/// # use cuprate_database::{*, tables::*, ops::block::*, ops::tx::*};
/// // SOMEDAY
/// ```
#[inline]
pub const fn get_blockchain_pruning_seed() -> Result<PruningSeed, RuntimeError> {
// SOMEDAY: impl pruning.
// We need a DB properties table.
Ok(PruningSeed::NotPruned)
}
/// SOMEDAY
///
#[doc = doc_error!()]
///
/// # Example
/// ```rust
/// # use cuprate_database::{*, tables::*, ops::block::*, ops::tx::*};
/// // SOMEDAY
/// ```
#[inline]
pub const fn db_version() -> Result<u64, RuntimeError> {
// SOMEDAY: We need a DB properties table.
Ok(crate::constants::DATABASE_VERSION)
}

View file

@ -1,19 +0,0 @@
//! Spent keys.
//---------------------------------------------------------------------------------------------------- Import
//---------------------------------------------------------------------------------------------------- Free Functions
/// TODO
pub fn add_spent_key() {
todo!()
}
/// TODO
pub fn remove_spent_key() {
todo!()
}
/// TODO
pub fn is_spent_key_recorded() {
todo!()
}

View file

@ -1,64 +1,434 @@
//! Transactions.
//! Transaction functions.
//---------------------------------------------------------------------------------------------------- Import
use bytemuck::TransparentWrapper;
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, Scalar};
use monero_serai::transaction::{Input, Timelock, Transaction};
//---------------------------------------------------------------------------------------------------- Free Functions
/// TODO
pub fn add_transaction() {
todo!()
use crate::{
database::{DatabaseRo, DatabaseRw},
error::RuntimeError,
ops::{
key_image::{add_key_image, remove_key_image},
macros::{doc_add_block_inner_invariant, doc_error},
output::{
add_output, add_rct_output, get_rct_num_outputs, remove_output, remove_rct_output,
},
},
tables::{TablesMut, TxBlobs, TxIds},
types::{BlockHeight, Output, OutputFlags, PreRctOutputId, RctOutput, TxHash, TxId},
StorableVec,
};
//---------------------------------------------------------------------------------------------------- Private
/// Add a [`Transaction`] (and related data) to the database.
///
/// The `block_height` is the block that this `tx` belongs to.
///
/// Note that the caller's input is trusted implicitly and no checks
/// are done (in this function) on whether the `block_height` is correct or not.
///
#[doc = doc_add_block_inner_invariant!()]
///
/// # Notes
/// This function is different from other sub-functions and slightly more similar to
/// [`add_block()`](crate::ops::block::add_block) in that it calls other sub-functions.
///
/// This function calls:
/// - [`add_output()`]
/// - [`add_rct_output()`]
/// - [`add_key_image()`]
///
/// Thus, after [`add_tx`], those values (outputs and key images)
/// will be added to database tables as well.
///
/// # Panics
/// This function will panic if:
/// - `block.height > u32::MAX` (not normally possible)
#[doc = doc_error!()]
#[inline]
pub fn add_tx(
tx: &Transaction,
tx_blob: &Vec<u8>,
tx_hash: &TxHash,
block_height: &BlockHeight,
tables: &mut impl TablesMut,
) -> Result<TxId, RuntimeError> {
let tx_id = get_num_tx(tables.tx_ids_mut())?;
//------------------------------------------------------ Transaction data
tables.tx_ids_mut().put(tx_hash, &tx_id)?;
tables.tx_heights_mut().put(&tx_id, block_height)?;
tables
.tx_blobs_mut()
.put(&tx_id, StorableVec::wrap_ref(tx_blob))?;
//------------------------------------------------------ Timelocks
// Height/time is not differentiated via type, but rather:
// "height is any value less than 500_000_000 and timestamp is any value above"
// so the `u64/usize` is stored without any tag.
//
// <https://github.com/Cuprate/cuprate/pull/102#discussion_r1558504285>
match tx.prefix.timelock {
Timelock::None => (),
Timelock::Block(height) => tables.tx_unlock_time_mut().put(&tx_id, &(height as u64))?,
Timelock::Time(time) => tables.tx_unlock_time_mut().put(&tx_id, &time)?,
}
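// Sketch of the scheme above; the stored `u64` is mapped back on read
// (assuming `u64_to_timelock` follows Monero's `500_000_000` split):
//
//     u64_to_timelock(499_999_999) // => Timelock::Block(499_999_999)
//     u64_to_timelock(500_000_000) // => Timelock::Time(500_000_000)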
//------------------------------------------------------ Pruning
// SOMEDAY: implement pruning after `monero-serai` does.
// if let PruningSeed::Pruned(decompressed_pruning_seed) = get_blockchain_pruning_seed()? {
// SOMEDAY: what to store here? which table?
// }
//------------------------------------------------------
let Ok(height) = u32::try_from(*block_height) else {
panic!("add_tx(): block_height ({block_height}) > u32::MAX");
};
//------------------------------------------------------ Key Images
// Is this a miner transaction?
// Which table we add the output data to depends on this.
// <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/blockchain_db/blockchain_db.cpp#L212-L216>
let mut miner_tx = false;
// Key images.
for inputs in &tx.prefix.inputs {
match inputs {
// Key images.
Input::ToKey { key_image, .. } => {
add_key_image(key_image.compress().as_bytes(), tables.key_images_mut())?;
}
// This is a miner transaction, set it for later use.
Input::Gen(_) => miner_tx = true,
}
}
//------------------------------------------------------ Outputs
// Output bit flags.
// Set to a non-zero bit value if the unlock time is non-zero.
let output_flags = match tx.prefix.timelock {
Timelock::None => OutputFlags::empty(),
Timelock::Block(_) | Timelock::Time(_) => OutputFlags::NON_ZERO_UNLOCK_TIME,
};
let mut amount_indices = Vec::with_capacity(tx.prefix.outputs.len());
for (i, output) in tx.prefix.outputs.iter().enumerate() {
let key = *output.key.as_bytes();
// Outputs with clear amounts.
let amount_index = if let Some(amount) = output.amount {
// RingCT (v2 transaction) miner outputs.
if miner_tx && tx.prefix.version == 2 {
// Create commitment.
// <https://github.com/Cuprate/cuprate/pull/102#discussion_r1559489302>
// FIXME: implement lookup table for common values:
// <https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/ringct/rctOps.cpp#L322>
let commitment = (ED25519_BASEPOINT_POINT
+ monero_serai::H() * Scalar::from(amount))
.compress()
.to_bytes();
add_rct_output(
&RctOutput {
key,
height,
output_flags,
tx_idx: tx_id,
commitment,
},
tables.rct_outputs_mut(),
)?
// Pre-RingCT outputs.
} else {
add_output(
amount,
&Output {
key,
height,
output_flags,
tx_idx: tx_id,
},
tables,
)?
.amount_index
}
// RingCT outputs.
} else {
let commitment = tx.rct_signatures.base.commitments[i].compress().to_bytes();
add_rct_output(
&RctOutput {
key,
height,
output_flags,
tx_idx: tx_id,
commitment,
},
tables.rct_outputs_mut(),
)?
};
amount_indices.push(amount_index);
} // for each output
tables
.tx_outputs_mut()
.put(&tx_id, &StorableVec(amount_indices))?;
Ok(tx_id)
}
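// A minimal usage sketch of [`add_tx`] (illustrative; `env`, `tx`, `tx_blob`,
// and `tx_hash` are assumed bindings, mirroring the test at the bottom of
// this file):
//
// let env_inner = env.env_inner();
// let tx_rw = env_inner.tx_rw()?;
// let mut tables = env_inner.open_tables_mut(&tx_rw)?;
// let tx_id = add_tx(&tx, &tx_blob, &tx_hash, &0, &mut tables)?;
// drop(tables);
// TxRw::commit(tx_rw)?;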
/// TODO
pub fn add_transaction_data() {
todo!()
/// Remove a transaction from the database with its [`TxHash`].
///
/// This returns the [`TxId`] and [`Transaction`] of the removed transaction.
///
#[doc = doc_add_block_inner_invariant!()]
///
/// # Notes
/// As mentioned in [`add_tx`], this function will call other sub-functions:
/// - [`remove_output()`]
/// - [`remove_rct_output()`]
/// - [`remove_key_image()`]
///
/// Thus, after [`remove_tx`], those values (outputs and key images)
/// will be removed from database tables as well.
///
#[doc = doc_error!()]
#[inline]
pub fn remove_tx(
tx_hash: &TxHash,
tables: &mut impl TablesMut,
) -> Result<(TxId, Transaction), RuntimeError> {
//------------------------------------------------------ Transaction data
let tx_id = tables.tx_ids_mut().take(tx_hash)?;
let tx_blob = tables.tx_blobs_mut().take(&tx_id)?;
tables.tx_heights_mut().delete(&tx_id)?;
tables.tx_outputs_mut().delete(&tx_id)?;
//------------------------------------------------------ Pruning
// SOMEDAY: implement pruning after `monero-serai` does.
// table_prunable_hashes.delete(&tx_id)?;
// table_prunable_tx_blobs.delete(&tx_id)?;
// if let PruningSeed::Pruned(decompressed_pruning_seed) = get_blockchain_pruning_seed()? {
// SOMEDAY: what to remove here? which table?
// }
//------------------------------------------------------ Unlock Time
match tables.tx_unlock_time_mut().delete(&tx_id) {
Ok(()) | Err(RuntimeError::KeyNotFound) => (),
// An actual error occurred, return.
Err(e) => return Err(e),
}
//------------------------------------------------------
// Refer to the inner transaction type from now on.
let tx = Transaction::read(&mut tx_blob.0.as_slice())?;
//------------------------------------------------------ Key Images
// Is this a miner transaction?
let mut miner_tx = false;
for inputs in &tx.prefix.inputs {
match inputs {
// Key images.
Input::ToKey { key_image, .. } => {
remove_key_image(key_image.compress().as_bytes(), tables.key_images_mut())?;
}
// This is a miner transaction, set it for later use.
Input::Gen(_) => miner_tx = true,
}
} // for each input
//------------------------------------------------------ Outputs
// Remove each output in the transaction.
for output in &tx.prefix.outputs {
// Outputs with clear amounts.
if let Some(amount) = output.amount {
// RingCT miner outputs.
if miner_tx && tx.prefix.version == 2 {
let amount_index = get_rct_num_outputs(tables.rct_outputs())? - 1;
remove_rct_output(&amount_index, tables.rct_outputs_mut())?;
// Pre-RingCT outputs.
} else {
let amount_index = tables.num_outputs_mut().get(&amount)? - 1;
remove_output(
&PreRctOutputId {
amount,
amount_index,
},
tables,
)?;
}
// RingCT outputs.
} else {
let amount_index = get_rct_num_outputs(tables.rct_outputs())? - 1;
remove_rct_output(&amount_index, tables.rct_outputs_mut())?;
}
} // for each output
Ok((tx_id, tx))
}
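// The inverse sketch for [`remove_tx`] (again mirroring the test below;
// `env_inner` and `tx_hash` are assumed bindings):
//
// let tx_rw = env_inner.tx_rw()?;
// let mut tables = env_inner.open_tables_mut(&tx_rw)?;
// let (tx_id, tx) = remove_tx(&tx_hash, &mut tables)?;
// drop(tables);
// TxRw::commit(tx_rw)?;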
/// TODO
pub fn remove_transaction() {
todo!()
//---------------------------------------------------------------------------------------------------- `get_tx_*`
/// Retrieve a [`Transaction`] from the database with its [`TxHash`].
#[doc = doc_error!()]
#[inline]
pub fn get_tx(
tx_hash: &TxHash,
table_tx_ids: &impl DatabaseRo<TxIds>,
table_tx_blobs: &impl DatabaseRo<TxBlobs>,
) -> Result<Transaction, RuntimeError> {
get_tx_from_id(&table_tx_ids.get(tx_hash)?, table_tx_blobs)
}
/// TODO
pub fn remove_transaction_data() {
todo!()
/// Retrieve a [`Transaction`] from the database with its [`TxId`].
#[doc = doc_error!()]
#[inline]
pub fn get_tx_from_id(
tx_id: &TxId,
table_tx_blobs: &impl DatabaseRo<TxBlobs>,
) -> Result<Transaction, RuntimeError> {
let tx_blob = table_tx_blobs.get(tx_id)?.0;
Ok(Transaction::read(&mut tx_blob.as_slice())?)
}
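// Both lookups are equivalent when the hash/ID pair match, e.g.
// (as exercised in the tests below):
//
// let tx = get_tx_from_id(&tx_id, tables.tx_blobs())?;
// assert_eq!(tx, get_tx(&tx.hash(), tables.tx_ids(), tables.tx_blobs())?);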
/// TODO
pub fn remove_tx_outputs() {
todo!()
//----------------------------------------------------------------------------------------------------
/// How many [`Transaction`]s are there?
///
/// This returns the number of transactions currently stored.
///
/// For example:
/// - 0 transactions exist => returns 0
/// - 1 transaction exists => returns 1
/// - 5 transactions exist => returns 5
/// - etc
#[doc = doc_error!()]
#[inline]
pub fn get_num_tx(table_tx_ids: &impl DatabaseRo<TxIds>) -> Result<u64, RuntimeError> {
table_tx_ids.len()
}
/// TODO
pub fn get_num_tx() {
todo!()
//----------------------------------------------------------------------------------------------------
/// Check if a transaction exists in the database.
///
/// Returns `true` if it does, else `false`.
#[doc = doc_error!()]
#[inline]
pub fn tx_exists(
tx_hash: &TxHash,
table_tx_ids: &impl DatabaseRo<TxIds>,
) -> Result<bool, RuntimeError> {
table_tx_ids.contains(tx_hash)
}
/// TODO
pub fn tx_exists() {
todo!()
}
//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
mod test {
use super::*;
use crate::{
tables::Tables,
tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
transaction::TxRw,
Env, EnvInner,
};
use cuprate_test_utils::data::{tx_v1_sig0, tx_v1_sig2, tx_v2_rct3};
use pretty_assertions::assert_eq;
/// TODO
pub fn get_tx_unlock_time() {
todo!()
}
/// Tests all above tx functions when only inputting `Transaction` data (no Block).
#[test]
fn all_tx_functions() {
let (env, _tmp) = tmp_concrete_env();
let env_inner = env.env_inner();
assert_all_tables_are_empty(&env);
/// TODO
pub fn get_tx() {
todo!()
}
// Monero `Transaction`, not database tx.
let txs = [tx_v1_sig0(), tx_v1_sig2(), tx_v2_rct3()];
/// TODO
pub fn get_tx_list() {
todo!()
}
// Add transactions.
let tx_ids = {
let tx_rw = env_inner.tx_rw().unwrap();
let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
/// TODO
pub fn get_pruned_tx() {
todo!()
}
let tx_ids = txs
.iter()
.map(|tx| {
println!("add_tx(): {tx:#?}");
add_tx(&tx.tx, &tx.tx_blob, &tx.tx_hash, &0, &mut tables).unwrap()
})
.collect::<Vec<TxId>>();
/// TODO
pub fn get_tx_block_height() {
todo!()
drop(tables);
TxRw::commit(tx_rw).unwrap();
tx_ids
};
// Assert all reads of the transactions are OK.
let tx_hashes = {
let tx_ro = env_inner.tx_ro().unwrap();
let tables = env_inner.open_tables(&tx_ro).unwrap();
// Assert only the proper tables were added to.
AssertTableLen {
block_infos: 0,
block_blobs: 0,
block_heights: 0,
key_images: 4, // added to key images
pruned_tx_blobs: 0,
prunable_hashes: 0,
num_outputs: 9,
outputs: 10, // added to outputs
prunable_tx_blobs: 0,
rct_outputs: 2,
tx_blobs: 3,
tx_ids: 3,
tx_heights: 3,
tx_unlock_time: 1, // only 1 has a timelock
}
.assert(&tables);
// Both from ID and hash should result in getting the same transaction.
let mut tx_hashes = vec![];
for (i, tx_id) in tx_ids.iter().enumerate() {
println!("tx_ids.iter(): i: {i}, tx_id: {tx_id}");
let tx_get_from_id = get_tx_from_id(tx_id, tables.tx_blobs()).unwrap();
let tx_hash = tx_get_from_id.hash();
let tx_get = get_tx(&tx_hash, tables.tx_ids(), tables.tx_blobs()).unwrap();
println!("tx_ids.iter(): tx_get_from_id: {tx_get_from_id:#?}, tx_get: {tx_get:#?}");
assert_eq!(tx_get_from_id.hash(), tx_get.hash());
assert_eq!(tx_get_from_id.hash(), txs[i].tx_hash);
assert_eq!(tx_get_from_id, tx_get);
assert_eq!(tx_get, txs[i].tx);
assert!(tx_exists(&tx_hash, tables.tx_ids()).unwrap());
tx_hashes.push(tx_hash);
}
tx_hashes
};
// Remove the transactions.
{
let tx_rw = env_inner.tx_rw().unwrap();
let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
for tx_hash in tx_hashes {
println!("remove_tx(): tx_hash: {tx_hash:?}");
let (tx_id, _) = remove_tx(&tx_hash, &mut tables).unwrap();
assert!(matches!(
get_tx_from_id(&tx_id, tables.tx_blobs()),
Err(RuntimeError::KeyNotFound)
));
}
drop(tables);
TxRw::commit(tx_rw).unwrap();
}
assert_all_tables_are_empty(&env);
}
}

View file

@ -1,7 +1,7 @@
//! Database memory map resizing algorithms.
//!
//! This module contains [`ResizeAlgorithm`] which determines how the
//! [`ConcreteEnv`](crate::ConcreteEnv) resizes it's memory map when needing more space.
//! [`ConcreteEnv`](crate::ConcreteEnv) resizes its memory map when needing more space.
//! This value is in [`Config`](crate::config::Config) and can be selected at runtime.
//!
//! Although, it is only used by `ConcreteEnv` if [`Env::MANUAL_RESIZE`](crate::env::Env::MANUAL_RESIZE) is `true`.
@ -27,12 +27,12 @@ use std::{num::NonZeroUsize, sync::OnceLock};
/// The function/algorithm used by the
/// database when resizing the memory map.
///
/// # TODO
/// We could test around with different algorithms.
/// Calling `heed::Env::resize` is surprisingly fast,
/// around `0.0000082s` on my machine. We could probably
/// get away with smaller and more frequent resizes.
/// **With the caveat being we are taking a `WriteGuard` to a `RwLock`.**
// # SOMEDAY
// We could experiment with different algorithms.
// Calling `heed::Env::resize` is surprisingly fast,
// around `0.0000082s` on my machine. We could probably
// get away with smaller and more frequent resizes.
// **With the caveat being we are taking a `WriteGuard` to a `RwLock`.**
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum ResizeAlgorithm {
@ -59,6 +59,8 @@ impl ResizeAlgorithm {
}
/// Maps the `self` variant to the free functions in [`crate::resize`].
///
/// This function returns the _new_ memory map size in bytes.
#[inline]
pub fn resize(&self, current_size_bytes: usize) -> NonZeroUsize {
match self {

View file

@ -6,7 +6,7 @@ use std::sync::Arc;
use crate::{
config::Config,
error::InitError,
service::{write::DatabaseWriter, DatabaseReadHandle, DatabaseWriteHandle},
service::{DatabaseReadHandle, DatabaseWriteHandle},
ConcreteEnv, Env,
};
@ -20,21 +20,11 @@ use crate::{
///
/// # Errors
/// This will forward the error if [`Env::open`] failed.
//
// INVARIANT:
// `cuprate_database` depends on the fact that this is the only
// function that hands out the handles. After that, they can be
// cloned, however they must eventually be dropped and shouldn't
// be leaked.
//
// As the reader thread-pool and writer thread both rely on the
// disconnection (drop) of these channels for shutdown behavior,
// leaking these handles could cause data to not get flushed to disk.
pub fn init(config: Config) -> Result<(DatabaseReadHandle, DatabaseWriteHandle), InitError> {
let reader_threads = config.reader_threads;
// Initialize the database itself.
let db: Arc<ConcreteEnv> = Arc::new(ConcreteEnv::open(config)?);
let db = Arc::new(ConcreteEnv::open(config)?);
// Spawn the Reader thread pool and Writer.
let readers = DatabaseReadHandle::init(&db, reader_threads);

View file

@ -5,7 +5,7 @@
//! along with the reader/writer thread-pool system.
//!
//! The thread-pool allows outside crates to communicate with it by
//! sending database [`Request`](ReadRequest)s and receiving [`Response`]s `async`hronously -
//! sending database [`Request`][req_r]s and receiving [`Response`][resp]s `async`hronously -
//! without having to actually worry and handle the database themselves.
//!
//! The system is managed by this crate, and only requires [`init`] by the user.
@ -17,9 +17,9 @@
//! - [`DatabaseReadHandle`]
//! - [`DatabaseWriteHandle`]
//!
//! The 1st allows any caller to send [`ReadRequest`]s.
//! The 1st allows any caller to send [`ReadRequest`][req_r]s.
//!
//! The 2nd allows any caller to send [`WriteRequest`]s.
//! The 2nd allows any caller to send [`WriteRequest`][req_w]s.
//!
//! The `DatabaseReadHandle` can be shared as it is cheaply [`Clone`]able, however,
//! the `DatabaseWriteHandle` cannot be cloned. There is only 1 place in Cuprate that
@ -49,6 +49,70 @@
//! An `async`hronous channel will be returned from the call.
//! This channel can be `.await`ed upon to (eventually) receive
//! the corresponding `Response` to your `Request`.
//!
//! [req_r]: cuprate_types::service::ReadRequest
//!
//! [req_w]: cuprate_types::service::WriteRequest
//!
//! [resp]: cuprate_types::service::Response
//!
//! # Example
//! Simple usage of `service`.
//!
//! ```rust
//! use hex_literal::hex;
//! use tower::{Service, ServiceExt};
//!
//! use cuprate_types::service::{ReadRequest, WriteRequest, Response};
//! use cuprate_test_utils::data::block_v16_tx0;
//!
//! use cuprate_database::{ConcreteEnv, config::ConfigBuilder, Env};
//!
//! # #[tokio::main]
//! # async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Create a configuration for the database environment.
//! let db_dir = tempfile::tempdir()?;
//! let config = ConfigBuilder::new()
//! .db_directory(db_dir.path().to_path_buf())
//! .build();
//!
//! // Initialize the database thread-pool.
//! let (mut read_handle, mut write_handle) = cuprate_database::service::init(config)?;
//!
//! // Prepare a request to write block.
//! let mut block = block_v16_tx0().clone();
//! # block.height = 0 as u64; // must be 0th height or panic in `add_block()`
//! let request = WriteRequest::WriteBlock(block);
//!
//! // Send the request.
//! // We receive back an `async` channel that will
//! // eventually yield the result when `service`
//! // is done writing the block.
//! let response_channel = write_handle.ready().await?.call(request);
//!
//! // Block write was OK.
//! let response = response_channel.await?;
//! assert_eq!(response, Response::WriteBlockOk);
//!
//! // Now, let's try getting the block hash
//! // of the block we just wrote.
//! let request = ReadRequest::BlockHash(0);
//! let response_channel = read_handle.ready().await?.call(request);
//! let response = response_channel.await?;
//! assert_eq!(
//! response,
//! Response::BlockHash(
//! hex!("43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428")
//! )
//! );
//!
//! // This causes the writer thread on the
//! // other side of this handle to exit...
//! drop(write_handle);
//! // ...and this causes the reader thread-pool to exit.
//! drop(read_handle);
//! # Ok(()) }
//! ```
mod read;
pub use read::DatabaseReadHandle;
@ -59,11 +123,8 @@ pub use write::DatabaseWriteHandle;
mod free;
pub use free::init;
mod request;
pub use request::{ReadRequest, WriteRequest};
mod response;
pub use response::Response;
// Internal type aliases for `service`.
mod types;
#[cfg(test)]
mod tests;

View file

@ -2,47 +2,38 @@
//---------------------------------------------------------------------------------------------------- Import
use std::{
collections::{HashMap, HashSet},
sync::Arc,
task::{Context, Poll},
};
use crossbeam::channel::Receiver;
use futures::{channel::oneshot, ready};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use thread_local::ThreadLocal;
use tokio::sync::{OwnedSemaphorePermit, Semaphore};
use tokio_util::sync::PollSemaphore;
use cuprate_helper::asynch::InfallibleOneshotReceiver;
use cuprate_types::{
service::{ReadRequest, Response},
ExtendedBlockHeader, OutputOnChain,
};
use crate::{
config::ReaderThreads,
error::RuntimeError,
service::{request::ReadRequest, response::Response},
ConcreteEnv,
ops::{
block::{get_block_extended_header_from_height, get_block_info},
blockchain::{cumulative_generated_coins, top_block_height},
key_image::key_image_exists,
output::id_to_output_on_chain,
},
service::types::{ResponseReceiver, ResponseResult, ResponseSender},
tables::{BlockHeights, BlockInfos, Tables},
types::{Amount, AmountIndex, BlockHeight, KeyImage, PreRctOutputId},
ConcreteEnv, DatabaseRo, Env, EnvInner,
};
//---------------------------------------------------------------------------------------------------- Types
/// The actual type of the response.
///
/// Either our [`Response`], or a database error occurred.
type ResponseResult = Result<Response, RuntimeError>;
/// The `Receiver` channel that receives the read response.
///
/// This is owned by the caller (the reader)
/// who `.await`'s for the response.
///
/// The channel itself should never fail,
/// but the actual database operation might.
type ResponseReceiver = InfallibleOneshotReceiver<ResponseResult>;
/// The `Sender` channel for the response.
///
/// The database reader thread uses this to send
/// the database result to the caller.
type ResponseSender = oneshot::Sender<ResponseResult>;
//---------------------------------------------------------------------------------------------------- DatabaseReadHandle
/// Read handle to the database.
///
@ -82,10 +73,10 @@ pub struct DatabaseReadHandle {
impl Clone for DatabaseReadHandle {
fn clone(&self) -> Self {
Self {
pool: self.pool.clone(),
pool: Arc::clone(&self.pool),
semaphore: self.semaphore.clone(),
permit: None,
env: self.env.clone(),
env: Arc::clone(&self.env),
}
}
}
@ -123,23 +114,21 @@ impl DatabaseReadHandle {
}
}
/// TODO
/// Access to the actual database environment.
///
/// # ⚠️ Warning
/// This function gives you access to the actual
/// underlying database connected to by `self`.
///
/// I.e. it allows you to read/write data _directly_
/// instead of going through a request.
///
/// Be warned that using the database directly
/// in this manner has not been tested.
#[inline]
pub const fn env(&self) -> &Arc<ConcreteEnv> {
&self.env
}
/// TODO
#[inline]
pub const fn semaphore(&self) -> &PollSemaphore {
&self.semaphore
}
/// TODO
#[inline]
pub const fn permit(&self) -> &Option<OwnedSemaphorePermit> {
&self.permit
}
}
impl tower::Service<ReadRequest> for DatabaseReadHandle {
@ -155,15 +144,14 @@ impl tower::Service<ReadRequest> for DatabaseReadHandle {
}
// Acquire a permit before returning `Ready`.
let Some(permit) = ready!(self.semaphore.poll_acquire(cx)) else {
// `self` itself owns the backing semaphore, so it can't be closed.
unreachable!();
};
let permit =
ready!(self.semaphore.poll_acquire(cx)).expect("this semaphore is never closed");
self.permit = Some(permit);
Poll::Ready(Ok(()))
}
#[inline]
fn call(&mut self, request: ReadRequest) -> Self::Future {
let permit = self
.permit
@ -181,9 +169,11 @@ impl tower::Service<ReadRequest> for DatabaseReadHandle {
//
// INVARIANT:
// The below `DatabaseReader` function impl block relies on this behavior.
let env = Arc::clone(self.env());
self.pool
.spawn(move || map_request(permit, env, request, response_sender));
let env = Arc::clone(&self.env);
self.pool.spawn(move || {
let _permit: OwnedSemaphorePermit = permit;
map_request(&env, request, response_sender);
}); // drop(permit/env);
InfallibleOneshotReceiver::from(receiver)
}
@ -193,36 +183,98 @@ impl tower::Service<ReadRequest> for DatabaseReadHandle {
// This function maps [`Request`]s to function calls
// executed by the rayon DB reader threadpool.
#[inline]
#[allow(clippy::needless_pass_by_value)]
/// Map [`Request`]'s to specific database handler functions.
///
/// This is the main entrance into all `Request` handler functions.
/// The basic structure is:
///
/// 1. `Request` is mapped to a handler function
/// 2. Handler function is called
/// 3. [`Response`] is sent
fn map_request(
_permit: OwnedSemaphorePermit, // Permit for this request
env: Arc<ConcreteEnv>, // Access to the database
env: &ConcreteEnv, // Access to the database
request: ReadRequest, // The request we must fulfill
response_sender: ResponseSender, // The channel we must send the response back to
) {
/* TODO: pre-request handling, run some code for each request? */
use ReadRequest as R;
match request {
ReadRequest::Example1 => example_handler_1(env, response_sender),
ReadRequest::Example2(x) => example_handler_2(env, response_sender, x),
ReadRequest::Example3(x) => example_handler_3(env, response_sender, x),
/* SOMEDAY: pre-request handling, run some code for each request? */
let response = match request {
R::BlockExtendedHeader(block) => block_extended_header(env, block),
R::BlockHash(block) => block_hash(env, block),
R::BlockExtendedHeaderInRange(range) => block_extended_header_in_range(env, range),
R::ChainHeight => chain_height(env),
R::GeneratedCoins => generated_coins(env),
R::Outputs(map) => outputs(env, map),
R::NumberOutputsWithAmount(vec) => number_outputs_with_amount(env, vec),
R::CheckKIsNotSpent(set) => check_k_is_not_spent(env, set),
};
if let Err(e) = response_sender.send(response) {
// TODO: use tracing.
println!("database reader failed to send response: {e:?}");
}
/* TODO: post-request handling, run some code for each request? */
/* SOMEDAY: post-request handling, run some code for each request? */
}
//---------------------------------------------------------------------------------------------------- Thread Local
/// Q: Why does this exist?
///
/// A1: `heed`'s transactions and tables are not `Sync`, so we cannot use
/// them with rayon, however, we set a feature such that they are `Send`.
///
/// A2: When sending to rayon, we want to ensure each read transaction
/// is only used by 1 thread, in order to scale reads.
///
/// <https://github.com/Cuprate/cuprate/pull/113#discussion_r1576762346>
#[inline]
fn thread_local<T: Send>(env: &impl Env) -> ThreadLocal<T> {
ThreadLocal::with_capacity(env.config().reader_threads.as_threads().get())
}
/// Take in a `ThreadLocal<impl Tables>` and return an `&impl Tables + Send`.
///
/// # Safety
/// See [`DatabaseRo`] docs.
///
/// We are safely using `UnsafeSendable` in `service`'s reader thread-pool
/// as we are pairing our usage with `ThreadLocal` - only 1 thread
/// will ever access a transaction at a time. This is an INVARIANT.
///
/// A `Mutex` was considered but:
/// - It is less performant
/// - It isn't technically needed for safety in our use-case
/// - It causes `DatabaseIter` function return issues as there is a `MutexGuard` object
///
/// <https://github.com/Cuprate/cuprate/pull/113#discussion_r1581684698>
///
/// # Notes
/// This is used for other backends as well instead of branching with `cfg_if`.
/// The other backends (as of current) are `Send + Sync` so this is fine.
/// <https://github.com/Cuprate/cuprate/pull/113#discussion_r1585618374>
macro_rules! get_tables {
($env_inner:ident, $tx_ro:ident, $tables:ident) => {{
$tables.get_or_try(|| {
#[allow(clippy::significant_drop_in_scrutinee)]
match $env_inner.open_tables($tx_ro) {
// SAFETY: see above macro doc comment.
Ok(tables) => Ok(unsafe { crate::unsafe_sendable::UnsafeSendable::new(tables) }),
Err(e) => Err(e),
}
})
}};
}
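// Typical usage inside a `rayon` handler (as in the functions below):
// each worker thread lazily opens its own read transaction + tables.
//
// let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
// let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();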
//---------------------------------------------------------------------------------------------------- Handler functions
// These are the actual functions that do stuff according to the incoming [`Request`].
//
// Each function name is a 1-1 mapping (from CamelCase -> snake_case) to
// the enum variant name, e.g: `BlockExtendedHeader` -> `block_extended_header`.
//
// Each function will return the [`Response`] that we
// should send back to the caller in [`map_request()`].
//
// INVARIANT:
// These functions are called above in `tower::Service::call()`
// using a custom threadpool which means any call to `par_*()` functions
@ -231,26 +283,211 @@ fn map_request(
// All functions below assume that this is the case, such that
// `par_*()` functions will not block the _global_ rayon thread-pool.
/// TODO
// FIXME: implement multi-transaction read atomicity.
// <https://github.com/Cuprate/cuprate/pull/113#discussion_r1576874589>.
/// [`ReadRequest::BlockExtendedHeader`].
#[inline]
#[allow(clippy::needless_pass_by_value)] // TODO: remove me
fn example_handler_1(env: Arc<ConcreteEnv>, response_sender: ResponseSender) {
let db_result = Ok(Response::Example1);
response_sender.send(db_result).unwrap();
fn block_extended_header(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult {
// Single-threaded, no `ThreadLocal` required.
let env_inner = env.env_inner();
let tx_ro = env_inner.tx_ro()?;
let tables = env_inner.open_tables(&tx_ro)?;
Ok(Response::BlockExtendedHeader(
get_block_extended_header_from_height(&block_height, &tables)?,
))
}
/// TODO
/// [`ReadRequest::BlockHash`].
#[inline]
#[allow(clippy::needless_pass_by_value)] // TODO: remove me
fn example_handler_2(env: Arc<ConcreteEnv>, response_sender: ResponseSender, x: usize) {
let db_result = Ok(Response::Example2(x));
response_sender.send(db_result).unwrap();
fn block_hash(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult {
// Single-threaded, no `ThreadLocal` required.
let env_inner = env.env_inner();
let tx_ro = env_inner.tx_ro()?;
let table_block_infos = env_inner.open_db_ro::<BlockInfos>(&tx_ro)?;
Ok(Response::BlockHash(
get_block_info(&block_height, &table_block_infos)?.block_hash,
))
}
/// TODO
/// [`ReadRequest::BlockExtendedHeaderInRange`].
#[inline]
#[allow(clippy::needless_pass_by_value)] // TODO: remove me
fn example_handler_3(env: Arc<ConcreteEnv>, response_sender: ResponseSender, x: String) {
let db_result = Ok(Response::Example3(x));
response_sender.send(db_result).unwrap();
fn block_extended_header_in_range(
env: &ConcreteEnv,
range: std::ops::Range<BlockHeight>,
) -> ResponseResult {
// Prepare tx/tables in `ThreadLocal`.
let env_inner = env.env_inner();
let tx_ro = thread_local(env);
let tables = thread_local(env);
// Collect results using `rayon`.
let vec = range
.into_par_iter()
.map(|block_height| {
let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
get_block_extended_header_from_height(&block_height, tables)
})
.collect::<Result<Vec<ExtendedBlockHeader>, RuntimeError>>()?;
Ok(Response::BlockExtendedHeaderInRange(vec))
}
/// [`ReadRequest::ChainHeight`].
#[inline]
fn chain_height(env: &ConcreteEnv) -> ResponseResult {
// Single-threaded, no `ThreadLocal` required.
let env_inner = env.env_inner();
let tx_ro = env_inner.tx_ro()?;
let table_block_heights = env_inner.open_db_ro::<BlockHeights>(&tx_ro)?;
let table_block_infos = env_inner.open_db_ro::<BlockInfos>(&tx_ro)?;
let chain_height = crate::ops::blockchain::chain_height(&table_block_heights)?;
let block_hash =
get_block_info(&chain_height.saturating_sub(1), &table_block_infos)?.block_hash;
Ok(Response::ChainHeight(chain_height, block_hash))
}
/// [`ReadRequest::GeneratedCoins`].
#[inline]
fn generated_coins(env: &ConcreteEnv) -> ResponseResult {
// Single-threaded, no `ThreadLocal` required.
let env_inner = env.env_inner();
let tx_ro = env_inner.tx_ro()?;
let table_block_heights = env_inner.open_db_ro::<BlockHeights>(&tx_ro)?;
let table_block_infos = env_inner.open_db_ro::<BlockInfos>(&tx_ro)?;
let top_height = top_block_height(&table_block_heights)?;
Ok(Response::GeneratedCoins(cumulative_generated_coins(
&top_height,
&table_block_infos,
)?))
}
/// [`ReadRequest::Outputs`].
#[inline]
fn outputs(env: &ConcreteEnv, outputs: HashMap<Amount, HashSet<AmountIndex>>) -> ResponseResult {
// Prepare tx/tables in `ThreadLocal`.
let env_inner = env.env_inner();
let tx_ro = thread_local(env);
let tables = thread_local(env);
// The 2nd mapping function.
// This is pulled out from the below `map()` for readability.
let inner_map = |amount, amount_index| -> Result<(AmountIndex, OutputOnChain), RuntimeError> {
let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
let id = PreRctOutputId {
amount,
amount_index,
};
let output_on_chain = id_to_output_on_chain(&id, tables)?;
Ok((amount_index, output_on_chain))
};
// Collect results using `rayon`.
let map = outputs
.into_par_iter()
.map(|(amount, amount_index_set)| {
Ok((
amount,
amount_index_set
.into_par_iter()
.map(|amount_index| inner_map(amount, amount_index))
.collect::<Result<HashMap<AmountIndex, OutputOnChain>, RuntimeError>>()?,
))
})
.collect::<Result<HashMap<Amount, HashMap<AmountIndex, OutputOnChain>>, RuntimeError>>()?;
Ok(Response::Outputs(map))
}
/// [`ReadRequest::NumberOutputsWithAmount`].
#[inline]
fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec<Amount>) -> ResponseResult {
// Prepare tx/tables in `ThreadLocal`.
let env_inner = env.env_inner();
let tx_ro = thread_local(env);
let tables = thread_local(env);
// Cache the amount of RCT outputs once.
// INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`
#[allow(clippy::cast_possible_truncation)]
let num_rct_outputs = {
let tx_ro = env_inner.tx_ro()?;
let tables = env_inner.open_tables(&tx_ro)?;
tables.rct_outputs().len()? as usize
};
// Collect results using `rayon`.
let map = amounts
.into_par_iter()
.map(|amount| {
let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
if amount == 0 {
// v2 transactions.
Ok((amount, num_rct_outputs))
} else {
// v1 transactions.
match tables.num_outputs().get(&amount) {
// INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`
#[allow(clippy::cast_possible_truncation)]
Ok(count) => Ok((amount, count as usize)),
// If we get a request for an `amount` that doesn't exist,
// we return `0` instead of an error.
Err(RuntimeError::KeyNotFound) => Ok((amount, 0)),
Err(e) => Err(e),
}
}
})
.collect::<Result<HashMap<Amount, usize>, RuntimeError>>()?;
Ok(Response::NumberOutputsWithAmount(map))
}
/// [`ReadRequest::CheckKIsNotSpent`].
#[inline]
fn check_k_is_not_spent(env: &ConcreteEnv, key_images: HashSet<KeyImage>) -> ResponseResult {
// Prepare tx/tables in `ThreadLocal`.
let env_inner = env.env_inner();
let tx_ro = thread_local(env);
let tables = thread_local(env);
// Key image check function.
let key_image_exists = |key_image| {
let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
key_image_exists(&key_image, tables.key_images())
};
// FIXME:
// Create/use `enum cuprate_types::Exist { Does, DoesNot }`
// or similar instead of `bool` for clarity.
// <https://github.com/Cuprate/cuprate/pull/113#discussion_r1581536526>
//
// Collect results using `rayon`.
match key_images
.into_par_iter()
.map(key_image_exists)
// If the result is either:
// `Ok(true)` => a key image was found, return early
// `Err` => an error was found, return early
//
// Else, `Ok(false)` will continue the iterator.
.find_any(|result| !matches!(result, Ok(false)))
{
None | Some(Ok(false)) => Ok(Response::CheckKIsNotSpent(true)), // Key image was NOT found.
Some(Ok(true)) => Ok(Response::CheckKIsNotSpent(false)), // Key image was found.
Some(Err(e)) => Err(e), // A database error occurred.
}
}

View file

@ -1,41 +0,0 @@
//! Read/write `Request`s to the database.
//!
//! TODO: could add `strum` derives.
//---------------------------------------------------------------------------------------------------- Import
//---------------------------------------------------------------------------------------------------- Constants
//---------------------------------------------------------------------------------------------------- ReadRequest
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
/// A read request to the database.
pub enum ReadRequest {
/// TODO
Example1,
/// TODO
Example2(usize),
/// TODO
Example3(String),
}
//---------------------------------------------------------------------------------------------------- WriteRequest
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
/// A write request to the database.
pub enum WriteRequest {
/// TODO
Example1,
/// TODO
Example2(usize),
/// TODO
Example3(String),
}
//---------------------------------------------------------------------------------------------------- IMPL
//---------------------------------------------------------------------------------------------------- Trait Impl
//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
mod test {
// use super::*;
}

View file

@ -1,38 +0,0 @@
//! Read/write `Response`'s from the database.
//!
//! TODO: could add `strum` derives.
//---------------------------------------------------------------------------------------------------- Import
//---------------------------------------------------------------------------------------------------- Constants
//---------------------------------------------------------------------------------------------------- Response
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
/// A response from the database.
///
/// TODO
pub enum Response {
//-------------------------------------------------------- Read responses
/// TODO
Example1,
/// TODO
Example2(usize),
/// TODO
Example3(String),
//-------------------------------------------------------- Write responses
/// The response
///
/// TODO
ExampleWriteResponse, // Probably will be just `Ok`
}
//---------------------------------------------------------------------------------------------------- IMPL
//---------------------------------------------------------------------------------------------------- Trait Impl
//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
mod test {
// use super::*;
}

View file

@ -1,76 +1,377 @@
//! `crate::service` tests.
//!
//! This module contains general tests for the `service` implementation.
//!
//! Testing a thread-pool is slightly more complicated,
//! so this file provides a shared `test_template()` used by the tests below.
// This is only imported on `#[cfg(test)]` in `mod.rs`.
#![allow(unused_mut, clippy::significant_drop_tightening)]
#![allow(clippy::await_holding_lock, clippy::too_many_lines)]
//---------------------------------------------------------------------------------------------------- Use
use tower::{Service, ServiceExt};
use crate::{
config::Config,
service::{init, DatabaseReadHandle, DatabaseWriteHandle, ReadRequest, Response, WriteRequest},
use std::{
collections::{HashMap, HashSet},
sync::Arc,
};
//---------------------------------------------------------------------------------------------------- Tests
use pretty_assertions::assert_eq;
use tower::{Service, ServiceExt};
use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3};
use cuprate_types::{
service::{ReadRequest, Response, WriteRequest},
OutputOnChain, VerifiedBlockInformation,
};
use crate::{
config::ConfigBuilder,
ops::{
block::{get_block_extended_header_from_height, get_block_info},
blockchain::chain_height,
output::id_to_output_on_chain,
},
service::{init, DatabaseReadHandle, DatabaseWriteHandle},
tables::{Tables, TablesIter},
tests::AssertTableLen,
types::{Amount, AmountIndex, PreRctOutputId},
ConcreteEnv, DatabaseIter, DatabaseRo, Env, EnvInner, RuntimeError,
};
//---------------------------------------------------------------------------------------------------- Helper functions
/// Initialize the `service`.
fn init_service() -> (DatabaseReadHandle, DatabaseWriteHandle, tempfile::TempDir) {
fn init_service() -> (
DatabaseReadHandle,
DatabaseWriteHandle,
Arc<ConcreteEnv>,
tempfile::TempDir,
) {
let tempdir = tempfile::tempdir().unwrap();
let config = Config::low_power(Some(tempdir.path().into()));
let config = ConfigBuilder::new()
.db_directory(tempdir.path().into())
.low_power()
.build();
let (reader, writer) = init(config).unwrap();
(reader, writer, tempdir)
let env = reader.env().clone();
(reader, writer, env, tempdir)
}
/// This is the template used in the actual test functions below.
///
/// - Send write request(s)
/// - Receive response(s)
/// - Assert proper tables were mutated
/// - Assert read requests lead to expected responses
#[allow(clippy::future_not_send)] // INVARIANT: tests are using a single threaded runtime
async fn test_template(
// Which block(s) to add?
block_fns: &[fn() -> &'static VerifiedBlockInformation],
// Total amount of generated coins after the block(s) have been added.
cumulative_generated_coins: u64,
// What should the table lengths be after the block(s) have been added?
assert_table_len: AssertTableLen,
) {
//----------------------------------------------------------------------- Write requests
let (reader, mut writer, env, _tempdir) = init_service();
let env_inner = env.env_inner();
let tx_ro = env_inner.tx_ro().unwrap();
let tables = env_inner.open_tables(&tx_ro).unwrap();
// HACK: `add_block()` asserts that blocks with non-sequential heights
// cannot be added; to get around this, manually edit the block height.
for (i, block_fn) in block_fns.iter().enumerate() {
let mut block = block_fn().clone();
block.height = i as u64;
// Request a block to be written, assert it was written.
let request = WriteRequest::WriteBlock(block);
let response_channel = writer.call(request);
let response = response_channel.await.unwrap();
assert_eq!(response, Response::WriteBlockOk);
}
//----------------------------------------------------------------------- Reset the transaction
drop(tables);
drop(tx_ro);
let tx_ro = env_inner.tx_ro().unwrap();
let tables = env_inner.open_tables(&tx_ro).unwrap();
//----------------------------------------------------------------------- Assert all table lengths are correct
assert_table_len.assert(&tables);
//----------------------------------------------------------------------- Read request prep
// Next few lines are just for preparing the expected responses,
// see further below for usage.
let extended_block_header_0 = Ok(Response::BlockExtendedHeader(
get_block_extended_header_from_height(&0, &tables).unwrap(),
));
let extended_block_header_1 = if block_fns.len() > 1 {
Ok(Response::BlockExtendedHeader(
get_block_extended_header_from_height(&1, &tables).unwrap(),
))
} else {
Err(RuntimeError::KeyNotFound)
};
let block_hash_0 = Ok(Response::BlockHash(
get_block_info(&0, tables.block_infos()).unwrap().block_hash,
));
let block_hash_1 = if block_fns.len() > 1 {
Ok(Response::BlockHash(
get_block_info(&1, tables.block_infos()).unwrap().block_hash,
))
} else {
Err(RuntimeError::KeyNotFound)
};
let range_0_1 = Ok(Response::BlockExtendedHeaderInRange(vec![
get_block_extended_header_from_height(&0, &tables).unwrap(),
]));
let range_0_2 = if block_fns.len() >= 2 {
Ok(Response::BlockExtendedHeaderInRange(vec![
get_block_extended_header_from_height(&0, &tables).unwrap(),
get_block_extended_header_from_height(&1, &tables).unwrap(),
]))
} else {
Err(RuntimeError::KeyNotFound)
};
let chain_height = {
let height = chain_height(tables.block_heights()).unwrap();
let block_info = get_block_info(&height.saturating_sub(1), tables.block_infos()).unwrap();
Ok(Response::ChainHeight(height, block_info.block_hash))
};
let cumulative_generated_coins = Ok(Response::GeneratedCoins(cumulative_generated_coins));
let num_req = tables
.outputs_iter()
.keys()
.unwrap()
.map(Result::unwrap)
.map(|key| key.amount)
.collect::<Vec<Amount>>();
let num_resp = Ok(Response::NumberOutputsWithAmount(
num_req
.iter()
.map(|amount| match tables.num_outputs().get(amount) {
// INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`
#[allow(clippy::cast_possible_truncation)]
Ok(count) => (*amount, count as usize),
Err(RuntimeError::KeyNotFound) => (*amount, 0),
Err(e) => panic!("{e:?}"),
})
.collect::<HashMap<Amount, usize>>(),
));
// Contains a fake non-spent key-image.
let ki_req = HashSet::from([[0; 32]]);
let ki_resp = Ok(Response::CheckKIsNotSpent(true));
//----------------------------------------------------------------------- Assert expected response
// Assert read requests lead to the expected responses.
for (request, expected_response) in [
(ReadRequest::BlockExtendedHeader(0), extended_block_header_0),
(ReadRequest::BlockExtendedHeader(1), extended_block_header_1),
(ReadRequest::BlockHash(0), block_hash_0),
(ReadRequest::BlockHash(1), block_hash_1),
(ReadRequest::BlockExtendedHeaderInRange(0..1), range_0_1),
(ReadRequest::BlockExtendedHeaderInRange(0..2), range_0_2),
(ReadRequest::ChainHeight, chain_height),
(ReadRequest::GeneratedCoins, cumulative_generated_coins),
(ReadRequest::NumberOutputsWithAmount(num_req), num_resp),
(ReadRequest::CheckKIsNotSpent(ki_req), ki_resp),
] {
let response = reader.clone().oneshot(request).await;
println!("response: {response:#?}, expected_response: {expected_response:#?}");
match response {
Ok(resp) => assert_eq!(resp, expected_response.unwrap()),
// A `matches!` with a variable pattern always matches (it binds
// rather than compares), so assert both sides are errors instead.
Err(_) => assert!(expected_response.is_err()),
}
}
//----------------------------------------------------------------------- Key image checks
// Assert each key image we inserted comes back as "spent".
for key_image in tables.key_images_iter().keys().unwrap() {
let key_image = key_image.unwrap();
let request = ReadRequest::CheckKIsNotSpent(HashSet::from([key_image]));
let response = reader.clone().oneshot(request).await;
println!("response: {response:#?}, key_image: {key_image:#?}");
assert_eq!(response.unwrap(), Response::CheckKIsNotSpent(false));
}
//----------------------------------------------------------------------- Output checks
// Create the map of amounts and amount indices.
//
// FIXME: There's definitely a better way to map
// `Vec<PreRctOutputId>` -> `HashMap<u64, HashSet<u64>>`
let (map, output_count) = {
let mut ids = tables
.outputs_iter()
.keys()
.unwrap()
.map(Result::unwrap)
.collect::<Vec<PreRctOutputId>>();
ids.extend(
tables
.rct_outputs_iter()
.keys()
.unwrap()
.map(Result::unwrap)
.map(|amount_index| PreRctOutputId {
amount: 0,
amount_index,
}),
);
// Used later to assert the amount of `Output`s
// returned in the `Response` is equal to the
// amount we asked for.
let output_count = ids.len();
let mut map = HashMap::<Amount, HashSet<AmountIndex>>::new();
for id in ids {
map.entry(id.amount)
.and_modify(|set| {
set.insert(id.amount_index);
})
.or_insert_with(|| HashSet::from([id.amount_index]));
}
(map, output_count)
};
// Map `Output` -> `OutputOnChain`
// This is the expected output from the `Response`.
let outputs_on_chain = map
.iter()
.flat_map(|(amount, amount_index_set)| {
amount_index_set.iter().map(|amount_index| {
let id = PreRctOutputId {
amount: *amount,
amount_index: *amount_index,
};
id_to_output_on_chain(&id, &tables).unwrap()
})
})
.collect::<Vec<OutputOnChain>>();
// Send a request for every output we inserted before.
let request = ReadRequest::Outputs(map.clone());
let response = reader.clone().oneshot(request).await;
println!("Response::Outputs response: {response:#?}");
let Ok(Response::Outputs(response)) = response else {
panic!("{response:#?}")
};
// Assert the number of `Amount`s is the same.
assert_eq!(map.len(), response.len());
// Assert we get back the same map of
// `Amount`'s and `AmountIndex`'s.
let mut response_output_count = 0;
for (amount, output_map) in response {
let amount_index_set = map.get(&amount).unwrap();
for (amount_index, output) in output_map {
response_output_count += 1;
assert!(amount_index_set.contains(&amount_index));
assert!(outputs_on_chain.contains(&output));
}
}
// Assert the amount of `Output`'s returned is as expected.
let table_output_len = tables.outputs().len().unwrap() + tables.rct_outputs().len().unwrap();
assert_eq!(output_count as u64, table_output_len);
assert_eq!(output_count, response_output_count);
}
//---------------------------------------------------------------------------------------------------- Tests
/// Simply `init()` the service and then drop it.
///
/// If this test fails, something is very wrong.
#[test]
fn init_drop() {
let (reader, writer, _tempdir) = init_service();
let (_reader, _writer, _env, _tempdir) = init_service();
}
/// Send a read request, and receive a response,
/// asserting the response the expected value.
/// Assert write/read correctness of [`block_v1_tx2`].
#[tokio::test]
async fn read_request() {
let (reader, writer, _tempdir) = init_service();
for (request, expected_response) in [
(ReadRequest::Example1, Response::Example1),
(ReadRequest::Example2(123), Response::Example2(123)),
(
ReadRequest::Example3("hello".into()),
Response::Example3("hello".into()),
),
] {
// This calls `poll_ready()` asserting we have a permit before `call()`.
let response_channel = reader.clone().oneshot(request);
let response = response_channel.await.unwrap();
assert_eq!(response, expected_response);
}
async fn v1_tx2() {
test_template(
&[block_v1_tx2],
14_535_350_982_449,
AssertTableLen {
block_infos: 1,
block_blobs: 1,
block_heights: 1,
key_images: 65,
num_outputs: 41,
pruned_tx_blobs: 0,
prunable_hashes: 0,
outputs: 111,
prunable_tx_blobs: 0,
rct_outputs: 0,
tx_blobs: 3,
tx_ids: 3,
tx_heights: 3,
tx_unlock_time: 1,
},
)
.await;
}
/// Send a write request, and receive a response,
/// asserting the response the expected value.
/// Assert write/read correctness of [`block_v9_tx3`].
#[tokio::test]
async fn write_request() {
let (reader, mut writer, _tempdir) = init_service();
for (request, expected_response) in [
(WriteRequest::Example1, Response::Example1),
(WriteRequest::Example2(123), Response::Example2(123)),
(
WriteRequest::Example3("hello".into()),
Response::Example3("hello".into()),
),
] {
let response_channel = writer.call(request);
let response = response_channel.await.unwrap();
assert_eq!(response, expected_response);
}
async fn v9_tx3() {
test_template(
&[block_v9_tx3],
3_403_774_022_163,
AssertTableLen {
block_infos: 1,
block_blobs: 1,
block_heights: 1,
key_images: 4,
num_outputs: 0,
pruned_tx_blobs: 0,
prunable_hashes: 0,
outputs: 0,
prunable_tx_blobs: 0,
rct_outputs: 7,
tx_blobs: 4,
tx_ids: 4,
tx_heights: 4,
tx_unlock_time: 1,
},
)
.await;
}
/// Assert write/read correctness of [`block_v16_tx0`].
#[tokio::test]
async fn v16_tx0() {
test_template(
&[block_v16_tx0],
600_000_000_000,
AssertTableLen {
block_infos: 1,
block_blobs: 1,
block_heights: 1,
key_images: 0,
num_outputs: 0,
pruned_tx_blobs: 0,
prunable_hashes: 0,
outputs: 0,
prunable_tx_blobs: 0,
rct_outputs: 1,
tx_blobs: 1,
tx_ids: 1,
tx_heights: 1,
tx_unlock_time: 1,
},
)
.await;
}

View file

@ -0,0 +1,31 @@
//! Database service type aliases.
//!
//! Only used internally for our `tower::Service` impls.
//---------------------------------------------------------------------------------------------------- Use
use futures::channel::oneshot::Sender;
use cuprate_helper::asynch::InfallibleOneshotReceiver;
use cuprate_types::service::Response;
use crate::error::RuntimeError;
//---------------------------------------------------------------------------------------------------- Types
/// The actual type of the response.
///
/// Either our [`Response`], or a database error occurred.
pub(super) type ResponseResult = Result<Response, RuntimeError>;
/// The `Receiver` channel that receives the read response.
///
/// This is owned by the caller (the reader/writer thread)
/// who `.await`'s for the response.
///
/// The channel itself should never fail,
/// but the actual database operation might.
pub(super) type ResponseReceiver = InfallibleOneshotReceiver<ResponseResult>;
/// The `Sender` channel for the response.
///
/// The database reader/writer thread uses this to send the database result to the caller.
pub(super) type ResponseSender = Sender<ResponseResult>;

View file

@ -9,31 +9,22 @@ use std::{
use futures::channel::oneshot;
use cuprate_helper::asynch::InfallibleOneshotReceiver;
use cuprate_types::{
service::{Response, WriteRequest},
VerifiedBlockInformation,
};
use crate::{
env::{Env, EnvInner},
error::RuntimeError,
service::{request::WriteRequest, response::Response},
ConcreteEnv, Env,
service::types::{ResponseReceiver, ResponseResult, ResponseSender},
transaction::TxRw,
ConcreteEnv,
};
//---------------------------------------------------------------------------------------------------- Constants
/// Name of the writer thread.
const WRITER_THREAD_NAME: &str = "cuprate_helper::service::read::DatabaseWriter";
//---------------------------------------------------------------------------------------------------- Types
/// The actual type of the response.
///
/// Either our [Response], or a database error occurred.
type ResponseResult = Result<Response, RuntimeError>;
/// The `Receiver` channel that receives the write response.
///
/// The channel itself should never fail,
/// but the actual database operation might.
type ResponseReceiver = InfallibleOneshotReceiver<ResponseResult>;
/// The `Sender` channel for the response.
type ResponseSender = oneshot::Sender<ResponseResult>;
const WRITER_THREAD_NAME: &str = concat!(module_path!(), "::DatabaseWriter");
//---------------------------------------------------------------------------------------------------- DatabaseWriteHandle
/// Write handle to the database.
@ -57,7 +48,7 @@ impl DatabaseWriteHandle {
/// Initialize the single `DatabaseWriter` thread.
#[cold]
#[inline(never)] // Only called once.
pub(super) fn init(db: Arc<ConcreteEnv>) -> Self {
pub(super) fn init(env: Arc<ConcreteEnv>) -> Self {
// Initialize `Request/Response` channels.
let (sender, receiver) = crossbeam::channel::unbounded();
@ -65,7 +56,7 @@ impl DatabaseWriteHandle {
std::thread::Builder::new()
.name(WRITER_THREAD_NAME.into())
.spawn(move || {
let this = DatabaseWriter { receiver, db };
let this = DatabaseWriter { receiver, env };
DatabaseWriter::main(this);
})
.unwrap();
@ -107,7 +98,7 @@ pub(super) struct DatabaseWriter {
receiver: crossbeam::channel::Receiver<(WriteRequest, ResponseSender)>,
/// Access to the database.
db: Arc<ConcreteEnv>,
env: Arc<ConcreteEnv>,
}
impl Drop for DatabaseWriter {
@ -119,7 +110,8 @@ impl Drop for DatabaseWriter {
impl DatabaseWriter {
/// The `DatabaseWriter`'s main function.
///
/// The writer just loops in this function.
/// The writer just loops in this function, handling requests forever
/// until the request channel is dropped or a panic occurs.
#[cold]
#[inline(never)] // Only called once.
fn main(self) {
@ -127,7 +119,7 @@ impl DatabaseWriter {
// 2. Map request to some database function
// 3. Execute that function, get the result
// 4. Return the result via channel
loop {
'main: loop {
let Ok((request, response_sender)) = self.receiver.recv() else {
// If this receive errors, it means that the channel is empty
// and disconnected, meaning the other side (all senders) have
@ -140,60 +132,114 @@ impl DatabaseWriter {
return;
};
/// How many times should we retry handling the request on resize errors?
///
/// This is 1 on automatically resizing databases, meaning there is only 1 iteration.
const REQUEST_RETRY_LIMIT: usize = if ConcreteEnv::MANUAL_RESIZE { 3 } else { 1 };
// Map [`Request`]'s to specific database functions.
match request {
WriteRequest::Example1 => self.example_handler_1(response_sender),
WriteRequest::Example2(x) => self.example_handler_2(response_sender, x),
WriteRequest::Example3(x) => self.example_handler_3(response_sender, x),
//
// Both will:
// 1. Map the request to a function
// 2. Call the function
// 3. (manual resize only) If resize is needed, resize and retry
// 4. (manual resize only) Redo step {1, 2}
// 5. Send the function's `Result` back to the requester
//
// FIXME: there's probably a more elegant way
// to represent this retry logic with recursive
// functions instead of a loop.
'retry: for retry in 0..REQUEST_RETRY_LIMIT {
// FIXME: if there will only ever be 1 write request,
// this won't have to be an enum.
let response = match &request {
WriteRequest::WriteBlock(block) => write_block(&self.env, block),
};
// If the database needs to resize, do so.
if ConcreteEnv::MANUAL_RESIZE && matches!(response, Err(RuntimeError::ResizeNeeded))
{
// If this is the last iteration of the outer `for` loop and we
// encounter a resize error _again_, it means something is wrong.
assert_ne!(
retry, REQUEST_RETRY_LIMIT - 1,
"database resize failed maximum of {REQUEST_RETRY_LIMIT} times"
);
// Resize the map, and retry the request handling loop.
//
// FIXME:
// We could pass in custom resizes to account for
// batches, i.e., we're about to add ~5GB of data,
// add that much instead of the default 1GB.
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L665-L695>
let old = self.env.current_map_size();
let new = self.env.resize_map(None);
// TODO: use tracing.
println!("resizing database memory map, old: {old}B, new: {new}B");
// Try handling the request again.
continue 'retry;
}
// Automatically resizing databases should not be returning a resize error.
#[cfg(debug_assertions)]
if !ConcreteEnv::MANUAL_RESIZE {
assert!(
!matches!(response, Err(RuntimeError::ResizeNeeded)),
"auto-resizing database returned a ResizeNeeded error"
);
}
// Send the response back, whether it's an `Ok` or `Err`.
if let Err(e) = response_sender.send(response) {
// TODO: use tracing.
println!("database writer failed to send response: {e:?}");
}
continue 'main;
}
// Above retry loop should either:
// - continue to the next `'main` loop, or...
// - ...retry until panic
unreachable!();
}
}
}
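// A sketch of the recursive alternative alluded to in the FIXME above
// (illustrative only; `handle_with_retry` and `retries_left` are
// hypothetical names, not part of this diff):
//
// fn handle_with_retry(
//     env: &ConcreteEnv,
//     request: &WriteRequest,
//     retries_left: usize,
// ) -> ResponseResult {
//     let response = match request {
//         WriteRequest::WriteBlock(block) => write_block(env, block),
//     };
//     match response {
//         // On a resize error, grow the map and recurse.
//         Err(RuntimeError::ResizeNeeded)
//             if ConcreteEnv::MANUAL_RESIZE && retries_left > 0 =>
//         {
//             env.resize_map(None);
//             handle_with_retry(env, request, retries_left - 1)
//         }
//         other => other,
//     }
// }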
//---------------------------------------------------------------------------------------------------- Handler functions
// These are the actual functions that do stuff according to the incoming [`Request`].
//
// Each function name is a 1-1 mapping (from CamelCase -> snake_case) to
// the enum variant name, e.g: `BlockExtendedHeader` -> `block_extended_header`.
//
// Each function will return the [`Response`] that we
// should send back to the caller in [`map_request()`].
/// [`WriteRequest::WriteBlock`].
#[inline]
fn write_block(env: &ConcreteEnv, block: &VerifiedBlockInformation) -> ResponseResult {
let env_inner = env.env_inner();
let tx_rw = env_inner.tx_rw()?;
let result = {
let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?;
crate::ops::block::add_block(block, &mut tables_mut)
};
match result {
Ok(()) => {
TxRw::commit(tx_rw)?;
Ok(Response::WriteBlockOk)
}
Err(e) => {
// INVARIANT: ensure database atomicity by aborting
// the transaction on `add_block()` failures.
TxRw::abort(tx_rw)
.expect("could not maintain database atomicity by aborting write transaction");
Err(e)
}
}
/// Resize the database's memory map.
fn resize_map(&self) {
// The compiler most likely optimizes out this
// entire function call if this returns here.
if !ConcreteEnv::MANUAL_RESIZE {
return;
}
// INVARIANT:
// [`Env`]'s that are `MANUAL_RESIZE` are expected to implement
// their internals such that we have exclusive access when calling
// this function. We do not handle the exclusion part, `resize_map()`
// itself does. The `heed` backend does this with `RwLock`.
//
// We need mutual exclusion due to:
// <http://www.lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5>
self.db.resize_map(None);
// TODO:
// We could pass in custom resizes to account for
// batch transactions, i.e., we're about to add ~5GB
// of data, add that much instead of the default 1GB.
// <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L665-L695>
}
/// TODO
#[inline]
#[allow(clippy::unused_self)] // TODO: remove me
fn example_handler_1(&self, response_sender: ResponseSender) {
let db_result = Ok(Response::Example1);
response_sender.send(db_result).unwrap();
}
/// TODO
#[inline]
#[allow(clippy::unused_self)] // TODO: remove me
fn example_handler_2(&self, response_sender: ResponseSender, x: usize) {
let db_result = Ok(Response::Example2(x));
response_sender.send(db_result).unwrap();
}
/// TODO
#[inline]
#[allow(clippy::unused_self)] // TODO: remove me
fn example_handler_3(&self, response_sender: ResponseSender, x: String) {
let db_result = Ok(Response::Example3(x));
response_sender.send(db_result).unwrap();
}
}

View file

@ -1,15 +1,9 @@
//! (De)serialization for table keys & values.
//---------------------------------------------------------------------------------------------------- Import
use std::{
borrow::{Borrow, Cow},
char::ToLowercase,
fmt::Debug,
io::{Read, Write},
sync::Arc,
};
use std::{borrow::Borrow, fmt::Debug};
use bytemuck::{Pod, Zeroable};
use bytemuck::Pod;
use bytes::Bytes;
//---------------------------------------------------------------------------------------------------- Storable
@ -25,16 +19,14 @@ use bytes::Bytes;
/// Any type that implements:
/// - [`bytemuck::Pod`]
/// - [`Debug`]
/// - [`ToOwned`]
///
/// will automatically implement [`Storable`].
///
/// This includes:
/// - Most primitive types
/// - All types in [`tables`](crate::tables)
/// - Slices, e.g, `[T] where T: Storable`
///
/// See [`StorableVec`] for storing slices of `T: Storable`.
/// See [`StorableVec`] & [`StorableBytes`] for storing slices of `T: Storable`.
///
/// ```rust
/// # use cuprate_database::*;
@ -142,6 +134,7 @@ where
///
/// This is needed as `impl Storable for Vec<T>` runs into impl conflicts.
///
/// # Example
/// ```rust
/// # use cuprate_database::*;
/// //---------------------------------------------------- u8
@ -284,7 +277,7 @@ mod test {
println!("serialized: {se:?}, deserialized: {de:?}\n");
// Assert we wrote correct amount of bytes.
if let Some(len) = T::BYTE_LENGTH {
if T::BYTE_LENGTH.is_some() {
assert_eq!(se.len(), expected_bytes.len());
}
// Assert the data is the same.

View file

@ -1,7 +1,6 @@
//! Database table abstraction; `trait Table`.
//---------------------------------------------------------------------------------------------------- Import
use std::fmt::Debug;
use crate::{key::Key, storable::Storable};
@ -13,7 +12,7 @@ use crate::{key::Key, storable::Storable};
/// ## Sealed
/// This trait is [`Sealed`](https://rust-lang.github.io/api-guidelines/future-proofing.html#sealed-traits-protect-against-downstream-implementations-c-sealed).
///
/// It is, and can only be implemented on the types inside [`tables`][crate::tables].
/// It is only implemented on the types inside [`tables`][crate::tables].
pub trait Table: crate::tables::private::Sealed + 'static {
/// Name of the database table.
const NAME: &'static str;

View file

@ -1,22 +1,35 @@
//! Database tables.
//!
//! This module contains all the table definitions used by `cuprate-database`.
//! # Table marker structs
//! This module contains all the table definitions used by `cuprate_database`.
//!
//! The zero-sized structs here represent the table types;
//! they are all essentially marker types that implement [`Table`].
//!
//! Table structs are `CamelCase`, and their static string
//! names used by the actual database backend are `snake_case`.
//!
//! For example: [`BlockBlobs`] -> `block_blobs`.
//!
//! # Traits
//! This module also contains a set of traits for
//! accessing _all_ tables defined here at once.
//!
//! For example, this is the object returned by [`EnvInner::open_tables`](crate::EnvInner::open_tables).
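//!
//! ```rust,ignore
//! // A small sketch of the naming convention described above,
//! // using the `Table::NAME` constant:
//! assert_eq!(BlockBlobs::NAME, "block_blobs");
//! ```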
//---------------------------------------------------------------------------------------------------- Import
use crate::{
database::{DatabaseIter, DatabaseRo, DatabaseRw},
table::Table,
types::{
Amount, AmountIndex, AmountIndices, BlockBlob, BlockHash, BlockHeight, BlockInfoV1,
BlockInfoV2, BlockInfoV3, KeyImage, Output, PreRctOutputId, PrunableBlob, PrunableHash,
PrunedBlob, RctOutput, TxHash, TxId, UnlockTime,
Amount, AmountIndex, AmountIndices, BlockBlob, BlockHash, BlockHeight, BlockInfo, KeyImage,
Output, PreRctOutputId, PrunableBlob, PrunableHash, PrunedBlob, RctOutput, TxBlob, TxHash,
TxId, UnlockTime,
},
};
//---------------------------------------------------------------------------------------------------- Tables
//---------------------------------------------------------------------------------------------------- Sealed
/// Private module, should not be accessible outside this crate.
///
/// Used to block outsiders implementing [`Table`].
/// All [`Table`] types must also implement [`Sealed`].
pub(super) mod private {
/// Private sealed trait.
///
@ -24,6 +37,272 @@ pub(super) mod private {
pub trait Sealed {}
}
//---------------------------------------------------------------------------------------------------- `trait Tables[Mut]`
/// Creates:
/// - `pub trait Tables`
/// - `pub trait TablesIter`
/// - `pub trait TablesMut`
/// - Blanket implementation for `(tuples, containing, all, open, database, tables, ...)`
///
/// For why this exists, see: <https://github.com/Cuprate/cuprate/pull/102#pullrequestreview-1978348871>.
macro_rules! define_trait_tables {
($(
// The `T: Table` type The index in a tuple
// | containing all tables
// v v
$table:ident => $index:literal
),* $(,)?) => { paste::paste! {
/// Object containing all opened [`Table`]s in read-only mode.
///
/// This is an encapsulated object that contains all
/// available [`Table`]'s in read-only mode.
///
/// It is a `Sealed` trait and is only implemented on a
/// `(tuple, containing, all, table, types, ...)`.
///
/// This is used to return a _single_ object from functions like
/// [`EnvInner::open_tables`](crate::EnvInner::open_tables) rather
/// than the tuple containing the tables itself.
///
/// To replace `tuple.0` style indexing, `field_accessor_functions()`
/// are provided on this trait, which essentially map the object to
/// fields containing the particular database table, for example:
/// ```rust,ignore
/// let tables = open_tables();
///
/// // The accessor function `block_infos()` returns the field
/// // containing an open database table for `BlockInfos`.
/// let _ = tables.block_infos();
/// ```
///
/// See also:
/// - [`TablesMut`]
/// - [`TablesIter`]
pub trait Tables: private::Sealed {
// This expands to creating `fn field_accessor_functions()`
// for each passed `$table` type.
//
// It is essentially a mapping to the field
// containing the proper opened database table.
//
// The name of each generated function is
// the table type in `snake_case`, e.g., `block_infos()`.
$(
/// Access an opened
#[doc = concat!("[`", stringify!($table), "`]")]
/// database.
fn [<$table:snake>](&self) -> &impl DatabaseRo<$table>;
)*
/// This returns `true` if all tables are empty.
///
/// # Errors
/// This returns errors on regular database errors.
fn all_tables_empty(&self) -> Result<bool, $crate::error::RuntimeError>;
}
/// Object containing all opened [`Table`]s in read + iter mode.
///
/// This is the same as [`Tables`] but includes `_iter()` variants.
///
/// Note that this trait has `Tables` as a supertrait,
/// meaning it can use all of its functions as well.
///
/// See [`Tables`] for documentation - this trait exists for the same reasons.
pub trait TablesIter: private::Sealed + Tables {
$(
/// Access an opened read-only + iterable
#[doc = concat!("[`", stringify!($table), "`]")]
/// database.
fn [<$table:snake _iter>](&self) -> &(impl DatabaseRo<$table> + DatabaseIter<$table>);
)*
}
/// Object containing all opened [`Table`]s in write mode.
///
/// This is the same as [`Tables`] but for mutable accesses.
///
/// Note that this trait has `Tables` as a supertrait,
/// meaning it can use all of its functions as well.
///
/// See [`Tables`] for documentation - this trait exists for the same reasons.
pub trait TablesMut: private::Sealed + Tables {
$(
/// Access an opened
#[doc = concat!("[`", stringify!($table), "`]")]
/// database.
fn [<$table:snake _mut>](&mut self) -> &mut impl DatabaseRw<$table>;
)*
}
// Implement `Sealed` for the tuple containing all table types.
impl<$([<$table:upper>]),*> private::Sealed for ($([<$table:upper>]),*) {}
// This creates a blanket-implementation for
// `(tuple, containing, all, table, types)`.
//
// There is a generic defined here _for each_ `$table` input.
// Specifically, the generic letters are just the table types in UPPERCASE.
// Concretely, this expands to something like:
// ```rust
// impl<BLOCKINFOS, BLOCKBLOBS, BLOCKHEIGHTS, [...]>
// ```
impl<$([<$table:upper>]),*> Tables
// We are implementing `Tables` on a tuple that
// contains all those generics specified, i.e.,
// a tuple containing all open table types.
//
// Concretely, this expands to something like:
// ```rust
// (BLOCKINFOS, BLOCKBLOBS, BLOCKHEIGHTS, [...])
// ```
// which is just a tuple of the generics defined above.
for ($([<$table:upper>]),*)
where
// This expands to a where bound that asserts each element
// in the tuple implements some database table type.
//
// Concretely, this expands to something like:
// ```rust
// BLOCKINFOS: DatabaseRo<BlockInfos>,
// BLOCKBLOBS: DatabaseRo<BlockBlobs>,
// [...]
// ```
$(
[<$table:upper>]: DatabaseRo<$table>,
)*
{
$(
// The name of each accessor function is
// the table type in `snake_case`, e.g., `block_infos()`.
#[inline]
fn [<$table:snake>](&self) -> &impl DatabaseRo<$table> {
// The field at this tuple index is the
// opened database table for `$table`.
&self.$index
}
)*
fn all_tables_empty(&self) -> Result<bool, $crate::error::RuntimeError> {
$(
if !DatabaseRo::is_empty(&self.$index)? {
return Ok(false);
}
)*
Ok(true)
}
}
// This is the same as the above
// `Tables`, but for `TablesIter`.
impl<$([<$table:upper>]),*> TablesIter
for ($([<$table:upper>]),*)
where
$(
[<$table:upper>]: DatabaseRo<$table> + DatabaseIter<$table>,
)*
{
$(
// The function name of the accessor function is
// the table type in `snake_case` + `_iter`, e.g., `block_infos_iter()`.
#[inline]
fn [<$table:snake _iter>](&self) -> &(impl DatabaseRo<$table> + DatabaseIter<$table>) {
&self.$index
}
)*
}
// This is the same as the above
// `Tables`, but for `TablesMut`.
impl<$([<$table:upper>]),*> TablesMut
for ($([<$table:upper>]),*)
where
$(
[<$table:upper>]: DatabaseRw<$table>,
)*
{
$(
// The function name of the mutable accessor function is
// the table type in `snake_case` + `_mut`, e.g., `block_infos_mut()`.
#[inline]
fn [<$table:snake _mut>](&mut self) -> &mut impl DatabaseRw<$table> {
&mut self.$index
}
)*
}
}};
}
// Input format: $table_type => $index
//
// The $index:
// - Must start at 0 and increment by 1 for each table
// - Must end at the total number of table types - 1
//
// Compile errors will occur if these aren't satisfied.
//
// $index is just the `tuple.$index`, as the above [`define_trait_tables`]
// macro has a blanket impl for `(all, table, types, ...)` and we must map
// each type to a tuple index explicitly.
//
// FIXME: there's definitely an automatic way to do this :)
define_trait_tables! {
BlockInfos => 0,
BlockBlobs => 1,
BlockHeights => 2,
KeyImages => 3,
NumOutputs => 4,
PrunedTxBlobs => 5,
PrunableHashes => 6,
Outputs => 7,
PrunableTxBlobs => 8,
RctOutputs => 9,
TxBlobs => 10,
TxIds => 11,
TxHeights => 12,
TxOutputs => 13,
TxUnlockTime => 14,
}
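// A hedged sketch of what the generated traits enable; the `open_tables()`
// call and the `get()` usage below are illustrative assumptions:
//
// ```rust,ignore
// let tables = env_inner.open_tables(&tx_ro)?;
// let height = tables.block_heights().get(&block_hash)?;
// let blob = tables.block_blobs().get(&height)?;
// ```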
//---------------------------------------------------------------------------------------------------- Table function macro
/// `crate`-private macro for calling functions on all tables.
///
/// This calls the function `$fn` with the optional
/// arguments `$args` on all tables - returning early
/// (within whatever scope this is called) if any
/// of the function calls error.
///
/// Else, it evaluates to an `Ok((tuple, of, all, table, types, ...))`,
/// i.e., an `impl Table[Mut]` wrapped in `Ok`.
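///
/// # Example
/// A hypothetical invocation; the `open_db_ro` function and its
/// arguments are assumptions for illustration:
/// ```rust,ignore
/// let tables = call_fn_on_all_tables_or_early_return! {
///     open_db_ro(env_inner, tx_ro)
/// }?;
/// ```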
macro_rules! call_fn_on_all_tables_or_early_return {
(
$($fn:ident $(::)?)*
(
$($arg:ident),* $(,)?
)
) => {{
Ok((
$($fn ::)*<$crate::tables::BlockInfos>($($arg),*)?,
$($fn ::)*<$crate::tables::BlockBlobs>($($arg),*)?,
$($fn ::)*<$crate::tables::BlockHeights>($($arg),*)?,
$($fn ::)*<$crate::tables::KeyImages>($($arg),*)?,
$($fn ::)*<$crate::tables::NumOutputs>($($arg),*)?,
$($fn ::)*<$crate::tables::PrunedTxBlobs>($($arg),*)?,
$($fn ::)*<$crate::tables::PrunableHashes>($($arg),*)?,
$($fn ::)*<$crate::tables::Outputs>($($arg),*)?,
$($fn ::)*<$crate::tables::PrunableTxBlobs>($($arg),*)?,
$($fn ::)*<$crate::tables::RctOutputs>($($arg),*)?,
$($fn ::)*<$crate::tables::TxBlobs>($($arg),*)?,
$($fn ::)*<$crate::tables::TxIds>($($arg),*)?,
$($fn ::)*<$crate::tables::TxHeights>($($arg),*)?,
$($fn ::)*<$crate::tables::TxOutputs>($($arg),*)?,
$($fn ::)*<$crate::tables::TxUnlockTime>($($arg),*)?,
))
}};
}
pub(crate) use call_fn_on_all_tables_or_early_return;
//---------------------------------------------------------------------------------------------------- Table macro
/// Create all tables; should be used _once_.
///
@ -47,6 +326,9 @@ macro_rules! tables {
// Table struct.
$(#[$attr])*
// The below test shows the `snake_case` table name in cargo docs.
#[doc = concat!("- Key: [`", stringify!($key), "`]")]
#[doc = concat!("- Value: [`", stringify!($value), "`]")]
///
/// ## Table Name
/// ```rust
/// # use cuprate_database::{*,tables::*};
@ -80,66 +362,109 @@ macro_rules! tables {
// Notes:
// - Keep this sorted A-Z (by table name)
// - Tables are defined in plural to avoid name conflicts with types
// - If adding/changing a table, also edit the tests in `src/backend/tests.rs`
// and edit `Env::open` to make sure it creates the table
// - If adding/changing a table also edit:
// a) the tests in `src/backend/tests.rs`
// b) `Env::open` to make sure it creates the table (for all backends)
// c) `call_fn_on_all_tables_or_early_return!()` macro defined in this file
tables! {
/// TODO
/// Serialized block blobs (bytes).
///
/// Contains the serialized version of all blocks.
BlockBlobs,
BlockHeight => BlockBlob,
/// TODO
/// Block heights.
///
/// Contains the height of all blocks.
BlockHeights,
BlockHash => BlockHeight,
/// TODO
BlockInfoV1s,
BlockHeight => BlockInfoV1,
/// Block information.
///
/// Contains metadata of all blocks.
BlockInfos,
BlockHeight => BlockInfo,
/// TODO
BlockInfoV2s,
BlockHeight => BlockInfoV2,
/// TODO
BlockInfoV3s,
BlockHeight => BlockInfoV3,
/// TODO
/// Set of key images.
///
/// Contains all the key images known to be spent.
///
/// This table has `()` as the value type, meaning
/// it is effectively a set of key images.
KeyImages,
KeyImage => (),
/// TODO
/// Maps an output's amount to the number of outputs with that amount.
///
/// For example, if there are 5 outputs with `amount = 123`
/// then calling `get(123)` on this table will return 5.
NumOutputs,
Amount => AmountIndex,
Amount => u64,
/// TODO
PrunedTxBlobs,
TxId => PrunedBlob,
/// TODO
/// Pre-RCT output data.
Outputs,
PreRctOutputId => Output,
/// TODO
/// Pruned transaction blobs (bytes).
///
/// Contains the pruned portion of serialized transaction data.
PrunedTxBlobs,
TxId => PrunedBlob,
/// Prunable transaction blobs (bytes).
///
/// Contains the prunable portion of serialized transaction data.
// SOMEDAY: impl when `monero-serai` supports pruning
PrunableTxBlobs,
TxId => PrunableBlob,
/// TODO
/// Prunable transaction hashes.
///
/// Contains the prunable portion of transaction hashes.
// SOMEDAY: impl when `monero-serai` supports pruning
PrunableHashes,
TxId => PrunableHash,
/// TODO
// SOMEDAY: impl a properties table:
// - db version
// - pruning seed
// Properties,
// StorableString => StorableVec,
/// RCT output data.
RctOutputs,
AmountIndex => RctOutput,
/// TODO
/// Transaction blobs (bytes).
///
/// Contains the serialized version of all transactions.
// SOMEDAY: remove when `monero-serai` supports pruning
TxBlobs,
TxId => TxBlob,
/// Transaction indices.
///
/// Contains the indices of all transactions.
TxIds,
TxHash => TxId,
/// TODO
/// Transaction heights.
///
/// Contains the block height associated with all transactions.
TxHeights,
TxId => BlockHeight,
/// TODO
/// Transaction outputs.
///
/// Contains the list of `AmountIndex`'s of the
/// outputs associated with all transactions.
TxOutputs,
TxId => AmountIndices,
/// Transaction unlock time.
///
/// Contains the unlock time of transactions IF they have one.
/// Transactions without unlock times will not exist in this table.
TxUnlockTime,
TxId => UnlockTime,
}

85
database/src/tests.rs Normal file
View file

@ -0,0 +1,85 @@
//! Utilities for `cuprate_database` testing.
//!
//! These types/fn's are:
//! - only enabled on `#[cfg(test)]`
//! - only used internally
//---------------------------------------------------------------------------------------------------- Import
use std::fmt::Debug;
use pretty_assertions::assert_eq;
use crate::{config::ConfigBuilder, tables::Tables, ConcreteEnv, DatabaseRo, Env, EnvInner};
//---------------------------------------------------------------------------------------------------- Struct
/// Named struct to assert the length of all tables.
///
/// This is a struct with fields instead of a function
/// so that callers can name arguments, otherwise the call-site
/// is a little confusing, e.g. `assert_table_len(0, 25, 1, 123)`.
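///
/// A sketch of the intended call-site (field values are hypothetical):
/// ```rust,ignore
/// AssertTableLen {
///     block_infos: 1,
///     tx_ids: 1,
///     ..Default::default()
/// }
/// .assert(&tables);
/// ```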
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub(crate) struct AssertTableLen {
pub(crate) block_infos: u64,
pub(crate) block_blobs: u64,
pub(crate) block_heights: u64,
pub(crate) key_images: u64,
pub(crate) num_outputs: u64,
pub(crate) pruned_tx_blobs: u64,
pub(crate) prunable_hashes: u64,
pub(crate) outputs: u64,
pub(crate) prunable_tx_blobs: u64,
pub(crate) rct_outputs: u64,
pub(crate) tx_blobs: u64,
pub(crate) tx_ids: u64,
pub(crate) tx_heights: u64,
pub(crate) tx_unlock_time: u64,
}
impl AssertTableLen {
/// Assert the length of all tables.
pub(crate) fn assert(self, tables: &impl Tables) {
let other = Self {
block_infos: tables.block_infos().len().unwrap(),
block_blobs: tables.block_blobs().len().unwrap(),
block_heights: tables.block_heights().len().unwrap(),
key_images: tables.key_images().len().unwrap(),
num_outputs: tables.num_outputs().len().unwrap(),
pruned_tx_blobs: tables.pruned_tx_blobs().len().unwrap(),
prunable_hashes: tables.prunable_hashes().len().unwrap(),
outputs: tables.outputs().len().unwrap(),
prunable_tx_blobs: tables.prunable_tx_blobs().len().unwrap(),
rct_outputs: tables.rct_outputs().len().unwrap(),
tx_blobs: tables.tx_blobs().len().unwrap(),
tx_ids: tables.tx_ids().len().unwrap(),
tx_heights: tables.tx_heights().len().unwrap(),
tx_unlock_time: tables.tx_unlock_time().len().unwrap(),
};
assert_eq!(self, other);
}
}
//---------------------------------------------------------------------------------------------------- fn
/// Create an `Env` in a temporary directory.
/// The directory is automatically removed after the `TempDir` is dropped.
///
/// FIXME: changing this to `-> impl Env` causes lifetime errors...
pub(crate) fn tmp_concrete_env() -> (ConcreteEnv, tempfile::TempDir) {
let tempdir = tempfile::tempdir().unwrap();
let config = ConfigBuilder::new()
.db_directory(tempdir.path().into())
.low_power()
.build();
let env = ConcreteEnv::open(config).unwrap();
(env, tempdir)
}
/// Assert all the tables in the environment are empty.
pub(crate) fn assert_all_tables_are_empty(env: &ConcreteEnv) {
let env_inner = env.env_inner();
let tx_ro = env_inner.tx_ro().unwrap();
let tables = env_inner.open_tables(&tx_ro).unwrap();
assert!(tables.all_tables_empty().unwrap());
assert_eq!(crate::ops::tx::get_num_tx(tables.tx_ids()).unwrap(), 0);
}

View file

@ -1,21 +1,21 @@
//! Database transaction abstraction; `trait TxRo`, `trait TxRw`.
//---------------------------------------------------------------------------------------------------- Import
use crate::{config::SyncMode, env::Env, error::RuntimeError};
use crate::error::RuntimeError;
//---------------------------------------------------------------------------------------------------- TxRo
/// Read-only database transaction.
///
/// Returned from [`EnvInner::tx_ro`](crate::EnvInner::tx_ro).
///
/// # TODO
/// I don't think we need this, we can just drop the `tx_ro`?
/// <https://docs.rs/heed/0.20.0-alpha.9/heed/struct.RoTxn.html#method.commit>
/// # Commit
/// It's recommended but may not be necessary to call [`TxRo::commit`] in certain cases:
/// - <https://docs.rs/heed/0.20.0-alpha.9/heed/struct.RoTxn.html#method.commit>
pub trait TxRo<'env> {
/// Commit the read-only transaction.
///
/// # Errors
/// This operation is infallible (will always return `Ok(())`) with the `redb` backend.
/// This operation will always return `Ok(())` with the `redb` backend.
fn commit(self) -> Result<(), RuntimeError>;
}
@ -29,20 +29,15 @@ pub trait TxRw<'env> {
/// Note that this doesn't necessarily sync the database caches to disk.
///
/// # Errors
/// This operation is infallible (will always return `Ok(())`) with the `redb` backend.
/// This operation will always return `Ok(())` with the `redb` backend.
///
/// Else, this will only return:
/// - [`RuntimeError::ResizeNeeded`] (if `Env::MANUAL_RESIZE == true`)
/// - [`RuntimeError::Io`]
/// If `Env::MANUAL_RESIZE == true`,
/// [`RuntimeError::ResizeNeeded`] may be returned.
fn commit(self) -> Result<(), RuntimeError>;
/// Abort the transaction, erasing any writes that have occurred.
///
/// # Errors
/// This operation is infallible (will always return `Ok(())`) with the `heed` backend.
///
/// Else, this will only return:
/// - [`RuntimeError::ResizeNeeded`] (if `Env::MANUAL_RESIZE == true`)
/// - [`RuntimeError::Io`]
/// This operation will always return `Ok(())` with the `heed` backend.
fn abort(self) -> Result<(), RuntimeError>;
}

View file

@ -1,8 +1,10 @@
//! Database [table](crate::tables) types.
//!
//! This module contains all types used by the database tables.
//! This module contains all types used by the database tables,
//! and aliases for common Monero-related types that use the
//! same underlying primitive type.
//!
//! TODO: Add schema here or a link to it.
//! <!-- FIXME: Add schema here or a link to it when complete -->
/*
* <============================================> VERY BIG SCARY SAFETY MESSAGE <============================================>
@ -39,7 +41,7 @@
#![forbid(unsafe_code)] // if you remove this line i will steal your monero
//---------------------------------------------------------------------------------------------------- Import
use bytemuck::{AnyBitPattern, NoUninit, Pod, Zeroable};
use bytemuck::{Pod, Zeroable};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
@ -47,52 +49,59 @@ use serde::{Deserialize, Serialize};
use crate::storable::StorableVec;
//---------------------------------------------------------------------------------------------------- Aliases
// TODO: document these, why they exist, and their purpose.
//
// Notes:
// - Keep this sorted A-Z
// These type aliases exist because many Monero-related types are the exact same.
// For clarity, they're given distinct type aliases so as not to confuse them.
/// TODO
/// An output's amount.
pub type Amount = u64;
/// TODO
/// The index of an [`Amount`] in a list of duplicate `Amount`s.
pub type AmountIndex = u64;
/// TODO
/// A list of [`AmountIndex`]s.
pub type AmountIndices = StorableVec<AmountIndex>;
/// TODO
/// A serialized block.
pub type BlockBlob = StorableVec<u8>;
/// TODO
/// A block's hash.
pub type BlockHash = [u8; 32];
/// TODO
/// A block's height.
pub type BlockHeight = u64;
/// TODO
/// A key image.
pub type KeyImage = [u8; 32];
/// TODO
/// Pruned serialized bytes.
pub type PrunedBlob = StorableVec<u8>;
/// TODO
/// Prunable serialized bytes.
pub type PrunableBlob = StorableVec<u8>;
/// TODO
/// A prunable hash.
pub type PrunableHash = [u8; 32];
/// TODO
/// A serialized transaction.
pub type TxBlob = StorableVec<u8>;
/// A transaction's global index, or ID.
pub type TxId = u64;
/// TODO
/// A transaction's hash.
pub type TxHash = [u8; 32];
/// TODO
/// The unlock time value of an output.
pub type UnlockTime = u64;
//---------------------------------------------------------------------------------------------------- BlockInfoV1
/// TODO
/// An identifier for a pre-RCT [`Output`].
///
/// This can also serve as an identifier for [`RctOutput`]'s
/// when [`PreRctOutputId::amount`] is set to `0`, although,
/// in that case, only [`AmountIndex`] needs to be known.
///
/// This is the key to the [`Outputs`](crate::tables::Outputs) table.
///
/// ```rust
/// # use std::borrow::*;
@ -118,121 +127,41 @@ pub type UnlockTime = u64;
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
#[repr(C)]
pub struct PreRctOutputId {
/// TODO
/// Amount of the output.
///
/// This should be `0` if the output is an [`RctOutput`].
pub amount: Amount,
/// TODO
/// The index of the output with the same `amount`.
///
/// In the case of [`Output`]'s, this is the index of the list
/// of outputs with the same clear amount.
///
/// In the case of [`RctOutput`]'s, this is the
/// global index of _all_ `RctOutput`s
pub amount_index: AmountIndex,
}
//---------------------------------------------------------------------------------------------------- BlockInfoV1
/// TODO
///
/// ```rust
/// # use std::borrow::*;
/// # use cuprate_database::{*, types::*};
/// // Assert Storable is correct.
/// let a = BlockInfoV1 {
/// timestamp: 1,
/// total_generated_coins: 123,
/// weight: 321,
/// cumulative_difficulty: 111,
/// block_hash: [54; 32],
/// };
/// let b = Storable::as_bytes(&a);
/// let c: BlockInfoV1 = Storable::from_bytes(b);
/// assert_eq!(a, c);
/// ```
///
/// # Size & Alignment
/// ```rust
/// # use cuprate_database::types::*;
/// # use std::mem::*;
/// assert_eq!(size_of::<BlockInfoV1>(), 64);
/// assert_eq!(align_of::<BlockInfoV1>(), 8);
/// ```
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
#[repr(C)]
pub struct BlockInfoV1 {
/// TODO
pub timestamp: u64,
/// TODO
pub total_generated_coins: u64,
/// TODO
pub weight: u64,
/// TODO
pub cumulative_difficulty: u64,
/// TODO
pub block_hash: [u8; 32],
}
//---------------------------------------------------------------------------------------------------- BlockInfoV2
/// TODO
///
/// ```rust
/// # use std::borrow::*;
/// # use cuprate_database::{*, types::*};
/// // Assert Storable is correct.
/// let a = BlockInfoV2 {
/// timestamp: 1,
/// total_generated_coins: 123,
/// weight: 321,
/// block_hash: [54; 32],
/// cumulative_difficulty: 111,
/// cumulative_rct_outs: 2389,
/// };
/// let b = Storable::as_bytes(&a);
/// let c: BlockInfoV2 = Storable::from_bytes(b);
/// assert_eq!(a, c);
/// ```
///
/// # Size & Alignment
/// ```rust
/// # use cuprate_database::types::*;
/// # use std::mem::*;
/// assert_eq!(size_of::<BlockInfoV2>(), 72);
/// assert_eq!(align_of::<BlockInfoV2>(), 8);
/// ```
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
#[repr(C)]
pub struct BlockInfoV2 {
/// TODO
pub timestamp: u64,
/// TODO
pub total_generated_coins: u64,
/// TODO
pub weight: u64,
/// TODO
pub block_hash: [u8; 32],
/// TODO
pub cumulative_difficulty: u64,
/// TODO
///
/// TODO: note that this is originally u32,
/// but is u64 here for padding reasons.
pub cumulative_rct_outs: u64,
}
//---------------------------------------------------------------------------------------------------- BlockInfoV3
/// TODO
/// Block information.
///
/// This is the value in the [`BlockInfos`](crate::tables::BlockInfos) table.
///
/// ```rust
/// # use std::borrow::*;
/// # use cuprate_database::{*, types::*};
/// // Assert Storable is correct.
/// let a = BlockInfoV3 {
/// let a = BlockInfo {
/// timestamp: 1,
/// total_generated_coins: 123,
/// cumulative_generated_coins: 123,
/// weight: 321,
/// cumulative_difficulty_low: 111,
/// cumulative_difficulty_low: 112,
/// cumulative_difficulty_high: 112,
/// block_hash: [54; 32],
/// cumulative_rct_outs: 2389,
/// long_term_weight: 2389,
/// };
/// let b = Storable::as_bytes(&a);
/// let c: BlockInfoV3 = Storable::from_bytes(b);
/// let c: BlockInfo = Storable::from_bytes(b);
/// assert_eq!(a, c);
/// ```
///
@ -240,34 +169,70 @@ pub struct BlockInfoV2 {
/// ```rust
/// # use cuprate_database::types::*;
/// # use std::mem::*;
/// assert_eq!(size_of::<BlockInfoV3>(), 88);
/// assert_eq!(align_of::<BlockInfoV3>(), 8);
/// assert_eq!(size_of::<BlockInfo>(), 88);
/// assert_eq!(align_of::<BlockInfo>(), 8);
/// ```
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
#[repr(C)]
pub struct BlockInfoV3 {
/// TODO
pub struct BlockInfo {
/// The UNIX time at which the block was mined.
pub timestamp: u64,
/// TODO
pub total_generated_coins: u64,
/// TODO
/// The total amount of coins mined in all blocks so far, including this block's.
pub cumulative_generated_coins: u64,
/// The adjusted block size, in bytes.
///
/// See [`block_weight`](https://monero-book.cuprate.org/consensus_rules/blocks/weights.html#blocks-weight).
pub weight: u64,
// Maintain 8 byte alignment.
/// TODO
/// Least-significant 64 bits of the 128-bit cumulative difficulty.
pub cumulative_difficulty_low: u64,
/// TODO
/// Most-significant 64 bits of the 128-bit cumulative difficulty.
pub cumulative_difficulty_high: u64,
/// TODO
/// The block's hash.
pub block_hash: [u8; 32],
/// TODO
/// The total number of RCT outputs so far, including this block's.
pub cumulative_rct_outs: u64,
/// TODO
/// The long term block weight, based on the median weight of the preceding `100_000` blocks.
///
/// See [`long_term_weight`](https://monero-book.cuprate.org/consensus_rules/blocks/weights.html#long-term-block-weight).
pub long_term_weight: u64,
}
//---------------------------------------------------------------------------------------------------- OutputFlags
bitflags::bitflags! {
/// Bit flags for [`Output`]s and [`RctOutput`]s,
///
/// Currently only the first bit is used and, if set,
/// it means this output has a non-zero unlock time.
///
/// ```rust
/// # use std::borrow::*;
/// # use cuprate_database::{*, types::*};
/// // Assert Storable is correct.
/// let a = OutputFlags::NON_ZERO_UNLOCK_TIME;
/// let b = Storable::as_bytes(&a);
/// let c: OutputFlags = Storable::from_bytes(b);
/// assert_eq!(a, c);
/// ```
///
/// # Size & Alignment
/// ```rust
/// # use cuprate_database::types::*;
/// # use std::mem::*;
/// assert_eq!(size_of::<OutputFlags>(), 4);
/// assert_eq!(align_of::<OutputFlags>(), 4);
/// ```
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
#[repr(transparent)]
pub struct OutputFlags: u32 {
/// This output has a non-zero unlock time.
const NON_ZERO_UNLOCK_TIME = 0b0000_0001;
}
}
//---------------------------------------------------------------------------------------------------- Output
/// TODO
/// A pre-RCT (v1) output's data.
///
/// ```rust
/// # use std::borrow::*;
@ -276,7 +241,7 @@ pub struct BlockInfoV3 {
/// let a = Output {
/// key: [1; 32],
/// height: 1,
/// output_flags: 0,
/// output_flags: OutputFlags::empty(),
/// tx_idx: 3,
/// };
/// let b = Storable::as_bytes(&a);
@ -295,18 +260,20 @@ pub struct BlockInfoV3 {
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
#[repr(C)]
pub struct Output {
/// TODO
/// The public key of the output.
pub key: [u8; 32],
/// We could get this from the tx_idx with the Tx Heights table but that would require another look up per out.
/// The block height this output belongs to.
// PERF: We could get this from the tx_idx with the `TxHeights`
// table, but that would require another lookup per output.
pub height: u32,
/// Bit flags for this output, currently only the first bit is used and, if set, it means this output has a non-zero unlock time.
pub output_flags: u32,
/// TODO
/// Bit flags for this output.
pub output_flags: OutputFlags,
/// The index of the transaction this output belongs to.
pub tx_idx: u64,
}
//---------------------------------------------------------------------------------------------------- RctOutput
/// TODO
/// An RCT (v2+) output's data.
///
/// ```rust
/// # use std::borrow::*;
@ -315,7 +282,7 @@ pub struct Output {
/// let a = RctOutput {
/// key: [1; 32],
/// height: 1,
/// output_flags: 0,
/// output_flags: OutputFlags::empty(),
/// tx_idx: 3,
/// commitment: [3; 32],
/// };
@ -335,13 +302,15 @@ pub struct Output {
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
#[repr(C)]
pub struct RctOutput {
/// TODO
/// The public key of the output.
pub key: [u8; 32],
/// We could get this from the tx_idx with the Tx Heights table but that would require another look up per out.
/// The block height this output belongs to.
// PERF: We could get this from the tx_idx with the `TxHeights`
// table, but that would require another lookup per output.
pub height: u32,
/// Bit flags for this output, currently only the first bit is used and, if set, it means this output has a non-zero unlock time.
pub output_flags: u32,
/// TODO
pub output_flags: OutputFlags,
/// The index of the transaction this output belongs to.
pub tx_idx: u64,
/// The amount commitment of this output.
pub commitment: [u8; 32],

View file

@ -0,0 +1,85 @@
//! Wrapper type for partially-`unsafe` usage of `T: !Send`.
//---------------------------------------------------------------------------------------------------- Import
use std::{
borrow::Borrow,
ops::{Deref, DerefMut},
};
use bytemuck::TransparentWrapper;
//---------------------------------------------------------------------------------------------------- Aliases
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, TransparentWrapper)]
#[repr(transparent)]
/// A wrapper type that `unsafe`ly implements `Send` for any `T`.
///
/// This is a marker/wrapper type that allows wrapping
/// any type `T` such that it implements `Send`.
///
/// This is to be used when `T` is `Send`, but only in certain
/// situations not provable to the compiler, or when proving it
/// is otherwise a pain and/or less efficient.
///
/// It is up to the users of this type to ensure their
/// usage of `UnsafeSendable` are actually safe.
///
/// Notably, `heed`'s table type uses this inside `service`.
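///
/// A hedged usage sketch; `not_send_value` is a stand-in for some
/// `!Send` type, and the safety of this pattern is on the caller:
/// ```rust,ignore
/// // SAFETY: we promise the wrapped value is only
/// // ever accessed by one thread at a time.
/// let sendable = unsafe { UnsafeSendable::new(not_send_value) };
/// std::thread::spawn(move || {
///     let value = sendable.into_inner();
///     // ...
/// });
/// ```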
pub(crate) struct UnsafeSendable<T>(T);
#[allow(clippy::non_send_fields_in_send_ty)]
// SAFETY: Users ensure that their usage of this type is safe.
unsafe impl<T> Send for UnsafeSendable<T> {}
impl<T> UnsafeSendable<T> {
/// Create a new [`UnsafeSendable`].
///
/// # Safety
/// By constructing this type, you must ensure the usage
/// of the resulting `Self` follows all the [`Send`] rules.
pub(crate) const unsafe fn new(t: T) -> Self {
Self(t)
}
/// Extract the inner `T`.
#[allow(dead_code)]
pub(crate) fn into_inner(self) -> T {
self.0
}
}
impl<T> Borrow<T> for UnsafeSendable<T> {
fn borrow(&self) -> &T {
&self.0
}
}
impl<T> AsRef<T> for UnsafeSendable<T> {
fn as_ref(&self) -> &T {
&self.0
}
}
impl<T> AsMut<T> for UnsafeSendable<T> {
fn as_mut(&mut self) -> &mut T {
&mut self.0
}
}
impl<T> Deref for UnsafeSendable<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T> DerefMut for UnsafeSendable<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
mod test {
// use super::*;
}

View file

@ -10,22 +10,24 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/consensus"
[features]
# All features on by default.
default = ["std", "atomic", "asynch", "fs", "num", "time", "thread", "constants"]
default = ["std", "atomic", "asynch", "fs", "num", "map", "time", "thread", "constants"]
std = []
atomic = ["dep:crossbeam"]
asynch = ["dep:futures", "dep:rayon"]
constants = []
fs = ["dep:dirs"]
num = []
map = ["dep:monero-serai"]
time = ["dep:chrono", "std"]
thread = ["std", "dep:target_os_lib"]
[dependencies]
crossbeam = { workspace = true, optional = true }
chrono = { workspace = true, optional = true, features = ["std", "clock"] }
dirs = { workspace = true, optional = true }
futures = { workspace = true, optional = true, features = ["std"] }
rayon = { workspace = true, optional = true }
crossbeam = { workspace = true, optional = true }
chrono = { workspace = true, optional = true, features = ["std", "clock"] }
dirs = { workspace = true, optional = true }
futures = { workspace = true, optional = true, features = ["std"] }
monero-serai = { workspace = true, optional = true }
rayon = { workspace = true, optional = true }
# This is kind of a stupid workaround.
# [thread] needs to activate one of these libs (windows|libc)

View file

@ -1,11 +1,31 @@
//! Cuprate directories and filenames.
//!
//! # TODO
//! Document how environment variables can change these.
//! # Environment variables on Linux
//! Note that this module's functions use [`dirs`],
//! which adheres to the XDG standard on Linux.
//!
//! # Reference
//! <https://github.com/Cuprate/cuprate/issues/46>
//! <https://docs.rs/dirs>
//! This means that the values returned by these functions
//! may change at runtime depending on environment variables,
//! for example:
//!
//! By default the config directory is `~/.config`, however
//! if `$XDG_CONFIG_HOME` is set to something, that will be
//! used instead.
//!
//! ```rust
//! # use cuprate_helper::fs::*;
//! # if cfg!(target_os = "linux") {
//! std::env::set_var("XDG_CONFIG_HOME", "/custom/path");
//! assert_eq!(
//! cuprate_config_dir().to_string_lossy(),
//! "/custom/path/cuprate"
//! );
//! # }
//! ```
//!
//! Reference:
//! - <https://github.com/Cuprate/cuprate/issues/46>
//! - <https://docs.rs/dirs>
//---------------------------------------------------------------------------------------------------- Use
use std::{

View file

@ -51,6 +51,9 @@ pub mod network;
#[cfg(feature = "num")]
pub mod num;
#[cfg(feature = "map")]
pub mod map;
#[cfg(feature = "thread")]
pub mod thread;

107
helper/src/map.rs Normal file
View file

@ -0,0 +1,107 @@
//! Mapping of data types.
//!
//! This module provides functions solely for mapping data types into others, mostly similar ones.
//!
//! `#[no_std]` compatible.
//---------------------------------------------------------------------------------------------------- Use
use monero_serai::transaction::Timelock;
//---------------------------------------------------------------------------------------------------- `(u64, u64) <-> u128`
/// Split a [`u128`] value into 2 64-bit values.
///
/// The tuple returned is `(low, high)` where `low` is the least significant
/// 64-bits of `number`, and `high` is the most significant.
///
/// Note that the outputs of this function are `u64` representations of _bits_, not numerical values.
///
/// See [`combine_low_high_bits_to_u128`] for the inverse function.
///
/// ```rust
/// # use cuprate_helper::map::*;
/// let value = u128::MAX - 1;
/// let low = u64::MAX - 1;
/// let high = u64::MAX;
///
/// assert_eq!(split_u128_into_low_high_bits(value), (low, high));
/// ```
#[inline]
pub const fn split_u128_into_low_high_bits(value: u128) -> (u64, u64) {
(value as u64, (value >> 64) as u64)
}
/// Combine 2 64-bit values into a single [`u128`] value.
///
/// The inputs:
/// - `low_bits` are the _least_ significant 64-bits of the value
/// - `high_bits` are the _most_ significant 64-bits of the value
///
/// Note that `low_bits` & `high_bits` should be `u64` representation of _bits_, not numerical values.
///
/// See [`split_u128_into_low_high_bits`] for the inverse function.
///
/// ```rust
/// # use cuprate_helper::map::*;
/// let value = u128::MAX - 1;
/// let low = u64::MAX - 1;
/// let high = u64::MAX;
///
/// assert_eq!(combine_low_high_bits_to_u128(low, high), value);
/// ```
#[inline]
pub const fn combine_low_high_bits_to_u128(low_bits: u64, high_bits: u64) -> u128 {
let res = (high_bits as u128) << 64;
res | (low_bits as u128)
}
//---------------------------------------------------------------------------------------------------- Timelock
/// Map a [`u64`] to a [`Timelock`].
///
/// Height/time is not differentiated via type, but rather:
/// "height is any value less than 500_000_000 and timestamp is any value above"
/// so the `u64/usize` is stored without any tag.
///
/// See [`timelock_to_u64`] for the inverse function.
///
/// - <https://github.com/Cuprate/cuprate/pull/102#discussion_r1558504285>
/// - <https://github.com/serai-dex/serai/blob/bc1dec79917d37d326ac3d9bc571a64131b0424a/coins/monero/src/transaction.rs#L139>
///
/// ```rust
/// # use cuprate_helper::map::*;
/// # use monero_serai::transaction::*;
/// assert_eq!(u64_to_timelock(0), Timelock::None);
/// assert_eq!(u64_to_timelock(499_999_999), Timelock::Block(499_999_999));
/// assert_eq!(u64_to_timelock(500_000_000), Timelock::Time(500_000_000));
/// ```
pub fn u64_to_timelock(u: u64) -> Timelock {
if u == 0 {
Timelock::None
} else if u < 500_000_000 {
Timelock::Block(usize::try_from(u).unwrap())
} else {
Timelock::Time(u)
}
}
/// Map [`Timelock`] to a [`u64`].
///
/// See [`u64_to_timelock`] for the inverse function and more documentation.
///
/// ```rust
/// # use cuprate_helper::map::*;
/// # use monero_serai::transaction::*;
/// assert_eq!(timelock_to_u64(Timelock::None), 0);
/// assert_eq!(timelock_to_u64(Timelock::Block(499_999_999)), 499_999_999);
/// assert_eq!(timelock_to_u64(Timelock::Time(500_000_000)), 500_000_000);
/// ```
pub fn timelock_to_u64(timelock: Timelock) -> u64 {
match timelock {
Timelock::None => 0,
Timelock::Block(u) => u64::try_from(u).unwrap(),
Timelock::Time(u) => u,
}
}
//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
mod test {}

View file

@ -0,0 +1,14 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mDMEZb0y4RYJKwYBBAHaRw8BAQdAvMid+QsSxLULIkKPLf0XWgPxaoG89qPNiQ4S
fXH0BfW0VlN5bnRoZXRpY0JpcmQ0NSAoQ3VwcmF0ZSdzIGRldmVsb3BlcikgPHNv
bWVvbmVlbHNlLmlzX29uLmdpdGh1Yi5yaW83eEBzaW1wbGVsb2dpbi5jb20+iJME
ExYKADsWIQQEmOfWc9FTBiAKoHnHaXP3SFIeEQUCZb0y4QIbAwULCQgHAgIiAgYV
CgkICwIEFgIDAQIeBwIXgAAKCRDHaXP3SFIeEUx+AQDYd7t75+V4/aSTczLxMGuT
A84qGRuYNStXUJzjV8F21wD/YVlybZcr9dDQ/+YOgh5aXBzo+oGm+XhhSbI3QdIX
LAC4OARlvTLhEgorBgEEAZdVAQUBAQdAgRoSFUmnCqETElyry97kFwsdzlNyldk2
ZPgH9J4fCHwDAQgHiHgEGBYKACAWIQQEmOfWc9FTBiAKoHnHaXP3SFIeEQUCZb0y
4QIbDAAKCRDHaXP3SFIeETDSAP4k8+jUaStnjrkzN1jvRg136qNfwe8ZzjrsWJ0n
FOS8zAEA/fwRjRyvEP28KJNiKdyhDYWYJTpyLGTiPP8b43NsHAM=
=gqqy
-----END PGP PUBLIC KEY BLOCK-----

View file

@ -1,15 +0,0 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mE8EZAt90BMFK4EEAAoCAwS8WnB3wMu+JxWm3LpuHO1jcdwIlMjndqoGCcJnFEKm
shkx1eE21AoCGJYYAjeVLrazF5hqTzs6UpBuP7ZNaXvJtEBTeW50aGV0aWNCaXJk
NDUgPHNvbWVvbmVlbHNlLmlzX29uLmdpdGh1Yi5yaW83eEBzaW1wbGVsb2dpbi5j
b20+iJAEExMIADgWIQTX0AOzMdcNEMyKDV31QokN0AEPEQUCZAt90AIbAwULCQgH
AgYVCgkICwIEFgIDAQIeAQIXgAAKCRD1QokN0AEPEWp0AQCDCOdgi3LRFLrF/rR9
zBy6ceMgAp4Z/GJMO66je3BeIgD9HPo7OkRsKvI1kCf7X9KDV6M0+bmYpC23HYpN
1zWnq++4UwRkC33QEgUrgQQACgIDBGfPz0WQRKwicAMkUF2InuOns4aU/1bDwidd
wP426408APfJ7vTtKOVFjfHzKLLiw1Z0texwhBL0y76nggkzVbMDAQgHiHgEGBMI
ACAWIQTX0AOzMdcNEMyKDV31QokN0AEPEQUCZAt90AIbDAAKCRD1QokN0AEPERQg
APsHUaCbt1BByhXpVu34C9bY6P1Sw9ARpfl9cc2kAEnQRQD+Klmx13c/WOj6euF6
RMKtt34En+0xhP99yfEpoofta/0=
=Pkk7
-----END PGP PUBLIC KEY BLOCK-----

View file

@ -39,11 +39,7 @@ impl<T: Containerable + EpeeValue> EpeeValue for ContainerAsBlob<T> {
}
Ok(ContainerAsBlob(
bytes
.windows(T::SIZE)
.step_by(T::SIZE)
.map(T::from_bytes)
.collect(),
bytes.chunks(T::SIZE).map(T::from_bytes).collect(),
))
}

View file

@ -1,5 +1,8 @@
use core::fmt::{Debug, Formatter};
use core::{num::TryFromIntError, str::Utf8Error};
use core::{
fmt::{Debug, Formatter},
num::TryFromIntError,
str::Utf8Error,
};
pub type Result<T> = core::result::Result<T, Error>;

View file

@ -308,11 +308,7 @@ impl<const N: usize> EpeeValue for ByteArrayVec<N> {
return Err(Error::Format("Byte array exceeded max length"));
}
if r.remaining()
< usize::try_from(len)?
.checked_mul(N)
.ok_or(Error::Value("Length of field is too long".to_string()))?
{
if r.remaining() < usize::try_from(len)? {
return Err(Error::IO("Not enough bytes to fill object"));
}

View file

@ -1,8 +1,9 @@
use core::ops::Deref;
use std::fmt::{Debug, Formatter};
use std::ops::Index;
use core::{
fmt::{Debug, Formatter},
ops::{Deref, Index},
};
use bytes::Bytes;
use bytes::{BufMut, Bytes, BytesMut};
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum FixedByteError {
@ -101,6 +102,40 @@ impl<const N: usize> ByteArrayVec<N> {
pub fn take_bytes(self) -> Bytes {
self.0
}
/// Splits the byte array vec into two at the given index.
///
/// Afterwards `self` contains elements `[0, at)`, and the returned [`ByteArrayVec`] contains elements `[at, len)`.
///
/// This is an O(1) operation that just increases the reference count and sets a few indices.
///
/// # Panics
/// Panics if `at > len`.
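///
/// ```rust,ignore
/// // A sketch relying on the `From<Vec<[u8; N]>>` impl below:
/// let mut v = ByteArrayVec::<32>::from(vec![[0_u8; 32], [1_u8; 32]]);
/// let tail = v.split_off(1);
/// assert_eq!(v.len(), 1);
/// assert_eq!(tail[0], [1_u8; 32]);
/// ```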
pub fn split_off(&mut self, at: usize) -> Self {
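// Convert the element index `at` into a byte index
// before splitting the underlying `Bytes`.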
Self(self.0.split_off(at * N))
}
}
impl<const N: usize> From<&ByteArrayVec<N>> for Vec<[u8; N]> {
fn from(value: &ByteArrayVec<N>) -> Self {
let mut out = Vec::with_capacity(value.len());
for i in 0..value.len() {
out.push(value[i])
}
out
}
}
impl<const N: usize> From<Vec<[u8; N]>> for ByteArrayVec<N> {
fn from(value: Vec<[u8; N]>) -> Self {
let mut bytes = BytesMut::with_capacity(N * value.len());
for i in value.into_iter() {
bytes.extend_from_slice(&i)
}
ByteArrayVec(bytes.freeze())
}
}
impl<const N: usize> TryFrom<Bytes> for ByteArrayVec<N> {
@ -115,8 +150,38 @@ impl<const N: usize> TryFrom<Bytes> for ByteArrayVec<N> {
}
}
impl<const N: usize> From<[u8; N]> for ByteArrayVec<N> {
fn from(value: [u8; N]) -> Self {
ByteArrayVec(Bytes::copy_from_slice(value.as_slice()))
}
}
impl<const N: usize, const LEN: usize> From<[[u8; N]; LEN]> for ByteArrayVec<N> {
fn from(value: [[u8; N]; LEN]) -> Self {
let mut bytes = BytesMut::with_capacity(N * LEN);
for val in value.into_iter() {
bytes.put_slice(val.as_slice());
}
ByteArrayVec(bytes.freeze())
}
}
impl<const N: usize> TryFrom<Vec<u8>> for ByteArrayVec<N> {
type Error = FixedByteError;
fn try_from(value: Vec<u8>) -> Result<Self, Self::Error> {
if value.len() % N != 0 {
return Err(FixedByteError::InvalidLength);
}
Ok(ByteArrayVec(Bytes::from(value)))
}
}
impl<const N: usize> Index<usize> for ByteArrayVec<N> {
type Output = [u8; 32];
type Output = [u8; N];
fn index(&self, index: usize) -> &Self::Output {
if (index + 1) * N > self.0.len() {

View file

@ -15,7 +15,8 @@ levin-cuprate = {path="../levin"}
epee-encoding = { path = "../epee-encoding" }
fixed-bytes = { path = "../fixed-bytes" }
bytes = { workspace = true }
bitflags = { workspace = true, features = ["std"] }
bytes = { workspace = true, features = ["std"] }
thiserror = { workspace = true }
[dev-dependencies]

View file

@ -139,8 +139,7 @@ mod tests {
my_port: 0,
network_id: [
18, 48, 241, 113, 97, 4, 65, 97, 23, 49, 0, 130, 22, 161, 161, 16,
]
.into(),
],
peer_id: 9671405426614699871,
support_flags: PeerSupportFlags::from(1_u32),
rpc_port: 0,
@ -945,8 +944,7 @@ mod tests {
my_port: 18080,
network_id: [
18, 48, 241, 113, 97, 4, 65, 97, 23, 49, 0, 130, 22, 161, 161, 16,
]
.into(),
],
peer_id: 6037804360359455404,
support_flags: PeerSupportFlags::from(1_u32),
rpc_port: 18089,

View file

@ -15,7 +15,9 @@
//! Common types that are used across multiple messages.
use bitflags::bitflags;
use bytes::{Buf, BufMut, Bytes};
use epee_encoding::{epee_object, EpeeValue, InnerMarker};
use fixed_bytes::ByteArray;
@ -24,6 +26,13 @@ use crate::NetworkAddress;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PeerSupportFlags(u32);
bitflags! {
impl PeerSupportFlags: u32 {
const FLUFFY_BLOCKS = 0b0000_0001;
const _ = !0;
}
}
impl From<u32> for PeerSupportFlags {
fn from(value: u32) -> Self {
PeerSupportFlags(value)
@ -42,27 +51,14 @@ impl<'a> From<&'a PeerSupportFlags> for &'a u32 {
}
}
impl PeerSupportFlags {
//const FLUFFY_BLOCKS: u32 = 0b0000_0001;
pub fn is_empty(&self) -> bool {
self.0 == 0
}
}
impl From<u8> for PeerSupportFlags {
fn from(value: u8) -> Self {
PeerSupportFlags(value.into())
}
}
/// Basic Node Data, information on the connected peer
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct BasicNodeData {
/// Port
pub my_port: u32,
/// The Network Id
pub network_id: ByteArray<16>,
// We don't use ByteArray here to allow users to keep this data long term.
pub network_id: [u8; 16],
/// Peer ID
pub peer_id: u64,
/// The peer's support flags
@ -79,7 +75,7 @@ pub struct BasicNodeData {
epee_object! {
BasicNodeData,
my_port: u32,
network_id: ByteArray<16>,
network_id: [u8; 16],
peer_id: u64,
support_flags: PeerSupportFlags as u32 = 0_u32,
rpc_port: u16 = 0_u16,
@ -101,7 +97,8 @@ pub struct CoreSyncData {
/// (If this is not in the message the default is 0)
pub pruning_seed: u32,
/// Hash of the top block
pub top_id: ByteArray<32>,
// We don't use ByteArray here to allow users to keep this data long term.
pub top_id: [u8; 32],
/// Version of the top block
pub top_version: u8,
}
@ -112,7 +109,7 @@ epee_object! {
cumulative_difficulty_top64: u64 = 0_u64,
current_height: u64,
pruning_seed: u32 = 0_u32,
top_id: ByteArray<32>,
top_id: [u8; 32],
top_version: u8 = 0_u8,
}
@ -131,7 +128,7 @@ impl CoreSyncData {
cumulative_difficulty_top64,
current_height,
pruning_seed,
top_id: top_id.into(),
top_id,
top_version,
}
}

View file

@ -114,7 +114,7 @@ pub struct ChainResponse {
/// Total Height
pub total_height: u64,
/// Cumulative Difficulty Low
pub cumulative_difficulty: u64,
pub cumulative_difficulty_low64: u64,
/// Cumulative Difficulty High
pub cumulative_difficulty_top64: u64,
/// Block IDs
@ -125,11 +125,19 @@ pub struct ChainResponse {
pub first_block: Bytes,
}
impl ChainResponse {
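/// Recombine the full 128-bit cumulative difficulty
/// from the low/high 64-bit halves carried on the wire.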
#[inline]
pub fn cumulative_difficulty(&self) -> u128 {
let cumulative_difficulty = self.cumulative_difficulty_top64 as u128;
cumulative_difficulty << 64 | self.cumulative_difficulty_low64 as u128
}
}
epee_object!(
ChainResponse,
start_height: u64,
total_height: u64,
cumulative_difficulty: u64,
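// The Rust field was renamed; it is still (de)serialized
// under the original wire name `cumulative_difficulty`.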
cumulative_difficulty_low64("cumulative_difficulty"): u64,
cumulative_difficulty_top64: u64 = 0_u64,
m_block_ids: ByteArrayVec<32>,
m_block_weights: Vec<u64> as ContainerAsBlob<u64>,

View file

@ -1,33 +0,0 @@
[package]
name = "cuprate-database"
version = "0.0.1"
edition = "2021"
license = "AGPL-3.0-only"
# All Contributors on github
authors=[
"SyntheticBird45 <@someoneelse495495:matrix.org>",
"Boog900"
]
[features]
mdbx = ["dep:libmdbx"]
hse = []
[dependencies]
monero = {workspace = true, features = ["serde"]}
tiny-keccak = { version = "2.0", features = ["sha3"] }
serde = { workspace = true}
thiserror = {workspace = true }
bincode = { workspace = true }
libmdbx = { version = "0.3.1", optional = true }
[build]
linker="clang"
rustflags=[
"-Clink-arg=-fuse-ld=mold",
"-Zcf-protection=full",
"-Zsanitizer=cfi",
"-Crelocation-model=pie",
"-Cstack-protector=all",
]

View file

@ -1,14 +0,0 @@
Copyright (C) 2023 Cuprate Contributors
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.

View file

@ -1,78 +0,0 @@
//! ### Encoding module
//! The encoding module contains a trait that permits compatibility between `monero-rs` consensus encoding/decoding logic and `bincode` traits.
//! The database tables only accept types that implement [`bincode::Encode`] and [`bincode::Decode`], and since we can't implement these on `monero-rs` types directly,
//! we use a wrapper struct `Compat<T>` that permits us to use `monero-rs`'s `consensus_encode`/`consensus_decode` functions under bincode traits.
//! The choice of `bincode` comes from performance measurements at encoding; sometimes `bincode` implementations were 5 times faster than the `monero-rs` impl.
use bincode::{de::read::Reader, enc::write::Writer};
use monero::consensus::{Decodable, Encodable};
use std::{fmt::Debug, io::Read, ops::Deref};
#[derive(Debug, Clone)]
/// A single-tuple struct, used to contains monero-rs types that implement [`monero::consensus::Encodable`] and [`monero::consensus::Decodable`]
pub struct Compat<T: Encodable + Decodable>(pub T);
/// A wrapper around a [`bincode::de::read::Reader`] type. Permit us to use [`std::io::Read`] and feed monero-rs functions with an actual `&[u8]`
pub struct ReaderCompat<'src, R: Reader>(pub &'src mut R);
// Actual implementation of `std::io::read` for `bincode`'s `Reader` types
impl<'src, R: Reader> Read for ReaderCompat<'src, R> {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
self.0
.read(buf)
.map_err(|_| std::io::Error::new(std::io::ErrorKind::Other, "bincode reader Error"))?;
Ok(buf.len())
}
}
// Convenient implementation. `Deref` and `From`
impl<T: Encodable + Decodable> Deref for Compat<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T: Encodable + Decodable> From<T> for Compat<T> {
fn from(value: T) -> Self {
Compat(value)
}
}
// TODO: Investigate specialization optimization
// Implementation of `bincode::Decode` for monero-rs `Decodable` type
impl<T: Encodable + Decodable + Debug> bincode::Decode for Compat<T> {
fn decode<D: bincode::de::Decoder>(
decoder: &mut D,
) -> Result<Self, bincode::error::DecodeError> {
Ok(Compat(
Decodable::consensus_decode(&mut ReaderCompat(decoder.reader()))
.map_err(|_| bincode::error::DecodeError::Other("Monero-rs decoding failed"))?,
))
}
}
// Implementation of `bincode::BorrowDecode` for monero-rs `Decodable` type
impl<'de, T: Encodable + Decodable + Debug> bincode::BorrowDecode<'de> for Compat<T> {
fn borrow_decode<D: bincode::de::BorrowDecoder<'de>>(
decoder: &mut D,
) -> Result<Self, bincode::error::DecodeError> {
Ok(Compat(
Decodable::consensus_decode(&mut ReaderCompat(decoder.borrow_reader()))
.map_err(|_| bincode::error::DecodeError::Other("Monero-rs decoding failed"))?,
))
}
}
// Implementation of `bincode::Encode` for monero-rs `Encodable` type
impl<T: Encodable + Decodable + Debug> bincode::Encode for Compat<T> {
fn encode<E: bincode::enc::Encoder>(
&self,
encoder: &mut E,
) -> Result<(), bincode::error::EncodeError> {
let writer = encoder.writer();
let buf = monero::consensus::serialize(&self.0);
writer.write(&buf)
}
}

View file

@ -1,53 +0,0 @@
//! ### Error module
//! This module contains all error abstractions used by the database crate. By implementing [`From<E>`] for the specific errors of storage engine crates, it lets us
//! handle any type of error that can happen more easily. This module does **NOT** contain interpretation of these errors, as those are defined for the blockchain abstraction. This is another difference
//! from monerod, which interprets these errors directly in its database functions:
//! ```cpp
//! /**
//! * @brief A base class for BlockchainDB exceptions
//! */
//! class DB_EXCEPTION : public std::exception
//! ```
//! see `blockchain_db/blockchain_db.h` in monerod `src/` folder for more details.
#[derive(thiserror::Error, Debug)]
/// `DB_FAILURES` is an enum for backend-agnostic, internal database errors. The `From` trait must be implemented for the specific backend errors to match `DB_FAILURES`.
pub enum DB_FAILURES {
#[error("MDBX returned an error {0}")]
MDBX_Error(#[from] libmdbx::Error),
#[error("\n<DB_FAILURES::EncodingError> Failed to encode some data : `{0}`")]
SerializeIssue(DB_SERIAL),
#[error("\nObject already exist in the database : {0}")]
AlreadyExist(&'static str),
#[error("NotFound? {0}")]
NotFound(&'static str),
#[error("\n<DB_FAILURES::Other> `{0}`")]
Other(&'static str),
#[error(
"\n<DB_FAILURES::FailedToCommit> A transaction tried to commit to the db, but failed."
)]
FailedToCommit,
}
#[derive(thiserror::Error, Debug)]
pub enum DB_SERIAL {
#[error("An object failed to be serialized into bytes. It is likely an issue from monero-rs library. Please report this error on cuprate's github : https://github.com/Cuprate/cuprate/issues")]
ConsensusEncode,
#[error("Bytes failed to be deserialized into the requested object. It is likely an issue from monero-rs library. Please report this error on cuprate's github : https://github.com/Cuprate/cuprate/issues")]
ConsensusDecode(Vec<u8>),
#[error("monero-rs encoding|decoding logic failed : {0}")]
MoneroEncode(#[from] monero::consensus::encode::Error),
#[error("Bincode failed to decode a type from the database : {0}")]
BincodeDecode(#[from] bincode::error::DecodeError),
#[error("Bincode failed to encode a type for the database : {0}")]
BincodeEncode(#[from] bincode::error::EncodeError),
}

View file

@ -1,11 +0,0 @@
/* There is nothing here as no wrapper exist for HSE yet */
/* KVS supported functions :
-------------------------------------
hse_kvs_delete
hse_kvs_get
hse_kvs_name_get
hse_kvs_param_get
hse_kvs_prefix_delete
hse_kvs_put
*/

File diff suppressed because it is too large

View file

@ -1,221 +0,0 @@
// Copyright (C) 2023 Cuprate Contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! The cuprate-db crate implements (as its name suggests) the relations between the blockchain/txpool objects and their databases.
//! `lib.rs` contains all the generics, traits and specifications for the interfaces between the blockchain and a backend-agnostic database.
//! Every other file in this folder implements these traits/methods for a real storage engine.
//!
//! At the moment, the only storage engine available is MDBX.
//! The next storage engine planned is HSE (Heterogeneous-Memory Storage Engine) from Micron.
//!
//! For more information, please consult these docs:
#![deny(unused_attributes)]
#![forbid(unsafe_code)]
#![allow(non_camel_case_types)]
#![deny(clippy::expect_used, clippy::panic)]
#![allow(dead_code, unused_macros)] // temporary
use monero::{util::ringct::RctSig, Block, BlockHeader, Hash};
use std::ops::Range;
use thiserror::Error;
#[cfg(feature = "mdbx")]
pub mod mdbx;
//#[cfg(feature = "hse")]
//pub mod hse;
pub mod encoding;
pub mod error;
pub mod interface;
pub mod table;
pub mod types;
const DEFAULT_BLOCKCHAIN_DATABASE_DIRECTORY: &str = "blockchain";
const DEFAULT_TXPOOL_DATABASE_DIRECTORY: &str = "txpool_mem";
const BINCODE_CONFIG: bincode::config::Configuration<
bincode::config::LittleEndian,
bincode::config::Fixint,
> = bincode::config::standard().with_fixed_int_encoding();
// ------------------------------------------| Database |------------------------------------------
pub mod database {
//! This module contains the Database abstraction trait. Any key/value storage engine needs
//! to fulfil these associated types and functions in order to be usable. This module also contains the
//! Interface struct, which is used by the DB Reactor to interact with the database.
use crate::{
error::DB_FAILURES,
transaction::{Transaction, WriteTransaction},
};
use std::{ops::Deref, path::PathBuf, sync::Arc};
/// The `Database` trait implements all the methods necessary to generate transactions as well as execute specific functions. It also declares generic associated types to identify the
/// different transaction modes (read & write) and its native errors.
pub trait Database<'a> {
type TX: Transaction<'a>;
type TXMut: WriteTransaction<'a>;
type Error: Into<DB_FAILURES>;
// Create a transaction from the database
fn tx(&'a self) -> Result<Self::TX, Self::Error>;
// Create a mutable transaction from the database
fn tx_mut(&'a self) -> Result<Self::TXMut, Self::Error>;
// Open a database from the specified path
fn open(path: PathBuf) -> Result<Self, Self::Error>
where
Self: std::marker::Sized;
// Check if the database is built.
fn check_all_tables_exist(&'a self) -> Result<(), Self::Error>;
// Build the database
fn build(&'a self) -> Result<(), Self::Error>;
}
/// `Interface` is a struct containing a shared pointer to the database and the transaction to be used by `Interface`'s implemented methods.
pub struct Interface<'a, D: Database<'a>> {
pub db: Arc<D>,
pub tx: Option<<D as Database<'a>>::TXMut>,
}
// Convenient implementations for database
impl<'service, D: Database<'service>> Interface<'service, D> {
fn from(db: Arc<D>) -> Result<Self, DB_FAILURES> {
Ok(Self { db, tx: None })
}
fn open(&'service mut self) -> Result<(), DB_FAILURES> {
let tx = self.db.tx_mut().map_err(Into::into)?;
self.tx = Some(tx);
Ok(())
}
}
impl<'service, D: Database<'service>> Deref for Interface<'service, D> {
type Target = <D as Database<'service>>::TXMut;
fn deref(&self) -> &Self::Target {
return self.tx.as_ref().unwrap();
}
}
}
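// A hedged sketch added for illustration (not part of the original interface):
// since every backend error is `Into<DB_FAILURES>`, higher-level code can stay
// fully backend-agnostic.
#[allow(dead_code)]
mod database_usage_sketch {
    use crate::{database::Database, error::DB_FAILURES, table::Table, transaction::Transaction};

    /// Open a read-only transaction on any backend and fetch a single value.
    fn read_only_get<'a, D: Database<'a>, T: Table>(
        db: &'a D,
        key: &T::Key,
    ) -> Result<Option<T::Value>, DB_FAILURES> {
        let tx = db.tx().map_err(Into::into)?;
        tx.get::<T>(key)
    }
}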
// ------------------------------------------| DatabaseTx |------------------------------------------
pub mod transaction {
//! This module contains the abstractions of Transactional Key/Value database functions.
//! Any key/value database/storage engine can be implemented easily for Cuprate as long as
//! these functions or equivalent logic exist for it.
use crate::{
error::DB_FAILURES,
table::{DupTable, Table},
};
// Abstraction of a read-only cursor, for simple tables
#[allow(clippy::type_complexity)]
pub trait Cursor<'t, T: Table> {
fn first(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES>;
fn get_cursor(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES>;
fn last(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES>;
fn next(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES>;
fn prev(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES>;
fn set(&mut self, key: &T::Key) -> Result<Option<T::Value>, DB_FAILURES>;
}
// Abstraction of a read-only cursor with support for duplicated tables. DupCursor inherits Cursor's methods, as
// a duplicated table can be treated as a simple table.
#[allow(clippy::type_complexity)]
pub trait DupCursor<'t, T: DupTable>: Cursor<'t, T> {
fn first_dup(&mut self) -> Result<Option<(T::SubKey, T::Value)>, DB_FAILURES>;
fn get_dup(
&mut self,
key: &T::Key,
subkey: &T::SubKey,
) -> Result<Option<T::Value>, DB_FAILURES>;
fn last_dup(&mut self) -> Result<Option<(T::SubKey, T::Value)>, DB_FAILURES>;
fn next_dup(&mut self) -> Result<Option<(T::Key, (T::SubKey, T::Value))>, DB_FAILURES>;
fn prev_dup(&mut self) -> Result<Option<(T::Key, (T::SubKey, T::Value))>, DB_FAILURES>;
}
// Abstraction of a read-write cursor, for simple tables. WriteCursor inherits Cursor's methods.
pub trait WriteCursor<'t, T: Table>: Cursor<'t, T> {
fn put_cursor(&mut self, key: &T::Key, value: &T::Value) -> Result<(), DB_FAILURES>;
fn del(&mut self) -> Result<(), DB_FAILURES>;
}
// Abstraction of a read-write cursor with support for duplicated tables. DupWriteCursor inherits DupCursor's and WriteCursor's methods.
pub trait DupWriteCursor<'t, T: DupTable>: WriteCursor<'t, T> {
fn put_cursor_dup(
&mut self,
key: &T::Key,
subkey: &T::SubKey,
value: &T::Value,
) -> Result<(), DB_FAILURES>;
/// Delete all duplicate data items associated with the cursor's current key
fn del_nodup(&mut self) -> Result<(), DB_FAILURES>;
}
// Abstraction of a read-only transaction.
pub trait Transaction<'a>: Send + Sync {
type Cursor<T: Table>: Cursor<'a, T>;
type DupCursor<T: DupTable>: DupCursor<'a, T> + Cursor<'a, T>;
fn get<T: Table>(&self, key: &T::Key) -> Result<Option<T::Value>, DB_FAILURES>;
fn commit(self) -> Result<(), DB_FAILURES>;
fn cursor<T: Table>(&self) -> Result<Self::Cursor<T>, DB_FAILURES>;
fn cursor_dup<T: DupTable>(&self) -> Result<Self::DupCursor<T>, DB_FAILURES>;
fn num_entries<T: Table>(&self) -> Result<usize, DB_FAILURES>;
}
// Abstraction of a read-write transaction. WriteTransaction inherits Transaction methods.
pub trait WriteTransaction<'a>: Transaction<'a> {
type WriteCursor<T: Table>: WriteCursor<'a, T>;
type DupWriteCursor<T: DupTable>: DupWriteCursor<'a, T> + DupCursor<'a, T>;
fn put<T: Table>(&self, key: &T::Key, value: &T::Value) -> Result<(), DB_FAILURES>;
fn delete<T: Table>(
&self,
key: &T::Key,
value: &Option<T::Value>,
) -> Result<(), DB_FAILURES>;
fn clear<T: Table>(&self) -> Result<(), DB_FAILURES>;
fn write_cursor<T: Table>(&self) -> Result<Self::WriteCursor<T>, DB_FAILURES>;
fn write_cursor_dup<T: DupTable>(&self) -> Result<Self::DupWriteCursor<T>, DB_FAILURES>;
}
}
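// A hedged sketch added for illustration (not part of the original interface):
// the `Cursor` trait alone is enough to stream a whole table, regardless of
// the storage engine behind it.
#[allow(dead_code)]
mod cursor_usage_sketch {
    use crate::{error::DB_FAILURES, table::Table, transaction::Cursor};

    /// Collect every `(key, value)` pair of a table, front to back.
    fn walk_table<'t, T: Table, C: Cursor<'t, T>>(
        cursor: &mut C,
    ) -> Result<Vec<(T::Key, T::Value)>, DB_FAILURES> {
        let mut entries = Vec::new();
        // `first` positions the cursor at the start; `next` advances until exhaustion.
        let mut current = cursor.first()?;
        while let Some(pair) = current {
            entries.push(pair);
            current = cursor.next()?;
        }
        Ok(entries)
    }
}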

View file

@ -1,474 +0,0 @@
//! ### MDBX implementation
//! This module contains the implementation of all the database traits for the MDBX storage engine.
//! This includes basic transaction methods, cursors and error conversions.
use crate::{
database::Database,
error::{DB_FAILURES, DB_SERIAL},
table::{self, DupTable, Table},
transaction::{Transaction, WriteTransaction},
BINCODE_CONFIG,
};
use libmdbx::{
Cursor, DatabaseFlags, DatabaseKind, Geometry, Mode, PageSize, SyncMode, TableFlags,
TransactionKind, WriteFlags, RO, RW,
};
use std::ops::Range;
// Constant used in mdbx implementation
const MDBX_DEFAULT_SYNC_MODE: SyncMode = SyncMode::Durable;
const MDBX_MAX_MAP_SIZE: usize = 4 * 1024usize.pow(3); // 4GB
const MDBX_GROWTH_STEP: isize = 100 * 1024isize.pow(2); // 100MB
const MDBX_PAGE_SIZE: Option<PageSize> = None;
const MDBX_GEOMETRY: Geometry<Range<usize>> = Geometry {
size: Some(0..MDBX_MAX_MAP_SIZE),
growth_step: Some(MDBX_GROWTH_STEP),
shrink_threshold: None,
page_size: MDBX_PAGE_SIZE,
};
/// [`mdbx_decode`] is a function that deserializes the supplied bytes using the `bincode::decode_from_slice(src, BINCODE_CONFIG)`
/// function. It returns `Err(DB_FAILURES::SerializeIssue(DB_SERIAL::BincodeDecode(err)))` if it failed to decode the value. It is used for clarity purposes.
fn mdbx_decode<T: bincode::Decode>(src: &[u8]) -> Result<(T, usize), DB_FAILURES> {
bincode::decode_from_slice(src, BINCODE_CONFIG)
.map_err(|e| DB_FAILURES::SerializeIssue(DB_SERIAL::BincodeDecode(e)))
}
/// [`mdbx_encode`] is a function that serializes a given value into a vector using the `bincode::encode_to_vec(src, BINCODE_CONFIG)`
/// function. It returns `Err(DB_FAILURES::SerializeIssue(DB_SERIAL::BincodeEncode(err)))` if it failed to encode the value. It is used for clarity purposes.
fn mdbx_encode<T: bincode::Encode>(src: &T) -> Result<Vec<u8>, DB_FAILURES> {
bincode::encode_to_vec(src, BINCODE_CONFIG)
.map_err(|e| DB_FAILURES::SerializeIssue(DB_SERIAL::BincodeEncode(e)))
}
/// [`mdbx_open_table`] is a simple function used for syntax clarity. It tries to open the table and returns a `DB_FAILURES` if it failed.
fn mdbx_open_table<'db, K: TransactionKind, E: DatabaseKind, T: Table>(
tx: &'db libmdbx::Transaction<'db, K, E>,
) -> Result<libmdbx::Table, DB_FAILURES> {
tx.open_table(Some(T::TABLE_NAME))
.map_err(std::convert::Into::<DB_FAILURES>::into)
}
/// [`cursor_pair_decode`] is a function defining a conditional return used in (almost) every cursor function. If a key/value pair actually exists at the cursor,
/// the two values are decoded using the `mdbx_decode` function. It returns `Err(DB_FAILURES::SerializeIssue(DB_SERIAL::BincodeDecode(err)))` if it failed to decode a value.
/// It is used for clarity purposes.
fn cursor_pair_decode<L: bincode::Decode, R: bincode::Decode>(
pair: Option<(Vec<u8>, Vec<u8>)>,
) -> Result<Option<(L, R)>, DB_FAILURES> {
if let Some(pair) = pair {
let decoded_key = mdbx_decode(pair.0.as_slice())?;
let decoded_value = mdbx_decode(pair.1.as_slice())?;
Ok(Some((decoded_key.0, decoded_value.0)))
} else {
Ok(None)
}
}
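// A hedged sketch added for illustration: the two helpers above compose into a
// simple byte-level round trip for any `Encode + Decode` type.
#[test]
fn mdbx_encode_decode_roundtrip() {
    let bytes = mdbx_encode(&42u64).unwrap();
    let (value, _len): (u64, usize) = mdbx_decode(&bytes).unwrap();
    assert_eq!(value, 42);
}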
// Implementation of the database trait with mdbx types
impl<'a, E> Database<'a> for libmdbx::Database<E>
where
E: DatabaseKind,
{
type TX = libmdbx::Transaction<'a, RO, E>;
type TXMut = libmdbx::Transaction<'a, RW, E>;
type Error = libmdbx::Error;
// Open a Read-Only transaction
fn tx(&'a self) -> Result<Self::TX, Self::Error> {
self.begin_ro_txn()
}
// Open a Read-Write transaction
fn tx_mut(&'a self) -> Result<Self::TXMut, Self::Error> {
self.begin_rw_txn()
}
// Open the database with the given path
fn open(path: std::path::PathBuf) -> Result<Self, Self::Error> {
let db: libmdbx::Database<E> = libmdbx::Database::new()
.set_flags(DatabaseFlags::from(Mode::ReadWrite {
sync_mode: MDBX_DEFAULT_SYNC_MODE,
}))
.set_geometry(MDBX_GEOMETRY)
.set_max_readers(32)
.set_max_tables(15)
.open(path.as_path())?;
Ok(db)
}
// Open each table to verify that the database is complete.
fn check_all_tables_exist(&'a self) -> Result<(), Self::Error> {
let ro_tx = self.begin_ro_txn()?;
// ----- BLOCKS -----
ro_tx.open_table(Some(table::blockhash::TABLE_NAME))?;
ro_tx.open_table(Some(table::blockmetadata::TABLE_NAME))?;
ro_tx.open_table(Some(table::blocks::TABLE_NAME))?;
ro_tx.open_table(Some(table::altblock::TABLE_NAME))?;
// ------ TXNs ------
ro_tx.open_table(Some(table::txspruned::TABLE_NAME))?;
ro_tx.open_table(Some(table::txsprunablehash::TABLE_NAME))?;
ro_tx.open_table(Some(table::txsprunabletip::TABLE_NAME))?;
ro_tx.open_table(Some(table::txsprunable::TABLE_NAME))?;
ro_tx.open_table(Some(table::txsoutputs::TABLE_NAME))?;
ro_tx.open_table(Some(table::txsidentifier::TABLE_NAME))?;
// ---- OUTPUTS -----
ro_tx.open_table(Some(table::prerctoutputmetadata::TABLE_NAME))?;
ro_tx.open_table(Some(table::outputmetadata::TABLE_NAME))?;
// ---- SPT KEYS ----
ro_tx.open_table(Some(table::spentkeys::TABLE_NAME))?;
// --- PROPERTIES ---
ro_tx.open_table(Some(table::properties::TABLE_NAME))?;
Ok(())
}
// Construct the tables of the database
fn build(&'a self) -> Result<(), Self::Error> {
let rw_tx = self.begin_rw_txn()?;
// Constructing the tables
// ----- BLOCKS -----
rw_tx.create_table(
Some(table::blockhash::TABLE_NAME),
TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
)?;
rw_tx.create_table(
Some(table::blockmetadata::TABLE_NAME),
TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
)?;
rw_tx.create_table(Some(table::blocks::TABLE_NAME), TableFlags::INTEGER_KEY)?;
rw_tx.create_table(Some(table::altblock::TABLE_NAME), TableFlags::INTEGER_KEY)?;
// ------ TXNs ------
rw_tx.create_table(Some(table::txspruned::TABLE_NAME), TableFlags::INTEGER_KEY)?;
rw_tx.create_table(
Some(table::txsprunable::TABLE_NAME),
TableFlags::INTEGER_KEY,
)?;
rw_tx.create_table(
Some(table::txsprunablehash::TABLE_NAME),
TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
)?;
rw_tx.create_table(
Some(table::txsprunabletip::TABLE_NAME),
TableFlags::INTEGER_KEY,
)?;
rw_tx.create_table(
Some(table::txsoutputs::TABLE_NAME),
TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
)?;
rw_tx.create_table(
Some(table::txsidentifier::TABLE_NAME),
TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
)?;
// ---- OUTPUTS -----
rw_tx.create_table(
Some(table::prerctoutputmetadata::TABLE_NAME),
TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
)?;
rw_tx.create_table(
Some(table::outputmetadata::TABLE_NAME),
TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
)?;
// ---- SPT KEYS ----
rw_tx.create_table(
Some(table::spentkeys::TABLE_NAME),
TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
)?;
// --- PROPERTIES ---
rw_tx.create_table(Some(table::properties::TABLE_NAME), TableFlags::INTEGER_KEY)?;
rw_tx.commit()?;
Ok(())
}
}
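// A hedged usage sketch added for illustration (not part of the original impl):
// open the database at `path` and create the full schema on first run.
#[allow(dead_code)]
fn open_and_prepare<E: DatabaseKind>(path: std::path::PathBuf) -> Result<(), DB_FAILURES> {
    // `open` applies MDBX_GEOMETRY and the default durable sync mode.
    let db: libmdbx::Database<E> = Database::open(path)?;
    if db.check_all_tables_exist().is_err() {
        // At least one table is missing: (re)create the whole schema.
        db.build()?;
    }
    Ok(())
}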
// Implementation of the Cursor trait for mdbx's Cursors
impl<'a, T, R> crate::transaction::Cursor<'a, T> for Cursor<'a, R>
where
T: Table,
R: TransactionKind,
{
fn first(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES> {
let pair = self
.first::<Vec<u8>, Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
cursor_pair_decode(pair)
}
fn get_cursor(
&mut self,
) -> Result<Option<(<T as Table>::Key, <T as Table>::Value)>, DB_FAILURES> {
let pair = self
.get_current::<Vec<u8>, Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
cursor_pair_decode(pair)
}
fn last(&mut self) -> Result<Option<(<T as Table>::Key, <T as Table>::Value)>, DB_FAILURES> {
let pair = self
.last::<Vec<u8>, Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
cursor_pair_decode(pair)
}
fn next(&mut self) -> Result<Option<(<T as Table>::Key, <T as Table>::Value)>, DB_FAILURES> {
let pair = self
.next::<Vec<u8>, Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
cursor_pair_decode(pair)
}
fn prev(&mut self) -> Result<Option<(<T as Table>::Key, <T as Table>::Value)>, DB_FAILURES> {
let pair = self
.prev::<Vec<u8>, Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
cursor_pair_decode(pair)
}
fn set(&mut self, key: &T::Key) -> Result<Option<<T as Table>::Value>, DB_FAILURES> {
let encoded_key = mdbx_encode(key)?;
let value = self
.set::<Vec<u8>>(&encoded_key)
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
if let Some(value) = value {
return Ok(Some(mdbx_decode(value.as_slice())?.0));
}
Ok(None)
}
}
// Implementation of the DupCursor trait for mdbx's Cursors
impl<'t, T, R> crate::transaction::DupCursor<'t, T> for Cursor<'t, R>
where
R: TransactionKind,
T: DupTable,
{
fn first_dup(&mut self) -> Result<Option<(T::SubKey, T::Value)>, DB_FAILURES> {
let value = self
.first_dup::<Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
if let Some(value) = value {
return Ok(Some(mdbx_decode(value.as_slice())?.0));
}
Ok(None)
}
fn get_dup(
&mut self,
key: &T::Key,
subkey: &T::SubKey,
) -> Result<Option<<T>::Value>, DB_FAILURES> {
let (encoded_key, encoded_subkey) = (mdbx_encode(key)?, mdbx_encode(subkey)?);
let value = self
.get_both::<Vec<u8>>(&encoded_key, &encoded_subkey)
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
if let Some(value) = value {
return Ok(Some(mdbx_decode(value.as_slice())?.0));
}
Ok(None)
}
fn last_dup(&mut self) -> Result<Option<(T::SubKey, T::Value)>, DB_FAILURES> {
let value = self
.last_dup::<Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
if let Some(value) = value {
return Ok(Some(mdbx_decode(value.as_slice())?.0));
}
Ok(None)
}
fn next_dup(&mut self) -> Result<Option<(T::Key, (T::SubKey, T::Value))>, DB_FAILURES> {
let pair = self
.next_dup::<Vec<u8>, Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
if let Some(pair) = pair {
let (decoded_key, decoded_value) = (
mdbx_decode(pair.0.as_slice())?,
mdbx_decode(pair.1.as_slice())?,
);
return Ok(Some((decoded_key.0, decoded_value.0)));
}
Ok(None)
}
fn prev_dup(&mut self) -> Result<Option<(T::Key, (T::SubKey, T::Value))>, DB_FAILURES> {
let pair = self
.prev_dup::<Vec<u8>, Vec<u8>>()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
if let Some(pair) = pair {
let (decoded_key, decoded_value) = (
mdbx_decode(pair.0.as_slice())?,
mdbx_decode(pair.1.as_slice())?,
);
return Ok(Some((decoded_key.0, decoded_value.0)));
}
Ok(None)
}
}
// Implementation of the WriteCursor trait for mdbx's Cursors in RW permission
impl<'a, T> crate::transaction::WriteCursor<'a, T> for Cursor<'a, RW>
where
T: Table,
{
fn put_cursor(&mut self, key: &T::Key, value: &T::Value) -> Result<(), DB_FAILURES> {
let (encoded_key, encoded_value) = (mdbx_encode(key)?, mdbx_encode(value)?);
self.put(&encoded_key, &encoded_value, WriteFlags::empty())
.map_err(Into::into)
}
fn del(&mut self) -> Result<(), DB_FAILURES> {
self.del(WriteFlags::empty()).map_err(Into::into)
}
}
// Implementation of the DupWriteCursor trait for mdbx's Cursors in RW permission
impl<'a, T> crate::transaction::DupWriteCursor<'a, T> for Cursor<'a, RW>
where
T: DupTable,
{
fn put_cursor_dup(
&mut self,
key: &<T>::Key,
subkey: &<T as DupTable>::SubKey,
value: &<T>::Value,
) -> Result<(), DB_FAILURES> {
let (encoded_key, mut encoded_subkey, mut encoded_value) =
(mdbx_encode(key)?, mdbx_encode(subkey)?, mdbx_encode(value)?);
encoded_subkey.append(&mut encoded_value);
self.put(
encoded_key.as_slice(),
encoded_subkey.as_slice(),
WriteFlags::empty(),
)
.map_err(Into::into)
}
fn del_nodup(&mut self) -> Result<(), DB_FAILURES> {
self.del(WriteFlags::NO_DUP_DATA).map_err(Into::into)
}
}
// Implementation of the Transaction trait for mdbx's Transactions
impl<'a, E, R: TransactionKind> Transaction<'a> for libmdbx::Transaction<'_, R, E>
where
E: DatabaseKind,
{
type Cursor<T: Table> = Cursor<'a, R>;
type DupCursor<T: DupTable> = Cursor<'a, R>;
fn get<T: Table>(&self, key: &T::Key) -> Result<Option<T::Value>, DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
let encoded_key = mdbx_encode(key)?;
let value = self
.get::<Vec<u8>>(&table, &encoded_key)
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
if let Some(value) = value {
return Ok(Some(mdbx_decode(value.as_slice())?.0));
}
Ok(None)
}
fn cursor<T: Table>(&self) -> Result<Self::Cursor<T>, DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
self.cursor(&table).map_err(Into::into)
}
fn commit(self) -> Result<(), DB_FAILURES> {
let b = self
.commit()
.map_err(std::convert::Into::<DB_FAILURES>::into)?;
if b {
Ok(())
} else {
Err(DB_FAILURES::FailedToCommit)
}
}
fn cursor_dup<T: DupTable>(&self) -> Result<Self::DupCursor<T>, DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
self.cursor(&table).map_err(Into::into)
}
fn num_entries<T: Table>(&self) -> Result<usize, DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
let stat = self.table_stat(&table)?;
Ok(stat.entries())
}
}
// Implementation of the WriteTransaction trait for mdbx's Transactions with RW permissions
impl<'a, E> WriteTransaction<'a> for libmdbx::Transaction<'a, RW, E>
where
E: DatabaseKind,
{
type WriteCursor<T: Table> = Cursor<'a, RW>;
type DupWriteCursor<T: DupTable> = Cursor<'a, RW>;
fn put<T: Table>(&self, key: &T::Key, value: &T::Value) -> Result<(), DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
let (encoded_key, encoded_value) = (mdbx_encode(key)?, mdbx_encode(value)?);
self.put(&table, encoded_key, encoded_value, WriteFlags::empty())
.map_err(Into::into)
}
fn delete<T: Table>(&self, key: &T::Key, value: &Option<T::Value>) -> Result<(), DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
let encoded_key = mdbx_encode(key)?;
if let Some(value) = value {
let encoded_value = mdbx_encode(value)?;
return self
.del(&table, encoded_key, Some(encoded_value.as_slice()))
.map(|_| ())
.map_err(Into::into);
}
self.del(&table, encoded_key, None)
.map(|_| ())
.map_err(Into::into)
}
fn clear<T: Table>(&self) -> Result<(), DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
self.clear_table(&table).map_err(Into::into)
}
fn write_cursor<T: Table>(&self) -> Result<Self::WriteCursor<T>, DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
self.cursor(&table).map_err(Into::into)
}
fn write_cursor_dup<T: DupTable>(&self) -> Result<Self::DupWriteCursor<T>, DB_FAILURES> {
let table = mdbx_open_table::<_, _, T>(self)?;
self.cursor(&table).map_err(Into::into)
}
}
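// A hedged end-to-end sketch added for illustration (key `0` is an arbitrary,
// hypothetical property ID): write then read a `properties` entry purely
// through the generic traits, so no mdbx-specific type appears in the caller.
#[allow(dead_code)]
fn properties_roundtrip<'a, D: Database<'a>>(db: &'a D) -> Result<Option<u32>, DB_FAILURES> {
    let rw_tx = db.tx_mut().map_err(Into::into)?;
    rw_tx.put::<table::properties>(&0, &1)?;
    rw_tx.commit()?;
    // Read the value back with a fresh read-only transaction.
    let ro_tx = db.tx().map_err(Into::into)?;
    ro_tx.get::<table::properties>(&0)
}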

View file

@ -1,181 +0,0 @@
//! ### Table module
//! This module contains the definition of the [`Table`] and [`DupTable`] traits, and the actual tables used in the database.
//! [`DupTable`] is just a marker trait declaring that a table supports DUPSORT|DUPFIXED operations (as of now we don't know the equivalent for HSE).
//! All tables are defined with docs explaining their purpose and the types of their key and data.
//! For more details please look at Cuprate's book : <link to cuprate book>
use crate::{
encoding::Compat,
types::{
/*OutTx,*/ AltBlock, BlockMetadata, /*RctOutkey,*/ OutputMetadata,
TransactionPruned, TxIndex, /*OutAmountIdx,*/ /*KeyImage,*/ TxOutputIdx,
},
};
use bincode::{de::Decode, enc::Encode};
use monero::{blockdata::transaction::KeyImage, Block, Hash};
/// A trait describing a table interaction for the database. It is implemented on an empty struct to specify the table's name and associated types. These associated
/// types are used to simplify the deserialization process.
pub trait Table: Send + Sync + 'static + Clone {
// name of the table
const TABLE_NAME: &'static str;
// Definition of the table's key & value types
type Key: Encode + Decode;
type Value: Encode + Decode;
}
/// A trait implementing a table with duplicated data support.
pub trait DupTable: Table {
// Subkey of the table (prefix of the data)
type SubKey: Encode + Decode;
}
/// This declarative macro declares a new empty struct and implements [`Table`] on it with the specified name and corresponding types.
macro_rules! impl_table {
( $(#[$docs:meta])* $table:ident , $key:ty , $value:ty ) => {
#[derive(Clone)]
$(#[$docs])*
pub(crate) struct $table;
impl Table for $table {
const TABLE_NAME: &'static str = stringify!($table);
type Key = $key;
type Value = $value;
}
};
}
/// This declarative macro extends the original `impl_table!` macro by also implementing the `DupTable` trait.
macro_rules! impl_duptable {
($(#[$docs:meta])* $table:ident, $key:ty, $subkey:ty, $value:ty) => {
impl_table!($(#[$docs])* $table, $key, $value);
impl DupTable for $table {
type SubKey = $subkey;
}
};
}
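// For illustration, a rough sketch of the macro output (shown as a comment so
// it does not clash with the real definitions below):
//
//     impl_table!(blocks, u64, Compat<Block>);
//     // ...expands to:
//     #[derive(Clone)]
//     pub(crate) struct blocks;
//     impl Table for blocks {
//         const TABLE_NAME: &'static str = "blocks";
//         type Key = u64;
//         type Value = Compat<Block>;
//     }
//
// `impl_duptable!` additionally emits an `impl DupTable` block with the given `SubKey`.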
// ------------------------------------------| Tables definition |------------------------------------------
// ----- BLOCKS -----
impl_duptable!(
/// `blockhash` is a table defining a relation between the hash of a block and its height. Its primary use is to quickly find a block's height by its hash.
blockhash,
(),
Compat<Hash>,
u64
);
impl_duptable!(
/// `blockmetadata` stores blocks' metadata alongside their corresponding height. A block's metadata contains its total_coins_generated, weight, long_term_block_weight & cumulative RingCT count.
blockmetadata,
(),
u64,
BlockMetadata
);
impl_table!(
/// `blocks` stores blocks' bodies, keyed by their height. A block's body contains the coinbase transaction and the hashes of its mined transactions.
blocks,
u64,
Compat<Block>
);
/*
impl_table!(
/// `blockhfversion` keeps track of blocks' hard fork versions. If an outdated node continues to run after a hard fork, it needs to know, after updating, which blocks need to be updated.
blockhfversion, u64, u8);
*/
impl_table!(
/// `altblock` is a table that permits the storage of blocks from an alternative chain, which may cause a re-org. These blocks can be fetched by their corresponding hash.
altblock,
Compat<Hash>,
AltBlock
);
// ------- TXNs -------
impl_table!(
/// `txspruned` is a table storing [`TransactionPruned`] (or pruned txs). These can be fetched by the corresponding Transaction ID.
txspruned,
u64,
TransactionPruned
);
impl_table!(
/// `txsprunable` is a table storing the prunable part of transactions (signatures and RctSig), stored as raw bytes. These can be fetched by the corresponding Transaction ID.
txsprunable,
u64,
Vec<u8>
);
impl_duptable!(
/// `txsprunablehash` is a table storing the hashes of transactions' prunable parts. These hashes can be fetched by the corresponding Transaction ID.
txsprunablehash,
u64,
(),
Compat<Hash>
);
impl_table!(
/// `txsprunabletip` is a table used for optimization purposes. It records the height of the block a transaction belongs to, as long as that block is near the tip. These heights can be fetched by the corresponding Transaction ID.
txsprunabletip,
u64,
u64
);
impl_duptable!(
/// `txsoutputs` is a table storing the output indices used in a transaction. These can be fetched by the corresponding Transaction ID.
txsoutputs,
u64,
(),
TxOutputIdx
);
impl_duptable!(
/// `txsidentifier` is a table defining a relation between the hash of a transaction and its transaction indexes. It is primarily used to quickly find a tx's ID by its hash.
txsidentifier,
Compat<Hash>,
(),
TxIndex
);
// ---- OUTPUTS ----
impl_duptable!(
/// `prerctoutputmetadata` is a duplicated table storing pre-RingCT outputs' metadata. The key is the amount of the output, and the subkey is its amount index.
prerctoutputmetadata,
u64,
u64,
OutputMetadata
);
impl_duptable!(
/// `outputmetadata` is a table storing RingCT outputs' metadata. The subkey is the amount index of the output, since the amount is always 0 for RingCT outputs.
outputmetadata,
(),
u64,
OutputMetadata
);
// ---- SPT KEYS ----
impl_duptable!(
/// `spentkeys` is a table storing every KeyImage that has been used in a transaction input. As these KeyImages can't be reused, they need to be marked as spent.
spentkeys,
(),
Compat<KeyImage>,
()
);
// ---- PROPERTIES ----
impl_table!(
/// `properties` is a table storing the database's properties as `u32` key/value pairs.
properties,
u32,
u32
);

View file

@ -1,516 +0,0 @@
//! ### Types module
//! This module contains the definitions and implementations of some of the structures stored in the database.
//! Some of these types are just wrappers for convenience or re-definitions of `monero-rs` database types (see Boog900/monero-rs, "db" branch).
//! Since the database does not use dummy keys, these redefined structs are the same as monerod's without the prefix data used as a key.
//! All these types implement [`bincode::Encode`] and [`bincode::Decode`]. They can store `monero-rs` types in their fields; in this case, these fields
//! use the [`Compat<T>`] wrapper.
use crate::encoding::{Compat, ReaderCompat};
use bincode::{enc::write::Writer, Decode, Encode};
use monero::{
consensus::{encode, Decodable},
util::ringct::{Key, RctSig, RctSigBase, RctSigPrunable, RctType, Signature},
Block, Hash, PublicKey, Transaction, TransactionPrefix, TxIn,
};
// ---- BLOCKS ----
#[derive(Clone, Debug, Encode, Decode)]
/// [`BlockMetadata`] is a struct containing a block's metadata, such as the block's `timestamp`, the `total_coins_generated` at this height, its `weight`, its cumulative
/// difficulty (split in monerod into `diff_lo` and `diff_hi`), the `block_hash`, the cumulative RingCT count (`cum_rct`) and its long term weight (`long_term_block_weight`). monerod's struct equivalent is `mdb_block_info_4`.
/// This struct is used in the [`crate::table::blockmetadata`] table.
pub struct BlockMetadata {
/// Block's timestamp (the time at which it started to be mined)
pub timestamp: u64,
/// Total monero supply, this block included
pub total_coins_generated: u64,
/// Block's weight (sum of all transactions weights)
pub weight: u64,
/// Block's cumulative difficulty. In monerod this field is split into two `u64`s, since C++ doesn't natively support `uint128_t`/`u128`
pub cumulative_difficulty: u128,
/// Block's hash
pub block_hash: Compat<Hash>,
/// Cumulative number of RingCT outputs up to this block
pub cum_rct: u64,
/// Block's long term weight
pub long_term_block_weight: u64,
}
#[derive(Clone, Debug, Encode, Decode)]
/// [`AltBlock`] is a struct containing an alternative `block` (from an alternative chain that may cause a re-org) and its metadata (`height`, `cumulative_weight`,
/// `cumulative_difficulty`, `already_generated_coins`).
/// This struct is used in the [`crate::table::altblock`] table.
pub struct AltBlock {
/// Alternative block's height.
pub height: u64,
/// Cumulative weight median at this block
pub cumulative_weight: u64,
/// Cumulative difficulty
pub cumulative_difficulty: u128,
/// Total generated coins excluding this block's coinbase reward + fees
pub already_generated_coins: u64,
/// Actual block data, with Prefix and Transactions.
/// It is worth noting that monerod's implementation does not contain the block in its struct, but still appends it at the end of the metadata.
pub block: Compat<Block>,
}
// ---- TRANSACTIONS ----
#[derive(Clone, Debug)]
/// [`TransactionPruned`] is, as its name suggests, the pruned part of a transaction, which is the transaction prefix and its RingCT ring.
/// This struct is used in the [`crate::table::txspruned`] table.
pub struct TransactionPruned {
/// The transaction prefix.
pub prefix: TransactionPrefix,
/// The RingCT ring, will only contain the 'sig' field.
pub rct_signatures: RctSig,
}
impl bincode::Decode for TransactionPruned {
fn decode<D: bincode::de::Decoder>(
decoder: &mut D,
) -> Result<Self, bincode::error::DecodeError> {
let mut r = ReaderCompat(decoder.reader());
// We first decode the TransactionPrefix and get the number of inputs/outputs
let prefix: TransactionPrefix = Decodable::consensus_decode(&mut r)
.map_err(|_| bincode::error::DecodeError::Other("Monero-rs decoding failed"))?;
let (inputs, outputs) = (prefix.inputs.len(), prefix.outputs.len());
// Handle the prefix accordingly to its version
match *prefix.version {
// First transaction format, pre-RingCT, so the ring is None
1 => Ok(TransactionPruned {
prefix,
rct_signatures: RctSig { sig: None, p: None },
}),
_ => {
let mut rct_signatures = RctSig { sig: None, p: None };
// No inputs so no RingCT
if inputs == 0 {
return Ok(TransactionPruned {
prefix,
rct_signatures,
});
}
// Otherwise get the RingCT ring for the tx inputs
if let Some(sig) = RctSigBase::consensus_decode(&mut r, inputs, outputs)
.map_err(|_| bincode::error::DecodeError::Other("Monero-rs decoding failed"))?
{
rct_signatures = RctSig {
sig: Some(sig),
p: None,
};
}
// And we return it
Ok(TransactionPruned {
prefix,
rct_signatures,
})
}
}
}
}
impl bincode::Encode for TransactionPruned {
fn encode<E: bincode::enc::Encoder>(
&self,
encoder: &mut E,
) -> Result<(), bincode::error::EncodeError> {
let writer = encoder.writer();
// Encoding the Transaction prefix first
let buf = monero::consensus::serialize(&self.prefix);
writer.write(&buf)?;
match *self.prefix.version {
1 => {} // First transaction format, pre-RingCT, so there is no RingCT ring to add
_ => {
if let Some(sig) = &self.rct_signatures.sig {
// If there is a ring then we append it at the end
let buf = monero::consensus::serialize(sig);
writer.write(&buf)?;
}
}
}
Ok(())
}
}
impl TransactionPruned {
/// Turns a pruned transaction into a normal transaction using the missing prunable data
pub fn into_transaction(self, prunable: &[u8]) -> Result<Transaction, encode::Error> {
let mut r = std::io::Cursor::new(prunable);
match *self.prefix.version {
// Pre-RingCT transactions
1 => {
let signatures: Result<Vec<Vec<Signature>>, encode::Error> = self
.prefix
.inputs
.iter()
.filter_map(|input| match input {
TxIn::ToKey { key_offsets, .. } => {
let sigs: Result<Vec<Signature>, encode::Error> = key_offsets
.iter()
.map(|_| Decodable::consensus_decode(&mut r))
.collect();
Some(sigs)
}
_ => None,
})
.collect();
Ok(Transaction {
prefix: self.prefix,
signatures: signatures?,
rct_signatures: RctSig { sig: None, p: None },
})
}
// Post-RingCT Transactions
_ => {
let signatures = Vec::new();
let mut rct_signatures = RctSig { sig: None, p: None };
if self.prefix.inputs.is_empty() {
return Ok(Transaction {
prefix: self.prefix,
signatures,
rct_signatures: RctSig { sig: None, p: None },
});
}
if let Some(sig) = self.rct_signatures.sig {
let p = {
if sig.rct_type != RctType::Null {
let mixin_size = if !self.prefix.inputs.is_empty() {
match &self.prefix.inputs[0] {
TxIn::ToKey { key_offsets, .. } => key_offsets.len() - 1,
_ => 0,
}
} else {
0
};
RctSigPrunable::consensus_decode(
&mut r,
sig.rct_type,
self.prefix.inputs.len(),
self.prefix.outputs.len(),
mixin_size,
)?
} else {
None
}
};
rct_signatures = RctSig { sig: Some(sig), p };
}
Ok(Transaction {
prefix: self.prefix,
signatures,
rct_signatures,
})
}
}
}
}
pub fn get_transaction_prunable_blob<W: std::io::Write + ?Sized>(
tx: &monero::Transaction,
w: &mut W,
) -> Result<usize, std::io::Error> {
let mut len = 0;
match tx.prefix.version.0 {
1 => {
for sig in tx.signatures.iter() {
for c in sig {
len += monero::consensus::encode::Encodable::consensus_encode(c, w)?;
}
}
}
_ => {
if let Some(sig) = &tx.rct_signatures.sig {
if let Some(p) = &tx.rct_signatures.p {
len += p.consensus_encode(w, sig.rct_type)?;
}
}
}
}
Ok(len)
}
pub fn calculate_prunable_hash(tx: &monero::Transaction, tx_prunable_blob: &[u8]) -> Option<Hash> {
// V1 transactions don't have a prunable hash
if tx.prefix.version.0 == 1 {
return None;
}
// Checking if it's a miner tx
if let TxIn::Gen { height: _ } = &tx.prefix.inputs[0] {
if tx.prefix.inputs.len() == 1 {
// Returning miner tx's empty hash
return Some(Hash::from_slice(&[
0x70, 0xa4, 0x85, 0x5d, 0x04, 0xd8, 0xfa, 0x7b, 0x3b, 0x27, 0x82, 0xca, 0x53, 0xb6,
0x00, 0xe5, 0xc0, 0x03, 0xc7, 0xdc, 0xb2, 0x7d, 0x7e, 0x92, 0x3c, 0x23, 0xf7, 0x86,
0x01, 0x46, 0xd2, 0xc5,
]));
}
};
// Calculating the hash
Some(Hash::new(tx_prunable_blob))
}
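// A hedged usage sketch added for illustration: deriving the stored prunable
// blob and its hash from a full transaction with the two helpers above.
#[allow(dead_code)]
fn prunable_parts(tx: &monero::Transaction) -> Result<(Vec<u8>, Option<Hash>), std::io::Error> {
    let mut blob = Vec::new();
    get_transaction_prunable_blob(tx, &mut blob)?;
    // `None` for v1 transactions, which have no prunable hash; otherwise the
    // hash that `txsprunablehash` would store.
    let hash = calculate_prunable_hash(tx, &blob);
    Ok((blob, hash))
}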
#[derive(Clone, Debug, Encode, Decode)]
/// [`TxIndex`] is a struct used in the [`crate::table::txsidentifier`] table. It stores the `unlock_time` of a transaction, the `height` of the block
/// the transaction belongs to, and the Transaction ID (`tx_id`).
pub struct TxIndex {
/// Transaction ID
pub tx_id: u64,
/// The unlock time of this transaction (the height at which it is unlocked, it is not a timestamp)
pub unlock_time: u64,
/// The height of the block this transaction belongs to
pub height: u64, // TODO USELESS already in txs_prunable_tip
}
#[derive(Clone, Debug, Encode, Decode)]
/// [`TxOutputIdx`] is a single-tuple struct used to contain the indexes (amount and amount indices) of the transactions outputs. It is defined for more clarity on its role.
/// This struct is used in [`crate::table::txsoutputs`] table.
pub struct TxOutputIdx(pub Vec<u64>);
// ---- OUTPUTS ----
#[derive(Clone, Debug, Encode, Decode)]
/// [`RctOutkey`] is a struct containing RingCT metadata and an output ID. It is equivalent to the `output_data_t` struct in monerod.
/// This struct is used in [`crate::table::outputamounts`].
pub struct RctOutkey {
// /// amount_index
//pub amount_index: u64,
/// The output's ID
pub output_id: u64,
/// The output's public key (for spend verification)
pub pubkey: Compat<PublicKey>,
/// The output's unlock time (the height at which it is unlocked, it is not a timestamp)
pub unlock_time: u64,
/// The height of the block which used this output
pub height: u64,
/// The output's amount commitment (for spend verification)
/// For compatibility with pre-RingCT outputs, this field is an option. In fact, monerod distinguishes between the `pre_rct_output_data_t` and `output_data_t` structs like this:
/// ```cpp
/// // This MUST be identical to output_data_t, without the extra rct data at the end
/// struct pre_rct_output_data_t
/// ```
pub commitment: Option<Compat<Key>>,
}
#[derive(Clone, Debug, Encode, Decode)]
/// [`OutputMetadata`] is a struct containing outputs' metadata. It is used in [`crate::table::outputmetadata`]. It is a struct merging the
/// `out_tx_index` tuple with the `output_data_t` structure in monerod, without the output ID.
pub struct OutputMetadata {
pub tx_hash: Compat<Hash>,
pub local_index: u64,
pub pubkey: Option<Compat<PublicKey>>,
pub unlock_time: u64,
pub height: u64,
pub commitment: Option<Compat<Key>>,
}
//#[derive(Clone, Debug, Encode, Decode)]
//// [`OutAmountIdx`] is a struct tuple used to contain the two keys used in [`crate::table::outputamounts`] table.
//// In monerod, the database key is the amount while the *cursor key* (the amount index) is the prefix of the actual data being returned.
//// As we prefer to not use cursors with partial data, we concat these two into a unique key.
//pub struct OutAmountIdx(u64,u64);
// MAYBE NOT FINALLY
//#[derive(Clone, Debug, Encode, Decode)]
// /// [`OutTx`] is a struct containing the hash of the transaction whose output belongs to, and the local index of this output.
// /// This struct is used in [`crate::table::outputinherit`].
/*pub struct OutTx {
/// Output's transaction hash
pub tx_hash: Compat<Hash>,
/// Local index of the output
pub local_index: u64,
}*/
#[cfg(test)]
mod tests {
use monero::Hash;
use super::get_transaction_prunable_blob;
#[test]
fn calculate_tx_prunable_hash() {
let prunable_blob: Vec<u8> = vec![
1, 113, 10, 7, 87, 70, 119, 97, 244, 126, 155, 133, 254, 167, 60, 204, 134, 45, 71, 17,
87, 21, 252, 8, 218, 233, 219, 192, 84, 181, 196, 74, 213, 2, 246, 222, 66, 45, 152,
159, 156, 19, 224, 251, 110, 154, 188, 91, 129, 53, 251, 82, 134, 46, 93, 119, 136, 35,
13, 190, 235, 231, 44, 183, 134, 221, 12, 131, 222, 209, 246, 52, 14, 33, 94, 173, 251,
233, 18, 154, 91, 72, 229, 180, 43, 35, 152, 130, 38, 82, 56, 179, 36, 168, 54, 41, 62,
49, 208, 35, 245, 29, 27, 81, 72, 140, 104, 4, 59, 22, 120, 252, 67, 197, 130, 245, 93,
100, 129, 134, 19, 137, 228, 237, 166, 89, 5, 42, 1, 110, 139, 39, 81, 89, 159, 40,
239, 211, 251, 108, 82, 68, 125, 182, 75, 152, 129, 74, 73, 208, 215, 15, 63, 3, 106,
168, 35, 56, 126, 66, 2, 189, 53, 201, 77, 187, 102, 127, 154, 60, 209, 33, 217, 109,
81, 217, 183, 252, 114, 90, 245, 21, 229, 174, 254, 177, 147, 130, 74, 49, 118, 203,
14, 7, 118, 221, 81, 181, 78, 97, 224, 76, 160, 134, 73, 206, 204, 199, 201, 30, 201,
77, 4, 78, 237, 167, 76, 92, 104, 247, 247, 203, 141, 243, 72, 52, 83, 61, 35, 147,
231, 124, 21, 115, 81, 83, 67, 222, 61, 225, 171, 66, 243, 185, 195, 51, 72, 243, 80,
104, 4, 166, 54, 199, 235, 193, 175, 4, 242, 42, 146, 170, 90, 212, 101, 208, 113, 58,
65, 121, 55, 179, 206, 92, 50, 94, 171, 33, 67, 108, 220, 19, 193, 155, 30, 58, 46, 9,
227, 48, 246, 187, 82, 230, 61, 64, 95, 197, 183, 150, 62, 203, 252, 36, 157, 135, 160,
120, 189, 52, 94, 186, 93, 5, 36, 120, 160, 62, 254, 178, 101, 11, 228, 63, 128, 249,
182, 56, 100, 9, 5, 2, 81, 243, 229, 245, 43, 234, 35, 216, 212, 46, 165, 251, 183,
133, 10, 76, 172, 95, 106, 231, 13, 216, 222, 15, 92, 122, 103, 68, 238, 190, 108, 124,
138, 62, 255, 243, 22, 209, 2, 138, 45, 178, 101, 240, 18, 186, 71, 239, 137, 191, 134,
128, 221, 181, 173, 242, 111, 117, 45, 255, 138, 101, 79, 242, 42, 4, 144, 245, 193,
79, 14, 44, 201, 223, 0, 193, 123, 75, 155, 140, 248, 0, 226, 246, 230, 126, 7, 32,
107, 173, 193, 206, 184, 11, 33, 148, 104, 32, 79, 149, 71, 68, 150, 6, 47, 90, 231,
151, 14, 121, 196, 169, 249, 117, 154, 167, 139, 103, 62, 97, 250, 131, 160, 92, 239,
18, 236, 110, 184, 102, 30, 194, 175, 243, 145, 169, 183, 163, 141, 244, 186, 172, 251,
3, 78, 165, 33, 12, 2, 136, 180, 178, 83, 117, 0, 184, 170, 255, 69, 131, 123, 8, 212,
158, 162, 119, 137, 146, 63, 95, 133, 186, 91, 255, 152, 187, 107, 113, 147, 51, 219,
207, 5, 160, 169, 97, 9, 1, 202, 152, 186, 128, 160, 110, 120, 7, 176, 103, 87, 30,
137, 240, 67, 55, 79, 147, 223, 45, 177, 210, 101, 225, 22, 25, 129, 111, 101, 21, 213,
20, 254, 36, 57, 67, 70, 93, 192, 11, 180, 75, 99, 185, 77, 75, 74, 63, 182, 183, 208,
16, 69, 237, 96, 76, 96, 212, 242, 6, 169, 14, 250, 168, 129, 18, 141, 240, 101, 196,
96, 120, 88, 90, 51, 77, 12, 133, 212, 192, 107, 131, 238, 34, 237, 93, 157, 108, 13,
255, 187, 163, 106, 148, 108, 105, 244, 243, 174, 189, 180, 48, 102, 57, 170, 118, 211,
110, 126, 222, 165, 93, 36, 157, 90, 14, 135, 184, 197, 185, 7, 99, 199, 224, 225, 243,
212, 116, 149, 137, 186, 16, 196, 73, 23, 11, 248, 248, 67, 167, 149, 154, 64, 76, 218,
119, 135, 239, 34, 48, 66, 57, 109, 246, 3, 141, 169, 42, 157, 222, 21, 40, 183, 168,
97, 195, 106, 244, 229, 61, 122, 136, 59, 255, 120, 86, 30, 63, 226, 18, 65, 218, 188,
195, 217, 85, 12, 211, 221, 188, 27, 8, 98, 103, 211, 213, 217, 65, 82, 229, 145, 80,
147, 220, 57, 143, 20, 189, 253, 106, 13, 21, 170, 60, 24, 48, 162, 234, 0, 240, 226,
4, 28, 76, 93, 56, 3, 187, 223, 58, 31, 184, 58, 234, 198, 140, 223, 217, 1, 147, 94,
218, 199, 154, 121, 137, 44, 229, 0, 1, 10, 133, 250, 140, 64, 150, 89, 64, 112, 178,
221, 87, 19, 24, 104, 252, 28, 65, 207, 28, 195, 217, 73, 12, 16, 83, 55, 199, 84, 117,
175, 123, 13, 234, 10, 54, 63, 245, 161, 74, 235, 92, 189, 247, 47, 62, 176, 41, 159,
40, 250, 116, 63, 33, 193, 78, 72, 29, 215, 9, 191, 233, 243, 87, 14, 195, 7, 89, 101,
0, 28, 0, 234, 205, 59, 142, 119, 119, 52, 143, 80, 151, 211, 184, 235, 98, 222, 206,
170, 166, 4, 155, 3, 235, 26, 62, 8, 171, 19, 14, 53, 245, 77, 114, 175, 246, 170, 139,
227, 212, 141, 72, 223, 134, 63, 91, 26, 12, 78, 253, 198, 162, 152, 202, 207, 170,
254, 8, 4, 4, 175, 207, 84, 10, 108, 179, 157, 132, 110, 76, 201, 247, 227, 158, 106,
59, 41, 206, 229, 128, 2, 60, 203, 65, 71, 160, 232, 186, 227, 51, 12, 142, 85, 93, 89,
234, 236, 157, 230, 247, 167, 99, 7, 37, 146, 13, 53, 39, 255, 209, 177, 179, 17, 131,
59, 16, 75, 180, 21, 119, 88, 4, 12, 49, 140, 3, 110, 235, 231, 92, 13, 41, 137, 21,
37, 46, 138, 44, 250, 44, 161, 179, 114, 94, 63, 207, 192, 81, 234, 35, 125, 54, 2,
214, 10, 57, 116, 154, 150, 147, 223, 232, 36, 108, 152, 145, 157, 132, 190, 103, 233,
155, 141, 243, 249, 120, 72, 168, 14, 196, 35, 54, 107, 167, 218, 209, 1, 209, 197,
187, 242, 76, 86, 229, 114, 131, 196, 69, 171, 118, 28, 51, 192, 146, 14, 140, 84, 66,
155, 237, 194, 167, 121, 160, 166, 198, 166, 57, 13, 66, 162, 234, 148, 102, 133, 111,
18, 166, 77, 156, 75, 84, 220, 80, 35, 81, 141, 23, 197, 162, 23, 167, 187, 187, 187,
137, 184, 96, 140, 162, 6, 49, 63, 39, 84, 107, 85, 202, 168, 51, 194, 214, 132, 253,
253, 189, 231, 1, 226, 118, 104, 84, 147, 244, 58, 233, 250, 66, 26, 109, 223, 34, 2,
2, 112, 141, 147, 230, 134, 73, 45, 105, 180, 223, 52, 95, 40, 235, 209, 50, 67, 193,
22, 176, 176, 128, 140, 238, 252, 129, 220, 175, 79, 133, 12, 123, 209, 64, 5, 160, 39,
47, 66, 122, 245, 65, 102, 133, 58, 74, 138, 153, 217, 48, 59, 84, 135, 117, 92, 131,
44, 109, 40, 105, 69, 29, 14, 142, 71, 87, 112, 68, 134, 0, 14, 158, 14, 68, 15, 180,
150, 108, 49, 196, 94, 82, 27, 208, 163, 103, 81, 85, 124, 61, 242, 151, 29, 74, 87,
134, 166, 145, 186, 110, 207, 162, 99, 92, 133, 121, 137, 124, 90, 134, 5, 249, 231,
181, 222, 38, 170, 141, 113, 204, 172, 169, 173, 63, 81, 170, 76,
];
let prunable_hash = Hash::from_slice(&[
0x5c, 0x5e, 0x69, 0xd8, 0xfc, 0x0d, 0x22, 0x6a, 0x60, 0x91, 0x47, 0xda, 0x98, 0x36,
0x06, 0x00, 0xf4, 0xea, 0x49, 0xcc, 0x49, 0x45, 0x2c, 0x5e, 0xf8, 0xba, 0x20, 0xf5,
0x93, 0xd4, 0x80, 0x7d,
]);
assert_eq!(prunable_hash, Hash::new(prunable_blob));
}
#[test]
fn get_prunable_tx_blob() {
let mut pruned_p_blob: Vec<u8> = vec![
2, 0, 1, 2, 0, 16, 180, 149, 135, 30, 237, 231, 156, 1, 132, 145, 47, 182, 251, 153, 1,
225, 234, 94, 219, 134, 23, 222, 210, 30, 208, 213, 12, 136, 158, 5, 159, 148, 15, 206,
144, 2, 132, 63, 135, 22, 151, 8, 134, 8, 178, 26, 194, 111, 101, 192, 45, 104, 18,
115, 178, 194, 100, 255, 227, 10, 253, 165, 53, 62, 81, 67, 202, 169, 56, 99, 42, 146,
175, 137, 85, 195, 27, 151, 2, 0, 3, 207, 28, 183, 85, 7, 58, 81, 205, 53, 9, 191, 141,
209, 70, 58, 30, 38, 225, 212, 68, 14, 4, 216, 204, 101, 163, 66, 156, 101, 143, 255,
196, 134, 0, 3, 254, 66, 159, 187, 180, 41, 78, 252, 85, 255, 154, 55, 239, 222, 199,
37, 159, 210, 71, 186, 188, 46, 134, 181, 236, 221, 173, 43, 93, 50, 138, 249, 221, 44,
1, 34, 67, 111, 182, 199, 28, 219, 56, 238, 143, 188, 101, 103, 205, 139, 160, 144,
226, 34, 92, 235, 221, 75, 38, 7, 104, 255, 108, 208, 1, 184, 169, 2, 9, 1, 84, 62, 77,
107, 119, 22, 148, 222, 6, 128, 128, 211, 14, 242, 200, 16, 137, 239, 249, 55, 59, 16,
193, 192, 140, 240, 153, 129, 228, 115, 222, 247, 41, 128, 219, 241, 249, 198, 214, 75,
31, 82, 225, 1, 158, 183, 226, 220, 126, 228, 191, 211, 79, 43, 220, 95, 124, 109, 14,
162, 170, 68, 37, 62, 21, 139, 182, 246, 152, 36, 156, 172, 197, 20, 145, 85, 9, 8,
106, 237, 112, 63, 189, 172, 145, 49, 234, 68, 152, 200, 241, 0, 37,
];
let prunable_blob: Vec<u8> = vec![
1, 113, 10, 7, 87, 70, 119, 97, 244, 126, 155, 133, 254, 167, 60, 204, 134, 45, 71, 17,
87, 21, 252, 8, 218, 233, 219, 192, 84, 181, 196, 74, 213, 2, 246, 222, 66, 45, 152,
159, 156, 19, 224, 251, 110, 154, 188, 91, 129, 53, 251, 82, 134, 46, 93, 119, 136, 35,
13, 190, 235, 231, 44, 183, 134, 221, 12, 131, 222, 209, 246, 52, 14, 33, 94, 173, 251,
233, 18, 154, 91, 72, 229, 180, 43, 35, 152, 130, 38, 82, 56, 179, 36, 168, 54, 41, 62,
49, 208, 35, 245, 29, 27, 81, 72, 140, 104, 4, 59, 22, 120, 252, 67, 197, 130, 245, 93,
100, 129, 134, 19, 137, 228, 237, 166, 89, 5, 42, 1, 110, 139, 39, 81, 89, 159, 40,
239, 211, 251, 108, 82, 68, 125, 182, 75, 152, 129, 74, 73, 208, 215, 15, 63, 3, 106,
168, 35, 56, 126, 66, 2, 189, 53, 201, 77, 187, 102, 127, 154, 60, 209, 33, 217, 109,
81, 217, 183, 252, 114, 90, 245, 21, 229, 174, 254, 177, 147, 130, 74, 49, 118, 203,
14, 7, 118, 221, 81, 181, 78, 97, 224, 76, 160, 134, 73, 206, 204, 199, 201, 30, 201,
77, 4, 78, 237, 167, 76, 92, 104, 247, 247, 203, 141, 243, 72, 52, 83, 61, 35, 147,
231, 124, 21, 115, 81, 83, 67, 222, 61, 225, 171, 66, 243, 185, 195, 51, 72, 243, 80,
104, 4, 166, 54, 199, 235, 193, 175, 4, 242, 42, 146, 170, 90, 212, 101, 208, 113, 58,
65, 121, 55, 179, 206, 92, 50, 94, 171, 33, 67, 108, 220, 19, 193, 155, 30, 58, 46, 9,
227, 48, 246, 187, 82, 230, 61, 64, 95, 197, 183, 150, 62, 203, 252, 36, 157, 135, 160,
120, 189, 52, 94, 186, 93, 5, 36, 120, 160, 62, 254, 178, 101, 11, 228, 63, 128, 249,
182, 56, 100, 9, 5, 2, 81, 243, 229, 245, 43, 234, 35, 216, 212, 46, 165, 251, 183,
133, 10, 76, 172, 95, 106, 231, 13, 216, 222, 15, 92, 122, 103, 68, 238, 190, 108, 124,
138, 62, 255, 243, 22, 209, 2, 138, 45, 178, 101, 240, 18, 186, 71, 239, 137, 191, 134,
128, 221, 181, 173, 242, 111, 117, 45, 255, 138, 101, 79, 242, 42, 4, 144, 245, 193,
79, 14, 44, 201, 223, 0, 193, 123, 75, 155, 140, 248, 0, 226, 246, 230, 126, 7, 32,
107, 173, 193, 206, 184, 11, 33, 148, 104, 32, 79, 149, 71, 68, 150, 6, 47, 90, 231,
151, 14, 121, 196, 169, 249, 117, 154, 167, 139, 103, 62, 97, 250, 131, 160, 92, 239,
18, 236, 110, 184, 102, 30, 194, 175, 243, 145, 169, 183, 163, 141, 244, 186, 172, 251,
3, 78, 165, 33, 12, 2, 136, 180, 178, 83, 117, 0, 184, 170, 255, 69, 131, 123, 8, 212,
158, 162, 119, 137, 146, 63, 95, 133, 186, 91, 255, 152, 187, 107, 113, 147, 51, 219,
207, 5, 160, 169, 97, 9, 1, 202, 152, 186, 128, 160, 110, 120, 7, 176, 103, 87, 30,
137, 240, 67, 55, 79, 147, 223, 45, 177, 210, 101, 225, 22, 25, 129, 111, 101, 21, 213,
20, 254, 36, 57, 67, 70, 93, 192, 11, 180, 75, 99, 185, 77, 75, 74, 63, 182, 183, 208,
16, 69, 237, 96, 76, 96, 212, 242, 6, 169, 14, 250, 168, 129, 18, 141, 240, 101, 196,
96, 120, 88, 90, 51, 77, 12, 133, 212, 192, 107, 131, 238, 34, 237, 93, 157, 108, 13,
255, 187, 163, 106, 148, 108, 105, 244, 243, 174, 189, 180, 48, 102, 57, 170, 118, 211,
110, 126, 222, 165, 93, 36, 157, 90, 14, 135, 184, 197, 185, 7, 99, 199, 224, 225, 243,
212, 116, 149, 137, 186, 16, 196, 73, 23, 11, 248, 248, 67, 167, 149, 154, 64, 76, 218,
119, 135, 239, 34, 48, 66, 57, 109, 246, 3, 141, 169, 42, 157, 222, 21, 40, 183, 168,
97, 195, 106, 244, 229, 61, 122, 136, 59, 255, 120, 86, 30, 63, 226, 18, 65, 218, 188,
195, 217, 85, 12, 211, 221, 188, 27, 8, 98, 103, 211, 213, 217, 65, 82, 229, 145, 80,
147, 220, 57, 143, 20, 189, 253, 106, 13, 21, 170, 60, 24, 48, 162, 234, 0, 240, 226,
4, 28, 76, 93, 56, 3, 187, 223, 58, 31, 184, 58, 234, 198, 140, 223, 217, 1, 147, 94,
218, 199, 154, 121, 137, 44, 229, 0, 1, 10, 133, 250, 140, 64, 150, 89, 64, 112, 178,
221, 87, 19, 24, 104, 252, 28, 65, 207, 28, 195, 217, 73, 12, 16, 83, 55, 199, 84, 117,
175, 123, 13, 234, 10, 54, 63, 245, 161, 74, 235, 92, 189, 247, 47, 62, 176, 41, 159,
40, 250, 116, 63, 33, 193, 78, 72, 29, 215, 9, 191, 233, 243, 87, 14, 195, 7, 89, 101,
0, 28, 0, 234, 205, 59, 142, 119, 119, 52, 143, 80, 151, 211, 184, 235, 98, 222, 206,
170, 166, 4, 155, 3, 235, 26, 62, 8, 171, 19, 14, 53, 245, 77, 114, 175, 246, 170, 139,
227, 212, 141, 72, 223, 134, 63, 91, 26, 12, 78, 253, 198, 162, 152, 202, 207, 170,
254, 8, 4, 4, 175, 207, 84, 10, 108, 179, 157, 132, 110, 76, 201, 247, 227, 158, 106,
59, 41, 206, 229, 128, 2, 60, 203, 65, 71, 160, 232, 186, 227, 51, 12, 142, 85, 93, 89,
234, 236, 157, 230, 247, 167, 99, 7, 37, 146, 13, 53, 39, 255, 209, 177, 179, 17, 131,
59, 16, 75, 180, 21, 119, 88, 4, 12, 49, 140, 3, 110, 235, 231, 92, 13, 41, 137, 21,
37, 46, 138, 44, 250, 44, 161, 179, 114, 94, 63, 207, 192, 81, 234, 35, 125, 54, 2,
214, 10, 57, 116, 154, 150, 147, 223, 232, 36, 108, 152, 145, 157, 132, 190, 103, 233,
155, 141, 243, 249, 120, 72, 168, 14, 196, 35, 54, 107, 167, 218, 209, 1, 209, 197,
187, 242, 76, 86, 229, 114, 131, 196, 69, 171, 118, 28, 51, 192, 146, 14, 140, 84, 66,
155, 237, 194, 167, 121, 160, 166, 198, 166, 57, 13, 66, 162, 234, 148, 102, 133, 111,
18, 166, 77, 156, 75, 84, 220, 80, 35, 81, 141, 23, 197, 162, 23, 167, 187, 187, 187,
137, 184, 96, 140, 162, 6, 49, 63, 39, 84, 107, 85, 202, 168, 51, 194, 214, 132, 253,
253, 189, 231, 1, 226, 118, 104, 84, 147, 244, 58, 233, 250, 66, 26, 109, 223, 34, 2,
2, 112, 141, 147, 230, 134, 73, 45, 105, 180, 223, 52, 95, 40, 235, 209, 50, 67, 193,
22, 176, 176, 128, 140, 238, 252, 129, 220, 175, 79, 133, 12, 123, 209, 64, 5, 160, 39,
47, 66, 122, 245, 65, 102, 133, 58, 74, 138, 153, 217, 48, 59, 84, 135, 117, 92, 131,
44, 109, 40, 105, 69, 29, 14, 142, 71, 87, 112, 68, 134, 0, 14, 158, 14, 68, 15, 180,
150, 108, 49, 196, 94, 82, 27, 208, 163, 103, 81, 85, 124, 61, 242, 151, 29, 74, 87,
134, 166, 145, 186, 110, 207, 162, 99, 92, 133, 121, 137, 124, 90, 134, 5, 249, 231,
181, 222, 38, 170, 141, 113, 204, 172, 169, 173, 63, 81, 170, 76,
];
let mut tx_blob: Vec<u8> = Vec::new();
tx_blob.append(&mut pruned_p_blob);
tx_blob.append(&mut prunable_blob.clone());
let mut buf = Vec::new();
#[allow(clippy::expect_used)]
let tx: monero::Transaction =
monero::consensus::encode::deserialize(&tx_blob).expect("failed to deserialize");
#[allow(clippy::expect_used)]
get_transaction_prunable_blob(&tx, &mut buf).expect("failed to get the prunable blob");
assert_eq!(prunable_blob, buf);
}
}

View file

@ -84,7 +84,7 @@ impl<Z: NetworkZone> AddressBook<Z> {
let connected_peers = HashMap::new();
let mut peer_save_interval = interval(cfg.peer_save_period);
peer_save_interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
peer_save_interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
Self {
white_list,
@ -236,7 +236,9 @@ impl<Z: NetworkZone> AddressBook<Z> {
) {
tracing::debug!("Received new peer list, length: {}", peer_list.len());
peer_list.retain(|peer| {
peer_list.retain_mut(|peer| {
peer.adr.make_canonical();
if !peer.adr.should_add_to_peer_list() {
false
} else {
@ -259,7 +261,7 @@ impl<Z: NetworkZone> AddressBook<Z> {
) -> Option<ZoneSpecificPeerListEntryBase<Z::Addr>> {
tracing::debug!("Retrieving random white peer");
self.white_list
.take_random_peer(&mut rand::thread_rng(), block_needed)
.take_random_peer(&mut rand::thread_rng(), block_needed, &self.anchor_list)
}
fn take_random_gray_peer(
@ -268,7 +270,7 @@ impl<Z: NetworkZone> AddressBook<Z> {
) -> Option<ZoneSpecificPeerListEntryBase<Z::Addr>> {
tracing::debug!("Retrieving random gray peer");
self.gray_list
.take_random_peer(&mut rand::thread_rng(), block_needed)
.take_random_peer(&mut rand::thread_rng(), block_needed, &HashSet::new())
}
fn get_white_peers(&self, len: usize) -> Vec<ZoneSpecificPeerListEntryBase<Z::Addr>> {

View file

@ -1,8 +1,7 @@
use std::{path::PathBuf, sync::Arc, time::Duration};
use futures::StreamExt;
use tokio::sync::Semaphore;
use tokio::time::interval;
use tokio::{sync::Semaphore, time::interval};
use monero_p2p::handles::HandleBuilder;
use monero_pruning::PruningSeed;

View file

@ -82,5 +82,5 @@ pub async fn init_address_book<Z: NetworkZone>(
let address_book = book::AddressBook::<Z>::new(cfg, white_list, gray_list, Vec::new());
Ok(Buffer::new(address_book, 15))
Ok(Buffer::new(address_book, 150))
}

View file

@ -89,28 +89,42 @@ impl<Z: NetworkZone> PeerList<Z> {
&mut self,
r: &mut R,
block_needed: Option<u64>,
must_keep_peers: &HashSet<Z::Addr>,
) -> Option<ZoneSpecificPeerListEntryBase<Z::Addr>> {
if let Some(needed_height) = block_needed {
let (_, addresses_with_block) = self.pruning_seeds.iter().find(|(seed, _)| {
// TODO: factor in peer blockchain height?
seed.get_next_unpruned_block(needed_height, CRYPTONOTE_MAX_BLOCK_HEIGHT)
.expect("Block needed is higher than max block allowed.")
== needed_height
})?;
let n = r.gen_range(0..addresses_with_block.len());
let peer = addresses_with_block[n];
self.remove_peer(&peer)
} else {
let len = self.len();
if len == 0 {
None
} else {
let n = r.gen_range(0..len);
// Take a random peer and see if it's in the list of must_keep_peers; if it is, try again.
// TODO: improve this
let (&key, _) = self.peers.get_index(n).unwrap();
self.remove_peer(&key)
for _ in 0..3 {
if let Some(needed_height) = block_needed {
let (_, addresses_with_block) = self.pruning_seeds.iter().find(|(seed, _)| {
// TODO: factor in peer blockchain height?
seed.get_next_unpruned_block(needed_height, CRYPTONOTE_MAX_BLOCK_HEIGHT)
.expect("Block needed is higher than max block allowed.")
== needed_height
})?;
let n = r.gen_range(0..addresses_with_block.len());
let peer = addresses_with_block[n];
if must_keep_peers.contains(&peer) {
continue;
}
return self.remove_peer(&peer);
}
let len = self.len();
if len == 0 {
return None;
}
let n = r.gen_range(0..len);
let (&key, _) = self.peers.get_index(n).unwrap();
if !must_keep_peers.contains(&key) {
return self.remove_peer(&key);
}
}
None
}
pub fn get_random_peers<R: Rng>(

View file

@ -87,7 +87,7 @@ fn peer_list_remove_specific_peer() {
let mut peer_list = make_fake_peer_list_with_random_pruning_seeds(100);
let peer = peer_list
.take_random_peer(&mut rand::thread_rng(), None)
.take_random_peer(&mut rand::thread_rng(), None, &HashSet::new())
.unwrap();
let pruning_idxs = peer_list.pruning_seeds;
@ -160,7 +160,7 @@ fn peer_list_get_peer_with_block() {
peer_list.add_new_peer(make_fake_peer(101, Some(384)));
let peer = peer_list
.take_random_peer(&mut r, Some(1))
.take_random_peer(&mut r, Some(1), &HashSet::new())
.expect("We just added a peer with the correct seed");
assert!(peer
@ -173,7 +173,7 @@ fn peer_list_get_peer_with_block() {
fn peer_list_ban_peers() {
let mut peer_list = make_fake_peer_list_with_random_pruning_seeds(100);
let peer = peer_list
.take_random_peer(&mut rand::thread_rng(), None)
.take_random_peer(&mut rand::thread_rng(), None, &HashSet::new())
.unwrap();
let ban_id = peer.adr.ban_id();

View file

@ -0,0 +1,36 @@
[package]
name = "cuprate-p2p"
version = "0.1.0"
edition = "2021"
license = "MIT"
authors = ["Boog900"]
[dependencies]
fixed-bytes = { path = "../../net/fixed-bytes" }
monero-wire = { path = "../../net/monero-wire" }
monero-p2p = { path = "../monero-p2p", features = ["borsh"] }
monero-address-book = { path = "../address-book" }
monero-pruning = { path = "../../pruning" }
cuprate-helper = { path = "../../helper", features = ["asynch"] }
monero-serai = { workspace = true, features = ["std"] }
tower = { workspace = true }
tokio = { workspace = true, features = ["rt"] }
rayon = { workspace = true }
tokio-util = { workspace = true }
tokio-stream = { workspace = true, features = ["sync", "time"] }
futures = { workspace = true, features = ["std"] }
pin-project = { workspace = true }
dashmap = { workspace = true }
thiserror = { workspace = true }
bytes = { workspace = true, features = ["std"] }
indexmap = { workspace = true, features = ["std"] }
rand = { workspace = true, features = ["std", "std_rng"] }
rand_distr = { workspace = true, features = ["std"] }
hex = { workspace = true, features = ["std"] }
tracing = { workspace = true, features = ["std", "attributes"] }
[dev-dependencies]
cuprate-test-utils = { path = "../../test-utils" }

View file

@ -0,0 +1,540 @@
//! # Broadcast Router
//!
//! This module handles broadcasting messages to multiple peers with the [`BroadcastSvc`].
use std::{
future::{ready, Future, Ready},
pin::{pin, Pin},
task::{ready, Context, Poll},
time::Duration,
};
use bytes::Bytes;
use futures::Stream;
use rand::prelude::*;
use rand_distr::Exp;
use tokio::{
sync::{
broadcast::{self, error::TryRecvError},
watch,
},
time::{sleep_until, Instant, Sleep},
};
use tokio_stream::wrappers::WatchStream;
use tower::Service;
use monero_p2p::{client::InternalPeerID, BroadcastMessage, ConnectionDirection, NetworkZone};
use monero_wire::{
common::{BlockCompleteEntry, TransactionBlobs},
protocol::{NewFluffyBlock, NewTransactions},
};
use crate::constants::{
DIFFUSION_FLUSH_AVERAGE_SECONDS_INBOUND, DIFFUSION_FLUSH_AVERAGE_SECONDS_OUTBOUND,
MAX_TXS_IN_BROADCAST_CHANNEL, SOFT_TX_MESSAGE_SIZE_SIZE_LIMIT,
};
/// The configuration for the [`BroadcastSvc`].
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct BroadcastConfig {
/// The average number of seconds between diffusion flushes for outbound connections.
pub diffusion_flush_average_seconds_outbound: Duration,
/// The average number of seconds between diffusion flushes for inbound connections.
pub diffusion_flush_average_seconds_inbound: Duration,
}
impl Default for BroadcastConfig {
fn default() -> Self {
Self {
diffusion_flush_average_seconds_inbound: DIFFUSION_FLUSH_AVERAGE_SECONDS_INBOUND,
diffusion_flush_average_seconds_outbound: DIFFUSION_FLUSH_AVERAGE_SECONDS_OUTBOUND,
}
}
}
/// Initialise the [`BroadcastSvc`] and the functions to produce [`BroadcastMessageStream`]s.
///
/// This function will return in order:
/// - The [`BroadcastSvc`]
/// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **outbound** peers.
/// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **inbound** peers.
pub fn init_broadcast_channels<N: NetworkZone>(
config: BroadcastConfig,
) -> (
BroadcastSvc<N>,
impl Fn(InternalPeerID<N::Addr>) -> BroadcastMessageStream<N> + Clone + Send + 'static,
impl Fn(InternalPeerID<N::Addr>) -> BroadcastMessageStream<N> + Clone + Send + 'static,
) {
let outbound_dist = Exp::new(
1.0 / config
.diffusion_flush_average_seconds_outbound
.as_secs_f64(),
)
.unwrap();
let inbound_dist =
Exp::new(1.0 / config.diffusion_flush_average_seconds_inbound.as_secs_f64()).unwrap();
// Set a default value for init - the broadcast streams given to the peer tasks will only broadcast from this channel when the value
// changes, so no peer will be sent this initial value.
let (block_watch_sender, block_watch_receiver) = watch::channel(NewBlockInfo {
block_bytes: Default::default(),
current_blockchain_height: 0,
});
// create the inbound/outbound broadcast channels.
let (tx_broadcast_channel_outbound_sender, tx_broadcast_channel_outbound_receiver) =
broadcast::channel(MAX_TXS_IN_BROADCAST_CHANNEL);
let (tx_broadcast_channel_inbound_sender, tx_broadcast_channel_inbound_receiver) =
broadcast::channel(MAX_TXS_IN_BROADCAST_CHANNEL);
// create the broadcast service.
let broadcast_svc = BroadcastSvc {
new_block_watch: block_watch_sender,
tx_broadcast_channel_outbound: tx_broadcast_channel_outbound_sender,
tx_broadcast_channel_inbound: tx_broadcast_channel_inbound_sender,
};
// Wrap the tx broadcast channels in a wrapper that impls Clone so the closures later on impl Clone.
let tx_channel_outbound_receiver_wrapped =
CloneableBroadcastReceiver(tx_broadcast_channel_outbound_receiver);
let tx_channel_inbound_receiver_wrapped =
CloneableBroadcastReceiver(tx_broadcast_channel_inbound_receiver);
// Create the closures that will be used to start the broadcast streams that the connection task will hold to listen
// for messages to broadcast.
let block_watch_receiver_cloned = block_watch_receiver.clone();
let outbound_stream_maker = move |addr| {
BroadcastMessageStream::new(
addr,
outbound_dist,
block_watch_receiver_cloned.clone(),
tx_channel_outbound_receiver_wrapped.clone().0,
)
};
let inbound_stream_maker = move |addr| {
BroadcastMessageStream::new(
addr,
inbound_dist,
block_watch_receiver.clone(),
tx_channel_inbound_receiver_wrapped.clone().0,
)
};
(broadcast_svc, outbound_stream_maker, inbound_stream_maker)
}
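For illustration, wiring the channels up the way the tests at the bottom of this file do (TestNetZone is the test-only network zone used there):

let (broadcast_svc, outbound_mkr, inbound_mkr) =
    init_broadcast_channels::<TestNetZone<true, true, true>>(BroadcastConfig::default());
// Each connection task holds its own stream and awaits it for messages to send.
let outbound_stream = outbound_mkr(InternalPeerID::Unknown(1));
let inbound_stream = inbound_mkr(InternalPeerID::Unknown(1));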
/// A request to broadcast some data to all connected peers or a subset, like all inbound or all outbound.
///
/// Only certain P2P messages are supported here: [`NewFluffyBlock`] and [`NewTransactions`]. These are the only
/// P2P messages that make sense to broadcast to multiple peers.
///
/// [`NewBlock`](monero_wire::protocol::NewBlock) has been excluded as monerod has had fluffy blocks for a while and
/// Cuprate sets fluffy blocks as a requirement during handshakes.
pub enum BroadcastRequest<N: NetworkZone> {
/// Broadcast a block to the network. The block will be broadcast as a fluffy block to all peers.
Block {
/// The block.
block_bytes: Bytes,
/// The current chain height - will be 1 more than the block's height.
current_blockchain_height: u64,
},
/// Broadcast transactions to the network. If a [`ConnectionDirection`] is set, the transaction
/// will only be broadcast to that subset of peers; if it is [`None`], the transaction will
/// be broadcast to all peers.
Transaction {
/// The serialised tx to broadcast.
tx_bytes: Bytes,
/// The direction of peers to broadcast this tx to, if [`None`] it will be sent to all peers.
direction: Option<ConnectionDirection>,
/// The peer on this network that told us about the tx.
received_from: Option<InternalPeerID<N::Addr>>,
},
}
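For example, a request to diffuse a transaction to every connected peer, whatever its direction (the byte slice stands in for a real serialised tx, and the test network zone is used as the zone parameter):

let request = BroadcastRequest::<TestNetZone<true, true, true>>::Transaction {
    tx_bytes: Bytes::from_static(&[0x01]),
    // None means the tx goes to both inbound and outbound peers.
    direction: None,
    // Set this to the sending peer's ID to avoid echoing the tx back to them.
    received_from: None,
};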
pub struct BroadcastSvc<N: NetworkZone> {
new_block_watch: watch::Sender<NewBlockInfo>,
tx_broadcast_channel_outbound: broadcast::Sender<BroadcastTxInfo<N>>,
tx_broadcast_channel_inbound: broadcast::Sender<BroadcastTxInfo<N>>,
}
impl<N: NetworkZone> Service<BroadcastRequest<N>> for BroadcastSvc<N> {
type Response = ();
type Error = std::convert::Infallible;
type Future = Ready<Result<(), std::convert::Infallible>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: BroadcastRequest<N>) -> Self::Future {
match req {
BroadcastRequest::Block {
block_bytes,
current_blockchain_height,
} => {
tracing::debug!(
"queuing block at chain height {current_blockchain_height} for broadcast"
);
self.new_block_watch.send_replace(NewBlockInfo {
block_bytes,
current_blockchain_height,
});
}
BroadcastRequest::Transaction {
tx_bytes,
received_from,
direction,
} => {
let new_tx_info = BroadcastTxInfo {
tx: tx_bytes,
received_from,
};
// An error here means _all_ receivers were dropped, which we assume will never happen.
let _ = match direction {
Some(ConnectionDirection::InBound) => {
self.tx_broadcast_channel_inbound.send(new_tx_info)
}
Some(ConnectionDirection::OutBound) => {
self.tx_broadcast_channel_outbound.send(new_tx_info)
}
None => {
let _ = self.tx_broadcast_channel_outbound.send(new_tx_info.clone());
self.tx_broadcast_channel_inbound.send(new_tx_info)
}
};
}
}
ready(Ok(()))
}
}
/// A wrapper type that impls [`Clone`] for [`broadcast::Receiver`].
///
/// The clone impl just calls [`Receiver::resubscribe`](broadcast::Receiver::resubscribe), which isn't _exactly_
/// a clone but is what we need for our use case.
struct CloneableBroadcastReceiver<T: Clone>(broadcast::Receiver<T>);
impl<T: Clone> Clone for CloneableBroadcastReceiver<T> {
fn clone(&self) -> Self {
Self(self.0.resubscribe())
}
}
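A minimal, self-contained demo of why resubscribe is not exactly a clone: the new receiver starts at the channel's current tail, so it never sees messages sent before it was created:

use tokio::sync::broadcast;

fn main() {
    let (tx, rx) = broadcast::channel::<u8>(8);
    tx.send(1).unwrap();
    let mut resubscribed = rx.resubscribe();
    tx.send(2).unwrap();
    // Only the value sent after resubscribing is visible to the new receiver.
    assert_eq!(resubscribed.try_recv().unwrap(), 2);
    assert!(resubscribed.try_recv().is_err());
}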
/// A new block to broadcast.
#[derive(Clone)]
struct NewBlockInfo {
/// The block.
block_bytes: Bytes,
/// The current chain height - will be 1 more than the block's height.
current_blockchain_height: u64,
}
/// A new transaction to broadcast.
#[derive(Clone)]
struct BroadcastTxInfo<N: NetworkZone> {
/// The tx.
tx: Bytes,
/// The peer that sent us this tx (if the peer is on this network).
received_from: Option<InternalPeerID<N::Addr>>,
}
/// A [`Stream`] that returns [`BroadcastMessage`] to broadcast to a peer.
///
/// This is given to the connection task to await on for broadcast messages.
#[pin_project::pin_project]
pub struct BroadcastMessageStream<N: NetworkZone> {
/// The peer that is holding this stream.
addr: InternalPeerID<N::Addr>,
/// The channel where new blocks are received.
#[pin]
new_block_watch: WatchStream<NewBlockInfo>,
/// The channel where txs to broadcast are received.
tx_broadcast_channel: broadcast::Receiver<BroadcastTxInfo<N>>,
/// The distribution to generate the wait time before the next transaction
/// diffusion flush.
diffusion_flush_dist: Exp<f64>,
/// A [`Sleep`] that will awake when it's time to broadcast txs.
#[pin]
next_flush: Sleep,
}
impl<N: NetworkZone> BroadcastMessageStream<N> {
/// Creates a new [`BroadcastMessageStream`]
fn new(
addr: InternalPeerID<N::Addr>,
diffusion_flush_dist: Exp<f64>,
new_block_watch: watch::Receiver<NewBlockInfo>,
tx_broadcast_channel: broadcast::Receiver<BroadcastTxInfo<N>>,
) -> Self {
let next_flush = Instant::now()
+ Duration::from_secs_f64(diffusion_flush_dist.sample(&mut thread_rng()));
Self {
addr,
// We don't want to broadcast the message currently in the queue.
new_block_watch: WatchStream::from_changes(new_block_watch),
tx_broadcast_channel,
diffusion_flush_dist,
next_flush: sleep_until(next_flush),
}
}
}
impl<N: NetworkZone> Stream for BroadcastMessageStream<N> {
type Item = BroadcastMessage;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.project();
// Prioritise blocks.
if let Poll::Ready(res) = this.new_block_watch.poll_next(cx) {
let Some(block) = res else {
return Poll::Ready(None);
};
let block_mes = NewFluffyBlock {
b: BlockCompleteEntry {
pruned: false,
block: block.block_bytes,
// This is a full fluffy block; these values do not need to be set.
block_weight: 0,
txs: TransactionBlobs::None,
},
current_blockchain_height: block.current_blockchain_height,
};
return Poll::Ready(Some(BroadcastMessage::NewFluffyBlock(block_mes)));
}
ready!(this.next_flush.as_mut().poll(cx));
let (txs, more_available) = get_txs_to_broadcast::<N>(this.addr, this.tx_broadcast_channel);
let next_flush = if more_available {
// If there are more txs to broadcast, set the next flush to now so we get woken up straight away.
Instant::now()
} else {
Instant::now()
+ Duration::from_secs_f64(this.diffusion_flush_dist.sample(&mut thread_rng()))
};
let next_flush = sleep_until(next_flush);
this.next_flush.set(next_flush);
if let Some(txs) = txs {
tracing::debug!(
"Diffusion flush timer expired, diffusing {} txs",
txs.txs.len()
);
// no need to poll next_flush as we are ready now.
Poll::Ready(Some(BroadcastMessage::NewTransaction(txs)))
} else {
tracing::trace!("Diffusion flush timer expired but no txs to diffuse");
// poll next_flush now to register the waker with it
// the waker will already be registered with the block broadcast channel.
let _ = this.next_flush.poll(cx);
Poll::Pending
}
}
}
/// Returns a list of new transactions to broadcast and a [`bool`] indicating whether more txs remain in the queue
/// that didn't fit in the current batch.
fn get_txs_to_broadcast<N: NetworkZone>(
addr: &InternalPeerID<N::Addr>,
broadcast_rx: &mut broadcast::Receiver<BroadcastTxInfo<N>>,
) -> (Option<NewTransactions>, bool) {
let mut new_txs = NewTransactions {
txs: vec![],
dandelionpp_fluff: true,
padding: Bytes::new(),
};
let mut total_size = 0;
loop {
match broadcast_rx.try_recv() {
Ok(txs) => {
if txs.received_from.is_some_and(|from| &from == addr) {
// If this peer is the one that sent us the tx, don't broadcast it back to them.
continue;
}
total_size += txs.tx.len();
new_txs.txs.push(txs.tx);
if total_size > SOFT_TX_MESSAGE_SIZE_SIZE_LIMIT {
return (Some(new_txs), true);
}
}
Err(e) => match e {
TryRecvError::Empty | TryRecvError::Closed => {
if new_txs.txs.is_empty() {
return (None, false);
}
return (Some(new_txs), false);
}
TryRecvError::Lagged(lag) => {
tracing::debug!(
"{lag} transaction broadcast messages were missed, continuing."
);
continue;
}
},
}
}
}
#[cfg(test)]
mod tests {
use std::{pin::pin, time::Duration};
use bytes::Bytes;
use futures::StreamExt;
use tokio::time::timeout;
use tower::{Service, ServiceExt};
use cuprate_test_utils::test_netzone::TestNetZone;
use monero_p2p::{client::InternalPeerID, BroadcastMessage, ConnectionDirection};
use super::{init_broadcast_channels, BroadcastConfig, BroadcastRequest};
const TEST_CONFIG: BroadcastConfig = BroadcastConfig {
diffusion_flush_average_seconds_outbound: Duration::from_millis(100),
diffusion_flush_average_seconds_inbound: Duration::from_millis(200),
};
#[tokio::test]
async fn tx_broadcast_direction_correct() {
let (mut brcst, outbound_mkr, inbound_mkr) =
init_broadcast_channels::<TestNetZone<true, true, true>>(TEST_CONFIG);
let mut outbound_stream = pin!(outbound_mkr(InternalPeerID::Unknown(1)));
let mut inbound_stream = pin!(inbound_mkr(InternalPeerID::Unknown(1)));
// Outbound should get 1 and 3, inbound should get 2 and 3.
brcst
.ready()
.await
.unwrap()
.call(BroadcastRequest::Transaction {
tx_bytes: Bytes::from_static(&[1]),
direction: Some(ConnectionDirection::OutBound),
received_from: None,
})
.await
.unwrap();
brcst
.ready()
.await
.unwrap()
.call(BroadcastRequest::Transaction {
tx_bytes: Bytes::from_static(&[2]),
direction: Some(ConnectionDirection::InBound),
received_from: None,
})
.await
.unwrap();
brcst
.ready()
.await
.unwrap()
.call(BroadcastRequest::Transaction {
tx_bytes: Bytes::from_static(&[3]),
direction: None,
received_from: None,
})
.await
.unwrap();
let match_tx = |mes, txs| match mes {
BroadcastMessage::NewTransaction(tx) => assert_eq!(tx.txs.as_slice(), txs),
_ => panic!("Block broadcast?"),
};
let next = outbound_stream.next().await.unwrap();
let txs = [Bytes::from_static(&[1]), Bytes::from_static(&[3])];
match_tx(next, &txs);
let next = inbound_stream.next().await.unwrap();
match_tx(next, &[Bytes::from_static(&[2]), Bytes::from_static(&[3])]);
}
#[tokio::test]
async fn block_broadcast_sent_to_all() {
let (mut brcst, outbound_mkr, inbound_mkr) =
init_broadcast_channels::<TestNetZone<true, true, true>>(TEST_CONFIG);
let mut outbound_stream = pin!(outbound_mkr(InternalPeerID::Unknown(1)));
let mut inbound_stream = pin!(inbound_mkr(InternalPeerID::Unknown(1)));
brcst
.ready()
.await
.unwrap()
.call(BroadcastRequest::Block {
block_bytes: Default::default(),
current_blockchain_height: 0,
})
.await
.unwrap();
let next = outbound_stream.next().await.unwrap();
assert!(matches!(next, BroadcastMessage::NewFluffyBlock(_)));
let next = inbound_stream.next().await.unwrap();
assert!(matches!(next, BroadcastMessage::NewFluffyBlock(_)));
}
#[tokio::test]
async fn tx_broadcast_skipped_for_received_from_peer() {
let (mut brcst, outbound_mkr, inbound_mkr) =
init_broadcast_channels::<TestNetZone<true, true, true>>(TEST_CONFIG);
let mut outbound_stream = pin!(outbound_mkr(InternalPeerID::Unknown(1)));
let mut outbound_stream_from = pin!(outbound_mkr(InternalPeerID::Unknown(0)));
let mut inbound_stream = pin!(inbound_mkr(InternalPeerID::Unknown(1)));
let mut inbound_stream_from = pin!(inbound_mkr(InternalPeerID::Unknown(0)));
brcst
.ready()
.await
.unwrap()
.call(BroadcastRequest::Transaction {
tx_bytes: Bytes::from_static(&[1]),
direction: None,
received_from: Some(InternalPeerID::Unknown(0)),
})
.await
.unwrap();
let match_tx = |mes, txs| match mes {
BroadcastMessage::NewTransaction(tx) => assert_eq!(tx.txs.as_slice(), txs),
_ => panic!("Block broadcast?"),
};
let next = outbound_stream.next().await.unwrap();
let txs = [Bytes::from_static(&[1])];
match_tx(next, &txs);
let next = inbound_stream.next().await.unwrap();
match_tx(next, &[Bytes::from_static(&[1])]);
// Make sure the streams with the same id as the one we said sent the tx do not get the tx to broadcast.
assert!(timeout(
Duration::from_secs(2),
futures::future::select(inbound_stream_from.next(), outbound_stream_from.next())
)
.await
.is_err())
}
}

View file

@ -0,0 +1,148 @@
//! # Client Pool.
//!
//! The [`ClientPool`], is a pool of currently connected peers that can be pulled from.
//! It does _not_ necessarily contain every connected peer as another place could have
//! taken a peer from the pool.
//!
//! When taking peers from the pool they are wrapped in [`ClientPoolDropGuard`], which
//! returns the peer to the pool when it is dropped.
//!
//! Internally the pool is a [`DashMap`], which means care should be taken in `async` code,
//! as it internally uses blocking `RwLock`s.
//!
use std::sync::Arc;
use dashmap::{DashMap, DashSet};
use tokio::sync::mpsc;
use monero_p2p::{
client::{Client, InternalPeerID},
handles::ConnectionHandle,
ConnectionDirection, NetworkZone,
};
pub(crate) mod disconnect_monitor;
mod drop_guard_client;
pub use drop_guard_client::ClientPoolDropGuard;
/// The client pool, which holds currently connected free peers.
///
/// See the [module docs](self) for more.
pub struct ClientPool<N: NetworkZone> {
/// The connected [`Client`]s.
clients: DashMap<InternalPeerID<N::Addr>, Client<N>>,
/// A set of outbound clients. As these maps allow access/mutation from different threads,
/// a peer ID in here does not mean the peer is necessarily in `clients`, as it could have been removed
/// by another thread. However, if the peer is in both here and `clients`, it is definitely
/// an outbound peer.
outbound_clients: DashSet<InternalPeerID<N::Addr>>,
/// A channel to send new peer ids down to monitor for disconnect.
new_connection_tx: mpsc::UnboundedSender<(ConnectionHandle, InternalPeerID<N::Addr>)>,
}
impl<N: NetworkZone> ClientPool<N> {
/// Returns a new [`ClientPool`] wrapped in an [`Arc`].
pub fn new() -> Arc<ClientPool<N>> {
let (tx, rx) = mpsc::unbounded_channel();
let pool = Arc::new(ClientPool {
clients: DashMap::new(),
outbound_clients: DashSet::new(),
new_connection_tx: tx,
});
tokio::spawn(disconnect_monitor::disconnect_monitor(rx, pool.clone()));
pool
}
/// Adds a [`Client`] to the pool; the client must have previously been taken from the
/// pool.
///
/// See [`ClientPool::add_new_client`] to add a [`Client`] which was not taken from the pool before.
///
/// # Panics
/// This function panics if `client` already exists in the pool.
fn add_client(&self, client: Client<N>) {
let handle = client.info.handle.clone();
let id = client.info.id;
// Fast path: if the client is disconnected don't add it to the peer set.
if handle.is_closed() {
return;
}
if client.info.direction == ConnectionDirection::OutBound {
self.outbound_clients.insert(id);
}
let res = self.clients.insert(id, client);
assert!(res.is_none());
// We have to check this again otherwise we could have a race condition where a
// peer is disconnected after the first check, the disconnect monitor tries to remove it,
// and then it is added to the pool.
if handle.is_closed() {
self.remove_client(&id);
}
}
/// Adds a _new_ [`Client`] to the pool; this client should be a new connection, not one already
/// taken from the pool.
///
/// # Panics
/// This function panics if `client` already exists in the pool.
pub fn add_new_client(&self, client: Client<N>) {
self.new_connection_tx
.send((client.info.handle.clone(), client.info.id))
.unwrap();
self.add_client(client);
}
/// Remove a [`Client`] from the pool.
///
/// [`None`] is returned if the client did not exist in the pool.
fn remove_client(&self, peer: &InternalPeerID<N::Addr>) -> Option<Client<N>> {
self.outbound_clients.remove(peer);
self.clients.remove(peer).map(|(_, client)| client)
}
/// Borrows a [`Client`] from the pool.
///
/// The [`Client`] is wrapped in [`ClientPoolDropGuard`] which
/// will return the client to the pool when it's dropped.
///
/// See [`Self::borrow_clients`] for borrowing multiple clients.
pub fn borrow_client(
self: &Arc<Self>,
peer: &InternalPeerID<N::Addr>,
) -> Option<ClientPoolDropGuard<N>> {
self.remove_client(peer).map(|client| ClientPoolDropGuard {
pool: Arc::clone(self),
client: Some(client),
})
}
/// Borrows multiple [`Client`]s from the pool.
///
/// Note that the returned iterator is not guaranteed to contain every peer asked for.
///
/// See [`Self::borrow_client`] for borrowing a single client.
#[allow(private_interfaces)] // TODO: Remove me when 2024 Rust
pub fn borrow_clients<'a, 'b>(
self: &'a Arc<Self>,
peers: &'b [InternalPeerID<N::Addr>],
) -> impl Iterator<Item = ClientPoolDropGuard<N>> + Captures<(&'a (), &'b ())> {
peers.iter().filter_map(|peer| self.borrow_client(peer))
}
}
/// TODO: Remove me when 2024 Rust
///
/// https://rust-lang.github.io/rfcs/3498-lifetime-capture-rules-2024.html#the-captures-trick
trait Captures<U> {}
impl<T: ?Sized, U> Captures<U> for T {}

View file

@ -0,0 +1,72 @@
//! # Disconnect Monitor
//!
//! This module contains the [`disconnect_monitor`] task, which monitors connected peers for disconnection
//! and then removes them from the [`ClientPool`] if they do.
use std::{
future::Future,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use futures::{stream::FuturesUnordered, StreamExt};
use tokio::sync::mpsc;
use tokio_util::sync::WaitForCancellationFutureOwned;
use tracing::instrument;
use monero_p2p::{client::InternalPeerID, handles::ConnectionHandle, NetworkZone};
use super::ClientPool;
/// The disconnect monitor task.
#[instrument(level = "info", skip_all)]
pub async fn disconnect_monitor<N: NetworkZone>(
mut new_connection_rx: mpsc::UnboundedReceiver<(ConnectionHandle, InternalPeerID<N::Addr>)>,
client_pool: Arc<ClientPool<N>>,
) {
tracing::info!("Starting peer disconnect monitor.");
let mut futs: FuturesUnordered<PeerDisconnectFut<N>> = FuturesUnordered::new();
loop {
tokio::select! {
Some((con_handle, peer_id)) = new_connection_rx.recv() => {
tracing::debug!("Monitoring {peer_id} for disconnect");
futs.push(PeerDisconnectFut {
closed_fut: con_handle.closed(),
peer_id: Some(peer_id),
});
}
Some(peer_id) = futs.next() => {
tracing::debug!("{peer_id} has disconnected, removing from client pool.");
client_pool.remove_client(&peer_id);
}
else => {
tracing::info!("Peer disconnect monitor shutting down.");
return;
}
}
}
}
/// A [`Future`] that resolves when a peer disconnects.
#[pin_project::pin_project]
pub(crate) struct PeerDisconnectFut<N: NetworkZone> {
/// The inner [`Future`] that resolves when a peer disconnects.
#[pin]
pub(crate) closed_fut: WaitForCancellationFutureOwned,
/// The peer's ID.
pub(crate) peer_id: Option<InternalPeerID<N::Addr>>,
}
impl<N: NetworkZone> Future for PeerDisconnectFut<N> {
type Output = InternalPeerID<N::Addr>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
this.closed_fut
.poll(cx)
.map(|_| this.peer_id.take().unwrap())
}
}

View file

@ -0,0 +1,41 @@
use std::{
ops::{Deref, DerefMut},
sync::Arc,
};
use monero_p2p::{client::Client, NetworkZone};
use crate::client_pool::ClientPool;
/// A wrapper around [`Client`] which returns the client to the [`ClientPool`] when dropped.
pub struct ClientPoolDropGuard<N: NetworkZone> {
/// The [`ClientPool`] to return the peer to.
pub(super) pool: Arc<ClientPool<N>>,
/// The [`Client`].
///
/// This is set to [`Some`] when this guard is created, then
/// [`take`](Option::take)n and returned to the pool when dropped.
pub(super) client: Option<Client<N>>,
}
impl<N: NetworkZone> Deref for ClientPoolDropGuard<N> {
type Target = Client<N>;
fn deref(&self) -> &Self::Target {
self.client.as_ref().unwrap()
}
}
impl<N: NetworkZone> DerefMut for ClientPoolDropGuard<N> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.client.as_mut().unwrap()
}
}
impl<N: NetworkZone> Drop for ClientPoolDropGuard<N> {
fn drop(&mut self) {
let client = self.client.take().unwrap();
self.pool.add_client(client);
}
}
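The same return-on-drop pattern in a minimal, self-contained form, with a plain Mutex<Vec<_>> standing in for the DashMap-backed pool:

use std::sync::{Arc, Mutex};

struct Pool(Arc<Mutex<Vec<u32>>>);

struct Guard {
    pool: Arc<Mutex<Vec<u32>>>,
    // Held in an Option so Drop can move the value back out.
    item: Option<u32>,
}

impl Drop for Guard {
    fn drop(&mut self) {
        self.pool.lock().unwrap().push(self.item.take().unwrap());
    }
}

impl Pool {
    fn borrow_item(&self) -> Option<Guard> {
        self.0.lock().unwrap().pop().map(|item| Guard {
            pool: Arc::clone(&self.0),
            item: Some(item),
        })
    }
}

fn main() {
    let pool = Pool(Arc::new(Mutex::new(vec![1])));
    {
        let guard = pool.borrow_item().unwrap();
        assert_eq!(guard.item, Some(1));
        // While borrowed, the item is out of the pool.
        assert!(pool.borrow_item().is_none());
    } // guard dropped here: the item is pushed back into the pool.
    assert!(pool.borrow_item().is_some());
}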

View file

@ -0,0 +1,12 @@
/// P2P config.
#[derive(Clone, Debug)]
pub struct P2PConfig {
/// The number of outbound connections to make and try to keep.
pub outbound_connections: usize,
/// The number of extra connections we can make if we are under load from the rest of Cuprate.
pub extra_outbound_connections: usize,
/// The percentage of outbound peers that should be gray, i.e. never connected to before.
///
/// Only values 0..=1 are valid.
pub gray_peers_percent: f64,
}
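For illustration, a config that keeps 12 outbound connections, allows 8 extra under load, and points 70% of new outbound connections at gray peers (the numbers are made up for the example, not crate defaults):

let config = P2PConfig {
    outbound_connections: 12,
    extra_outbound_connections: 8,
    gray_peers_percent: 0.7,
};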

View file

@ -0,0 +1,291 @@
//! Outbound Connection Maintainer.
//!
//! This module handles maintaining the number of outbound connections defined in the [`P2PConfig`].
//! It also handles making extra connections when the peer set is under load or when we need data that
//! no connected peer has.
use std::sync::Arc;
use rand::{distributions::Bernoulli, prelude::*};
use tokio::{
sync::{mpsc, OwnedSemaphorePermit, Semaphore},
task::JoinSet,
time::{sleep, timeout},
};
use tower::{Service, ServiceExt};
use tracing::instrument;
use monero_p2p::{
client::{Client, ConnectRequest, HandshakeError},
services::{AddressBookRequest, AddressBookResponse},
AddressBook, NetworkZone,
};
use crate::{
client_pool::ClientPool,
config::P2PConfig,
constants::{HANDSHAKE_TIMEOUT, MAX_SEED_CONNECTIONS, OUTBOUND_CONNECTION_ATTEMPT_TIMEOUT},
};
enum OutboundConnectorError {
MaxConnections,
FailedToConnectToSeeds,
NoAvailablePeers,
}
/// A request from the peer set to make an outbound connection.
///
/// This will only be sent when the peer set is under load from the rest of Cuprate or the peer
/// set needs specific data that none of the currently connected peers have.
pub struct MakeConnectionRequest {
/// The block needed that no connected peers have due to pruning.
block_needed: Option<u64>,
}
/// The outbound connection count keeper.
///
/// This handles maintaining a minimum number of connections and making extra connections when needed, up to a maximum.
pub struct OutboundConnectionKeeper<N: NetworkZone, A, C> {
/// The pool of currently connected peers.
pub client_pool: Arc<ClientPool<N>>,
/// The channel that tells us to make new _extra_ outbound connections.
pub make_connection_rx: mpsc::Receiver<MakeConnectionRequest>,
/// The address book service
pub address_book_svc: A,
/// The service to connect to a specific peer.
pub connector_svc: C,
/// A semaphore to keep the amount of outbound peers constant.
pub outbound_semaphore: Arc<Semaphore>,
/// The number of peers we connected to because we needed more peers. If the `outbound_semaphore`
/// is full and we need to connect to more peers, for blocks or because not enough peers are ready,
/// we add a permit to the semaphore and keep track here, up to a value in config.
pub extra_peers: usize,
/// The p2p config.
pub config: P2PConfig,
/// The [`Bernoulli`] distribution, when sampled will return true if we should connect to a gray peer or
/// false if we should connect to a white peer.
///
/// This is weighted to the percentage given in `config`.
pub peer_type_gen: Bernoulli,
}
impl<N, A, C> OutboundConnectionKeeper<N, A, C>
where
N: NetworkZone,
A: AddressBook<N>,
C: Service<ConnectRequest<N>, Response = Client<N>, Error = HandshakeError>,
C::Future: Send + 'static,
{
pub fn new(
config: P2PConfig,
client_pool: Arc<ClientPool<N>>,
make_connection_rx: mpsc::Receiver<MakeConnectionRequest>,
address_book_svc: A,
connector_svc: C,
) -> Self {
let peer_type_gen = Bernoulli::new(config.gray_peers_percent)
.expect("Gray peer percent is incorrect should be 0..=1");
Self {
client_pool,
make_connection_rx,
address_book_svc,
connector_svc,
outbound_semaphore: Arc::new(Semaphore::new(config.outbound_connections)),
extra_peers: 0,
config,
peer_type_gen,
}
}
/// Connects to random seeds to get peers and immediately disconnects.
#[instrument(level = "info", skip(self))]
async fn connect_to_random_seeds(&mut self) -> Result<(), OutboundConnectorError> {
let seeds = N::SEEDS.choose_multiple(&mut thread_rng(), MAX_SEED_CONNECTIONS);
if seeds.len() == 0 {
panic!("No seed nodes available to get peers from");
}
// This isn't really needed here to limit connections, as the seed nodes will be dropped once we have got
// peers from them.
let semaphore = Arc::new(Semaphore::new(seeds.len()));
let mut allowed_errors = seeds.len();
let mut handshake_futs = JoinSet::new();
for seed in seeds {
tracing::info!("Getting peers from seed node: {}", seed);
let fut = timeout(
HANDSHAKE_TIMEOUT,
self.connector_svc
.ready()
.await
.expect("Connector had an error in `poll_ready`")
.call(ConnectRequest {
addr: *seed,
permit: semaphore
.clone()
.try_acquire_owned()
.expect("This must have enough permits as we just set the amount."),
}),
);
// Spawn the handshake on a separate task with a timeout, so we don't get stuck connecting to a peer.
handshake_futs.spawn(fut);
}
while let Some(res) = handshake_futs.join_next().await {
if matches!(res, Err(_) | Ok(Err(_)) | Ok(Ok(Err(_)))) {
allowed_errors -= 1;
}
}
if allowed_errors == 0 {
Err(OutboundConnectorError::FailedToConnectToSeeds)
} else {
Ok(())
}
}
/// Connects to a given outbound peer.
#[instrument(level = "info", skip(self, permit), fields(%addr))]
async fn connect_to_outbound_peer(&mut self, permit: OwnedSemaphorePermit, addr: N::Addr) {
let client_pool = self.client_pool.clone();
let connection_fut = self
.connector_svc
.ready()
.await
.expect("Connector had an error in `poll_ready`")
.call(ConnectRequest { addr, permit });
tokio::spawn(async move {
if let Ok(Ok(peer)) = timeout(HANDSHAKE_TIMEOUT, connection_fut).await {
client_pool.add_new_client(peer);
}
});
}
/// Handles a request from the peer set for more peers.
async fn handle_peer_request(
&mut self,
req: &MakeConnectionRequest,
) -> Result<(), OutboundConnectorError> {
// try to get a permit.
let permit = self
.outbound_semaphore
.clone()
.try_acquire_owned()
.or_else(|_| {
// if we can't get a permit add one if we are below the max number of connections.
if self.extra_peers >= self.config.extra_outbound_connections {
// If we can't add a permit return an error.
Err(OutboundConnectorError::MaxConnections)
} else {
self.outbound_semaphore.add_permits(1);
self.extra_peers += 1;
Ok(self.outbound_semaphore.clone().try_acquire_owned().unwrap())
}
})?;
// try to get a random peer on any network zone from the address book.
let peer = self
.address_book_svc
.ready()
.await
.expect("Error in address book!")
.call(AddressBookRequest::TakeRandomPeer {
height: req.block_needed,
})
.await;
match peer {
Err(_) => {
// TODO: We should probably send peer requests to our connected peers rather than go to seeds.
tracing::warn!("No peers in address book which are available and have the data we need. Getting peers from seed nodes.");
self.connect_to_random_seeds().await?;
Err(OutboundConnectorError::NoAvailablePeers)
}
Ok(AddressBookResponse::Peer(peer)) => {
self.connect_to_outbound_peer(permit, peer.adr).await;
Ok(())
}
Ok(_) => panic!("peer list sent incorrect response!"),
}
}
/// Handles a free permit, by either connecting to a new peer or by removing a permit if we are above the
/// minimum number of outbound connections.
#[instrument(level = "debug", skip(self, permit))]
async fn handle_free_permit(
&mut self,
permit: OwnedSemaphorePermit,
) -> Result<(), OutboundConnectorError> {
if self.extra_peers > 0 {
tracing::debug!(
"Permit available but we are over the minimum number of peers, forgetting permit."
);
permit.forget();
self.extra_peers -= 1;
return Ok(());
}
tracing::debug!("Permit available, making outbound connection.");
let req = if self.peer_type_gen.sample(&mut thread_rng()) {
AddressBookRequest::TakeRandomGrayPeer { height: None }
} else {
// This will try white peers first then gray.
AddressBookRequest::TakeRandomPeer { height: None }
};
let Ok(AddressBookResponse::Peer(peer)) = self
.address_book_svc
.ready()
.await
.expect("Error in address book!")
.call(req)
.await
else {
tracing::warn!("No peers in peer list to make connection to.");
self.connect_to_random_seeds().await?;
return Err(OutboundConnectorError::NoAvailablePeers);
};
self.connect_to_outbound_peer(permit, peer.adr).await;
Ok(())
}
/// Runs the outbound connection count keeper.
pub async fn run(mut self) {
tracing::info!(
"Starting outbound connection maintainer, target outbound connections: {}",
self.config.outbound_connections
);
loop {
tokio::select! {
biased;
peer_req = self.make_connection_rx.recv() => {
let Some(peer_req) = peer_req else {
tracing::info!("Shutting down outbound connector, make connection channel closed.");
return;
};
// We can't really do much about errors in this function.
let _ = self.handle_peer_request(&peer_req).await;
},
// This future is not cancellation safe, as you would lose your space in the queue, but since we are the only place
// that actually acquires permits, that should be ok.
Ok(permit) = self.outbound_semaphore.clone().acquire_owned() => {
if self.handle_free_permit(permit).await.is_err() {
// If we got an error then we still have a permit free, so to prevent this from just looping
// uncontrollably, add a timeout.
sleep(OUTBOUND_CONNECTION_ATTEMPT_TIMEOUT).await;
}
}
}
}
}
}
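A small sketch of the gray/white coin flip used in handle_free_permit: a Bernoulli distribution weighted by gray_peers_percent returns true with that probability:

use rand::{distributions::Bernoulli, prelude::*};

fn main() {
    // 70% of free permits go to gray (never-connected) peers.
    let peer_type_gen = Bernoulli::new(0.7).unwrap();
    let connect_to_gray = peer_type_gen.sample(&mut thread_rng());
    println!(
        "next connection: {} peer",
        if connect_to_gray { "gray" } else { "white" }
    );
}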

View file

@ -0,0 +1,41 @@
use std::time::Duration;
/// The timeout we set on handshakes.
pub(crate) const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(30);
/// The maximum number of connections to make to seed nodes when we need peers.
pub(crate) const MAX_SEED_CONNECTIONS: usize = 3;
/// The timeout for when we fail to find a peer to connect to.
pub(crate) const OUTBOUND_CONNECTION_ATTEMPT_TIMEOUT: Duration = Duration::from_secs(5);
/// The duration of a short ban.
pub(crate) const SHORT_BAN: Duration = Duration::from_secs(60 * 10);
/// The default amount of time between inbound diffusion flushes.
pub(crate) const DIFFUSION_FLUSH_AVERAGE_SECONDS_INBOUND: Duration = Duration::from_secs(5);
/// The default amount of time between outbound diffusion flushes.
pub(crate) const DIFFUSION_FLUSH_AVERAGE_SECONDS_OUTBOUND: Duration = Duration::from_millis(2500);
/// The size limit on [`NewTransactions`](monero_wire::protocol::NewTransactions) messages that we create.
pub(crate) const SOFT_TX_MESSAGE_SIZE_SIZE_LIMIT: usize = 10 * 1024 * 1024;
/// The maximum number of transactions in the broadcast queue. When this value is hit, old transactions will be dropped from
/// the queue.
///
/// Because of internal implementation details this limit is _always_ fully used, i.e. a transaction will not be dropped until
/// 50 more transactions have been added to the queue after it.
pub(crate) const MAX_TXS_IN_BROADCAST_CHANNEL: usize = 50;
#[cfg(test)]
mod tests {
use super::*;
/// Outbound diffusion flushes should be shorter than
/// inbound ones as we control these connections.
#[test]
fn outbound_diffusion_flush_shorter_than_inbound() {
assert!(DIFFUSION_FLUSH_AVERAGE_SECONDS_OUTBOUND < DIFFUSION_FLUSH_AVERAGE_SECONDS_INBOUND);
}
}
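A self-contained sketch of the queueing behaviour described above, assuming tokio's broadcast-channel semantics: once more than capacity messages are queued, a receiver first learns how far it lagged, then resumes from the oldest message still buffered:

use tokio::sync::broadcast::{self, error::TryRecvError};

fn main() {
    let (tx, mut rx) = broadcast::channel::<u32>(4);
    for i in 0..8 {
        tx.send(i).unwrap();
    }
    // The 4 oldest messages were dropped; the receiver is told it lagged.
    assert!(matches!(rx.try_recv(), Err(TryRecvError::Lagged(4))));
    // It then continues from the oldest message still in the buffer.
    assert_eq!(rx.try_recv().unwrap(), 4);
}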

View file

@ -0,0 +1,17 @@
//! Cuprate's P2P Crate.
//!
//! This crate contains a [`ClientPool`](client_pool::ClientPool) which holds connected peers on a single [`NetworkZone`](monero_p2p::NetworkZone).
//!
//! This crate also contains the different routing methods that control how messages should be sent, i.e. broadcast to all,
//! or send to a single peer.
//!
#![allow(dead_code)]
mod broadcast;
pub mod client_pool;
pub mod config;
pub mod connection_maintainer;
mod constants;
mod sync_states;
pub use config::P2PConfig;

View file

@ -0,0 +1,427 @@
//! # Sync States
//!
//! This module contains a [`PeerSyncSvc`], which keeps track of the claimed chain states of connected peers.
//! This allows checking if we are behind and getting a list of peers who claim they are ahead.
use std::{
cmp::Ordering,
collections::{BTreeMap, HashMap, HashSet},
future::{ready, Ready},
task::{Context, Poll},
};
use futures::{stream::FuturesUnordered, StreamExt};
use tokio::sync::watch;
use tower::Service;
use monero_p2p::{
client::InternalPeerID,
handles::ConnectionHandle,
services::{PeerSyncRequest, PeerSyncResponse},
NetworkZone,
};
use monero_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT};
use monero_wire::CoreSyncData;
use crate::{client_pool::disconnect_monitor::PeerDisconnectFut, constants::SHORT_BAN};
/// The highest claimed sync info from our connected peers.
#[derive(Debug)]
pub struct NewSyncInfo {
/// The peer's chain height.
chain_height: u64,
/// The peer's top block's hash.
top_hash: [u8; 32],
/// The peer's cumulative difficulty.
cumulative_difficulty: u128,
}
/// A service that keeps track of our peers' blockchains.
///
/// This is the service that handles:
/// 1. Finding out if we need to sync
/// 1. Giving the peers that should be synced _from_ to the requester
pub struct PeerSyncSvc<N: NetworkZone> {
/// A map of cumulative difficulties to peers.
cumulative_difficulties: BTreeMap<u128, HashSet<InternalPeerID<N::Addr>>>,
/// A map of peers to cumulative difficulties.
peers: HashMap<InternalPeerID<N::Addr>, (u128, PruningSeed)>,
/// A watch channel for *a* top synced peer info.
new_height_watcher: watch::Sender<NewSyncInfo>,
/// The handle to the peer that has data in `new_height_watcher`.
last_peer_in_watcher_handle: Option<ConnectionHandle>,
/// A [`FuturesUnordered`] that resolves when a peer disconnects.
closed_connections: FuturesUnordered<PeerDisconnectFut<N>>,
}
impl<N: NetworkZone> PeerSyncSvc<N> {
/// Creates a new [`PeerSyncSvc`] with a [`Receiver`](watch::Receiver) that will be updated with
/// the highest seen sync data; this makes no guarantee about which peer will be chosen in case of a tie.
pub fn new() -> (Self, watch::Receiver<NewSyncInfo>) {
let (watch_tx, mut watch_rx) = watch::channel(NewSyncInfo {
chain_height: 0,
top_hash: [0; 32],
cumulative_difficulty: 0,
});
watch_rx.mark_unchanged();
(
Self {
cumulative_difficulties: BTreeMap::new(),
peers: HashMap::new(),
new_height_watcher: watch_tx,
last_peer_in_watcher_handle: None,
closed_connections: FuturesUnordered::new(),
},
watch_rx,
)
}
/// This function checks if any peers have disconnected, removing them if they have.
fn poll_disconnected(&mut self, cx: &mut Context<'_>) {
while let Poll::Ready(Some(peer_id)) = self.closed_connections.poll_next_unpin(cx) {
tracing::trace!("Peer {peer_id} disconnected, removing from peers sync info service.");
let (peer_cum_diff, _) = self.peers.remove(&peer_id).unwrap();
let cum_diff_peers = self
.cumulative_difficulties
.get_mut(&peer_cum_diff)
.unwrap();
cum_diff_peers.remove(&peer_id);
if cum_diff_peers.is_empty() {
// If this was the last peer remove the whole entry for this cumulative difficulty.
self.cumulative_difficulties.remove(&peer_cum_diff);
}
}
}
/// Returns a list of peers that claim to have a higher cumulative difficulty than `current_cum_diff`.
fn peers_to_sync_from(
&self,
current_cum_diff: u128,
block_needed: Option<u64>,
) -> Vec<InternalPeerID<N::Addr>> {
self.cumulative_difficulties
.range((current_cum_diff + 1)..)
.flat_map(|(_, peers)| peers)
.filter(|peer| {
if let Some(block_needed) = block_needed {
// We just use CRYPTONOTE_MAX_BLOCK_HEIGHT as the blockchain height; this only means
// we don't take into account the tip blocks, which are never pruned.
self.peers
.get(peer)
.unwrap()
.1
.has_full_block(block_needed, CRYPTONOTE_MAX_BLOCK_HEIGHT)
} else {
true
}
})
.copied()
.collect()
}
/// Updates a peer's sync state.
fn update_peer_sync_info(
&mut self,
peer_id: InternalPeerID<N::Addr>,
handle: ConnectionHandle,
core_sync_data: CoreSyncData,
) -> Result<(), tower::BoxError> {
tracing::trace!(
"Received new core sync data from peer, top hash: {}",
hex::encode(core_sync_data.top_id)
);
let new_cumulative_difficulty = core_sync_data.cumulative_difficulty();
if let Some((old_cum_diff, _)) = self.peers.get_mut(&peer_id) {
match (*old_cum_diff).cmp(&new_cumulative_difficulty) {
Ordering::Equal => {
// If the cumulative difficulty of the peer's chain hasn't changed then there is no need to update anything.
return Ok(());
}
Ordering::Greater => {
// This will only happen if a peer lowers its cumulative difficulty during the connection.
// This won't happen if a peer re-syncs their blockchain as then the connection would have closed.
tracing::debug!(
"Peer's claimed cumulative difficulty has dropped, closing connection and banning peer for: {} seconds.", SHORT_BAN.as_secs()
);
handle.ban_peer(SHORT_BAN);
return Err("Peers cumulative difficulty dropped".into());
}
Ordering::Less => (),
}
// Remove the old cumulative difficulty entry for this peer
let old_cum_diff_peers = self.cumulative_difficulties.get_mut(old_cum_diff).unwrap();
old_cum_diff_peers.remove(&peer_id);
if old_cum_diff_peers.is_empty() {
// If this was the last peer remove the whole entry for this cumulative difficulty.
self.cumulative_difficulties.remove(old_cum_diff);
}
// update the cumulative difficulty
*old_cum_diff = new_cumulative_difficulty;
} else {
// The peer is new, so add it to the list of peers.
self.peers.insert(
peer_id,
(
new_cumulative_difficulty,
PruningSeed::decompress_p2p_rules(core_sync_data.pruning_seed)?,
),
);
// add it to the list of peers to watch for disconnection.
self.closed_connections.push(PeerDisconnectFut {
closed_fut: handle.closed(),
peer_id: Some(peer_id),
})
}
self.cumulative_difficulties
.entry(new_cumulative_difficulty)
.or_default()
.insert(peer_id);
// If the claimed cumulative difficulty is higher than the current one in the watcher
// or if the peer in the watch has disconnected, update it.
if self.new_height_watcher.borrow().cumulative_difficulty < new_cumulative_difficulty
|| self
.last_peer_in_watcher_handle
.as_ref()
.is_some_and(|handle| handle.is_closed())
{
tracing::debug!(
"Updating sync watcher channel with new highest seen cumulative difficulty: {new_cumulative_difficulty}"
);
let _ = self.new_height_watcher.send(NewSyncInfo {
top_hash: core_sync_data.top_id,
chain_height: core_sync_data.current_height,
cumulative_difficulty: new_cumulative_difficulty,
});
self.last_peer_in_watcher_handle.replace(handle);
}
Ok(())
}
}
impl<N: NetworkZone> Service<PeerSyncRequest<N>> for PeerSyncSvc<N> {
type Response = PeerSyncResponse<N>;
type Error = tower::BoxError;
type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.poll_disconnected(cx);
Poll::Ready(Ok(()))
}
fn call(&mut self, req: PeerSyncRequest<N>) -> Self::Future {
let res = match req {
PeerSyncRequest::PeersToSyncFrom {
current_cumulative_difficulty,
block_needed,
} => Ok(PeerSyncResponse::PeersToSyncFrom(self.peers_to_sync_from(
current_cumulative_difficulty,
block_needed,
))),
PeerSyncRequest::IncomingCoreSyncData(peer_id, handle, sync_data) => self
.update_peer_sync_info(peer_id, handle, sync_data)
.map(|_| PeerSyncResponse::Ok),
};
ready(res)
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use tokio::sync::Semaphore;
use tower::{Service, ServiceExt};
use monero_p2p::{client::InternalPeerID, handles::HandleBuilder, services::PeerSyncRequest};
use monero_wire::CoreSyncData;
use cuprate_test_utils::test_netzone::TestNetZone;
use monero_p2p::services::PeerSyncResponse;
use super::PeerSyncSvc;
#[tokio::test]
async fn top_sync_channel_updates() {
let semaphore = Arc::new(Semaphore::new(1));
let (_g, handle) = HandleBuilder::new()
.with_permit(semaphore.try_acquire_owned().unwrap())
.build();
let (mut svc, mut watch) = PeerSyncSvc::<TestNetZone<true, true, true>>::new();
assert!(!watch.has_changed().unwrap());
svc.ready()
.await
.unwrap()
.call(PeerSyncRequest::IncomingCoreSyncData(
InternalPeerID::Unknown(0),
handle.clone(),
CoreSyncData {
cumulative_difficulty: 1_000,
cumulative_difficulty_top64: 0,
current_height: 0,
pruning_seed: 0,
top_id: [0; 32],
top_version: 0,
},
))
.await
.unwrap();
assert!(watch.has_changed().unwrap());
assert_eq!(watch.borrow().top_hash, [0; 32]);
assert_eq!(watch.borrow().cumulative_difficulty, 1000);
assert_eq!(watch.borrow_and_update().chain_height, 0);
svc.ready()
.await
.unwrap()
.call(PeerSyncRequest::IncomingCoreSyncData(
InternalPeerID::Unknown(1),
handle.clone(),
CoreSyncData {
cumulative_difficulty: 1_000,
cumulative_difficulty_top64: 0,
current_height: 0,
pruning_seed: 0,
top_id: [0; 32],
top_version: 0,
},
))
.await
.unwrap();
assert!(!watch.has_changed().unwrap());
svc.ready()
.await
.unwrap()
.call(PeerSyncRequest::IncomingCoreSyncData(
InternalPeerID::Unknown(2),
handle.clone(),
CoreSyncData {
cumulative_difficulty: 1_001,
cumulative_difficulty_top64: 0,
current_height: 0,
pruning_seed: 0,
top_id: [1; 32],
top_version: 0,
},
))
.await
.unwrap();
assert!(watch.has_changed().unwrap());
assert_eq!(watch.borrow().top_hash, [1; 32]);
assert_eq!(watch.borrow().cumulative_difficulty, 1001);
assert_eq!(watch.borrow_and_update().chain_height, 0);
}
#[tokio::test]
async fn peer_sync_info_updates() {
let semaphore = Arc::new(Semaphore::new(1));
let (_g, handle) = HandleBuilder::new()
.with_permit(semaphore.try_acquire_owned().unwrap())
.build();
let (mut svc, _watch) = PeerSyncSvc::<TestNetZone<true, true, true>>::new();
svc.ready()
.await
.unwrap()
.call(PeerSyncRequest::IncomingCoreSyncData(
InternalPeerID::Unknown(0),
handle.clone(),
CoreSyncData {
cumulative_difficulty: 1_000,
cumulative_difficulty_top64: 0,
current_height: 0,
pruning_seed: 0,
top_id: [0; 32],
top_version: 0,
},
))
.await
.unwrap();
assert_eq!(svc.peers.len(), 1);
assert_eq!(svc.cumulative_difficulties.len(), 1);
svc.ready()
.await
.unwrap()
.call(PeerSyncRequest::IncomingCoreSyncData(
InternalPeerID::Unknown(0),
handle.clone(),
CoreSyncData {
cumulative_difficulty: 1_001,
cumulative_difficulty_top64: 0,
current_height: 0,
pruning_seed: 0,
top_id: [0; 32],
top_version: 0,
},
))
.await
.unwrap();
assert_eq!(svc.peers.len(), 1);
assert_eq!(svc.cumulative_difficulties.len(), 1);
svc.ready()
.await
.unwrap()
.call(PeerSyncRequest::IncomingCoreSyncData(
InternalPeerID::Unknown(1),
handle.clone(),
CoreSyncData {
cumulative_difficulty: 10,
cumulative_difficulty_top64: 0,
current_height: 0,
pruning_seed: 0,
top_id: [0; 32],
top_version: 0,
},
))
.await
.unwrap();
assert_eq!(svc.peers.len(), 2);
assert_eq!(svc.cumulative_difficulties.len(), 2);
let PeerSyncResponse::PeersToSyncFrom(peers) = svc
.ready()
.await
.unwrap()
.call(PeerSyncRequest::PeersToSyncFrom {
block_needed: None,
current_cumulative_difficulty: 0,
})
.await
.unwrap()
else {
panic!("Wrong response for request.")
};
assert!(
peers.contains(&InternalPeerID::Unknown(0))
&& peers.contains(&InternalPeerID::Unknown(1))
)
}
}
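The peers_to_sync_from lookup above leans on BTreeMap::range; a self-contained analogue of picking every entry with a strictly higher cumulative difficulty:

use std::collections::{BTreeMap, HashSet};

fn main() {
    let mut diffs: BTreeMap<u128, HashSet<&str>> = BTreeMap::new();
    diffs.entry(90).or_default().insert("peer-a");
    diffs.entry(120).or_default().insert("peer-b");
    diffs.entry(150).or_default().insert("peer-c");

    let our_difficulty: u128 = 100;
    // Everything strictly above our cumulative difficulty is a sync candidate.
    let ahead: Vec<_> = diffs
        .range((our_difficulty + 1)..)
        .flat_map(|(_, peers)| peers)
        .collect();
    assert_eq!(ahead, vec![&"peer-b", &"peer-c"]);
}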

p2p/dandelion/Cargo.toml
View file

@ -0,0 +1,27 @@
[package]
name = "dandelion_tower"
version = "0.1.0"
edition = "2021"
license = "MIT"
authors = ["Boog900"]
[features]
default = ["txpool"]
txpool = ["dep:rand_distr", "dep:tokio-util", "dep:tokio"]
[dependencies]
tower = { workspace = true, features = ["discover", "util"] }
tracing = { workspace = true, features = ["std"] }
futures = { workspace = true, features = ["std"] }
tokio = { workspace = true, features = ["rt", "sync", "macros"], optional = true}
tokio-util = { workspace = true, features = ["time"], optional = true }
rand = { workspace = true, features = ["std", "std_rng"] }
rand_distr = { workspace = true, features = ["std"], optional = true }
thiserror = { workspace = true }
[dev-dependencies]
tokio = { workspace = true, features = ["rt-multi-thread", "macros", "sync"] }
proptest = { workspace = true, features = ["default"] }

Some files were not shown because too many files have changed in this diff.