Mirror of https://github.com/hinto-janai/cuprate.git (synced 2024-12-22 03:29:30 +00:00)

Commit 648ec46f29: 602 changed files with 38546 additions and 22463 deletions

.github/labeler.yml (vendored): 4 changes

@@ -56,6 +56,10 @@ A-cryptonight:
- changed-files:
  - any-glob-to-any-file: cryptonight/**

A-constants:
- changed-files:
  - any-glob-to-any-file: constants/**

A-storage:
- changed-files:
  - any-glob-to-any-file: storage/**

.github/workflows/architecture-book.yml (vendored, new file): 40 lines

@@ -0,0 +1,40 @@
# This action attempts to build the architecture book, if changed.

name: Architecture mdBook

on:
  push:
    branches: ['main']
    paths: ['books/architecture/**']
  pull_request:
    paths: ['books/architecture/**']
  workflow_dispatch:

env:
  # Version of `mdbook` to install.
  MDBOOK_VERSION: 0.4.36
  # Version of `mdbook-last-changed` to install.
  # <https://github.com/badboy/mdbook-last-changed>.
  MDBOOK_LAST_CHANGED_VERSION: 0.1.4

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Cache
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/mdbook
            ~/.cargo/bin/mdbook-last-changed
          key: architecture-book

      - name: Install mdBook
        run: |
          cargo install --locked --version ${MDBOOK_VERSION} mdbook || echo "mdbook already exists"
          cargo install --locked --version ${MDBOOK_LAST_CHANGED_VERSION} mdbook-last-changed || echo "mdbook-last-changed already exists"

      - name: Build
        run: mdbook build books/architecture

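The book build can also be reproduced locally with the versions pinned in this workflow; a minimal sketch (assumes a Rust toolchain with `cargo` on `$PATH`):

```bash
# Mirrors the `Install mdBook` and `Build` steps above.
cargo install --locked --version 0.4.36 mdbook || echo "mdbook already exists"
cargo install --locked --version 0.1.4 mdbook-last-changed || echo "mdbook-last-changed already exists"
mdbook build books/architecture
```
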
.github/workflows/audit.yml (vendored, deleted): 33 lines

@@ -1,33 +0,0 @@
# This runs `cargo audit` on all dependencies (only if Cargo deps changed)

name: Audit

on:
  push:
    paths:
      - '**/Cargo.toml'
      - '**/Cargo.lock'

env:
  CARGO_TERM_COLOR: always

jobs:
  audit:

    runs-on: ubuntu-latest

    steps:
      - name: Cache
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo
            target
          key: audit
      - uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: Install dependencies
        run: cargo install cargo-audit --locked
      - name: Audit
        run: cargo audit

.github/workflows/ci.yml (vendored): 7 changes

@@ -133,7 +133,12 @@ jobs:
      - name: Test
        run: |
          cargo test --all-features --workspace
          cargo test --package cuprate-blockchain --no-default-features --features redb --features service
          cargo test --package cuprate-blockchain --no-default-features --features redb

      - name: Hack Check
        run: |
          cargo install cargo-hack --locked
          cargo hack --workspace check --feature-powerset --no-dev-deps

      # TODO: upload binaries with `actions/upload-artifact@v3`
      - name: Build

.github/workflows/deny.yml (vendored): 1 change

@@ -7,6 +7,7 @@ on:
    paths:
      - '**/Cargo.toml'
      - '**/Cargo.lock'
  workflow_dispatch:

env:
  CARGO_TERM_COLOR: always

.github/workflows/doc.yml (vendored, new file): 74 lines

@@ -0,0 +1,74 @@
# This builds `cargo doc` and uploads it to the repo's GitHub Pages.

name: Doc

on:
  push:
    branches: [ "main" ] # Only deploy if `main` changes.
  workflow_dispatch:

env:
  # Show colored output in CI.
  CARGO_TERM_COLOR: always
  # Generate an index page.
  RUSTDOCFLAGS: '--cfg docsrs --show-type-layout --enable-index-page -Zunstable-options'

jobs:
  # Build documentation.
  build:
    # FIXME: how to build and merge Windows + macOS docs
    # with Linux's? Similar to the OS toggle on docs.rs.
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Install Rust
        uses: dtolnay/rust-toolchain@master
        with:
          # Nightly required for some `cargo doc` settings.
          toolchain: nightly

      - name: Cache
        uses: actions/cache@v4
        with:
          # Don't cache actual doc files, just build files.
          # This is so that removed crates don't show up.
          path: target/debug
          key: doc

      # Packages other than `Boost` used by `Monero` are listed here.
      # https://github.com/monero-project/monero/blob/c444a7e002036e834bfb4c68f04a121ce1af5825/.github/workflows/build.yml#L71

      - name: Install dependencies (Linux)
        run: sudo apt install -y libboost-dev

      - name: Documentation
        run: cargo +nightly doc --workspace --all-features

      - name: Upload documentation
        uses: actions/upload-pages-artifact@v3
        with:
          path: target/doc/

  # Deployment job.
  deploy:
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    needs: build

    # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
    permissions:
      contents: read
      pages: write
      id-token: write

    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4

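The `Documentation` step can likewise be run locally; a sketch using the workflow's own `RUSTDOCFLAGS` (a nightly toolchain is assumed, as the workflow notes):

```bash
# Local equivalent of the CI doc build; output lands in target/doc/.
RUSTDOCFLAGS='--cfg docsrs --show-type-layout --enable-index-page -Zunstable-options' \
    cargo +nightly doc --workspace --all-features
```
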
.github/workflows/monero-book.yml (vendored, new file): 40 lines

@@ -0,0 +1,40 @@
# This action attempts to build the Monero book, if changed.

name: Monero mdBook

on:
  push:
    branches: ['main']
    paths: ['books/protocol/**']
  pull_request:
    paths: ['books/protocol/**']
  workflow_dispatch:

env:
  # Version of `mdbook` to install.
  MDBOOK_VERSION: 0.4.36
  # Version of `mdbook-svgbob` to install.
  # <https://github.com/boozook/mdbook-svgbob>.
  MDBOOK_SVGBOB_VERSION: 0.2.1

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Cache
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/mdbook
            ~/.cargo/bin/mdbook-svgbob
          key: monero-book

      - name: Install mdBook
        run: |
          cargo install --locked --version ${MDBOOK_VERSION} mdbook || echo "mdbook already exists"
          cargo install --locked --version ${MDBOOK_SVGBOB_VERSION} mdbook-svgbob || echo "mdbook-svgbob already exists"

      - name: Build
        run: mdbook build books/protocol

.github/workflows/user-book.yml (vendored, new file): 40 lines

@@ -0,0 +1,40 @@
# This action attempts to build the user book, if changed.

name: User mdBook

on:
  push:
    branches: ['main']
    paths: ['books/user/**']
  pull_request:
    paths: ['books/user/**']
  workflow_dispatch:

env:
  # Version of `mdbook` to install.
  MDBOOK_VERSION: 0.4.36
  # Version of `mdbook-last-changed` to install.
  # <https://github.com/badboy/mdbook-last-changed>.
  MDBOOK_LAST_CHANGED_VERSION: 0.1.4

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Cache
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/mdbook
            ~/.cargo/bin/mdbook-last-changed
          key: user-book

      - name: Install mdBook
        run: |
          cargo install --locked --version ${MDBOOK_VERSION} mdbook || echo "mdbook already exists"
          cargo install --locked --version ${MDBOOK_LAST_CHANGED_VERSION} mdbook-last-changed || echo "mdbook-last-changed already exists"

      - name: Build
        run: mdbook build books/user

CONTRIBUTING.md

@@ -120,12 +120,15 @@ Before pushing your code, please run the following at the root of the repository:

After that, ensure all other CI passes by running:

| Command | Does what |
|------------------------------------------------------------------------|-----------|
| `RUSTDOCFLAGS='-D warnings' cargo doc --workspace --all-features` | Checks documentation is OK
| `cargo clippy --workspace --all-features --all-targets -- -D warnings` | Checks clippy lints are satisfied
| `cargo test --all-features --workspace` | Runs all tests
| `cargo build --all-features --all-targets --workspace` | Builds all code

| Command | Does what |
|------------------------------------------------------------------------|-------------------------------------------------------------------------|
| `RUSTDOCFLAGS='-D warnings' cargo doc --workspace --all-features` | Checks documentation is OK |
| `cargo clippy --workspace --all-features --all-targets -- -D warnings` | Checks clippy lints are satisfied |
| `cargo test --all-features --workspace` | Runs all tests |
| `cargo build --all-features --all-targets --workspace` | Builds all code |
| `cargo hack --workspace check --feature-powerset --no-dev-deps` | Uses `cargo hack` to check our crates build with different features set |

`cargo hack` can be installed with `cargo` from: https://github.com/taiki-e/cargo-hack.

**Note: in order for some tests to work, you will need to place a [`monerod`](https://www.getmonero.org/downloads/) binary at the root of the repository.**

@@ -216,9 +219,9 @@ The description of pull requests should generally follow the template laid out in
If your pull request is long and/or has sections that need clarifying, consider leaving a review on your own PR with comments explaining the changes.

## 5. Documentation
Cuprate's crates (libraries) have inline documentation.
Cuprate's crates (libraries) have inline documentation; they are published from the `main` branch at https://doc.cuprate.org.

These can be built and viewed using the `cargo` tool. For example, to build and view a specific crate's documentation, run the following command at the repository's root:
Documentation can be built and viewed using the `cargo` tool. For example, to build and view a specific crate's documentation, run the following command at the repository's root:
```bash
cargo doc --open --package $CRATE
```

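The new `cargo hack` row can be checked locally before pushing; a sketch that mirrors the CI's `Hack Check` step above:

```bash
# Install once, then check every feature combination without dev-dependencies.
cargo install cargo-hack --locked
cargo hack --workspace check --feature-powerset --no-dev-deps
```
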
Cargo.lock (generated): 1930 changes (diff suppressed: file too large)

Cargo.toml: 384 changes

@@ -1,33 +1,61 @@
[workspace]
resolver = "2"

members = [
    # Binaries
    "binaries/cuprated",

    # Benchmarks
    "benches/benchmark/bin",
    "benches/benchmark/lib",
    "benches/benchmark/example",
    "benches/criterion/example",
    "benches/criterion/cuprate-json-rpc",

    # Consensus
    "consensus",
    "consensus/context",
    "consensus/fast-sync",
    "consensus/rules",
    "cryptonight",
    "helper",

    # Net
    "net/epee-encoding",
    "net/fixed-bytes",
    "net/levin",
    "net/wire",

    # P2P
    "p2p/p2p",
    "p2p/p2p-core",
    "p2p/bucket",
    "p2p/dandelion-tower",
    "p2p/async-buffer",
    "p2p/address-book",

    # Storage
    "storage/blockchain",
    "storage/service",
    "storage/txpool",
    "storage/database",
    "pruning",
    "test-utils",
    "types",

    # RPC
    "rpc/json-rpc",
    "rpc/types",
    "rpc/interface",

    # ZMQ
    "zmq/types",

    # Misc
    "constants",
    "cryptonight",
    "helper",
    "pruning",
    "test-utils",
    "types",
]

[profile.release]
panic = "abort"
lto = true          # Build with LTO
strip = "none"      # Keep panic stack traces
codegen-units = 1   # Optimize for binary speed over compile times

@@ -46,50 +74,91 @@ opt-level = 1
opt-level = 3

[workspace.dependencies]
async-trait = { version = "0.1.74", default-features = false }
bitflags = { version = "2.4.2", default-features = false }
borsh = { version = "1.2.1", default-features = false }
bytemuck = { version = "1.14.3", default-features = false }
bytes = { version = "1.5.0", default-features = false }
cfg-if = { version = "1.0.0", default-features = false }
clap = { version = "4.4.7", default-features = false }
chrono = { version = "0.4.31", default-features = false }
crypto-bigint = { version = "0.5.5", default-features = false }
crossbeam = { version = "0.8.4", default-features = false }
curve25519-dalek = { version = "4.1.3", default-features = false }
dalek-ff-group = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
dashmap = { version = "5.5.3", default-features = false }
dirs = { version = "5.0.1", default-features = false }
futures = { version = "0.3.29", default-features = false }
hex = { version = "0.4.3", default-features = false }
# Cuprate members
cuprate-benchmark-lib = { path = "benches/benchmark/lib", default-features = false }
cuprate-benchmark-example = { path = "benches/benchmark/example", default-features = false }
cuprate-fast-sync = { path = "consensus/fast-sync", default-features = false }
cuprate-consensus-rules = { path = "consensus/rules", default-features = false }
cuprate-constants = { path = "constants", default-features = false }
cuprate-consensus = { path = "consensus", default-features = false }
cuprate-consensus-context = { path = "consensus/context", default-features = false }
cuprate-cryptonight = { path = "cryptonight", default-features = false }
cuprate-helper = { path = "helper", default-features = false }
cuprate-epee-encoding = { path = "net/epee-encoding", default-features = false }
cuprate-fixed-bytes = { path = "net/fixed-bytes", default-features = false }
cuprate-levin = { path = "net/levin", default-features = false }
cuprate-wire = { path = "net/wire", default-features = false }
cuprate-p2p = { path = "p2p/p2p", default-features = false }
cuprate-p2p-core = { path = "p2p/p2p-core", default-features = false }
cuprate-p2p-bucket = { path = "p2p/p2p-bucket", default-features = false }
cuprate-dandelion-tower = { path = "p2p/dandelion-tower", default-features = false }
cuprate-async-buffer = { path = "p2p/async-buffer", default-features = false }
cuprate-address-book = { path = "p2p/address-book", default-features = false }
cuprate-blockchain = { path = "storage/blockchain", default-features = false }
cuprate-database = { path = "storage/database", default-features = false }
cuprate-database-service = { path = "storage/service", default-features = false }
cuprate-txpool = { path = "storage/txpool", default-features = false }
cuprate-pruning = { path = "pruning", default-features = false }
cuprate-test-utils = { path = "test-utils", default-features = false }
cuprate-types = { path = "types", default-features = false }
cuprate-json-rpc = { path = "rpc/json-rpc", default-features = false }
cuprate-rpc-types = { path = "rpc/types", default-features = false }
cuprate-rpc-interface = { path = "rpc/interface", default-features = false }
cuprate-zmq-types = { path = "zmq/types", default-features = false }

# External dependencies
anyhow = { version = "1", default-features = false }
arrayvec = { version = "0.7", default-features = false }
async-trait = { version = "0.1", default-features = false }
bitflags = { version = "2", default-features = false }
blake3 = { version = "1", default-features = false }
borsh = { version = "1", default-features = false }
bytemuck = { version = "1", default-features = false }
bytes = { version = "1", default-features = false }
cfg-if = { version = "1", default-features = false }
clap = { version = "4", default-features = false }
chrono = { version = "0.4", default-features = false }
crypto-bigint = { version = "0.5", default-features = false }
crossbeam = { version = "0.8", default-features = false }
const_format = { version = "0.2", default-features = false }
curve25519-dalek = { version = "4", default-features = false }
dashmap = { version = "6", default-features = false }
dirs = { version = "5", default-features = false }
futures = { version = "0.3", default-features = false }
hex = { version = "0.4", default-features = false }
hex-literal = { version = "0.4", default-features = false }
indexmap = { version = "2.2.5", default-features = false }
monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
multiexp = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
paste = { version = "1.0.14", default-features = false }
pin-project = { version = "1.1.3", default-features = false }
indexmap = { version = "2", default-features = false }
monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce", default-features = false }
paste = { version = "1", default-features = false }
pin-project = { version = "1", default-features = false }
randomx-rs = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "0028464", default-features = false }
rand = { version = "0.8.5", default-features = false }
rand_distr = { version = "0.4.3", default-features = false }
rayon = { version = "1.9.0", default-features = false }
serde_bytes = { version = "0.11.12", default-features = false }
serde_json = { version = "1.0.108", default-features = false }
serde = { version = "1.0.190", default-features = false }
thiserror = { version = "1.0.50", default-features = false }
thread_local = { version = "1.1.7", default-features = false }
tokio-util = { version = "0.7.10", default-features = false }
tokio-stream = { version = "0.1.14", default-features = false }
tokio = { version = "1.33.0", default-features = false }
tower = { version = "0.4.13", default-features = false }
tracing-subscriber = { version = "0.3.17", default-features = false }
tracing = { version = "0.1.40", default-features = false }
rand = { version = "0.8", default-features = false }
rand_distr = { version = "0.4", default-features = false }
rayon = { version = "1", default-features = false }
serde_bytes = { version = "0.11", default-features = false }
serde_json = { version = "1", default-features = false }
serde = { version = "1", default-features = false }
strum = { version = "0.26", default-features = false }
thiserror = { version = "1", default-features = false }
thread_local = { version = "1", default-features = false }
tokio-util = { version = "0.7", default-features = false }
tokio-stream = { version = "0.1", default-features = false }
tokio = { version = "1", default-features = false }
tower = { git = "https://github.com/Cuprate/tower.git", rev = "6c7faf0", default-features = false } # <https://github.com/tower-rs/tower/pull/796>
toml = { version = "0.8", default-features = false }
tracing-subscriber = { version = "0.3", default-features = false }
tracing = { version = "0.1", default-features = false }

## workspace.dev-dependencies
tempfile = { version = "3" }
pretty_assertions = { version = "1.4.0" }
proptest = { version = "1" }
proptest-derive = { version = "0.4.0" }
tokio-test = { version = "0.4.4" }
criterion = { version = "0.5" }
function_name = { version = "0.3" }
monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" }
monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" }
tempfile = { version = "3" }
pretty_assertions = { version = "1" }
proptest = { version = "1" }
proptest-derive = { version = "0.5" }
tokio-test = { version = "0.4" }

## TODO:
## Potential dependencies.

@@ -101,7 +170,222 @@ tokio-test = { version = "0.4.4" }
# regex = { version = "1.10.2" } # Regular expressions | https://github.com/rust-lang/regex
# ryu = { version = "1.0.15" } # Fast float to string formatting | https://github.com/dtolnay/ryu

# Maybe one day.
# disk = { version = "*" } # (De)serialization to/from disk with various file formats | https://github.com/hinto-janai/disk
# readable = { version = "*" } # Stack-based string formatting utilities | https://github.com/hinto-janai/readable
# json-rpc = { git = "https://github.com/hinto-janai/json-rpc" } # JSON-RPC 2.0 types
# Lints: cold, warm, hot: <https://github.com/Cuprate/cuprate/issues/131>
[workspace.lints.clippy]
# Cold
borrow_as_ptr = "deny"
case_sensitive_file_extension_comparisons = "deny"
cast_lossless = "deny"
cast_ptr_alignment = "deny"
checked_conversions = "deny"
cloned_instead_of_copied = "deny"
const_is_empty = "deny"
doc_lazy_continuation = "deny"
doc_link_with_quotes = "deny"
duplicated_attributes = "deny"
empty_enum = "deny"
enum_glob_use = "deny"
expl_impl_clone_on_copy = "deny"
explicit_into_iter_loop = "deny"
filter_map_next = "deny"
flat_map_option = "deny"
from_iter_instead_of_collect = "deny"
if_not_else = "deny"
ignored_unit_patterns = "deny"
inconsistent_struct_constructor = "deny"
index_refutable_slice = "deny"
inefficient_to_string = "deny"
invalid_upcast_comparisons = "deny"
iter_filter_is_ok = "deny"
iter_filter_is_some = "deny"
implicit_clone = "deny"
legacy_numeric_constants = "deny"
manual_c_str_literals = "deny"
manual_pattern_char_comparison = "deny"
manual_instant_elapsed = "deny"
manual_inspect = "deny"
manual_is_variant_and = "deny"
manual_let_else = "deny"
manual_ok_or = "deny"
manual_string_new = "deny"
manual_unwrap_or_default = "deny"
map_unwrap_or = "deny"
match_bool = "deny"
match_same_arms = "deny"
match_wildcard_for_single_variants = "deny"
mismatching_type_param_order = "deny"
missing_transmute_annotations = "deny"
mut_mut = "deny"
needless_bitwise_bool = "deny"
needless_character_iteration = "deny"
needless_continue = "deny"
needless_for_each = "deny"
needless_maybe_sized = "deny"
needless_raw_string_hashes = "deny"
no_effect_underscore_binding = "deny"
no_mangle_with_rust_abi = "deny"
option_as_ref_cloned = "deny"
option_option = "deny"
ptr_as_ptr = "deny"
ptr_cast_constness = "deny"
pub_underscore_fields = "deny"
redundant_closure_for_method_calls = "deny"
ref_as_ptr = "deny"
ref_option_ref = "deny"
same_functions_in_if_condition = "deny"
semicolon_if_nothing_returned = "deny"
trivially_copy_pass_by_ref = "deny"
uninlined_format_args = "deny"
unnecessary_join = "deny"
unnested_or_patterns = "deny"
unused_async = "deny"
unused_self = "deny"
used_underscore_binding = "deny"
zero_sized_map_values = "deny"
as_ptr_cast_mut = "deny"
clear_with_drain = "deny"
collection_is_never_read = "deny"
debug_assert_with_mut_call = "deny"
derive_partial_eq_without_eq = "deny"
empty_line_after_doc_comments = "deny"
empty_line_after_outer_attr = "deny"
equatable_if_let = "deny"
iter_on_empty_collections = "deny"
iter_on_single_items = "deny"
iter_with_drain = "deny"
needless_collect = "deny"
needless_pass_by_ref_mut = "deny"
negative_feature_names = "deny"
non_send_fields_in_send_ty = "deny"
nonstandard_macro_braces = "deny"
path_buf_push_overwrite = "deny"
read_zero_byte_vec = "deny"
redundant_clone = "deny"
redundant_feature_names = "deny"
trailing_empty_array = "deny"
trait_duplication_in_bounds = "deny"
type_repetition_in_bounds = "deny"
uninhabited_references = "deny"
unnecessary_struct_initialization = "deny"
unused_peekable = "deny"
unused_rounding = "deny"
use_self = "deny"
useless_let_if_seq = "deny"
wildcard_dependencies = "deny"
unseparated_literal_suffix = "deny"
unnecessary_safety_doc = "deny"
unnecessary_safety_comment = "deny"
unnecessary_self_imports = "deny"
string_to_string = "deny"
rest_pat_in_fully_bound_structs = "deny"
redundant_type_annotations = "deny"
infinite_loop = "deny"
zero_repeat_side_effects = "deny"
non_zero_suggestions = "deny"
manual_is_power_of_two = "deny"
used_underscore_items = "deny"

# Warm
cast_possible_truncation = "deny"
cast_possible_wrap = "deny"
cast_precision_loss = "deny"
cast_sign_loss = "deny"
copy_iterator = "deny"
doc_markdown = "deny"
explicit_deref_methods = "deny"
explicit_iter_loop = "deny"
float_cmp = "deny"
fn_params_excessive_bools = "deny"
into_iter_without_iter = "deny"
iter_without_into_iter = "deny"
iter_not_returning_iterator = "deny"
large_digit_groups = "deny"
large_types_passed_by_value = "deny"
manual_assert = "deny"
maybe_infinite_iter = "deny"
missing_fields_in_debug = "deny"
needless_pass_by_value = "deny"
range_minus_one = "deny"
range_plus_one = "deny"
redundant_else = "deny"
ref_binding_to_reference = "deny"
return_self_not_must_use = "deny"
single_match_else = "deny"
string_add_assign = "deny"
transmute_ptr_to_ptr = "deny"
unchecked_duration_subtraction = "deny"
unnecessary_box_returns = "deny"
unnecessary_wraps = "deny"
branches_sharing_code = "deny"
fallible_impl_from = "deny"
missing_const_for_fn = "deny"
significant_drop_in_scrutinee = "deny"
significant_drop_tightening = "deny"
try_err = "deny"
lossy_float_literal = "deny"
let_underscore_must_use = "deny"
iter_over_hash_type = "deny"
get_unwrap = "deny"
error_impl_error = "deny"
empty_structs_with_brackets = "deny"
empty_enum_variants_with_brackets = "deny"
empty_drop = "deny"
clone_on_ref_ptr = "deny"
upper_case_acronyms = "deny"
allow_attributes = "deny"

# Hot
# inline_always = "deny"
# large_futures = "deny"
# large_stack_arrays = "deny"
# linkedlist = "deny"
# missing_errors_doc = "deny"
# missing_panics_doc = "deny"
# should_panic_without_expect = "deny"
# similar_names = "deny"
# too_many_lines = "deny"
# unreadable_literal = "deny"
# wildcard_imports = "deny"
# allow_attributes_without_reason = "deny"
# missing_assert_message = "deny"
# missing_docs_in_private_items = "deny"
undocumented_unsafe_blocks = "deny"
# multiple_unsafe_ops_per_block = "deny"
# single_char_lifetime_names = "deny"
# wildcard_enum_match_arm = "deny"

[workspace.lints.rust]
# Cold
future_incompatible = { level = "deny", priority = -1 }
nonstandard_style = { level = "deny", priority = -1 }
absolute_paths_not_starting_with_crate = "deny"
explicit_outlives_requirements = "deny"
keyword_idents_2018 = "deny"
keyword_idents_2024 = "deny"
missing_abi = "deny"
non_ascii_idents = "deny"
non_local_definitions = "deny"
redundant_lifetimes = "deny"
single_use_lifetimes = "deny"
trivial_casts = "deny"
trivial_numeric_casts = "deny"
unsafe_op_in_unsafe_fn = "deny"
unused_crate_dependencies = "deny"
unused_import_braces = "deny"
unused_lifetimes = "deny"
unused_macro_rules = "deny"
ambiguous_glob_imports = "deny"
unused_unsafe = "deny"

# Warm
let_underscore = { level = "deny", priority = -1 }
unreachable_pub = "deny"
unused_qualifications = "deny"
variant_size_differences = "deny"
non_camel_case_types = "deny"

# Hot
# unused_results = "deny"
# non_exhaustive_omitted_patterns = "deny"
# missing_docs = "deny"
# missing_copy_implementations = "deny"

README.md

@@ -49,7 +49,7 @@ Cuprate maintains various documentation books:
| [Monero's protocol book](https://monero-book.cuprate.org) | Documents the Monero protocol |
| [Cuprate's user book](https://user.cuprate.org) | Practical user-guide for using `cuprated` |

For crate (library) documentation, see the `Documentation` section in [`CONTRIBUTING.md`](CONTRIBUTING.md).
For crate (library) documentation, see: https://doc.cuprate.org. This site holds documentation for Cuprate's crates and all dependencies. All Cuprate crates start with `cuprate_`, for example: [`cuprate_database`](https://doc.cuprate.org/cuprate_database).

## Contributing

benches/README.md

@@ -1 +1,5 @@
# TODO
# Benches
This directory contains Cuprate's benchmarks and benchmarking utilities.

See the [`Benchmarking` section in the Architecture book](https://architecture.cuprate.org/benchmarking/intro.html)
to see how to create and run these benchmarks.

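To illustrate (a sketch, not part of the diff itself): given the workspace members added above, the two benchmark flavors would typically be invoked as follows; the package and feature names are the ones defined in the crates below.

```bash
# Criterion benchmarks, e.g. the example crate:
cargo bench --package cuprate-criterion-example

# The custom harness binary, with one benchmark feature enabled:
cargo run --release --package cuprate-benchmark --features example
```
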
benches/benchmark/bin/Cargo.toml (new file): 43 lines

@@ -0,0 +1,43 @@
[package]
name = "cuprate-benchmark"
version = "0.0.0"
edition = "2021"
description = "Cuprate's benchmarking binary"
license = "MIT"
authors = ["hinto-janai"]
repository = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin"
keywords = ["cuprate", "benchmarking", "binary"]

[features]
# All new benchmarks should be added here!
all = ["example"]

# Non-benchmark features.
default = []
json = []
trace = []
debug = []
warn = []
info = []
error = []

# Benchmark features.
# New benchmarks should be added here!
example = [
    "dep:cuprate-benchmark-example"
]

[dependencies]
cuprate-benchmark-lib = { workspace = true }
cuprate-benchmark-example = { workspace = true, optional = true }

cfg-if = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true, features = ["std"] }
tracing = { workspace = true, features = ["std", "attributes"] }
tracing-subscriber = { workspace = true, features = ["fmt", "std", "env-filter"] }

[dev-dependencies]

[lints]
workspace = true

benches/benchmark/bin/README.md (new file): 27 lines

@@ -0,0 +1,27 @@
## `cuprate-benchmark`
This crate links all benchmarks together into a single binary that can be run as: `cuprate-benchmark`.

`cuprate-benchmark` will run all enabled benchmarks sequentially and print data at the end.

## Benchmarks
Benchmarks are opt-in and enabled via features.

| Feature  | Enables which benchmark crate? |
|----------|--------------------------------|
| example  | cuprate-benchmark-example      |
| database | cuprate-benchmark-database     |

## Features
These are features that aren't for enabling benchmarks, but rather for other things.

Since `cuprate-benchmark` is built right before it is run,
these features almost act like command line arguments.

| Features | Does what                                       |
|----------|-------------------------------------------------|
| json     | Prints JSON timings instead of a markdown table |
| trace    | Use the `trace` log-level                       |
| debug    | Use the `debug` log-level                       |
| warn     | Use the `warn` log-level                        |
| info     | Use the `info` log-level (default)              |
| error    | Use the `error` log-level                       |

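Since the log-level and output-format features compose with the benchmark features, a typical invocation would look like the following (a sketch; the package and feature names are the ones defined above):

```bash
# Run the example benchmark with JSON output and `debug` logs.
cargo run --release --package cuprate-benchmark --features example,json,debug
```
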
benches/benchmark/bin/src/log.rs (new file): 29 lines

@@ -0,0 +1,29 @@
use cfg_if::cfg_if;
use tracing::{info, instrument, Level};
use tracing_subscriber::FmtSubscriber;

/// Initializes the `tracing` logger.
#[instrument]
pub(crate) fn init_logger() {
    const LOG_LEVEL: Level = {
        cfg_if! {
            if #[cfg(feature = "trace")] {
                Level::TRACE
            } else if #[cfg(feature = "debug")] {
                Level::DEBUG
            } else if #[cfg(feature = "warn")] {
                Level::WARN
            } else if #[cfg(feature = "info")] {
                Level::INFO
            } else if #[cfg(feature = "error")] {
                Level::ERROR
            } else {
                Level::INFO
            }
        }
    };

    FmtSubscriber::builder().with_max_level(LOG_LEVEL).init();

    info!("Log level: {LOG_LEVEL}");
}

benches/benchmark/bin/src/main.rs (new file): 49 lines

@@ -0,0 +1,49 @@
#![doc = include_str!("../README.md")]
#![allow(
    unused_crate_dependencies,
    reason = "this crate imports many potentially unused dependencies"
)]

mod log;
mod print;
mod run;
mod timings;

use cfg_if::cfg_if;

/// What `main()` does:
/// 1. Run all enabled benchmarks
/// 2. Record benchmark timings
/// 3. Print timing data
///
/// To add a new benchmark to be run here:
/// 1. Copy + paste a `cfg_if` block
/// 2. Change it to your benchmark's feature flag
/// 3. Change it to your benchmark's type
#[allow(
    clippy::allow_attributes,
    unused_variables,
    unused_mut,
    unreachable_code,
    reason = "clippy does not account for all cfg()s"
)]
fn main() {
    log::init_logger();

    let mut timings = timings::Timings::new();

    cfg_if! {
        if #[cfg(not(any(feature = "example")))] {
            println!("No feature specified. Use `--features $BENCHMARK_FEATURE` when building.");
            return;
        }
    }

    cfg_if! {
        if #[cfg(feature = "example")] {
            run::run_benchmark::<cuprate_benchmark_example::Example>(&mut timings);
        }
    }

    print::print_timings(&timings);
}

benches/benchmark/bin/src/print.rs (new file): 38 lines

@@ -0,0 +1,38 @@
#![expect(dead_code, reason = "code hidden behind feature flags")]

use cfg_if::cfg_if;

use crate::timings::Timings;

/// Print the final markdown table of benchmark timings.
pub(crate) fn print_timings(timings: &Timings) {
    println!("\nFinished all benchmarks, printing results:");

    cfg_if! {
        if #[cfg(feature = "json")] {
            print_timings_json(timings);
        } else {
            print_timings_markdown(timings);
        }
    }
}

/// Default timing formatting.
pub(crate) fn print_timings_markdown(timings: &Timings) {
    let mut s = String::new();
    s.push_str("| Benchmark                          | Time (seconds) |\n");
    s.push_str("|------------------------------------|----------------|");

    #[expect(clippy::iter_over_hash_type)]
    for (k, v) in timings {
        s += &format!("\n| {k:<34} | {v:<14} |");
    }

    println!("\n{s}");
}

/// Enabled via `json` feature.
pub(crate) fn print_timings_json(timings: &Timings) {
    let json = serde_json::to_string_pretty(timings).unwrap();
    println!("\n{json}");
}

benches/benchmark/bin/src/run.rs (new file): 36 lines

@@ -0,0 +1,36 @@
use tracing::{info, instrument, trace};

use cuprate_benchmark_lib::Benchmark;

use crate::timings::Timings;

/// Run a [`Benchmark`] and record its timing.
#[instrument(skip_all)]
pub(crate) fn run_benchmark<B: Benchmark>(timings: &mut Timings) {
    // Get the benchmark name.
    let name = B::name();
    trace!("Running benchmark: {name}");

    // Setup the benchmark input.
    let input = B::SETUP();

    // Sleep before running the benchmark.
    trace!("Pre-benchmark, sleeping for: {:?}", B::PRE_SLEEP_DURATION);
    std::thread::sleep(B::PRE_SLEEP_DURATION);

    // Run/time the benchmark.
    let now = std::time::Instant::now();
    B::MAIN(input);
    let time = now.elapsed().as_secs_f32();

    // Print the benchmark timings.
    info!("{name:>34} ... {time}");
    assert!(
        timings.insert(name, time).is_none(),
        "There were 2 benchmarks with the same name - this collides the final output: {name}",
    );

    // Sleep for a cooldown period after the benchmark run.
    trace!("Post-benchmark, sleeping for: {:?}", B::POST_SLEEP_DURATION);
    std::thread::sleep(B::POST_SLEEP_DURATION);
}

benches/benchmark/bin/src/timings.rs (new file): 5 lines

@@ -0,0 +1,5 @@
/// Benchmark timing data.
///
/// - Key = benchmark name
/// - Value = benchmark time in seconds
pub(crate) type Timings = std::collections::HashMap<&'static str, f32>;

benches/benchmark/example/Cargo.toml (new file): 17 lines

@@ -0,0 +1,17 @@
[package]
name = "cuprate-benchmark-example"
version = "0.0.0"
edition = "2021"
description = "Example showcasing Cuprate's benchmarking harness"
license = "MIT"
authors = ["hinto-janai"]
repository = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/example"
keywords = ["cuprate", "benchmarking", "example"]

[dependencies]
cuprate-benchmark-lib = { path = "../lib" }

[dev-dependencies]

[lints]
workspace = true

benches/benchmark/example/README.md (new file): 3 lines

@@ -0,0 +1,3 @@
## `cuprate-benchmark-example`
This crate contains a short example benchmark that shows how to implement and use
`cuprate-benchmark-lib` so that it can be run by `cuprate-benchmark`.

benches/benchmark/example/src/lib.rs (new file): 42 lines

@@ -0,0 +1,42 @@
#![doc = include_str!("../README.md")]

use std::hint::black_box;

use cuprate_benchmark_lib::Benchmark;

/// Marker struct that implements [`Benchmark`]
pub struct Example;

/// The input to our benchmark function.
pub type ExampleBenchmarkInput = u64;

/// The setup function that creates the input.
pub const fn example_benchmark_setup() -> ExampleBenchmarkInput {
    1
}

/// The main benchmarking function.
#[expect(clippy::unit_arg)]
pub fn example_benchmark_main(input: ExampleBenchmarkInput) {
    // In this case, we're simply benchmarking the
    // performance of simple arithmetic on the input data.

    fn math(input: ExampleBenchmarkInput, number: u64) {
        let x = input;
        let x = black_box(x * number);
        let x = black_box(x / number);
        let x = black_box(x + number);
        let _ = black_box(x - number);
    }

    for number in 1..100_000_000 {
        black_box(math(input, number));
    }
}

// This implementation will be run by `cuprate-benchmark`.
impl Benchmark for Example {
    type Input = ExampleBenchmarkInput;
    const SETUP: fn() -> Self::Input = example_benchmark_setup;
    const MAIN: fn(Self::Input) = example_benchmark_main;
}

benches/benchmark/lib/Cargo.toml (new file): 18 lines

@@ -0,0 +1,18 @@
[package]
name = "cuprate-benchmark-lib"
version = "0.0.0"
edition = "2021"
description = "Cuprate's benchmarking library"
license = "MIT"
authors = ["hinto-janai"]
repository = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib"
keywords = ["cuprate", "benchmarking", "library"]

[features]

[dependencies]

[dev-dependencies]

[lints]
workspace = true

benches/benchmark/lib/README.md (new file): 15 lines

@@ -0,0 +1,15 @@
## `cuprate-benchmark-lib`
This crate is the glue between
[`cuprate-benchmark`](https://github.com/Cuprate/cuprate/tree/benches/benches/benchmark/bin)
and all the benchmark crates.

It defines the [`crate::Benchmark`] trait, which defines the behavior of all benchmarks.

See the [`cuprate-benchmark-example`](https://github.com/Cuprate/cuprate/tree/benches/benches/benchmark/example)
crate to see an example implementation of this trait.

After implementing this trait, a few steps must
be done such that the `cuprate-benchmark` binary
can actually run your benchmark crate; see the
[`Benchmarking` section in the Architecture book](https://architecture.cuprate.org/benchmarking/intro.html)
to see how to do this.

benches/benchmark/lib/src/benchmark.rs (new file): 45 lines

@@ -0,0 +1,45 @@
//! Benchmarking trait.

use std::time::Duration;

/// A benchmarking function and its inputs.
pub trait Benchmark {
    /// The benchmark's name.
    ///
    /// This is automatically implemented
    /// as the name of the [`Self`] type.
    //
    // FIXME: use `const` instead of `fn` when stable
    // <https://github.com/rust-lang/rust/issues/63084>
    fn name() -> &'static str {
        std::any::type_name::<Self>()
    }

    /// Input to the main benchmarking function.
    ///
    /// This is passed to [`Self::MAIN`].
    type Input;

    /// Setup function to generate the input.
    ///
    /// This function is not timed.
    const SETUP: fn() -> Self::Input;

    /// The main function to benchmark.
    ///
    /// The start of the timer begins right before
    /// this function is called and ends after the
    /// function returns.
    const MAIN: fn(Self::Input);

    /// `cuprate-benchmark` will sleep for this [`Duration`] after
    /// creating the [`Self::Input`], but before starting [`Self::MAIN`].
    ///
    /// 1 second by default.
    const PRE_SLEEP_DURATION: Duration = Duration::from_secs(1);

    /// `cuprate-benchmark` will sleep for this [`Duration`] after [`Self::MAIN`].
    ///
    /// 1 second by default.
    const POST_SLEEP_DURATION: Duration = Duration::from_secs(1);
}

benches/benchmark/lib/src/lib.rs (new file): 5 lines

@@ -0,0 +1,5 @@
#![doc = include_str!("../README.md")]

mod benchmark;

pub use benchmark::Benchmark;

benches/criterion/cuprate-json-rpc/Cargo.toml (new file): 23 lines

@@ -0,0 +1,23 @@
[package]
name = "cuprate-criterion-json-rpc"
version = "0.0.0"
edition = "2021"
description = "Criterion benchmarking for cuprate-json-rpc"
license = "MIT"
authors = ["hinto-janai"]
repository = "https://github.com/Cuprate/cuprate/tree/main/benches/criterion/cuprate-json-rpc"
keywords = ["cuprate", "json-rpc", "criterion", "benchmark"]

[dependencies]
cuprate-json-rpc = { workspace = true }

criterion = { workspace = true }
function_name = { workspace = true }
serde_json = { workspace = true, features = ["default"] }

[[bench]]
name = "main"
harness = false

[lints]
workspace = true

benches/criterion/cuprate-json-rpc/benches/main.rs (new file): 8 lines

@@ -0,0 +1,8 @@
//! Benchmarks for `cuprate-json-rpc`.
#![allow(unused_crate_dependencies)]

mod response;

criterion::criterion_main! {
    response::serde,
}

benches/criterion/cuprate-json-rpc/benches/response.rs (new file): 110 lines

@@ -0,0 +1,110 @@
//! Benchmarks for [`Response`].
#![allow(unused_attributes, unused_crate_dependencies)]

use criterion::{black_box, criterion_group, criterion_main, Criterion};
use function_name::named;
use serde_json::{from_str, to_string_pretty};

use cuprate_json_rpc::{Id, Response};

// `serde` benchmarks on `Response`.
//
// These are benchmarked as `Response` has a custom serde implementation.
criterion_group! {
    name = serde;
    config = Criterion::default();
    targets =
        response_from_str_u8,
        response_from_str_u64,
        response_from_str_string_5_len,
        response_from_str_string_10_len,
        response_from_str_string_100_len,
        response_from_str_string_500_len,
        response_to_string_pretty_u8,
        response_to_string_pretty_u64,
        response_to_string_pretty_string_5_len,
        response_to_string_pretty_string_10_len,
        response_to_string_pretty_string_100_len,
        response_to_string_pretty_string_500_len,
        response_from_str_bad_field_1,
        response_from_str_bad_field_5,
        response_from_str_bad_field_10,
        response_from_str_bad_field_100,
        response_from_str_missing_field,
}
criterion_main!(serde);

/// Generate `from_str` deserialization benchmark functions for [`Response`].
macro_rules! impl_from_str_benchmark {
    (
        $(
            $fn_name:ident => $request_type:ty => $request_string:literal,
        )*
    ) => {
        $(
            #[named]
            fn $fn_name(c: &mut Criterion) {
                let request_string = $request_string;

                c.bench_function(function_name!(), |b| {
                    b.iter(|| {
                        let _r = from_str::<Response<$request_type>>(
                            black_box(request_string)
                        );
                    });
                });
            }
        )*
    };
}

impl_from_str_benchmark! {
    response_from_str_u8 => u8 => r#"{"jsonrpc":"2.0","id":123,"result":0}"#,
    response_from_str_u64 => u64 => r#"{"jsonrpc":"2.0","id":123,"result":0}"#,
    response_from_str_string_5_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"hello"}"#,
    response_from_str_string_10_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"hellohello"}"#,
    response_from_str_string_100_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld"}"#,
    response_from_str_string_500_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld"}"#,

    // The custom serde currently looks at all fields.
    // These are for testing the performance if the serde
    // has to parse through a bunch of unrelated fields.
    response_from_str_bad_field_1 => u8 => r#"{"bad_field":0,"jsonrpc":"2.0","id":123,"result":0}"#,
    response_from_str_bad_field_5 => u8 => r#"{"bad_field_1":0,"bad_field_2":0,"bad_field_3":0,"bad_field_4":0,"bad_field_5":0,"jsonrpc":"2.0","id":123,"result":0}"#,
    response_from_str_bad_field_10 => u8 => r#"{"bad_field_1":0,"bad_field_2":0,"bad_field_3":0,"bad_field_4":0,"bad_field_5":0,"bad_field_6":0,"bad_field_7":0,"bad_field_8":0,"bad_field_9":0,"bad_field_10":0,"jsonrpc":"2.0","id":123,"result":0}"#,
    response_from_str_bad_field_100 => u8 => r#"{"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0,"38":0,"39":0,"40":0,"41":0,"42":0,"43":0,"44":0,"45":0,"46":0,"47":0,"48":0,"49":0,"50":0,"51":0,"52":0,"53":0,"54":0,"55":0,"56":0,"57":0,"58":0,"59":0,"60":0,"61":0,"62":0,"63":0,"64":0,"65":0,"66":0,"67":0,"68":0,"69":0,"70":0,"71":0,"72":0,"73":0,"74":0,"75":0,"76":0,"77":0,"78":0,"79":0,"80":0,"81":0,"82":0,"83":0,"84":0,"85":0,"86":0,"87":0,"88":0,"89":0,"90":0,"91":0,"92":0,"93":0,"94":0,"95":0,"96":0,"97":0,"98":0,"99":0,"100":0,"jsonrpc":"2.0","id":123,"result":0}"#,

    // This one is missing the `jsonrpc` field.
    response_from_str_missing_field => u8 => r#"{"id":123,"result":0}"#,
}

/// Generate `to_string_pretty` serialization benchmark functions for [`Response`].
macro_rules! impl_to_string_pretty_benchmark {
    (
        $(
            $fn_name:ident => $request_constructor:expr_2021,
        )*
    ) => {
        $(
            #[named]
            fn $fn_name(c: &mut Criterion) {
                let request = $request_constructor;

                c.bench_function(function_name!(), |b| {
                    b.iter(|| {
                        let _s = to_string_pretty(black_box(&request)).unwrap();
                    });
                });
            }
        )*
    };
}

impl_to_string_pretty_benchmark! {
    response_to_string_pretty_u8 => Response::<u8>::ok(Id::Null, 0),
    response_to_string_pretty_u64 => Response::<u64>::ok(Id::Null, 0),
    response_to_string_pretty_string_5_len => Response::ok(Id::Null, String::from("hello")),
    response_to_string_pretty_string_10_len => Response::ok(Id::Null, String::from("hellohello")),
    response_to_string_pretty_string_100_len => Response::ok(Id::Null, String::from("helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld")),
    response_to_string_pretty_string_500_len => Response::ok(Id::Null, String::from("helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld")),
}

benches/criterion/cuprate-json-rpc/src/lib.rs (new file): 2 lines

@@ -0,0 +1,2 @@
//! Benchmark lib for `cuprate-json-rpc`.
#![allow(unused_crate_dependencies, reason = "used in benchmarks")]

benches/criterion/example/Cargo.toml (new file): 21 lines

@@ -0,0 +1,21 @@
[package]
name = "cuprate-criterion-example"
version = "0.0.0"
edition = "2021"
description = "Criterion benchmarking example for Cuprate"
license = "MIT"
authors = ["hinto-janai"]
repository = "https://github.com/Cuprate/cuprate/tree/main/benches/criterion/example"
keywords = ["cuprate", "criterion", "benchmark", "example"]

[dependencies]
criterion = { workspace = true }
function_name = { workspace = true }
serde_json = { workspace = true, features = ["default"] }

[[bench]]
name = "main"
harness = false

[lints]
workspace = true

benches/criterion/example/README.md (new file): 14 lines

@@ -0,0 +1,14 @@
## `cuprate-criterion-example`
An example of using Criterion for benchmarking Cuprate crates.

Consider copy+pasting this crate to use as a base when creating new Criterion benchmark crates.

## `src/`
Benchmark crates have a `benches/` directory run by `cargo bench`, but they are also crates themselves,
as in, they have a `src` folder that `benches/` can pull code from.

The `src` directories in these benchmarking crates are usually filled with
helper functions, types, etc., that are used repeatedly in the benchmarks.

## `benches/`
These are the actual benchmarks run by `cargo bench`.

benches/criterion/example/benches/example.rs (new file): 48 lines

@@ -0,0 +1,48 @@
//! Benchmarks.
#![allow(unused_attributes, unused_crate_dependencies)]

use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use function_name::named;

use cuprate_criterion_example::SomeHardToCreateObject;

// This is how you register criterion benchmarks.
criterion_group! {
    name = benches;
    config = Criterion::default();
    targets = benchmark_1, benchmark_range,
}
criterion_main!(benches);

/// Benchmark a single input.
///
/// <https://bheisler.github.io/criterion.rs/book/user_guide/benchmarking_with_inputs.html#benchmarking-with-one-input>
#[named]
fn benchmark_1(c: &mut Criterion) {
    // It is recommended to use `function_name!()` as a benchmark
    // identifier instead of manually re-typing the function name.
    c.bench_function(function_name!(), |b| {
        b.iter(|| {
            black_box(SomeHardToCreateObject::from(1));
        });
    });
}

/// Benchmark a range of inputs.
///
/// <https://bheisler.github.io/criterion.rs/book/user_guide/benchmarking_with_inputs.html#benchmarking-with-a-range-of-values>
#[named]
fn benchmark_range(c: &mut Criterion) {
    let mut group = c.benchmark_group(function_name!());

    for i in 0..4 {
        group.throughput(Throughput::Elements(i));
        group.bench_with_input(BenchmarkId::from_parameter(i), &i, |b, &i| {
            b.iter(|| {
                black_box(SomeHardToCreateObject::from(i));
            });
        });
    }

    group.finish();
}

benches/criterion/example/benches/main.rs (new file): 10 lines

@@ -0,0 +1,10 @@
//! Benchmarks examples.
#![allow(unused_crate_dependencies)]

// All modules within `benches/` are `mod`ed here.
mod example;

// And all the Criterion benchmarks are registered like so:
criterion::criterion_main! {
    example::benches,
}

benches/criterion/example/src/lib.rs (new file): 13 lines

@@ -0,0 +1,13 @@
#![doc = include_str!("../README.md")] // See the README for crate documentation.
#![allow(unused_crate_dependencies, reason = "used in benchmarks")]

/// Shared type that all benchmarks can use.
#[expect(dead_code)]
pub struct SomeHardToCreateObject(u64);

impl From<u64> for SomeHardToCreateObject {
    /// Shared function that all benchmarks can use.
    fn from(value: u64) -> Self {
        Self(value)
    }
}

80
binaries/cuprated/Cargo.toml
Normal file
@@ -0,0 +1,80 @@
[package]
name = "cuprated"
version = "0.0.1"
edition = "2021"
description = "The Cuprate Rust Monero node."
license = "AGPL-3.0-only"
authors = ["Boog900", "hinto-janai", "SyntheticBird45"]
repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated"

[dependencies]
# TODO: after v1.0.0, remove unneeded dependencies.
cuprate-consensus = { workspace = true }
cuprate-fast-sync = { workspace = true }
cuprate-consensus-context = { workspace = true }
cuprate-consensus-rules = { workspace = true }
cuprate-cryptonight = { workspace = true }
cuprate-helper = { workspace = true, features = ["serde"] }
cuprate-epee-encoding = { workspace = true }
cuprate-fixed-bytes = { workspace = true }
cuprate-levin = { workspace = true }
cuprate-wire = { workspace = true }
cuprate-p2p = { workspace = true }
cuprate-p2p-core = { workspace = true }
cuprate-dandelion-tower = { workspace = true, features = ["txpool"] }
cuprate-async-buffer = { workspace = true }
cuprate-address-book = { workspace = true }
cuprate-blockchain = { workspace = true }
cuprate-database-service = { workspace = true, features = ["serde"] }
cuprate-txpool = { workspace = true }
cuprate-database = { workspace = true, features = ["serde"] }
cuprate-pruning = { workspace = true }
cuprate-test-utils = { workspace = true }
cuprate-types = { workspace = true }
cuprate-json-rpc = { workspace = true }
cuprate-rpc-interface = { workspace = true }
cuprate-rpc-types = { workspace = true }

# TODO: after v1.0.0, remove unneeded dependencies.
anyhow = { workspace = true }
async-trait = { workspace = true }
bitflags = { workspace = true }
borsh = { workspace = true }
bytemuck = { workspace = true }
bytes = { workspace = true }
cfg-if = { workspace = true }
clap = { workspace = true, features = ["cargo", "help", "wrap_help"] }
chrono = { workspace = true }
crypto-bigint = { workspace = true }
crossbeam = { workspace = true }
curve25519-dalek = { workspace = true }
const_format = { workspace = true }
dashmap = { workspace = true }
dirs = { workspace = true }
futures = { workspace = true }
hex = { workspace = true }
hex-literal = { workspace = true }
indexmap = { workspace = true }
monero-serai = { workspace = true }
paste = { workspace = true }
pin-project = { workspace = true }
randomx-rs = { workspace = true }
rand = { workspace = true }
rand_distr = { workspace = true }
rayon = { workspace = true }
serde_bytes = { workspace = true }
serde_json = { workspace = true }
serde = { workspace = true }
thiserror = { workspace = true }
thread_local = { workspace = true }
tokio-util = { workspace = true }
tokio-stream = { workspace = true }
tokio = { workspace = true }
toml = { workspace = true, features = ["parse", "display"] }
tower = { workspace = true }
tracing-subscriber = { workspace = true, features = ["std", "fmt", "default"] }
tracing = { workspace = true, features = ["default"] }

[lints]
workspace = true
67
binaries/cuprated/Cuprated.toml
Normal file
@@ -0,0 +1,67 @@
#  ____                      _
# / ___|   _ _ __  _ __ __ _| |_ ___
#| |  | | | | '_ \| '__/ _` | __/ _ \
#| |__| |_| | |_) | | | (_| | ||  __/
# \____\__,_| .__/|_|  \__,_|\__\___|
#           |_|
#

## The network to run on, valid values: "Mainnet", "Testnet", "Stagenet".
network = "Mainnet"

## Tracing config.
[tracing]
## The minimum level for log events to be displayed.
level = "info"

## Clear-net config.
[p2p.clear_net]
## The number of outbound connections we should make and maintain.
outbound_connections = 64
## The number of extra connections we should make under load from the rest of Cuprate, i.e. when syncing.
extra_outbound_connections = 8
## The maximum number of inbound connections we should allow.
max_inbound_connections = 128
## The percent of outbound connections that should be to nodes we have not connected to before.
gray_peers_percent = 0.7
## The port to accept connections on; if left as `0`, no connections will be accepted.
p2p_port = 0
## The IP address to listen to connections on.
listen_on = "0.0.0.0"

## The Clear-net addressbook config.
[p2p.clear_net.address_book_config]
## The size of the white peer list, which contains peers we have made a connection to before.
max_white_list_length = 1_000
## The size of the gray peer list, which contains peers we have not made a connection to before.
max_gray_list_length = 5_000
## The amount of time between address book saves.
peer_save_period = { secs = 90, nanos = 0 }

## The block downloader config.
[p2p.block_downloader]
## The size of the buffer of sequential blocks waiting to be verified and added to the chain (bytes).
buffer_bytes = 50_000_000
## The size of the queue of blocks which are waiting for a parent block to be downloaded (bytes).
in_progress_queue_bytes = 50_000_000
## The target size of a batch of blocks (bytes), must not exceed 100MB.
target_batch_bytes = 5_000_000
## The amount of time between checking the pool of connected peers for free peers to download blocks.
check_client_pool_interval = { secs = 30, nanos = 0 }

## Storage config
[storage]
## The number of reader threads to spawn.
reader_threads = "OnePerThread"

## Txpool storage config.
[storage.txpool]
## The database sync mode for the txpool.
sync_mode = "Async"
## The maximum size of all the txs in the pool (bytes).
max_txpool_byte_size = 100_000_000

## Blockchain storage config.
[storage.blockchain]
## The database sync mode for the blockchain.
sync_mode = "Async"
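The `{ secs = ..., nanos = ... }` tables above are not a custom format: they are serde's default encoding for `std::time::Duration`. A minimal sketch of the round-trip, assuming `serde` (with `derive`) and `toml` as dependencies; `SaveConfig` here is a hypothetical stand-in for the real address book config struct:

```rust
use std::time::Duration;

use serde::Deserialize;

/// Hypothetical struct mirroring `[p2p.clear_net.address_book_config]`.
#[derive(Deserialize)]
struct SaveConfig {
    peer_save_period: Duration,
}

fn main() {
    let cfg: SaveConfig =
        toml::from_str("peer_save_period = { secs = 90, nanos = 0 }").unwrap();
    assert_eq!(cfg.peer_save_period, Duration::from_secs(90));
}
```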
2
binaries/cuprated/README.md
Normal file
@@ -0,0 +1,2 @@
# `cuprated`
TODO
101
binaries/cuprated/src/blockchain.rs
Normal file
@@ -0,0 +1,101 @@
//! Blockchain
//!
//! Contains the blockchain manager, syncer, and an interface to mutate the blockchain.
use std::sync::Arc;

use futures::FutureExt;
use tokio::sync::{mpsc, Notify};
use tower::{BoxError, Service, ServiceExt};

use cuprate_blockchain::service::{BlockchainReadHandle, BlockchainWriteHandle};
use cuprate_consensus::{generate_genesis_block, BlockChainContextService, ContextConfig};
use cuprate_cryptonight::cryptonight_hash_v0;
use cuprate_p2p::{block_downloader::BlockDownloaderConfig, NetworkInterface};
use cuprate_p2p_core::{ClearNet, Network};
use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainWriteRequest},
    VerifiedBlockInformation,
};

use crate::constants::PANIC_CRITICAL_SERVICE_ERROR;

mod chain_service;
pub mod interface;
mod manager;
mod syncer;
mod types;

pub use types::{
    ConcreteBlockVerifierService, ConcreteTxVerifierService, ConsensusBlockchainReadHandle,
};

/// Checks if the genesis block is in the blockchain and adds it if not.
pub async fn check_add_genesis(
    blockchain_read_handle: &mut BlockchainReadHandle,
    blockchain_write_handle: &mut BlockchainWriteHandle,
    network: Network,
) {
    // Try to get the chain height; this will fail if the genesis block is not in the DB.
    if blockchain_read_handle
        .ready()
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
        .call(BlockchainReadRequest::ChainHeight)
        .await
        .is_ok()
    {
        return;
    }

    let genesis = generate_genesis_block(network);

    assert_eq!(genesis.miner_transaction.prefix().outputs.len(), 1);
    assert!(genesis.transactions.is_empty());

    blockchain_write_handle
        .ready()
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
        .call(BlockchainWriteRequest::WriteBlock(
            VerifiedBlockInformation {
                block_blob: genesis.serialize(),
                txs: vec![],
                block_hash: genesis.hash(),
                pow_hash: cryptonight_hash_v0(&genesis.serialize_pow_hash()),
                height: 0,
                generated_coins: genesis.miner_transaction.prefix().outputs[0]
                    .amount
                    .unwrap(),
                weight: genesis.miner_transaction.weight(),
                long_term_weight: genesis.miner_transaction.weight(),
                cumulative_difficulty: 1,
                block: genesis,
            },
        ))
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR);
}

/// Initializes the consensus services.
pub async fn init_consensus(
    blockchain_read_handle: BlockchainReadHandle,
    context_config: ContextConfig,
) -> Result<
    (
        ConcreteBlockVerifierService,
        ConcreteTxVerifierService,
        BlockChainContextService,
    ),
    BoxError,
> {
    let read_handle = ConsensusBlockchainReadHandle::new(blockchain_read_handle, BoxError::from);

    let ctx_service =
        cuprate_consensus::initialize_blockchain_context(context_config, read_handle.clone())
            .await?;

    let (block_verifier_svc, tx_verifier_svc) =
        cuprate_consensus::initialize_verifier(read_handle, ctx_service.clone());

    Ok((block_verifier_svc, tx_verifier_svc, ctx_service))
}
72
binaries/cuprated/src/blockchain/chain_service.rs
Normal file
@@ -0,0 +1,72 @@
use std::task::{Context, Poll};

use futures::{future::BoxFuture, FutureExt, TryFutureExt};
use tower::Service;

use cuprate_blockchain::service::BlockchainReadHandle;
use cuprate_p2p::block_downloader::{ChainSvcRequest, ChainSvcResponse};
use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse};

/// The service that allows retrieving the chain state to give to the P2P crates, so we can figure out
/// what blocks we need.
///
/// This has a more minimal interface than [`BlockchainReadRequest`] to make using the p2p crates easier.
#[derive(Clone)]
pub struct ChainService(pub BlockchainReadHandle);

impl Service<ChainSvcRequest> for ChainService {
    type Response = ChainSvcResponse;
    type Error = tower::BoxError;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.0.poll_ready(cx).map_err(Into::into)
    }

    fn call(&mut self, req: ChainSvcRequest) -> Self::Future {
        let map_res = |res: BlockchainResponse| match res {
            BlockchainResponse::CompactChainHistory {
                block_ids,
                cumulative_difficulty,
            } => ChainSvcResponse::CompactHistory {
                block_ids,
                cumulative_difficulty,
            },
            BlockchainResponse::FindFirstUnknown(res) => ChainSvcResponse::FindFirstUnknown(res),
            _ => unreachable!(),
        };

        match req {
            ChainSvcRequest::CompactHistory => self
                .0
                .call(BlockchainReadRequest::CompactChainHistory)
                .map_ok(map_res)
                .map_err(Into::into)
                .boxed(),
            ChainSvcRequest::FindFirstUnknown(req) => self
                .0
                .call(BlockchainReadRequest::FindFirstUnknown(req))
                .map_ok(map_res)
                .map_err(Into::into)
                .boxed(),
            ChainSvcRequest::CumulativeDifficulty => self
                .0
                .call(BlockchainReadRequest::CompactChainHistory)
                .map_ok(|res| {
                    // TODO: create a custom request instead of hijacking this one.
                    // TODO: use the context cache.
                    let BlockchainResponse::CompactChainHistory {
                        cumulative_difficulty,
                        ..
                    } = res
                    else {
                        unreachable!()
                    };

                    ChainSvcResponse::CumulativeDifficulty(cumulative_difficulty)
                })
                .map_err(Into::into)
                .boxed(),
        }
    }
}
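`ChainService` is an ordinary `tower::Service`, and every call site in this diff drives services the same way: wait for readiness, then call. A self-contained toy service showing that `ready().await?.call(req).await` pattern (a sketch assuming `tower` with the `util` feature and `tokio` with `rt` + `macros`):

```rust
use std::{
    future::{ready, Ready},
    task::{Context, Poll},
};

use tower::{Service, ServiceExt};

struct Echo;

impl Service<String> for Echo {
    type Response = String;
    type Error = tower::BoxError;
    type Future = Ready<Result<String, tower::BoxError>>;

    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Always ready; a real service (like the DB handles) may return
        // `Poll::Pending` here to apply backpressure.
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: String) -> Self::Future {
        ready(Ok(req))
    }
}

#[tokio::main]
async fn main() -> Result<(), tower::BoxError> {
    let mut svc = Echo;
    // The pattern used throughout this diff: wait for readiness, then call.
    let res = svc.ready().await?.call("hello".into()).await?;
    assert_eq!(res, "hello");
    Ok(())
}
```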
188
binaries/cuprated/src/blockchain/interface.rs
Normal file
@@ -0,0 +1,188 @@
//! The blockchain manager interface.
//!
//! This module contains all the functions to mutate the blockchain's state in any way, through the
//! blockchain manager.
use std::{
    collections::{HashMap, HashSet},
    sync::{LazyLock, Mutex, OnceLock},
};

use monero_serai::{block::Block, transaction::Transaction};
use tokio::sync::{mpsc, oneshot};
use tower::{Service, ServiceExt};

use cuprate_blockchain::service::BlockchainReadHandle;
use cuprate_consensus::transactions::new_tx_verification_data;
use cuprate_txpool::service::{
    interface::{TxpoolReadRequest, TxpoolReadResponse},
    TxpoolReadHandle,
};
use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse};

use crate::{
    blockchain::manager::{BlockchainManagerCommand, IncomingBlockOk},
    constants::PANIC_CRITICAL_SERVICE_ERROR,
};

/// The channel used to send [`BlockchainManagerCommand`]s to the blockchain manager.
///
/// This channel is initialized in [`init_blockchain_manager`](super::manager::init_blockchain_manager); the functions
/// in this file document what happens if this is not initialized when they are called.
pub(super) static COMMAND_TX: OnceLock<mpsc::Sender<BlockchainManagerCommand>> = OnceLock::new();

/// An error that can be returned from [`handle_incoming_block`].
#[derive(Debug, thiserror::Error)]
pub enum IncomingBlockError {
    /// Some transactions in the block were unknown.
    ///
    /// The inner values are the block hash and the indexes of the missing txs in the block.
    #[error("Unknown transactions in block.")]
    UnknownTransactions([u8; 32], Vec<usize>),
    /// We are missing the block's parent.
    #[error("The block has an unknown parent.")]
    Orphan,
    /// The block was invalid.
    #[error(transparent)]
    InvalidBlock(anyhow::Error),
}

/// Try to add a new block to the blockchain.
///
/// On success returns [`IncomingBlockOk`].
///
/// # Errors
///
/// This function will return an error if:
/// - the block was invalid
/// - we are missing transactions
/// - the block's parent is unknown
pub async fn handle_incoming_block(
    block: Block,
    mut given_txs: HashMap<[u8; 32], Transaction>,
    blockchain_read_handle: &mut BlockchainReadHandle,
    txpool_read_handle: &mut TxpoolReadHandle,
) -> Result<IncomingBlockOk, IncomingBlockError> {
    /// A [`HashSet`] of block hashes that the blockchain manager is currently handling.
    ///
    /// This lock prevents sending the same block to the blockchain manager from multiple connections
    /// before one of them actually gets added to the chain, allowing peers to do other things.
    ///
    /// This is used over something like a dashmap as we expect a lot of collisions in a short amount of
    /// time for new blocks, so we would lose the benefit of sharded locks. A dashmap is made up of `RwLock`s,
    /// which are also more expensive than `Mutex`es.
    static BLOCKS_BEING_HANDLED: LazyLock<Mutex<HashSet<[u8; 32]>>> =
        LazyLock::new(|| Mutex::new(HashSet::new()));

    if given_txs.len() > block.transactions.len() {
        return Err(IncomingBlockError::InvalidBlock(anyhow::anyhow!(
            "Too many transactions given for block"
        )));
    }

    if !block_exists(block.header.previous, blockchain_read_handle)
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
    {
        return Err(IncomingBlockError::Orphan);
    }

    let block_hash = block.hash();

    if block_exists(block_hash, blockchain_read_handle)
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
    {
        return Ok(IncomingBlockOk::AlreadyHave);
    }

    let TxpoolReadResponse::TxsForBlock { mut txs, missing } = txpool_read_handle
        .ready()
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
        .call(TxpoolReadRequest::TxsForBlock(block.transactions.clone()))
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
    else {
        unreachable!()
    };

    if !missing.is_empty() {
        let needed_hashes = missing.iter().map(|index| block.transactions[*index]);

        for needed_hash in needed_hashes {
            let Some(tx) = given_txs.remove(&needed_hash) else {
                // We return the indexes of all txs missing from our pool, not taking into account the txs
                // that were given with the block, as these txs will be dropped. It is not worth it to try to add
                // these txs to the pool as this will only happen with a misbehaving peer or if the txpool reaches
                // the size limit.
                return Err(IncomingBlockError::UnknownTransactions(block_hash, missing));
            };

            txs.insert(
                needed_hash,
                new_tx_verification_data(tx)
                    .map_err(|e| IncomingBlockError::InvalidBlock(e.into()))?,
            );
        }
    }

    let Some(incoming_block_tx) = COMMAND_TX.get() else {
        // We could still be starting up the blockchain manager.
        return Ok(IncomingBlockOk::NotReady);
    };

    // Add the block's hash to the blocks being handled.
    if !BLOCKS_BEING_HANDLED.lock().unwrap().insert(block_hash) {
        // If another place is already adding this block then we can stop.
        return Ok(IncomingBlockOk::AlreadyHave);
    }

    // We must remove the block hash from `BLOCKS_BEING_HANDLED`.
    let _guard = {
        struct RemoveFromBlocksBeingHandled {
            block_hash: [u8; 32],
        }
        impl Drop for RemoveFromBlocksBeingHandled {
            fn drop(&mut self) {
                BLOCKS_BEING_HANDLED
                    .lock()
                    .unwrap()
                    .remove(&self.block_hash);
            }
        }
        RemoveFromBlocksBeingHandled { block_hash }
    };

    let (response_tx, response_rx) = oneshot::channel();

    incoming_block_tx
        .send(BlockchainManagerCommand::AddBlock {
            block,
            prepped_txs: txs,
            response_tx,
        })
        .await
        .expect("TODO: don't actually panic here, an err means we are shutting down");

    response_rx
        .await
        .expect("The blockchain manager will always respond")
        .map_err(IncomingBlockError::InvalidBlock)
}

/// Check if we have a block with the given hash.
async fn block_exists(
    block_hash: [u8; 32],
    blockchain_read_handle: &mut BlockchainReadHandle,
) -> Result<bool, anyhow::Error> {
    let BlockchainResponse::FindBlock(chain) = blockchain_read_handle
        .ready()
        .await?
        .call(BlockchainReadRequest::FindBlock(block_hash))
        .await?
    else {
        unreachable!();
    };

    Ok(chain.is_some())
}
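The `RemoveFromBlocksBeingHandled` value above is a drop guard: once the hash is inserted into the set, its removal is tied to scope exit, so it happens on every early `return` (and on unwind) without repeating cleanup code. The same pattern in a minimal, self-contained sketch using only `std`:

```rust
use std::{
    collections::HashSet,
    sync::{LazyLock, Mutex},
};

static IN_FLIGHT: LazyLock<Mutex<HashSet<u64>>> = LazyLock::new(|| Mutex::new(HashSet::new()));

struct Guard(u64);

impl Drop for Guard {
    fn drop(&mut self) {
        // Runs on any exit path from the scope holding the guard.
        IN_FLIGHT.lock().unwrap().remove(&self.0);
    }
}

fn handle(id: u64) {
    if !IN_FLIGHT.lock().unwrap().insert(id) {
        return; // Someone else is already handling this id.
    }
    let _guard = Guard(id);
    // ... do the work; `id` is removed when `_guard` drops ...
}

fn main() {
    handle(1);
    assert!(IN_FLIGHT.lock().unwrap().is_empty());
}
```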
149
binaries/cuprated/src/blockchain/manager.rs
Normal file
@@ -0,0 +1,149 @@
use std::{collections::HashMap, sync::Arc};

use futures::StreamExt;
use monero_serai::block::Block;
use tokio::sync::{mpsc, oneshot, Notify};
use tower::{Service, ServiceExt};
use tracing::error;

use cuprate_blockchain::service::{BlockchainReadHandle, BlockchainWriteHandle};
use cuprate_consensus::{
    BlockChainContextRequest, BlockChainContextResponse, BlockChainContextService,
    BlockVerifierService, ExtendedConsensusError, TxVerifierService, VerifyBlockRequest,
    VerifyBlockResponse, VerifyTxRequest, VerifyTxResponse,
};
use cuprate_consensus_context::RawBlockChainContext;
use cuprate_p2p::{
    block_downloader::{BlockBatch, BlockDownloaderConfig},
    BroadcastSvc, NetworkInterface,
};
use cuprate_p2p_core::ClearNet;
use cuprate_txpool::service::TxpoolWriteHandle;
use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainResponse},
    Chain, TransactionVerificationData,
};

use crate::{
    blockchain::{
        chain_service::ChainService,
        interface::COMMAND_TX,
        syncer,
        types::{ConcreteBlockVerifierService, ConsensusBlockchainReadHandle},
    },
    constants::PANIC_CRITICAL_SERVICE_ERROR,
};

mod commands;
mod handler;

pub use commands::{BlockchainManagerCommand, IncomingBlockOk};

/// Initialize the blockchain manager.
///
/// This function sets up the [`BlockchainManager`] and the [`syncer`] so that the functions in [`interface`](super::interface)
/// can be called.
pub async fn init_blockchain_manager(
    clearnet_interface: NetworkInterface<ClearNet>,
    blockchain_write_handle: BlockchainWriteHandle,
    blockchain_read_handle: BlockchainReadHandle,
    txpool_write_handle: TxpoolWriteHandle,
    mut blockchain_context_service: BlockChainContextService,
    block_verifier_service: ConcreteBlockVerifierService,
    block_downloader_config: BlockDownloaderConfig,
) {
    // TODO: find good values for these size limits
    let (batch_tx, batch_rx) = mpsc::channel(1);
    let stop_current_block_downloader = Arc::new(Notify::new());
    let (command_tx, command_rx) = mpsc::channel(3);

    COMMAND_TX.set(command_tx).unwrap();

    tokio::spawn(syncer::syncer(
        blockchain_context_service.clone(),
        ChainService(blockchain_read_handle.clone()),
        clearnet_interface.clone(),
        batch_tx,
        Arc::clone(&stop_current_block_downloader),
        block_downloader_config,
    ));

    let BlockChainContextResponse::Context(blockchain_context) = blockchain_context_service
        .ready()
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
        .call(BlockChainContextRequest::Context)
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
    else {
        unreachable!()
    };

    let manager = BlockchainManager {
        blockchain_write_handle,
        blockchain_read_handle,
        txpool_write_handle,
        blockchain_context_service,
        cached_blockchain_context: blockchain_context.unchecked_blockchain_context().clone(),
        block_verifier_service,
        stop_current_block_downloader,
        broadcast_svc: clearnet_interface.broadcast_svc(),
    };

    tokio::spawn(manager.run(batch_rx, command_rx));
}

/// The blockchain manager.
///
/// This handles all mutation of the blockchain; anything that changes the state of the blockchain must
/// go through this.
///
/// Other parts of Cuprate can interface with this by using the functions in [`interface`](super::interface).
pub struct BlockchainManager {
    /// The [`BlockchainWriteHandle`]; this is the _only_ part of Cuprate where a [`BlockchainWriteHandle`]
    /// is held.
    blockchain_write_handle: BlockchainWriteHandle,
    /// A [`BlockchainReadHandle`].
    blockchain_read_handle: BlockchainReadHandle,
    /// A [`TxpoolWriteHandle`].
    txpool_write_handle: TxpoolWriteHandle,
    // TODO: Improve the API of the cache service.
    // TODO: rename the cache service -> `BlockchainContextService`.
    /// The blockchain context cache; this caches the current state of the blockchain to quickly calculate/retrieve
    /// values without needing to go to a [`BlockchainReadHandle`].
    blockchain_context_service: BlockChainContextService,
    /// A cached context representing the current state.
    cached_blockchain_context: RawBlockChainContext,
    /// The block verifier service, to verify incoming blocks.
    block_verifier_service: ConcreteBlockVerifierService,
    /// A [`Notify`] to tell the [syncer](syncer::syncer) that we want to cancel this current download
    /// attempt.
    stop_current_block_downloader: Arc<Notify>,
    /// The broadcast service, to broadcast new blocks.
    broadcast_svc: BroadcastSvc<ClearNet>,
}

impl BlockchainManager {
    /// The [`BlockchainManager`] task.
    pub async fn run(
        mut self,
        mut block_batch_rx: mpsc::Receiver<BlockBatch>,
        mut command_rx: mpsc::Receiver<BlockchainManagerCommand>,
    ) {
        loop {
            tokio::select! {
                Some(batch) = block_batch_rx.recv() => {
                    self.handle_incoming_block_batch(batch).await;
                }
                Some(incoming_command) = command_rx.recv() => {
                    self.handle_command(incoming_command).await;
                }
                else => {
                    todo!("TODO: exit the BC manager")
                }
            }
        }
    }
}
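The `run` loop above is the standard actor shape: one task owning all mutable state, fed by channels, with `select!`'s `else` arm firing only once every branch is disabled (here, when both channels are closed). A self-contained sketch of that shape, assuming only `tokio` with the `macros` and `rt` features:

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (batch_tx, mut batch_rx) = mpsc::channel::<u32>(1);
    let (cmd_tx, mut cmd_rx) = mpsc::channel::<&'static str>(1);

    batch_tx.send(7).await.unwrap();
    cmd_tx.send("status").await.unwrap();
    drop((batch_tx, cmd_tx)); // Close both channels.

    loop {
        tokio::select! {
            // When `recv()` returns `None`, the `Some(..)` pattern fails
            // and that branch is disabled for the rest of the loop.
            Some(batch) = batch_rx.recv() => println!("batch: {batch}"),
            Some(cmd) = cmd_rx.recv() => println!("command: {cmd}"),
            // Runs only once every other branch is disabled.
            else => break,
        }
    }
}
```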
32
binaries/cuprated/src/blockchain/manager/commands.rs
Normal file
@@ -0,0 +1,32 @@
//! This module contains the commands for the blockchain manager.
use std::collections::HashMap;

use monero_serai::block::Block;
use tokio::sync::oneshot;

use cuprate_types::TransactionVerificationData;

/// The blockchain manager commands.
pub enum BlockchainManagerCommand {
    /// Attempt to add a new block to the blockchain.
    AddBlock {
        /// The [`Block`] to add.
        block: Block,
        /// All the transactions defined in [`Block::transactions`].
        prepped_txs: HashMap<[u8; 32], TransactionVerificationData>,
        /// The channel to send the response down.
        response_tx: oneshot::Sender<Result<IncomingBlockOk, anyhow::Error>>,
    },
}

/// The [`Ok`] response for an incoming block.
pub enum IncomingBlockOk {
    /// The block was added to the main-chain.
    AddedToMainChain,
    /// The blockchain manager is not ready yet.
    NotReady,
    /// The block was added to an alt-chain.
    AddedToAltChain,
    /// We already have the block.
    AlreadyHave,
}
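Each command carries its own `oneshot::Sender`, so the manager can route a result back to exactly the caller that issued it. A minimal self-contained sketch of that request/response handshake, assuming only `tokio`:

```rust
use tokio::sync::{mpsc, oneshot};

enum Command {
    Add {
        x: u64,
        response_tx: oneshot::Sender<u64>,
    },
}

#[tokio::main]
async fn main() {
    let (command_tx, mut command_rx) = mpsc::channel(8);

    // The "manager": owns the state and answers each command on its channel.
    tokio::spawn(async move {
        while let Some(Command::Add { x, response_tx }) = command_rx.recv().await {
            drop(response_tx.send(x + 1));
        }
    });

    let (response_tx, response_rx) = oneshot::channel();
    command_tx
        .send(Command::Add { x: 1, response_tx })
        .await
        .unwrap();
    assert_eq!(response_rx.await.unwrap(), 2);
}
```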
505
binaries/cuprated/src/blockchain/manager/handler.rs
Normal file
@@ -0,0 +1,505 @@
//! The blockchain manager handler functions.
use std::{collections::HashMap, ops::ControlFlow, sync::Arc};

use bytes::Bytes;
use futures::{TryFutureExt, TryStreamExt};
use monero_serai::{
    block::Block,
    transaction::{Input, Transaction},
};
use rayon::prelude::*;
use tower::{Service, ServiceExt};
use tracing::info;

use cuprate_blockchain::service::{BlockchainReadHandle, BlockchainWriteHandle};
use cuprate_consensus::{
    block::PreparedBlock, transactions::new_tx_verification_data, BlockChainContextRequest,
    BlockChainContextResponse, BlockVerifierService, ExtendedConsensusError, VerifyBlockRequest,
    VerifyBlockResponse, VerifyTxRequest, VerifyTxResponse,
};
use cuprate_consensus_context::NewBlockData;
use cuprate_helper::cast::usize_to_u64;
use cuprate_p2p::{block_downloader::BlockBatch, constants::LONG_BAN, BroadcastRequest};
use cuprate_txpool::service::interface::TxpoolWriteRequest;
use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest},
    AltBlockInformation, HardFork, TransactionVerificationData, VerifiedBlockInformation,
};

use crate::{
    blockchain::manager::commands::{BlockchainManagerCommand, IncomingBlockOk},
    constants::PANIC_CRITICAL_SERVICE_ERROR,
    signals::REORG_LOCK,
};

impl super::BlockchainManager {
    /// Handle an incoming command from another part of Cuprate.
    ///
    /// # Panics
    ///
    /// This function will panic if any internal service returns an unexpected error that we cannot
    /// recover from.
    pub async fn handle_command(&mut self, command: BlockchainManagerCommand) {
        match command {
            BlockchainManagerCommand::AddBlock {
                block,
                prepped_txs,
                response_tx,
            } => {
                let res = self.handle_incoming_block(block, prepped_txs).await;

                drop(response_tx.send(res));
            }
        }
    }

    /// Broadcast a valid block to the network.
    async fn broadcast_block(&mut self, block_bytes: Bytes, blockchain_height: usize) {
        self.broadcast_svc
            .ready()
            .await
            .expect("Broadcast service is Infallible.")
            .call(BroadcastRequest::Block {
                block_bytes,
                current_blockchain_height: usize_to_u64(blockchain_height),
            })
            .await
            .expect("Broadcast service is Infallible.");
    }

    /// Handle an incoming [`Block`].
    ///
    /// This function will route to [`Self::handle_incoming_alt_block`] if the block does not follow
    /// the top of the main chain.
    ///
    /// Otherwise, this function will validate and add the block to the main chain.
    ///
    /// # Panics
    ///
    /// This function will panic if any internal service returns an unexpected error that we cannot
    /// recover from.
    pub async fn handle_incoming_block(
        &mut self,
        block: Block,
        prepared_txs: HashMap<[u8; 32], TransactionVerificationData>,
    ) -> Result<IncomingBlockOk, anyhow::Error> {
        if block.header.previous != self.cached_blockchain_context.top_hash {
            self.handle_incoming_alt_block(block, prepared_txs).await?;
            return Ok(IncomingBlockOk::AddedToAltChain);
        }

        let VerifyBlockResponse::MainChain(verified_block) = self
            .block_verifier_service
            .ready()
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR)
            .call(VerifyBlockRequest::MainChain {
                block,
                prepared_txs,
            })
            .await?
        else {
            unreachable!();
        };

        let block_blob = Bytes::copy_from_slice(&verified_block.block_blob);
        self.add_valid_block_to_main_chain(verified_block).await;

        self.broadcast_block(block_blob, self.cached_blockchain_context.chain_height)
            .await;

        Ok(IncomingBlockOk::AddedToMainChain)
    }

    /// Handle an incoming [`BlockBatch`].
    ///
    /// This function will route to [`Self::handle_incoming_block_batch_main_chain`] or [`Self::handle_incoming_block_batch_alt_chain`]
    /// depending on if the first block in the batch follows from the top of our chain.
    ///
    /// # Panics
    ///
    /// This function will panic if the batch is empty or if any internal service returns an unexpected
    /// error that we cannot recover from.
    pub async fn handle_incoming_block_batch(&mut self, batch: BlockBatch) {
        let (first_block, _) = batch
            .blocks
            .first()
            .expect("Block batch should not be empty");

        if first_block.header.previous == self.cached_blockchain_context.top_hash {
            self.handle_incoming_block_batch_main_chain(batch).await;
        } else {
            self.handle_incoming_block_batch_alt_chain(batch).await;
        }
    }

    /// Handles an incoming [`BlockBatch`] that follows the main chain.
    ///
    /// This function will handle validating the blocks in the batch and adding them to the blockchain
    /// database and context cache.
    ///
    /// This function will also handle banning the peer and canceling the block downloader if the
    /// block is invalid.
    ///
    /// # Panics
    ///
    /// This function will panic if any internal service returns an unexpected error that we cannot
    /// recover from or if the incoming batch contains no blocks.
    async fn handle_incoming_block_batch_main_chain(&mut self, batch: BlockBatch) {
        info!(
            "Handling batch to main chain height: {}",
            batch.blocks.first().unwrap().0.number().unwrap()
        );

        let batch_prep_res = self
            .block_verifier_service
            .ready()
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR)
            .call(VerifyBlockRequest::MainChainBatchPrepareBlocks {
                blocks: batch.blocks,
            })
            .await;

        let prepped_blocks = match batch_prep_res {
            Ok(VerifyBlockResponse::MainChainBatchPrepped(prepped_blocks)) => prepped_blocks,
            Err(_) => {
                batch.peer_handle.ban_peer(LONG_BAN);
                self.stop_current_block_downloader.notify_one();
                return;
            }
            _ => unreachable!(),
        };

        for (block, txs) in prepped_blocks {
            let verify_res = self
                .block_verifier_service
                .ready()
                .await
                .expect(PANIC_CRITICAL_SERVICE_ERROR)
                .call(VerifyBlockRequest::MainChainPrepped { block, txs })
                .await;

            let verified_block = match verify_res {
                Ok(VerifyBlockResponse::MainChain(verified_block)) => verified_block,
                Err(_) => {
                    batch.peer_handle.ban_peer(LONG_BAN);
                    self.stop_current_block_downloader.notify_one();
                    return;
                }
                _ => unreachable!(),
            };

            self.add_valid_block_to_main_chain(verified_block).await;
        }
    }

    /// Handles an incoming [`BlockBatch`] that does not follow the main-chain.
    ///
    /// This function will handle validating the alt-blocks to add them to our cache and reorging the
    /// chain if the alt-chain has a higher cumulative difficulty.
    ///
    /// This function will also handle banning the peer and canceling the block downloader if the
    /// alt block is invalid or if a reorg fails.
    ///
    /// # Panics
    ///
    /// This function will panic if any internal service returns an unexpected error that we cannot
    /// recover from.
    async fn handle_incoming_block_batch_alt_chain(&mut self, mut batch: BlockBatch) {
        // TODO: this needs testing (this whole section does but alt-blocks specifically).

        let mut blocks = batch.blocks.into_iter();

        while let Some((block, txs)) = blocks.next() {
            // Async blocks work as try blocks.
            let res = async {
                let txs = txs
                    .into_par_iter()
                    .map(|tx| {
                        let tx = new_tx_verification_data(tx)?;
                        Ok((tx.tx_hash, tx))
                    })
                    .collect::<Result<_, anyhow::Error>>()?;

                let reorged = self.handle_incoming_alt_block(block, txs).await?;

                Ok::<_, anyhow::Error>(reorged)
            }
            .await;

            match res {
                Err(e) => {
                    batch.peer_handle.ban_peer(LONG_BAN);
                    self.stop_current_block_downloader.notify_one();
                    return;
                }
                Ok(AddAltBlock::Reorged) => {
                    // Collect the remaining blocks and add them to the main chain instead.
                    batch.blocks = blocks.collect();
                    self.handle_incoming_block_batch_main_chain(batch).await;
                    return;
                }
                // Continue adding alt blocks.
                Ok(AddAltBlock::Cached) => (),
            }
        }
    }

    /// Handles an incoming alt [`Block`].
    ///
    /// This function will do some pre-validation of the alt block; then, if the cumulative difficulty
    /// of the alt chain is higher than the main chain's, it will attempt a reorg, otherwise it will add
    /// the alt block to the alt block cache.
    ///
    /// # Errors
    ///
    /// This will return an [`Err`] if:
    /// - The alt block was invalid.
    /// - An attempt to reorg the chain failed.
    ///
    /// # Panics
    ///
    /// This function will panic if any internal service returns an unexpected error that we cannot
    /// recover from.
    async fn handle_incoming_alt_block(
        &mut self,
        block: Block,
        prepared_txs: HashMap<[u8; 32], TransactionVerificationData>,
    ) -> Result<AddAltBlock, anyhow::Error> {
        let VerifyBlockResponse::AltChain(alt_block_info) = self
            .block_verifier_service
            .ready()
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR)
            .call(VerifyBlockRequest::AltChain {
                block,
                prepared_txs,
            })
            .await?
        else {
            unreachable!();
        };

        // TODO: check in consensus crate if an alt block with this hash already exists.

        // If the alt chain has a higher cumulative difficulty than the main chain, attempt a reorg.
        if alt_block_info.cumulative_difficulty
            > self.cached_blockchain_context.cumulative_difficulty
        {
            self.try_do_reorg(alt_block_info).await?;
            return Ok(AddAltBlock::Reorged);
        }

        self.blockchain_write_handle
            .ready()
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR)
            .call(BlockchainWriteRequest::WriteAltBlock(alt_block_info))
            .await?;

        Ok(AddAltBlock::Cached)
    }

    /// Attempt a re-org with the given top block of the alt-chain.
    ///
    /// This function will take a write lock on [`REORG_LOCK`] and then set up the blockchain database
    /// and context cache to verify the alt-chain. It will then attempt to verify and add each block
    /// in the alt-chain to the main-chain, releasing the lock on [`REORG_LOCK`] when finished.
    ///
    /// # Errors
    ///
    /// This function will return an [`Err`] if the re-org was unsuccessful; if this happens, the chain
    /// will be returned to the state it was in when the function was called.
    ///
    /// # Panics
    ///
    /// This function will panic if any internal service returns an unexpected error that we cannot
    /// recover from.
    async fn try_do_reorg(
        &mut self,
        top_alt_block: AltBlockInformation,
    ) -> Result<(), anyhow::Error> {
        let _guard = REORG_LOCK.write().await;

        let BlockchainResponse::AltBlocksInChain(mut alt_blocks) = self
            .blockchain_read_handle
            .ready()
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR)
            .call(BlockchainReadRequest::AltBlocksInChain(
                top_alt_block.chain_id,
            ))
            .await?
        else {
            unreachable!();
        };

        alt_blocks.push(top_alt_block);

        let split_height = alt_blocks[0].height;
        let current_main_chain_height = self.cached_blockchain_context.chain_height;

        let BlockchainResponse::PopBlocks(old_main_chain_id) = self
            .blockchain_write_handle
            .ready()
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR)
            .call(BlockchainWriteRequest::PopBlocks(
                current_main_chain_height - split_height + 1,
            ))
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR)
        else {
            unreachable!();
        };

        self.blockchain_context_service
            .ready()
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR)
            .call(BlockChainContextRequest::PopBlocks {
                numb_blocks: current_main_chain_height - split_height + 1,
            })
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR);

        let reorg_res = self.verify_add_alt_blocks_to_main_chain(alt_blocks).await;

        match reorg_res {
            Ok(()) => Ok(()),
            Err(e) => {
                todo!("Reverse reorg")
            }
        }
    }

    /// Verify and add a list of [`AltBlockInformation`]s to the main-chain.
    ///
    /// This function assumes the first [`AltBlockInformation`] is the next block in the blockchain
    /// for the blockchain database and the context cache, or in other words that the blockchain database
    /// and context cache have already had the top blocks popped to where the alt-chain meets the main-chain.
    ///
    /// # Errors
    ///
    /// This function will return an [`Err`] if the alt-blocks were invalid; in this case the re-org should
    /// be aborted and the chain should be returned to its previous state.
    ///
    /// # Panics
    ///
    /// This function will panic if any internal service returns an unexpected error that we cannot
    /// recover from.
    async fn verify_add_alt_blocks_to_main_chain(
        &mut self,
        alt_blocks: Vec<AltBlockInformation>,
    ) -> Result<(), anyhow::Error> {
        for mut alt_block in alt_blocks {
            let prepped_txs = alt_block
                .txs
                .drain(..)
                .map(|tx| Ok(Arc::new(tx.try_into()?)))
                .collect::<Result<_, anyhow::Error>>()?;

            let prepped_block = PreparedBlock::new_alt_block(alt_block)?;

            let VerifyBlockResponse::MainChain(verified_block) = self
                .block_verifier_service
                .ready()
                .await
                .expect(PANIC_CRITICAL_SERVICE_ERROR)
                .call(VerifyBlockRequest::MainChainPrepped {
                    block: prepped_block,
                    txs: prepped_txs,
                })
                .await?
            else {
                unreachable!();
            };

            self.add_valid_block_to_main_chain(verified_block).await;
        }

        Ok(())
    }

    /// Adds a [`VerifiedBlockInformation`] to the main-chain.
    ///
    /// This function will update the blockchain database and the context cache; it will also
    /// update [`Self::cached_blockchain_context`].
    ///
    /// # Panics
    ///
    /// This function will panic if any internal service returns an unexpected error that we cannot
    /// recover from.
    pub async fn add_valid_block_to_main_chain(
        &mut self,
        verified_block: VerifiedBlockInformation,
    ) {
        // FIXME: this is pretty inefficient, we should probably return the KI map created in the consensus crate.
        let spent_key_images = verified_block
            .txs
            .iter()
            .flat_map(|tx| {
                tx.tx.prefix().inputs.iter().map(|input| match input {
                    Input::ToKey { key_image, .. } => key_image.compress().0,
                    Input::Gen(_) => unreachable!(),
                })
            })
            .collect::<Vec<[u8; 32]>>();

        self.blockchain_context_service
            .ready()
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR)
            .call(BlockChainContextRequest::Update(NewBlockData {
                block_hash: verified_block.block_hash,
                height: verified_block.height,
                timestamp: verified_block.block.header.timestamp,
                weight: verified_block.weight,
                long_term_weight: verified_block.long_term_weight,
                generated_coins: verified_block.generated_coins,
                vote: HardFork::from_vote(verified_block.block.header.hardfork_signal),
                cumulative_difficulty: verified_block.cumulative_difficulty,
            }))
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR);

        self.blockchain_write_handle
            .ready()
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR)
            .call(BlockchainWriteRequest::WriteBlock(verified_block))
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR);

        let BlockChainContextResponse::Context(blockchain_context) = self
            .blockchain_context_service
            .ready()
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR)
            .call(BlockChainContextRequest::Context)
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR)
        else {
            unreachable!();
        };

        self.cached_blockchain_context = blockchain_context.unchecked_blockchain_context().clone();

        self.txpool_write_handle
            .ready()
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR)
            .call(TxpoolWriteRequest::NewBlock { spent_key_images })
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR);
    }
}

/// The result from successfully adding an alt-block.
enum AddAltBlock {
    /// The alt-block was cached.
    Cached,
    /// The chain was reorged.
    Reorged,
}
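`handle_incoming_block_batch_alt_chain` leans on the fact that `?` inside an `async` block short-circuits only that block, which makes the block behave like a (still-unstable) `try` block. A self-contained sketch, assuming `tokio` for the runtime:

```rust
#[tokio::main]
async fn main() {
    // `?` inside the async block only exits the block, yielding a `Result`
    // we can match on, exactly like `res` in the batch handler above.
    let res: Result<u64, std::num::ParseIntError> = async {
        let x: u64 = "42".parse()?;
        Ok(x + 1)
    }
    .await;

    assert_eq!(res, Ok(43));
}
```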
147
binaries/cuprated/src/blockchain/syncer.rs
Normal file
@@ -0,0 +1,147 @@
// FIXME: This whole module is not great and should be rewritten when the PeerSet is made.
use std::{sync::Arc, time::Duration};

use futures::StreamExt;
use tokio::{
    sync::{mpsc, Notify},
    time::interval,
};
use tower::{Service, ServiceExt};
use tracing::instrument;

use cuprate_consensus::{BlockChainContext, BlockChainContextRequest, BlockChainContextResponse};
use cuprate_p2p::{
    block_downloader::{BlockBatch, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse},
    NetworkInterface, PeerSetRequest, PeerSetResponse,
};
use cuprate_p2p_core::ClearNet;

const CHECK_SYNC_FREQUENCY: Duration = Duration::from_secs(30);

/// An error returned from the [`syncer`].
#[derive(Debug, thiserror::Error)]
pub enum SyncerError {
    #[error("Incoming block channel closed.")]
    IncomingBlockChannelClosed,
    #[error("One of our services returned an error: {0}.")]
    ServiceError(#[from] tower::BoxError),
}

/// The syncer task that makes sure we are fully synchronised with our connected peers.
#[instrument(level = "debug", skip_all)]
pub async fn syncer<C, CN>(
    mut context_svc: C,
    our_chain: CN,
    mut clearnet_interface: NetworkInterface<ClearNet>,
    incoming_block_batch_tx: mpsc::Sender<BlockBatch>,
    stop_current_block_downloader: Arc<Notify>,
    block_downloader_config: BlockDownloaderConfig,
) -> Result<(), SyncerError>
where
    C: Service<
        BlockChainContextRequest,
        Response = BlockChainContextResponse,
        Error = tower::BoxError,
    >,
    C::Future: Send + 'static,
    CN: Service<ChainSvcRequest, Response = ChainSvcResponse, Error = tower::BoxError>
        + Clone
        + Send
        + 'static,
    CN::Future: Send + 'static,
{
    tracing::info!("Starting blockchain syncer");

    let mut check_sync_interval = interval(CHECK_SYNC_FREQUENCY);

    let BlockChainContextResponse::Context(mut blockchain_ctx) = context_svc
        .ready()
        .await?
        .call(BlockChainContextRequest::Context)
        .await?
    else {
        unreachable!();
    };

    tracing::debug!("Waiting for new sync info in top sync channel");

    loop {
        check_sync_interval.tick().await;

        tracing::trace!("Checking connected peers to see if we are behind");

        check_update_blockchain_context(&mut context_svc, &mut blockchain_ctx).await?;
        let raw_blockchain_context = blockchain_ctx.unchecked_blockchain_context();

        let PeerSetResponse::MostPoWSeen {
            cumulative_difficulty,
            ..
        } = clearnet_interface
            .peer_set()
            .ready()
            .await?
            .call(PeerSetRequest::MostPoWSeen)
            .await?
        else {
            unreachable!();
        };

        if cumulative_difficulty <= raw_blockchain_context.cumulative_difficulty {
            continue;
        }

        tracing::debug!(
            "We are behind peers' claimed cumulative difficulty, starting block downloader"
        );
        let mut block_batch_stream =
            clearnet_interface.block_downloader(our_chain.clone(), block_downloader_config);

        loop {
            tokio::select! {
                () = stop_current_block_downloader.notified() => {
                    tracing::info!("Stopping block downloader");
                    break;
                }
                batch = block_batch_stream.next() => {
                    let Some(batch) = batch else {
                        break;
                    };

                    tracing::debug!("Got batch, len: {}", batch.blocks.len());
                    if incoming_block_batch_tx.send(batch).await.is_err() {
                        return Err(SyncerError::IncomingBlockChannelClosed);
                    }
                }
            }
        }
    }
}

/// Checks if we should update the given [`BlockChainContext`] and updates it if needed.
async fn check_update_blockchain_context<C>(
    context_svc: C,
    old_context: &mut BlockChainContext,
) -> Result<(), tower::BoxError>
where
    C: Service<
        BlockChainContextRequest,
        Response = BlockChainContextResponse,
        Error = tower::BoxError,
    >,
    C::Future: Send + 'static,
{
    if old_context.blockchain_context().is_ok() {
        return Ok(());
    }

    let BlockChainContextResponse::Context(ctx) = context_svc
        .oneshot(BlockChainContextRequest::Context)
        .await?
    else {
        unreachable!();
    };

    *old_context = ctx;

    Ok(())
}
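The inner loop above is a common cancellation shape: a `Notify` shared with the manager interrupts an in-progress download without killing the whole syncer task. A minimal self-contained sketch of that pattern, assuming only `tokio` with `time`, `sync`, `macros`, and `rt` features:

```rust
use std::{sync::Arc, time::Duration};

use tokio::sync::Notify;

#[tokio::main]
async fn main() {
    let stop = Arc::new(Notify::new());
    let stop2 = Arc::clone(&stop);

    let downloader = tokio::spawn(async move {
        loop {
            tokio::select! {
                () = stop2.notified() => break, // Cancelled from outside.
                () = tokio::time::sleep(Duration::from_millis(10)) => {
                    // ... fetch/process the next block batch ...
                }
            }
        }
    });

    // `notify_one` stores a permit, so this works even if the task has not
    // reached `notified()` yet.
    stop.notify_one();
    downloader.await.unwrap();
}
```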
18
binaries/cuprated/src/blockchain/types.rs
Normal file
@@ -0,0 +1,18 @@
use tower::util::MapErr;

use cuprate_blockchain::{cuprate_database::RuntimeError, service::BlockchainReadHandle};
use cuprate_consensus::{BlockChainContextService, BlockVerifierService, TxVerifierService};

/// The [`BlockVerifierService`] with all generic types defined.
pub type ConcreteBlockVerifierService = BlockVerifierService<
    BlockChainContextService,
    ConcreteTxVerifierService,
    ConsensusBlockchainReadHandle,
>;

/// The [`TxVerifierService`] with all generic types defined.
pub type ConcreteTxVerifierService = TxVerifierService<ConsensusBlockchainReadHandle>;

/// The [`BlockchainReadHandle`] with the [`tower::Service::Error`] mapped to conform to what the consensus crate requires.
pub type ConsensusBlockchainReadHandle =
    MapErr<BlockchainReadHandle, fn(RuntimeError) -> tower::BoxError>;
binaries/cuprated/src/config.rs
Normal file
159
binaries/cuprated/src/config.rs
Normal file
|
@ -0,0 +1,159 @@
|
|||
//! cuprated config
|
||||
use std::{
|
||||
fs::{read_to_string, File},
|
||||
io,
|
||||
path::Path,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use clap::Parser;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use cuprate_consensus::ContextConfig;
|
||||
use cuprate_helper::{
|
||||
fs::{CUPRATE_CONFIG_DIR, DEFAULT_CONFIG_FILE_NAME},
|
||||
network::Network,
|
||||
};
|
||||
use cuprate_p2p::block_downloader::BlockDownloaderConfig;
|
||||
use cuprate_p2p_core::{ClearNet, ClearNetServerCfg};
|
||||
|
||||
mod args;
|
||||
mod fs;
|
||||
mod p2p;
|
||||
mod storage;
|
||||
mod tracing_config;
|
||||
|
||||
use crate::config::fs::FileSystemConfig;
|
||||
use p2p::P2PConfig;
|
||||
use storage::StorageConfig;
|
||||
use tracing_config::TracingConfig;
|
||||
|
||||
/// Reads the args & config file, returning a [`Config`].
|
||||
pub fn read_config_and_args() -> Config {
|
||||
let args = args::Args::parse();
|
||||
args.do_quick_requests();
|
||||
|
||||
let config: Config = if let Some(config_file) = &args.config_file {
|
||||
// If a config file was set in the args try to read it and exit if we can't.
|
||||
match Config::read_from_path(config_file) {
|
||||
Ok(config) => config,
|
||||
Err(e) => {
|
||||
eprintln!("Failed to read config from file: {e}");
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// First attempt to read the config file from the current directory.
|
||||
std::env::current_dir()
|
||||
        .map(|path| path.join(DEFAULT_CONFIG_FILE_NAME))
        .map_err(Into::into)
        .and_then(Config::read_from_path)
        .inspect_err(|e| tracing::debug!("Failed to read config from current dir: {e}"))
        // Otherwise try the main config directory.
        .or_else(|_| {
            let file = CUPRATE_CONFIG_DIR.join(DEFAULT_CONFIG_FILE_NAME);
            Config::read_from_path(file)
        })
        .inspect_err(|e| {
            tracing::debug!("Failed to read config from config dir: {e}");
            eprintln!("Failed to find/read config file, using default config.");
        })
        .unwrap_or_default()
    };

    args.apply_args(config)
}

/// The config for all of Cuprate.
#[derive(Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct Config {
    /// The network we should run on.
    network: Network,

    /// [`tracing`] config.
    tracing: TracingConfig,

    /// The P2P network config.
    p2p: P2PConfig,

    /// The storage config.
    storage: StorageConfig,

    /// The file system config.
    fs: FileSystemConfig,
}

impl Config {
    /// Attempts to read a config file in [`toml`] format from the given [`Path`].
    ///
    /// # Errors
    ///
    /// Will return an [`Err`] if the file cannot be read or if the file is not a valid [`toml`] config.
    fn read_from_path(file: impl AsRef<Path>) -> Result<Self, anyhow::Error> {
        let file_text = read_to_string(file.as_ref())?;

        Ok(toml::from_str(&file_text)
            .inspect(|_| eprintln!("Using config at: {}", file.as_ref().to_string_lossy()))
            .inspect_err(|e| {
                eprintln!("{e}");
                eprintln!(
                    "Failed to parse config file at: {}",
                    file.as_ref().to_string_lossy()
                );
            })?)
    }

    /// Returns the current [`Network`] we are running on.
    pub const fn network(&self) -> Network {
        self.network
    }

    /// The [`ClearNet`] [`cuprate_p2p::P2PConfig`].
    pub fn clearnet_p2p_config(&self) -> cuprate_p2p::P2PConfig<ClearNet> {
        cuprate_p2p::P2PConfig {
            network: self.network,
            seeds: p2p::clear_net_seed_nodes(self.network),
            outbound_connections: self.p2p.clear_net.general.outbound_connections,
            extra_outbound_connections: self.p2p.clear_net.general.extra_outbound_connections,
            max_inbound_connections: self.p2p.clear_net.general.max_inbound_connections,
            gray_peers_percent: self.p2p.clear_net.general.gray_peers_percent,
            server_config: Some(ClearNetServerCfg {
                ip: self.p2p.clear_net.listen_on,
            }),
            p2p_port: self.p2p.clear_net.general.p2p_port,
            // TODO: set this if a public RPC server is set.
            rpc_port: 0,
            address_book_config: self
                .p2p
                .clear_net
                .general
                .address_book_config(&self.fs.cache_directory, self.network),
        }
    }

    /// The [`ContextConfig`].
    pub const fn context_config(&self) -> ContextConfig {
        match self.network {
            Network::Mainnet => ContextConfig::main_net(),
            Network::Stagenet => ContextConfig::stage_net(),
            Network::Testnet => ContextConfig::test_net(),
        }
    }

    /// The [`cuprate_blockchain`] config.
    pub fn blockchain_config(&self) -> cuprate_blockchain::config::Config {
        let blockchain = &self.storage.blockchain;

        // We don't set reader threads as we manually make the reader threadpool.
        cuprate_blockchain::config::ConfigBuilder::default()
            .network(self.network)
            .data_directory(self.fs.data_directory.clone())
            .sync_mode(blockchain.shared.sync_mode)
            .build()
    }

    /// The [`BlockDownloaderConfig`].
    pub fn block_downloader_config(&self) -> BlockDownloaderConfig {
        self.p2p.block_downloader.clone().into()
    }
}
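The resolution order above (current directory first, then the main config directory, then built-in defaults) can be sketched with `std` alone. `resolve_config`, `FILE_NAME`, and the unit-error plumbing below are illustrative stand-ins, not the real `cuprated` API:

use std::{fs, path::PathBuf};

#[derive(Default, Debug)]
struct Config; // Stand-in; the real `Config` is the struct above.

// Try `./Cuprated.toml`, then `<config dir>/Cuprated.toml`, then defaults.
fn resolve_config(current_dir: Option<PathBuf>, config_dir: PathBuf) -> Config {
    const FILE_NAME: &str = "Cuprated.toml";

    current_dir
        .map(|dir| dir.join(FILE_NAME))
        .ok_or(())
        .and_then(|path| fs::read_to_string(path).map_err(drop))
        .or_else(|()| fs::read_to_string(config_dir.join(FILE_NAME)).map_err(drop))
        // The real code parses with `toml::from_str` here; this sketch
        // only demonstrates the fallback chain.
        .map(|_text| Config)
        .unwrap_or_default()
}

fn main() {
    // With neither file present, we silently fall back to defaults.
    let config = resolve_config(std::env::current_dir().ok(), PathBuf::from("/etc/cuprate"));
    println!("{config:?}");
}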
55 binaries/cuprated/src/config/args.rs Normal file
@@ -0,0 +1,55 @@
use std::{io::Write, path::PathBuf, process::exit};

use clap::builder::TypedValueParser;

use cuprate_helper::network::Network;

use crate::{config::Config, constants::EXAMPLE_CONFIG};

/// Cuprate Args.
#[derive(clap::Parser, Debug)]
#[command(version, about)]
pub struct Args {
    /// The network to run on.
    #[arg(
        long,
        default_value_t = Network::Mainnet,
        value_parser = clap::builder::PossibleValuesParser::new(["mainnet", "testnet", "stagenet"])
            .map(|s| s.parse::<Network>().unwrap()),
    )]
    pub network: Network,
    /// The number of outbound clear-net connections to maintain.
    #[arg(long)]
    pub outbound_connections: Option<usize>,
    /// The PATH of the `cuprated` config file.
    #[arg(long)]
    pub config_file: Option<PathBuf>,
    /// Generate a config file and print it to stdout.
    #[arg(long)]
    pub generate_config: bool,
}

impl Args {
    /// Complete any quick requests asked for in [`Args`].
    ///
    /// May cause the process to [`exit`].
    pub fn do_quick_requests(&self) {
        if self.generate_config {
            println!("{EXAMPLE_CONFIG}");
            exit(0);
        }
    }

    /// Apply the [`Args`] to the given [`Config`].
    ///
    /// This may exit the program if a config value was set that requires an early exit.
    pub const fn apply_args(&self, mut config: Config) -> Config {
        config.network = self.network;

        if let Some(outbound_connections) = self.outbound_connections {
            config.p2p.clear_net.general.outbound_connections = outbound_connections;
        }

        config
    }
}
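The override rule `apply_args` implements is simple: a CLI flag only replaces the config value when it was actually given. A minimal self-contained sketch (the types here are illustrative, not the real `Args`/`Config`):

struct DemoArgs {
    outbound_connections: Option<usize>,
}

struct DemoConfig {
    outbound_connections: usize,
}

impl DemoArgs {
    // `None` means "flag not passed", so the file/default value survives.
    fn apply(&self, mut config: DemoConfig) -> DemoConfig {
        if let Some(n) = self.outbound_connections {
            config.outbound_connections = n;
        }
        config
    }
}

fn main() {
    let file_config = DemoConfig { outbound_connections: 64 };
    let args = DemoArgs { outbound_connections: Some(12) };
    assert_eq!(args.apply(file_config).outbound_connections, 12);
}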
21 binaries/cuprated/src/config/fs.rs Normal file
@@ -0,0 +1,21 @@
use std::path::PathBuf;

use serde::{Deserialize, Serialize};

use cuprate_helper::fs::{CUPRATE_CACHE_DIR, CUPRATE_DATA_DIR};

/// The file system config.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct FileSystemConfig {
    pub data_directory: PathBuf,
    pub cache_directory: PathBuf,
}

impl Default for FileSystemConfig {
    fn default() -> Self {
        Self {
            data_directory: CUPRATE_DATA_DIR.to_path_buf(),
            cache_directory: CUPRATE_CACHE_DIR.to_path_buf(),
        }
    }
}
178 binaries/cuprated/src/config/p2p.rs Normal file
@@ -0,0 +1,178 @@
use std::{
    net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
    path::Path,
    time::Duration,
};

use serde::{Deserialize, Serialize};

use cuprate_helper::{fs::address_book_path, network::Network};

/// P2P config.
#[derive(Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct P2PConfig {
    /// Clear-net config.
    pub clear_net: ClearNetConfig,
    /// Block downloader config.
    pub block_downloader: BlockDownloaderConfig,
}

#[derive(Clone, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct BlockDownloaderConfig {
    /// The size in bytes of the buffer between the block downloader and the place which
    /// is consuming the downloaded blocks.
    pub buffer_bytes: usize,
    /// The size of the in-progress queue (in bytes) at which we stop requesting more blocks.
    pub in_progress_queue_bytes: usize,
    /// The [`Duration`] between checking the client pool for free peers.
    pub check_client_pool_interval: Duration,
    /// The target size of a single batch of blocks (in bytes).
    pub target_batch_bytes: usize,
}

impl From<BlockDownloaderConfig> for cuprate_p2p::block_downloader::BlockDownloaderConfig {
    fn from(value: BlockDownloaderConfig) -> Self {
        Self {
            buffer_bytes: value.buffer_bytes,
            in_progress_queue_bytes: value.in_progress_queue_bytes,
            check_client_pool_interval: value.check_client_pool_interval,
            target_batch_bytes: value.target_batch_bytes,
            initial_batch_len: 1,
        }
    }
}

impl Default for BlockDownloaderConfig {
    fn default() -> Self {
        Self {
            buffer_bytes: 50_000_000,
            in_progress_queue_bytes: 50_000_000,
            check_client_pool_interval: Duration::from_secs(30),
            target_batch_bytes: 5_000_000,
        }
    }
}

/// The config values for P2P clear-net.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct ClearNetConfig {
    /// The IP address to listen on for P2P connections.
    pub listen_on: IpAddr,
    #[serde(flatten)]
    pub general: SharedNetConfig,
}

impl Default for ClearNetConfig {
    fn default() -> Self {
        Self {
            listen_on: IpAddr::V4(Ipv4Addr::UNSPECIFIED),
            general: Default::default(),
        }
    }
}

/// Network config values shared between all network zones.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct SharedNetConfig {
    /// The number of outbound connections to make and try to keep.
    pub outbound_connections: usize,
    /// The number of extra connections we can make if we are under load from the rest of Cuprate.
    pub extra_outbound_connections: usize,
    /// The maximum number of inbound connections.
    pub max_inbound_connections: usize,
    /// The percent of connections that should be to peers we haven't connected to before.
    pub gray_peers_percent: f64,
    /// The port to use to accept P2P connections.
    pub p2p_port: u16,
    /// The address book config.
    address_book_config: AddressBookConfig,
}

impl SharedNetConfig {
    /// Returns the [`AddressBookConfig`].
    pub fn address_book_config(
        &self,
        cache_dir: &Path,
        network: Network,
    ) -> cuprate_address_book::AddressBookConfig {
        cuprate_address_book::AddressBookConfig {
            max_white_list_length: self.address_book_config.max_white_list_length,
            max_gray_list_length: self.address_book_config.max_gray_list_length,
            peer_store_directory: address_book_path(cache_dir, network),
            peer_save_period: self.address_book_config.peer_save_period,
        }
    }
}

impl Default for SharedNetConfig {
    fn default() -> Self {
        Self {
            outbound_connections: 64,
            extra_outbound_connections: 8,
            max_inbound_connections: 128,
            gray_peers_percent: 0.7,
            p2p_port: 0,
            address_book_config: AddressBookConfig::default(),
        }
    }
}

/// The address book config.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct AddressBookConfig {
    max_white_list_length: usize,
    max_gray_list_length: usize,
    peer_save_period: Duration,
}

impl Default for AddressBookConfig {
    fn default() -> Self {
        Self {
            max_white_list_length: 1_000,
            max_gray_list_length: 5_000,
            peer_save_period: Duration::from_secs(30),
        }
    }
}

/// Seed nodes for [`ClearNet`](cuprate_p2p_core::ClearNet).
pub fn clear_net_seed_nodes(network: Network) -> Vec<SocketAddr> {
    let seeds = match network {
        Network::Mainnet => [
            "176.9.0.187:18080",
            "88.198.163.90:18080",
            "66.85.74.134:18080",
            "51.79.173.165:18080",
            "192.99.8.110:18080",
            "37.187.74.171:18080",
            "77.172.183.193:18080",
        ]
        .as_slice(),
        Network::Stagenet => [
            "176.9.0.187:38080",
            "51.79.173.165:38080",
            "192.99.8.110:38080",
            "37.187.74.171:38080",
            "77.172.183.193:38080",
        ]
        .as_slice(),
        Network::Testnet => [
            "176.9.0.187:28080",
            "51.79.173.165:28080",
            "192.99.8.110:28080",
            "37.187.74.171:28080",
            "77.172.183.193:28080",
        ]
        .as_slice(),
    };

    seeds
        .iter()
        .map(|s| s.parse())
        .collect::<Result<_, _>>()
        .unwrap()
}
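`ClearNetConfig` flattens `SharedNetConfig`, so in the TOML file the shared fields sit directly under the clear-net table rather than in a nested sub-table. A small sketch of that behavior, assuming `serde` and `toml` are available as dependencies (the struct names are illustrative):

use serde::Deserialize;

#[derive(Deserialize)]
struct Clear {
    listen_on: std::net::IpAddr,
    #[serde(flatten)]
    general: General,
}

#[derive(Deserialize)]
struct General {
    p2p_port: u16,
}

fn main() {
    // `p2p_port` lives at the same level as `listen_on` thanks to `flatten`.
    let toml_text = "listen_on = \"0.0.0.0\"\np2p_port = 18080\n";
    let clear: Clear = toml::from_str(toml_text).unwrap();
    assert_eq!(clear.general.p2p_port, 18080);
}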
67 binaries/cuprated/src/config/storage.rs Normal file
@@ -0,0 +1,67 @@
use std::path::PathBuf;

use serde::{Deserialize, Serialize};

use cuprate_database::config::SyncMode;
use cuprate_database_service::ReaderThreads;
use cuprate_helper::fs::CUPRATE_DATA_DIR;

/// The storage config.
#[derive(Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct StorageConfig {
    /// The number of reader threads to spawn, shared between the tx-pool and blockchain.
    pub reader_threads: ReaderThreads,
    /// The tx-pool config.
    pub txpool: TxpoolConfig,
    /// The blockchain config.
    pub blockchain: BlockchainConfig,
}

/// The blockchain config.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct BlockchainConfig {
    #[serde(flatten)]
    pub shared: SharedStorageConfig,
}

impl Default for BlockchainConfig {
    fn default() -> Self {
        Self {
            shared: SharedStorageConfig {
                sync_mode: SyncMode::Async,
            },
        }
    }
}

/// The tx-pool config.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct TxpoolConfig {
    #[serde(flatten)]
    pub shared: SharedStorageConfig,

    /// The maximum size of the tx-pool in bytes.
    pub max_txpool_byte_size: usize,
}

impl Default for TxpoolConfig {
    fn default() -> Self {
        Self {
            shared: SharedStorageConfig {
                sync_mode: SyncMode::Async,
            },
            max_txpool_byte_size: 100_000_000,
        }
    }
}

/// Config values shared between the tx-pool and blockchain.
#[derive(Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct SharedStorageConfig {
    /// The [`SyncMode`] of the database.
    pub sync_mode: SyncMode,
}
42 binaries/cuprated/src/config/tracing_config.rs Normal file
@@ -0,0 +1,42 @@
use serde::{Deserialize, Serialize};
use tracing::level_filters::LevelFilter;

/// [`tracing`] config.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct TracingConfig {
    /// The default minimum log level.
    #[serde(with = "level_filter_serde")]
    level: LevelFilter,
}

impl Default for TracingConfig {
    fn default() -> Self {
        Self {
            level: LevelFilter::INFO,
        }
    }
}

mod level_filter_serde {
    use std::str::FromStr;

    use serde::{Deserialize, Deserializer, Serializer};
    use tracing::level_filters::LevelFilter;

    #[expect(clippy::trivially_copy_pass_by_ref, reason = "serde")]
    pub fn serialize<S>(level_filter: &LevelFilter, s: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        s.serialize_str(&level_filter.to_string())
    }

    pub fn deserialize<'de, D>(d: D) -> Result<LevelFilter, D::Error>
    where
        D: Deserializer<'de>,
    {
        let s = String::deserialize(d)?;
        LevelFilter::from_str(&s).map_err(serde::de::Error::custom)
    }
}
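`LevelFilter` has no serde impls, so the `with = "level_filter_serde"` module round-trips it through its string form. The same pattern works for any `Display + FromStr` type; a minimal sketch with an ordinary integer (the field and module names here are illustrative):

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Cfg {
    // Stored as a string in TOML, parsed back on load.
    #[serde(with = "as_string")]
    verbosity: u8,
}

mod as_string {
    use serde::{de::Error, Deserialize, Deserializer, Serializer};

    pub fn serialize<S: Serializer>(v: &u8, s: S) -> Result<S::Ok, S::Error> {
        s.serialize_str(&v.to_string())
    }

    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<u8, D::Error> {
        String::deserialize(d)?.parse().map_err(Error::custom)
    }
}

fn main() {
    let cfg: Cfg = toml::from_str(r#"verbosity = "3""#).unwrap();
    assert_eq!(cfg.verbosity, 3);
}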
46 binaries/cuprated/src/constants.rs Normal file
@@ -0,0 +1,46 @@
//! General constants used throughout `cuprated`.

use const_format::formatcp;

/// `cuprated`'s semantic version (`MAJOR.MINOR.PATCH`) as a string.
pub const VERSION: &str = clap::crate_version!();

/// [`VERSION`] + the build type.
///
/// If a debug build, the suffix is `-debug`, else it is `-release`.
pub const VERSION_BUILD: &str = if cfg!(debug_assertions) {
    formatcp!("{VERSION}-debug")
} else {
    formatcp!("{VERSION}-release")
};

/// The panic message used when cuprated encounters a critical service error.
pub const PANIC_CRITICAL_SERVICE_ERROR: &str =
    "A service critical to Cuprate's function returned an unexpected error.";

/// The example config file, embedded at compile time.
pub const EXAMPLE_CONFIG: &str = include_str!("../Cuprated.toml");

#[cfg(test)]
mod test {
    use super::*;
    use crate::config::Config;

    #[test]
    fn version() {
        assert_eq!(VERSION, "0.0.1");
    }

    #[test]
    fn version_build() {
        if cfg!(debug_assertions) {
            assert_eq!(VERSION_BUILD, "0.0.1-debug");
        } else {
            assert_eq!(VERSION_BUILD, "0.0.1-release");
        }
    }

    #[test]
    fn generate_config_text_is_valid() {
        let config: Config = toml::from_str(EXAMPLE_CONFIG).unwrap();
    }
}
36 binaries/cuprated/src/main.rs Normal file
@@ -0,0 +1,36 @@
#![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![allow(
    unused_imports,
    unreachable_pub,
    unreachable_code,
    unused_crate_dependencies,
    dead_code,
    unused_variables,
    clippy::needless_pass_by_value,
    clippy::unused_async,
    clippy::diverging_sub_expression,
    unused_mut,
    clippy::let_unit_value,
    clippy::needless_pass_by_ref_mut,
    reason = "TODO: remove after v1.0.0"
)]

mod blockchain;
mod config;
mod constants;
mod p2p;
mod rpc;
mod signals;
mod statics;
mod txpool;

fn main() {
    // Initialize global static `LazyLock` data.
    statics::init_lazylock_statics();

    let _config = config::read_config_and_args();

    // TODO: everything else.
    todo!()
}
57 binaries/cuprated/src/p2p.rs Normal file
@@ -0,0 +1,57 @@
//! P2P
//!
//! Will handle initiating the P2P and contains a protocol request handler.
use futures::{FutureExt, TryFutureExt};
use tokio::sync::oneshot;
use tower::ServiceExt;

use cuprate_blockchain::service::BlockchainReadHandle;
use cuprate_consensus::BlockChainContextService;
use cuprate_p2p::{NetworkInterface, P2PConfig};
use cuprate_p2p_core::ClearNet;
use cuprate_txpool::service::TxpoolReadHandle;

use crate::txpool::IncomingTxHandler;

mod core_sync_service;
mod network_address;
pub mod request_handler;

pub use network_address::CrossNetworkInternalPeerId;

/// Starts the P2P clearnet network, returning a [`NetworkInterface`] to interact with it.
///
/// A [`oneshot::Sender`] is also returned to provide the [`IncomingTxHandler`]; until this is
/// provided, network handshakes cannot be completed.
pub async fn start_clearnet_p2p(
    blockchain_read_handle: BlockchainReadHandle,
    blockchain_context_service: BlockChainContextService,
    txpool_read_handle: TxpoolReadHandle,
    config: P2PConfig<ClearNet>,
) -> Result<
    (
        NetworkInterface<ClearNet>,
        oneshot::Sender<IncomingTxHandler>,
    ),
    tower::BoxError,
> {
    let (incoming_tx_handler_tx, incoming_tx_handler_rx) = oneshot::channel();

    let request_handler_maker = request_handler::P2pProtocolRequestHandlerMaker {
        blockchain_read_handle,
        blockchain_context_service: blockchain_context_service.clone(),
        txpool_read_handle,
        incoming_tx_handler: None,
        incoming_tx_handler_fut: incoming_tx_handler_rx.shared(),
    };

    Ok((
        cuprate_p2p::initialize_network(
            request_handler_maker.map_response(|s| s.map_err(Into::into)),
            core_sync_service::CoreSyncService(blockchain_context_service),
            config,
        )
        .await?,
        incoming_tx_handler_tx,
    ))
}
49 binaries/cuprated/src/p2p/core_sync_service.rs Normal file
@@ -0,0 +1,49 @@
use std::task::{Context, Poll};

use futures::{future::BoxFuture, FutureExt, TryFutureExt};
use tower::Service;

use cuprate_consensus::{
    BlockChainContextRequest, BlockChainContextResponse, BlockChainContextService,
};
use cuprate_helper::{cast::usize_to_u64, map::split_u128_into_low_high_bits};
use cuprate_p2p_core::services::{CoreSyncDataRequest, CoreSyncDataResponse};
use cuprate_wire::CoreSyncData;

/// The core sync service.
#[derive(Clone)]
pub struct CoreSyncService(pub BlockChainContextService);

impl Service<CoreSyncDataRequest> for CoreSyncService {
    type Response = CoreSyncDataResponse;
    type Error = tower::BoxError;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.0.poll_ready(cx)
    }

    fn call(&mut self, _: CoreSyncDataRequest) -> Self::Future {
        self.0
            .call(BlockChainContextRequest::Context)
            .map_ok(|res| {
                let BlockChainContextResponse::Context(context) = res else {
                    unreachable!()
                };

                let context = context.unchecked_blockchain_context();
                let (cumulative_difficulty, cumulative_difficulty_top64) =
                    split_u128_into_low_high_bits(context.cumulative_difficulty);

                CoreSyncDataResponse(CoreSyncData {
                    cumulative_difficulty,
                    cumulative_difficulty_top64,
                    current_height: usize_to_u64(context.chain_height),
                    pruning_seed: 0,
                    top_id: context.top_hash,
                    top_version: context.current_hf.as_u8(),
                })
            })
            .boxed()
    }
}
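The wire format carries the 128-bit cumulative difficulty as two `u64` halves, which is what the `split_u128_into_low_high_bits` helper produces. A standalone sketch of the arithmetic (the real helper lives in `cuprate_helper::map`; this is an assumed-equivalent reimplementation, not the library code):

// Low 64 bits in the first slot, high 64 bits in the second.
const fn split_u128(v: u128) -> (u64, u64) {
    (v as u64, (v >> 64) as u64)
}

fn main() {
    let difficulty: u128 = (7u128 << 64) | 42;
    let (low, high) = split_u128(difficulty);
    assert_eq!((low, high), (42, 7));
    // Recombining the halves is the inverse operation.
    assert_eq!(((high as u128) << 64) | (low as u128), difficulty);
}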
16 binaries/cuprated/src/p2p/network_address.rs Normal file
@@ -0,0 +1,16 @@
use std::net::SocketAddr;

use cuprate_p2p_core::{client::InternalPeerID, ClearNet, NetworkZone};

/// An identifier for a P2P peer on any network.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum CrossNetworkInternalPeerId {
    /// A clear-net peer.
    ClearNet(InternalPeerID<<ClearNet as NetworkZone>::Addr>),
}

impl From<InternalPeerID<<ClearNet as NetworkZone>::Addr>> for CrossNetworkInternalPeerId {
    fn from(addr: InternalPeerID<<ClearNet as NetworkZone>::Addr>) -> Self {
        Self::ClearNet(addr)
    }
}
422 binaries/cuprated/src/p2p/request_handler.rs Normal file
@@ -0,0 +1,422 @@
use std::{
    collections::HashSet,
    future::{ready, Ready},
    hash::Hash,
    task::{Context, Poll},
};

use bytes::Bytes;
use futures::{
    future::{BoxFuture, Shared},
    FutureExt,
};
use monero_serai::{block::Block, transaction::Transaction};
use tokio::sync::{broadcast, oneshot, watch};
use tokio_stream::wrappers::WatchStream;
use tower::{Service, ServiceExt};

use cuprate_blockchain::service::BlockchainReadHandle;
use cuprate_consensus::{
    transactions::new_tx_verification_data, BlockChainContextRequest, BlockChainContextResponse,
    BlockChainContextService,
};
use cuprate_dandelion_tower::TxState;
use cuprate_fixed_bytes::ByteArrayVec;
use cuprate_helper::cast::u64_to_usize;
use cuprate_helper::{
    asynch::rayon_spawn_async,
    cast::usize_to_u64,
    map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits},
};
use cuprate_p2p::constants::{
    MAX_BLOCKS_IDS_IN_CHAIN_ENTRY, MAX_BLOCK_BATCH_LEN, MAX_TRANSACTION_BLOB_SIZE, MEDIUM_BAN,
};
use cuprate_p2p_core::{
    client::{InternalPeerID, PeerInformation},
    NetZoneAddress, NetworkZone, ProtocolRequest, ProtocolResponse,
};
use cuprate_txpool::service::TxpoolReadHandle;
use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainResponse},
    BlockCompleteEntry, TransactionBlobs, TxsInBlock,
};
use cuprate_wire::protocol::{
    ChainRequest, ChainResponse, FluffyMissingTransactionsRequest, GetObjectsRequest,
    GetObjectsResponse, NewFluffyBlock, NewTransactions,
};

use crate::{
    blockchain::interface::{self as blockchain_interface, IncomingBlockError},
    constants::PANIC_CRITICAL_SERVICE_ERROR,
    p2p::CrossNetworkInternalPeerId,
    txpool::{IncomingTxError, IncomingTxHandler, IncomingTxs},
};

/// The P2P protocol request handler [`MakeService`](tower::MakeService).
#[derive(Clone)]
pub struct P2pProtocolRequestHandlerMaker {
    pub blockchain_read_handle: BlockchainReadHandle,
    pub blockchain_context_service: BlockChainContextService,
    pub txpool_read_handle: TxpoolReadHandle,

    /// The [`IncomingTxHandler`], wrapped in an [`Option`] as there is a cyclic reference
    /// between [`P2pProtocolRequestHandlerMaker`] and the [`IncomingTxHandler`].
    pub incoming_tx_handler: Option<IncomingTxHandler>,

    /// A [`Future`](std::future::Future) that produces the [`IncomingTxHandler`].
    pub incoming_tx_handler_fut: Shared<oneshot::Receiver<IncomingTxHandler>>,
}

impl<A: NetZoneAddress> Service<PeerInformation<A>> for P2pProtocolRequestHandlerMaker
where
    InternalPeerID<A>: Into<CrossNetworkInternalPeerId>,
{
    type Response = P2pProtocolRequestHandler<A>;
    type Error = tower::BoxError;
    type Future = Ready<Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        if self.incoming_tx_handler.is_none() {
            return self
                .incoming_tx_handler_fut
                .poll_unpin(cx)
                .map(|incoming_tx_handler| {
                    self.incoming_tx_handler = Some(incoming_tx_handler?);
                    Ok(())
                });
        }

        Poll::Ready(Ok(()))
    }

    fn call(&mut self, peer_information: PeerInformation<A>) -> Self::Future {
        let Some(incoming_tx_handler) = self.incoming_tx_handler.clone() else {
            panic!("poll_ready was not called or did not return `Poll::Ready`")
        };

        // TODO: check sync info?

        let blockchain_read_handle = self.blockchain_read_handle.clone();
        let txpool_read_handle = self.txpool_read_handle.clone();

        ready(Ok(P2pProtocolRequestHandler {
            peer_information,
            blockchain_read_handle,
            blockchain_context_service: self.blockchain_context_service.clone(),
            txpool_read_handle,
            incoming_tx_handler,
        }))
    }
}

/// The P2P protocol request handler.
#[derive(Clone)]
pub struct P2pProtocolRequestHandler<N: NetZoneAddress> {
    peer_information: PeerInformation<N>,
    blockchain_read_handle: BlockchainReadHandle,
    blockchain_context_service: BlockChainContextService,
    txpool_read_handle: TxpoolReadHandle,
    incoming_tx_handler: IncomingTxHandler,
}

impl<A: NetZoneAddress> Service<ProtocolRequest> for P2pProtocolRequestHandler<A>
where
    InternalPeerID<A>: Into<CrossNetworkInternalPeerId>,
{
    type Response = ProtocolResponse;
    type Error = anyhow::Error;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, request: ProtocolRequest) -> Self::Future {
        match request {
            ProtocolRequest::GetObjects(r) => {
                get_objects(r, self.blockchain_read_handle.clone()).boxed()
            }
            ProtocolRequest::GetChain(r) => {
                get_chain(r, self.blockchain_read_handle.clone()).boxed()
            }
            ProtocolRequest::FluffyMissingTxs(r) => {
                fluffy_missing_txs(r, self.blockchain_read_handle.clone()).boxed()
            }
            ProtocolRequest::NewBlock(_) => ready(Err(anyhow::anyhow!(
                "Peer sent a full block when we support fluffy blocks"
            )))
            .boxed(),
            ProtocolRequest::NewFluffyBlock(r) => new_fluffy_block(
                self.peer_information.clone(),
                r,
                self.blockchain_read_handle.clone(),
                self.txpool_read_handle.clone(),
            )
            .boxed(),
            ProtocolRequest::NewTransactions(r) => new_transactions(
                self.peer_information.clone(),
                r,
                self.blockchain_context_service.clone(),
                self.incoming_tx_handler.clone(),
            )
            .boxed(),
            ProtocolRequest::GetTxPoolCompliment(_) => ready(Ok(ProtocolResponse::NA)).boxed(), // TODO: should we support this?
        }
    }
}

//---------------------------------------------------------------------------------------------------- Handler functions

/// [`ProtocolRequest::GetObjects`]
async fn get_objects(
    request: GetObjectsRequest,
    mut blockchain_read_handle: BlockchainReadHandle,
) -> anyhow::Result<ProtocolResponse> {
    if request.blocks.len() > MAX_BLOCK_BATCH_LEN {
        anyhow::bail!("Peer requested more blocks than allowed.")
    }

    let block_hashes: Vec<[u8; 32]> = (&request.blocks).into();
    // Deallocate the backing `Bytes`.
    drop(request);

    let BlockchainResponse::BlockCompleteEntries {
        blocks,
        missing_hashes,
        blockchain_height,
    } = blockchain_read_handle
        .ready()
        .await?
        .call(BlockchainReadRequest::BlockCompleteEntries(block_hashes))
        .await?
    else {
        unreachable!();
    };

    Ok(ProtocolResponse::GetObjects(GetObjectsResponse {
        blocks,
        missed_ids: ByteArrayVec::from(missing_hashes),
        current_blockchain_height: usize_to_u64(blockchain_height),
    }))
}
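Each read request above has exactly one matching response variant, which is why the handlers destructure with `let`-`else` and treat any other variant as `unreachable!()`. A tiny sketch of the idiom (the enum and function are illustrative):

enum Response {
    Height(u64),
    Hashes(Vec<[u8; 32]>),
}

fn height_of(res: Response) -> u64 {
    let Response::Height(height) = res else {
        // The service contract guarantees the variant matches the
        // request, so reaching this arm is a logic bug, not an error.
        unreachable!();
    };
    height
}

fn main() {
    assert_eq!(height_of(Response::Height(3)), 3);
}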
/// [`ProtocolRequest::GetChain`]
async fn get_chain(
    request: ChainRequest,
    mut blockchain_read_handle: BlockchainReadHandle,
) -> anyhow::Result<ProtocolResponse> {
    if request.block_ids.len() > MAX_BLOCKS_IDS_IN_CHAIN_ENTRY {
        anyhow::bail!("Peer sent too many block hashes in chain request.")
    }

    let block_hashes: Vec<[u8; 32]> = (&request.block_ids).into();
    let want_pruned_data = request.prune;
    // Deallocate the backing `Bytes`.
    drop(request);

    let BlockchainResponse::NextChainEntry {
        start_height,
        chain_height,
        block_ids,
        block_weights,
        cumulative_difficulty,
        first_block_blob,
    } = blockchain_read_handle
        .ready()
        .await?
        .call(BlockchainReadRequest::NextChainEntry(block_hashes, 10_000))
        .await?
    else {
        unreachable!();
    };

    let Some(start_height) = start_height else {
        anyhow::bail!("The peer's chain has a different genesis block than ours.");
    };

    let (cumulative_difficulty_low64, cumulative_difficulty_top64) =
        split_u128_into_low_high_bits(cumulative_difficulty);

    Ok(ProtocolResponse::GetChain(ChainResponse {
        start_height: usize_to_u64(std::num::NonZero::get(start_height)),
        total_height: usize_to_u64(chain_height),
        cumulative_difficulty_low64,
        cumulative_difficulty_top64,
        m_block_ids: ByteArrayVec::from(block_ids),
        first_block: first_block_blob.map_or(Bytes::new(), Bytes::from),
        // Only needed when pruned.
        m_block_weights: if want_pruned_data {
            block_weights.into_iter().map(usize_to_u64).collect()
        } else {
            vec![]
        },
    }))
}

/// [`ProtocolRequest::FluffyMissingTxs`]
async fn fluffy_missing_txs(
    mut request: FluffyMissingTransactionsRequest,
    mut blockchain_read_handle: BlockchainReadHandle,
) -> anyhow::Result<ProtocolResponse> {
    let tx_indexes = std::mem::take(&mut request.missing_tx_indices);
    let block_hash: [u8; 32] = *request.block_hash;
    let current_blockchain_height = request.current_blockchain_height;

    // Deallocate the backing `Bytes`.
    drop(request);

    let BlockchainResponse::TxsInBlock(res) = blockchain_read_handle
        .ready()
        .await?
        .call(BlockchainReadRequest::TxsInBlock {
            block_hash,
            tx_indexes,
        })
        .await?
    else {
        unreachable!();
    };

    let Some(TxsInBlock { block, txs }) = res else {
        anyhow::bail!("The peer requested txs out of range.");
    };

    Ok(ProtocolResponse::NewFluffyBlock(NewFluffyBlock {
        b: BlockCompleteEntry {
            block: Bytes::from(block),
            txs: TransactionBlobs::Normal(txs.into_iter().map(Bytes::from).collect()),
            pruned: false,
            // Only needed for pruned blocks.
            block_weight: 0,
        },
        current_blockchain_height,
    }))
}

/// [`ProtocolRequest::NewFluffyBlock`]
async fn new_fluffy_block<A: NetZoneAddress>(
    peer_information: PeerInformation<A>,
    request: NewFluffyBlock,
    mut blockchain_read_handle: BlockchainReadHandle,
    mut txpool_read_handle: TxpoolReadHandle,
) -> anyhow::Result<ProtocolResponse> {
    // TODO: check context service here and ignore the block?
    let current_blockchain_height = request.current_blockchain_height;

    peer_information
        .core_sync_data
        .lock()
        .unwrap()
        .current_height = current_blockchain_height;

    let (block, txs) = rayon_spawn_async(move || -> Result<_, anyhow::Error> {
        let block = Block::read(&mut request.b.block.as_ref())?;

        let tx_blobs = request
            .b
            .txs
            .take_normal()
            .ok_or(anyhow::anyhow!("Peer sent pruned txs in fluffy block"))?;

        let txs = tx_blobs
            .into_iter()
            .map(|tx_blob| {
                if tx_blob.len() > MAX_TRANSACTION_BLOB_SIZE {
                    anyhow::bail!("Peer sent a transaction over the size limit.");
                }

                let tx = Transaction::read(&mut tx_blob.as_ref())?;

                Ok((tx.hash(), tx))
            })
            .collect::<Result<_, anyhow::Error>>()?;

        // The backing `Bytes` will be deallocated when this closure returns.

        Ok((block, txs))
    })
    .await?;

    let res = blockchain_interface::handle_incoming_block(
        block,
        txs,
        &mut blockchain_read_handle,
        &mut txpool_read_handle,
    )
    .await;

    match res {
        Ok(_) => Ok(ProtocolResponse::NA),
        Err(IncomingBlockError::UnknownTransactions(block_hash, missing_tx_indices)) => Ok(
            ProtocolResponse::FluffyMissingTransactionsRequest(FluffyMissingTransactionsRequest {
                block_hash: block_hash.into(),
                current_blockchain_height,
                missing_tx_indices: missing_tx_indices.into_iter().map(usize_to_u64).collect(),
            }),
        ),
        Err(IncomingBlockError::Orphan) => {
            // The block's parent was unknown; we could be syncing.
            Ok(ProtocolResponse::NA)
        }
        Err(e) => Err(e.into()),
    }
}

/// [`ProtocolRequest::NewTransactions`]
async fn new_transactions<A>(
    peer_information: PeerInformation<A>,
    request: NewTransactions,
    mut blockchain_context_service: BlockChainContextService,
    mut incoming_tx_handler: IncomingTxHandler,
) -> anyhow::Result<ProtocolResponse>
where
    A: NetZoneAddress,
    InternalPeerID<A>: Into<CrossNetworkInternalPeerId>,
{
    let BlockChainContextResponse::Context(context) = blockchain_context_service
        .ready()
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
        .call(BlockChainContextRequest::Context)
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
    else {
        unreachable!()
    };

    let context = context.unchecked_blockchain_context();

    // If we are more than 2 blocks behind the peer then ignore the txs - we are probably still syncing.
    if usize_to_u64(context.chain_height + 2)
        < peer_information
            .core_sync_data
            .lock()
            .unwrap()
            .current_height
    {
        return Ok(ProtocolResponse::NA);
    }

    let state = if request.dandelionpp_fluff {
        TxState::Fluff
    } else {
        TxState::Stem {
            from: peer_information.id.into(),
        }
    };

    // Drop all the data except the stuff we still need.
    let NewTransactions { txs, .. } = request;

    let res = incoming_tx_handler
        .ready()
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
        .call(IncomingTxs { txs, state })
        .await;

    match res {
        Ok(()) => Ok(ProtocolResponse::NA),
        Err(e) => Err(e.into()),
    }
}
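The maker above leans on the Tower contract that `call` is only invoked after `poll_ready` returns `Ready`: the late-bound `IncomingTxHandler` is resolved in `poll_ready` and simply assumed present in `call`. A synchronous sketch of that shape, with the shared oneshot future stood in by a plain `Option` (all names are illustrative):

struct Maker {
    handler: Option<String>,
    pending: Option<String>, // Stands in for `Shared<oneshot::Receiver<_>>`.
}

impl Maker {
    // Resolve the dependency; report "not ready" while it is absent.
    fn poll_ready(&mut self) -> bool {
        if self.handler.is_none() {
            match self.pending.take() {
                Some(h) => self.handler = Some(h),
                None => return false,
            }
        }
        true
    }

    // Callers must have seen `poll_ready() == true` first.
    fn call(&mut self) -> String {
        self.handler
            .clone()
            .expect("poll_ready was not called or did not return ready")
    }
}

fn main() {
    let mut maker = Maker { handler: None, pending: Some("tx handler".into()) };
    assert!(maker.poll_ready());
    assert_eq!(maker.call(), "tx handler");
}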
12 binaries/cuprated/src/rpc.rs Normal file
@@ -0,0 +1,12 @@
//! RPC
//!
//! Will contain the code to initiate the RPC and a request handler.

mod bin;
mod constants;
mod handler;
mod json;
mod other;
mod request;

pub use handler::CupratedRpcHandler;
85 binaries/cuprated/src/rpc/bin.rs Normal file
@@ -0,0 +1,85 @@
use anyhow::Error;

use cuprate_rpc_types::{
    bin::{
        BinRequest, BinResponse, GetBlocksByHeightRequest, GetBlocksByHeightResponse,
        GetBlocksRequest, GetBlocksResponse, GetHashesRequest, GetHashesResponse,
        GetOutputIndexesRequest, GetOutputIndexesResponse, GetOutsRequest, GetOutsResponse,
        GetTransactionPoolHashesRequest, GetTransactionPoolHashesResponse,
    },
    json::{GetOutputDistributionRequest, GetOutputDistributionResponse},
};

use crate::rpc::CupratedRpcHandler;

/// Map a [`BinRequest`] to the function that will lead to a [`BinResponse`].
pub(super) async fn map_request(
    state: CupratedRpcHandler,
    request: BinRequest,
) -> Result<BinResponse, Error> {
    use BinRequest as Req;
    use BinResponse as Resp;

    Ok(match request {
        Req::GetBlocks(r) => Resp::GetBlocks(get_blocks(state, r).await?),
        Req::GetBlocksByHeight(r) => Resp::GetBlocksByHeight(get_blocks_by_height(state, r).await?),
        Req::GetHashes(r) => Resp::GetHashes(get_hashes(state, r).await?),
        Req::GetOutputIndexes(r) => Resp::GetOutputIndexes(get_output_indexes(state, r).await?),
        Req::GetOuts(r) => Resp::GetOuts(get_outs(state, r).await?),
        Req::GetTransactionPoolHashes(r) => {
            Resp::GetTransactionPoolHashes(get_transaction_pool_hashes(state, r).await?)
        }
        Req::GetOutputDistribution(r) => {
            Resp::GetOutputDistribution(get_output_distribution(state, r).await?)
        }
    })
}

async fn get_blocks(
    state: CupratedRpcHandler,
    request: GetBlocksRequest,
) -> Result<GetBlocksResponse, Error> {
    todo!()
}

async fn get_blocks_by_height(
    state: CupratedRpcHandler,
    request: GetBlocksByHeightRequest,
) -> Result<GetBlocksByHeightResponse, Error> {
    todo!()
}

async fn get_hashes(
    state: CupratedRpcHandler,
    request: GetHashesRequest,
) -> Result<GetHashesResponse, Error> {
    todo!()
}

async fn get_output_indexes(
    state: CupratedRpcHandler,
    request: GetOutputIndexesRequest,
) -> Result<GetOutputIndexesResponse, Error> {
    todo!()
}

async fn get_outs(
    state: CupratedRpcHandler,
    request: GetOutsRequest,
) -> Result<GetOutsResponse, Error> {
    todo!()
}

async fn get_transaction_pool_hashes(
    state: CupratedRpcHandler,
    request: GetTransactionPoolHashesRequest,
) -> Result<GetTransactionPoolHashesResponse, Error> {
    todo!()
}

async fn get_output_distribution(
    state: CupratedRpcHandler,
    request: GetOutputDistributionRequest,
) -> Result<GetOutputDistributionResponse, Error> {
    todo!()
}
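The `map_request` functions in this module (and in `json.rs`/`other.rs` below) all follow one dispatch shape: match the request enum, await the matching handler, and wrap the result in the mirror response variant. A compact sketch of that shape (the enums and handlers are illustrative):

use std::convert::Infallible;

enum Request { Height, Version }
enum Response { Height(u64), Version(String) }

async fn height() -> Result<u64, Infallible> { Ok(3_000_000) }
async fn version() -> Result<String, Infallible> { Ok("0.0.1".into()) }

// One arm per variant; `?` propagates any handler error upward.
async fn map_request(request: Request) -> Result<Response, Infallible> {
    Ok(match request {
        Request::Height => Response::Height(height().await?),
        Request::Version => Response::Version(version().await?),
    })
}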
5 binaries/cuprated/src/rpc/constants.rs Normal file
@@ -0,0 +1,5 @@
//! Constants used within RPC.

/// The string message used in RPC response fields for when
/// `cuprated` does not support a field that `monerod` has.
pub(super) const FIELD_NOT_SUPPORTED: &str = "`cuprated` does not support this field.";
233 binaries/cuprated/src/rpc/handler.rs Normal file
@@ -0,0 +1,233 @@
//! Dummy implementation of [`RpcHandler`].

use std::task::{Context, Poll};

use anyhow::Error;
use futures::future::BoxFuture;
use monero_serai::block::Block;
use tower::Service;

use cuprate_blockchain::service::{BlockchainReadHandle, BlockchainWriteHandle};
use cuprate_consensus::BlockChainContextService;
use cuprate_pruning::PruningSeed;
use cuprate_rpc_interface::RpcHandler;
use cuprate_rpc_types::{
    bin::{BinRequest, BinResponse},
    json::{JsonRpcRequest, JsonRpcResponse},
    other::{OtherRequest, OtherResponse},
};
use cuprate_txpool::service::{TxpoolReadHandle, TxpoolWriteHandle};
use cuprate_types::{AddAuxPow, AuxPow, HardFork};

use crate::rpc::{bin, json, other};

/// TODO: use real type when public.
#[derive(Clone)]
#[expect(clippy::large_enum_variant)]
pub enum BlockchainManagerRequest {
    /// Pop blocks off the top of the blockchain.
    ///
    /// Input is the amount of blocks to pop.
    PopBlocks { amount: usize },

    /// Start pruning the blockchain.
    Prune,

    /// Is the blockchain pruned?
    Pruned,

    /// Relay a block to the network.
    RelayBlock(Block),

    /// Is the blockchain in the middle of syncing?
    ///
    /// This returning `false` does not necessarily mean
    /// [`BlockchainManagerRequest::Synced`] will return `true`. For example,
    /// if the network has been cut off and we have no peers, this will return
    /// `false`; however, [`BlockchainManagerRequest::Synced`] may still return
    /// `true` if the latest known chain tip is equal to our height.
    Syncing,

    /// Is the blockchain fully synced?
    Synced,

    /// Current target block time.
    Target,

    /// The height of the next block in the chain.
    TargetHeight,

    /// Generate new blocks.
    ///
    /// This request is only for regtest, see RPC's `generateblocks`.
    GenerateBlocks {
        /// Number of the blocks to be generated.
        amount_of_blocks: u64,
        /// The previous block's hash.
        prev_block: [u8; 32],
        /// The starting value for the nonce.
        starting_nonce: u32,
        /// The address that will receive the coinbase reward.
        wallet_address: String,
    },

    // // TODO: the below requests actually belong to the block downloader/syncer:
    // // <https://github.com/Cuprate/cuprate/pull/320#discussion_r1811089758>
    // /// Get [`Span`] data.
    // ///
    // /// This is data that describes an active downloading process,
    // /// if we are fully synced, this will return an empty [`Vec`].
    // Spans,

    //
    /// Get the next [`PruningSeed`] needed for a pruned sync.
    NextNeededPruningSeed,
}

/// TODO: use real type when public.
#[derive(Clone)]
pub enum BlockchainManagerResponse {
    /// General OK response.
    ///
    /// Response to:
    /// - [`BlockchainManagerRequest::Prune`]
    /// - [`BlockchainManagerRequest::RelayBlock`]
    Ok,

    /// Response to [`BlockchainManagerRequest::PopBlocks`]
    PopBlocks { new_height: usize },

    /// Response to [`BlockchainManagerRequest::Prune`]
    Prune(PruningSeed),

    /// Response to [`BlockchainManagerRequest::Pruned`]
    Pruned(bool),

    /// Response to [`BlockchainManagerRequest::Syncing`]
    Syncing(bool),

    /// Response to [`BlockchainManagerRequest::Synced`]
    Synced(bool),

    /// Response to [`BlockchainManagerRequest::Target`]
    Target(std::time::Duration),

    /// Response to [`BlockchainManagerRequest::TargetHeight`]
    TargetHeight { height: usize },

    /// Response to [`BlockchainManagerRequest::GenerateBlocks`]
    GenerateBlocks {
        /// Hashes of the blocks generated.
        blocks: Vec<[u8; 32]>,
        /// The new top height. (TODO: is this correct?)
        height: usize,
    },

    // /// Response to [`BlockchainManagerRequest::Spans`].
    // Spans(Vec<Span<Z::Addr>>),
    /// Response to [`BlockchainManagerRequest::NextNeededPruningSeed`].
    NextNeededPruningSeed(PruningSeed),
}

/// TODO: use real type when public.
pub type BlockchainManagerHandle = cuprate_database_service::DatabaseReadService<
    BlockchainManagerRequest,
    BlockchainManagerResponse,
>;

/// TODO
#[derive(Clone)]
pub struct CupratedRpcHandler {
    /// Should this RPC server be [restricted](RpcHandler::restricted)?
    ///
    /// This is not `pub` on purpose, as it should not be mutated after [`Self::new`].
    restricted: bool,

    /// Read handle to the blockchain database.
    pub blockchain_read: BlockchainReadHandle,

    /// Handle to the blockchain context service.
    pub blockchain_context: BlockChainContextService,

    /// Handle to the blockchain manager.
    pub blockchain_manager: BlockchainManagerHandle,

    /// Read handle to the transaction pool database.
    pub txpool_read: TxpoolReadHandle,

    /// TODO: handle to txpool service.
    pub txpool_manager: std::convert::Infallible,
}

impl CupratedRpcHandler {
    /// Create a new [`Self`].
    pub const fn new(
        restricted: bool,
        blockchain_read: BlockchainReadHandle,
        blockchain_context: BlockChainContextService,
        blockchain_manager: BlockchainManagerHandle,
        txpool_read: TxpoolReadHandle,
        txpool_manager: std::convert::Infallible,
    ) -> Self {
        Self {
            restricted,
            blockchain_read,
            blockchain_context,
            blockchain_manager,
            txpool_read,
            txpool_manager,
        }
    }
}

impl RpcHandler for CupratedRpcHandler {
    fn restricted(&self) -> bool {
        self.restricted
    }
}

impl Service<JsonRpcRequest> for CupratedRpcHandler {
    type Response = JsonRpcResponse;
    type Error = Error;
    type Future = BoxFuture<'static, Result<JsonRpcResponse, Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, request: JsonRpcRequest) -> Self::Future {
        let state = self.clone();
        Box::pin(json::map_request(state, request))
    }
}

impl Service<BinRequest> for CupratedRpcHandler {
    type Response = BinResponse;
    type Error = Error;
    type Future = BoxFuture<'static, Result<BinResponse, Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, request: BinRequest) -> Self::Future {
        let state = self.clone();
        Box::pin(bin::map_request(state, request))
    }
}

impl Service<OtherRequest> for CupratedRpcHandler {
    type Response = OtherResponse;
    type Error = Error;
    type Future = BoxFuture<'static, Result<OtherResponse, Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, request: OtherRequest) -> Self::Future {
        let state = self.clone();
        Box::pin(other::map_request(state, request))
    }
}
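Every `Service::call` above clones the handler into the returned future: the handle bundle is cheap to clone, and the clone lets the boxed future be `'static` instead of borrowing `&mut self`. A stripped-down sketch of the pattern (types here are illustrative):

use std::{future::Future, pin::Pin};

type BoxFut<T> = Pin<Box<dyn Future<Output = T> + Send>>;

#[derive(Clone)]
struct Handler {
    restricted: bool,
}

impl Handler {
    fn call(&mut self, request: u8) -> BoxFut<Result<u8, ()>> {
        // Move an owned clone into the future so it does not borrow `self`.
        let state = self.clone();
        Box::pin(async move {
            if state.restricted {
                Err(())
            } else {
                Ok(request)
            }
        })
    }
}

fn main() {
    let mut handler = Handler { restricted: false };
    let _future = handler.call(1); // Poll with any executor.
}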
294 binaries/cuprated/src/rpc/json.rs Normal file
@@ -0,0 +1,294 @@
use std::sync::Arc;

use anyhow::Error;
use tower::ServiceExt;

use cuprate_rpc_types::json::{
    AddAuxPowRequest, AddAuxPowResponse, BannedRequest, BannedResponse, CalcPowRequest,
    CalcPowResponse, FlushCacheRequest, FlushCacheResponse, FlushTransactionPoolRequest,
    FlushTransactionPoolResponse, GenerateBlocksRequest, GenerateBlocksResponse,
    GetAlternateChainsRequest, GetAlternateChainsResponse, GetBansRequest, GetBansResponse,
    GetBlockCountRequest, GetBlockCountResponse, GetBlockHeaderByHashRequest,
    GetBlockHeaderByHashResponse, GetBlockHeaderByHeightRequest, GetBlockHeaderByHeightResponse,
    GetBlockHeadersRangeRequest, GetBlockHeadersRangeResponse, GetBlockRequest, GetBlockResponse,
    GetCoinbaseTxSumRequest, GetCoinbaseTxSumResponse, GetConnectionsRequest,
    GetConnectionsResponse, GetFeeEstimateRequest, GetFeeEstimateResponse, GetInfoRequest,
    GetInfoResponse, GetLastBlockHeaderRequest, GetLastBlockHeaderResponse, GetMinerDataRequest,
    GetMinerDataResponse, GetOutputHistogramRequest, GetOutputHistogramResponse,
    GetTransactionPoolBacklogRequest, GetTransactionPoolBacklogResponse, GetTxIdsLooseRequest,
    GetTxIdsLooseResponse, GetVersionRequest, GetVersionResponse, HardForkInfoRequest,
    HardForkInfoResponse, JsonRpcRequest, JsonRpcResponse, OnGetBlockHashRequest,
    OnGetBlockHashResponse, PruneBlockchainRequest, PruneBlockchainResponse, RelayTxRequest,
    RelayTxResponse, SetBansRequest, SetBansResponse, SubmitBlockRequest, SubmitBlockResponse,
    SyncInfoRequest, SyncInfoResponse,
};

use crate::rpc::CupratedRpcHandler;

/// Map a [`JsonRpcRequest`] to the function that will lead to a [`JsonRpcResponse`].
pub(super) async fn map_request(
    state: CupratedRpcHandler,
    request: JsonRpcRequest,
) -> Result<JsonRpcResponse, Error> {
    use JsonRpcRequest as Req;
    use JsonRpcResponse as Resp;

    Ok(match request {
        Req::GetBlockCount(r) => Resp::GetBlockCount(get_block_count(state, r).await?),
        Req::OnGetBlockHash(r) => Resp::OnGetBlockHash(on_get_block_hash(state, r).await?),
        Req::SubmitBlock(r) => Resp::SubmitBlock(submit_block(state, r).await?),
        Req::GenerateBlocks(r) => Resp::GenerateBlocks(generate_blocks(state, r).await?),
        Req::GetLastBlockHeader(r) => {
            Resp::GetLastBlockHeader(get_last_block_header(state, r).await?)
        }
        Req::GetBlockHeaderByHash(r) => {
            Resp::GetBlockHeaderByHash(get_block_header_by_hash(state, r).await?)
        }
        Req::GetBlockHeaderByHeight(r) => {
            Resp::GetBlockHeaderByHeight(get_block_header_by_height(state, r).await?)
        }
        Req::GetBlockHeadersRange(r) => {
            Resp::GetBlockHeadersRange(get_block_headers_range(state, r).await?)
        }
        Req::GetBlock(r) => Resp::GetBlock(get_block(state, r).await?),
        Req::GetConnections(r) => Resp::GetConnections(get_connections(state, r).await?),
        Req::GetInfo(r) => Resp::GetInfo(get_info(state, r).await?),
        Req::HardForkInfo(r) => Resp::HardForkInfo(hard_fork_info(state, r).await?),
        Req::SetBans(r) => Resp::SetBans(set_bans(state, r).await?),
        Req::GetBans(r) => Resp::GetBans(get_bans(state, r).await?),
        Req::Banned(r) => Resp::Banned(banned(state, r).await?),
        Req::FlushTransactionPool(r) => {
            Resp::FlushTransactionPool(flush_transaction_pool(state, r).await?)
        }
        Req::GetOutputHistogram(r) => {
            Resp::GetOutputHistogram(get_output_histogram(state, r).await?)
        }
        Req::GetCoinbaseTxSum(r) => Resp::GetCoinbaseTxSum(get_coinbase_tx_sum(state, r).await?),
        Req::GetVersion(r) => Resp::GetVersion(get_version(state, r).await?),
        Req::GetFeeEstimate(r) => Resp::GetFeeEstimate(get_fee_estimate(state, r).await?),
        Req::GetAlternateChains(r) => {
            Resp::GetAlternateChains(get_alternate_chains(state, r).await?)
        }
        Req::RelayTx(r) => Resp::RelayTx(relay_tx(state, r).await?),
        Req::SyncInfo(r) => Resp::SyncInfo(sync_info(state, r).await?),
        Req::GetTransactionPoolBacklog(r) => {
            Resp::GetTransactionPoolBacklog(get_transaction_pool_backlog(state, r).await?)
        }
        Req::GetMinerData(r) => Resp::GetMinerData(get_miner_data(state, r).await?),
        Req::PruneBlockchain(r) => Resp::PruneBlockchain(prune_blockchain(state, r).await?),
        Req::CalcPow(r) => Resp::CalcPow(calc_pow(state, r).await?),
        Req::FlushCache(r) => Resp::FlushCache(flush_cache(state, r).await?),
        Req::AddAuxPow(r) => Resp::AddAuxPow(add_aux_pow(state, r).await?),
        Req::GetTxIdsLoose(r) => Resp::GetTxIdsLoose(get_tx_ids_loose(state, r).await?),
    })
}

async fn get_block_count(
    state: CupratedRpcHandler,
    request: GetBlockCountRequest,
) -> Result<GetBlockCountResponse, Error> {
    todo!()
}

async fn on_get_block_hash(
    state: CupratedRpcHandler,
    request: OnGetBlockHashRequest,
) -> Result<OnGetBlockHashResponse, Error> {
    todo!()
}

async fn submit_block(
    state: CupratedRpcHandler,
    request: SubmitBlockRequest,
) -> Result<SubmitBlockResponse, Error> {
    todo!()
}

async fn generate_blocks(
    state: CupratedRpcHandler,
    request: GenerateBlocksRequest,
) -> Result<GenerateBlocksResponse, Error> {
    todo!()
}

async fn get_last_block_header(
    state: CupratedRpcHandler,
    request: GetLastBlockHeaderRequest,
) -> Result<GetLastBlockHeaderResponse, Error> {
    todo!()
}

async fn get_block_header_by_hash(
    state: CupratedRpcHandler,
    request: GetBlockHeaderByHashRequest,
) -> Result<GetBlockHeaderByHashResponse, Error> {
    todo!()
}

async fn get_block_header_by_height(
    state: CupratedRpcHandler,
    request: GetBlockHeaderByHeightRequest,
) -> Result<GetBlockHeaderByHeightResponse, Error> {
    todo!()
}

async fn get_block_headers_range(
    state: CupratedRpcHandler,
    request: GetBlockHeadersRangeRequest,
) -> Result<GetBlockHeadersRangeResponse, Error> {
    todo!()
}

async fn get_block(
    state: CupratedRpcHandler,
    request: GetBlockRequest,
) -> Result<GetBlockResponse, Error> {
    todo!()
}

async fn get_connections(
    state: CupratedRpcHandler,
    request: GetConnectionsRequest,
) -> Result<GetConnectionsResponse, Error> {
    todo!()
}

async fn get_info(
    state: CupratedRpcHandler,
    request: GetInfoRequest,
) -> Result<GetInfoResponse, Error> {
    todo!()
}

async fn hard_fork_info(
    state: CupratedRpcHandler,
    request: HardForkInfoRequest,
) -> Result<HardForkInfoResponse, Error> {
    todo!()
}

async fn set_bans(
    state: CupratedRpcHandler,
    request: SetBansRequest,
) -> Result<SetBansResponse, Error> {
    todo!()
}

async fn get_bans(
    state: CupratedRpcHandler,
    request: GetBansRequest,
) -> Result<GetBansResponse, Error> {
    todo!()
}

async fn banned(
    state: CupratedRpcHandler,
    request: BannedRequest,
) -> Result<BannedResponse, Error> {
    todo!()
}

async fn flush_transaction_pool(
    state: CupratedRpcHandler,
    request: FlushTransactionPoolRequest,
) -> Result<FlushTransactionPoolResponse, Error> {
    todo!()
}

async fn get_output_histogram(
    state: CupratedRpcHandler,
    request: GetOutputHistogramRequest,
) -> Result<GetOutputHistogramResponse, Error> {
    todo!()
}

async fn get_coinbase_tx_sum(
    state: CupratedRpcHandler,
    request: GetCoinbaseTxSumRequest,
) -> Result<GetCoinbaseTxSumResponse, Error> {
    todo!()
}

async fn get_version(
    state: CupratedRpcHandler,
    request: GetVersionRequest,
) -> Result<GetVersionResponse, Error> {
    todo!()
}

async fn get_fee_estimate(
    state: CupratedRpcHandler,
    request: GetFeeEstimateRequest,
) -> Result<GetFeeEstimateResponse, Error> {
    todo!()
}

async fn get_alternate_chains(
    state: CupratedRpcHandler,
    request: GetAlternateChainsRequest,
) -> Result<GetAlternateChainsResponse, Error> {
    todo!()
}

async fn relay_tx(
    state: CupratedRpcHandler,
    request: RelayTxRequest,
) -> Result<RelayTxResponse, Error> {
    todo!()
}

async fn sync_info(
    state: CupratedRpcHandler,
    request: SyncInfoRequest,
) -> Result<SyncInfoResponse, Error> {
    todo!()
}

async fn get_transaction_pool_backlog(
    state: CupratedRpcHandler,
    request: GetTransactionPoolBacklogRequest,
) -> Result<GetTransactionPoolBacklogResponse, Error> {
    todo!()
}

async fn get_miner_data(
    state: CupratedRpcHandler,
    request: GetMinerDataRequest,
) -> Result<GetMinerDataResponse, Error> {
    todo!()
}

async fn prune_blockchain(
    state: CupratedRpcHandler,
    request: PruneBlockchainRequest,
) -> Result<PruneBlockchainResponse, Error> {
    todo!()
}

async fn calc_pow(
    state: CupratedRpcHandler,
    request: CalcPowRequest,
) -> Result<CalcPowResponse, Error> {
    todo!()
}

async fn flush_cache(
    state: CupratedRpcHandler,
    request: FlushCacheRequest,
) -> Result<FlushCacheResponse, Error> {
    todo!()
}

async fn add_aux_pow(
    state: CupratedRpcHandler,
    request: AddAuxPowRequest,
) -> Result<AddAuxPowResponse, Error> {
    todo!()
}

async fn get_tx_ids_loose(
    state: CupratedRpcHandler,
    request: GetTxIdsLooseRequest,
) -> Result<GetTxIdsLooseResponse, Error> {
    todo!()
}
260 binaries/cuprated/src/rpc/other.rs Normal file
@@ -0,0 +1,260 @@
use anyhow::Error;
|
||||
|
||||
use cuprate_rpc_types::other::{
|
||||
GetAltBlocksHashesRequest, GetAltBlocksHashesResponse, GetHeightRequest, GetHeightResponse,
|
||||
GetLimitRequest, GetLimitResponse, GetNetStatsRequest, GetNetStatsResponse, GetOutsRequest,
|
||||
GetOutsResponse, GetPeerListRequest, GetPeerListResponse, GetPublicNodesRequest,
|
||||
GetPublicNodesResponse, GetTransactionPoolHashesRequest, GetTransactionPoolHashesResponse,
|
||||
GetTransactionPoolRequest, GetTransactionPoolResponse, GetTransactionPoolStatsRequest,
|
||||
GetTransactionPoolStatsResponse, GetTransactionsRequest, GetTransactionsResponse,
|
||||
InPeersRequest, InPeersResponse, IsKeyImageSpentRequest, IsKeyImageSpentResponse,
|
||||
MiningStatusRequest, MiningStatusResponse, OtherRequest, OtherResponse, OutPeersRequest,
|
||||
OutPeersResponse, PopBlocksRequest, PopBlocksResponse, SaveBcRequest, SaveBcResponse,
|
||||
SendRawTransactionRequest, SendRawTransactionResponse, SetBootstrapDaemonRequest,
|
||||
SetBootstrapDaemonResponse, SetLimitRequest, SetLimitResponse, SetLogCategoriesRequest,
|
||||
SetLogCategoriesResponse, SetLogHashRateRequest, SetLogHashRateResponse, SetLogLevelRequest,
|
||||
SetLogLevelResponse, StartMiningRequest, StartMiningResponse, StopDaemonRequest,
|
||||
StopDaemonResponse, StopMiningRequest, StopMiningResponse, UpdateRequest, UpdateResponse,
|
||||
};
|
||||
|
||||
use crate::rpc::CupratedRpcHandler;
|
||||
|
||||
/// Map a [`OtherRequest`] to the function that will lead to a [`OtherResponse`].
|
||||
pub(super) async fn map_request(
|
||||
state: CupratedRpcHandler,
|
||||
request: OtherRequest,
|
||||
) -> Result<OtherResponse, Error> {
|
||||
use OtherRequest as Req;
|
||||
use OtherResponse as Resp;
|
||||
|
||||
Ok(match request {
|
||||
Req::GetHeight(r) => Resp::GetHeight(get_height(state, r).await?),
|
||||
Req::GetTransactions(r) => Resp::GetTransactions(get_transactions(state, r).await?),
|
||||
Req::GetAltBlocksHashes(r) => {
|
||||
Resp::GetAltBlocksHashes(get_alt_blocks_hashes(state, r).await?)
|
||||
}
|
||||
Req::IsKeyImageSpent(r) => Resp::IsKeyImageSpent(is_key_image_spent(state, r).await?),
|
||||
Req::SendRawTransaction(r) => {
|
||||
Resp::SendRawTransaction(send_raw_transaction(state, r).await?)
|
||||
}
|
||||
Req::StartMining(r) => Resp::StartMining(start_mining(state, r).await?),
|
||||
Req::StopMining(r) => Resp::StopMining(stop_mining(state, r).await?),
|
||||
Req::MiningStatus(r) => Resp::MiningStatus(mining_status(state, r).await?),
|
||||
Req::SaveBc(r) => Resp::SaveBc(save_bc(state, r).await?),
|
||||
Req::GetPeerList(r) => Resp::GetPeerList(get_peer_list(state, r).await?),
|
||||
Req::SetLogHashRate(r) => Resp::SetLogHashRate(set_log_hash_rate(state, r).await?),
|
||||
Req::SetLogLevel(r) => Resp::SetLogLevel(set_log_level(state, r).await?),
|
||||
Req::SetLogCategories(r) => Resp::SetLogCategories(set_log_categories(state, r).await?),
|
||||
Req::SetBootstrapDaemon(r) => {
|
||||
Resp::SetBootstrapDaemon(set_bootstrap_daemon(state, r).await?)
|
||||
}
|
||||
Req::GetTransactionPool(r) => {
|
||||
Resp::GetTransactionPool(get_transaction_pool(state, r).await?)
|
||||
}
|
||||
Req::GetTransactionPoolStats(r) => {
|
||||
Resp::GetTransactionPoolStats(get_transaction_pool_stats(state, r).await?)
|
||||
}
|
||||
Req::StopDaemon(r) => Resp::StopDaemon(stop_daemon(state, r).await?),
|
||||
Req::GetLimit(r) => Resp::GetLimit(get_limit(state, r).await?),
|
||||
Req::SetLimit(r) => Resp::SetLimit(set_limit(state, r).await?),
|
||||
Req::OutPeers(r) => Resp::OutPeers(out_peers(state, r).await?),
|
||||
Req::InPeers(r) => Resp::InPeers(in_peers(state, r).await?),
|
||||
Req::GetNetStats(r) => Resp::GetNetStats(get_net_stats(state, r).await?),
|
||||
Req::GetOuts(r) => Resp::GetOuts(get_outs(state, r).await?),
|
||||
Req::Update(r) => Resp::Update(update(state, r).await?),
|
||||
Req::PopBlocks(r) => Resp::PopBlocks(pop_blocks(state, r).await?),
|
||||
Req::GetTransactionPoolHashes(r) => {
|
||||
Resp::GetTransactionPoolHashes(get_transaction_pool_hashes(state, r).await?)
|
||||
}
|
||||
Req::GetPublicNodes(r) => Resp::GetPublicNodes(get_public_nodes(state, r).await?),
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_height(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetHeightRequest,
|
||||
) -> Result<GetHeightResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_transactions(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetTransactionsRequest,
|
||||
) -> Result<GetTransactionsResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_alt_blocks_hashes(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetAltBlocksHashesRequest,
|
||||
) -> Result<GetAltBlocksHashesResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn is_key_image_spent(
|
||||
state: CupratedRpcHandler,
|
||||
request: IsKeyImageSpentRequest,
|
||||
) -> Result<IsKeyImageSpentResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn send_raw_transaction(
|
||||
state: CupratedRpcHandler,
|
||||
request: SendRawTransactionRequest,
|
||||
) -> Result<SendRawTransactionResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn start_mining(
|
||||
state: CupratedRpcHandler,
|
||||
request: StartMiningRequest,
|
||||
) -> Result<StartMiningResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn stop_mining(
|
||||
state: CupratedRpcHandler,
|
||||
request: StopMiningRequest,
|
||||
) -> Result<StopMiningResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn mining_status(
|
||||
state: CupratedRpcHandler,
|
||||
request: MiningStatusRequest,
|
||||
) -> Result<MiningStatusResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn save_bc(
|
||||
state: CupratedRpcHandler,
|
||||
request: SaveBcRequest,
|
||||
) -> Result<SaveBcResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_peer_list(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetPeerListRequest,
|
||||
) -> Result<GetPeerListResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn set_log_hash_rate(
|
||||
state: CupratedRpcHandler,
|
||||
request: SetLogHashRateRequest,
|
||||
) -> Result<SetLogHashRateResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn set_log_level(
|
||||
state: CupratedRpcHandler,
|
||||
request: SetLogLevelRequest,
|
||||
) -> Result<SetLogLevelResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn set_log_categories(
|
||||
state: CupratedRpcHandler,
|
||||
request: SetLogCategoriesRequest,
|
||||
) -> Result<SetLogCategoriesResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn set_bootstrap_daemon(
|
||||
state: CupratedRpcHandler,
|
||||
request: SetBootstrapDaemonRequest,
|
||||
) -> Result<SetBootstrapDaemonResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_transaction_pool(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetTransactionPoolRequest,
|
||||
) -> Result<GetTransactionPoolResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_transaction_pool_stats(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetTransactionPoolStatsRequest,
|
||||
) -> Result<GetTransactionPoolStatsResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn stop_daemon(
|
||||
state: CupratedRpcHandler,
|
||||
request: StopDaemonRequest,
|
||||
) -> Result<StopDaemonResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_limit(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetLimitRequest,
|
||||
) -> Result<GetLimitResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn set_limit(
|
||||
state: CupratedRpcHandler,
|
||||
request: SetLimitRequest,
|
||||
) -> Result<SetLimitResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn out_peers(
|
||||
state: CupratedRpcHandler,
|
||||
request: OutPeersRequest,
|
||||
) -> Result<OutPeersResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn in_peers(
|
||||
state: CupratedRpcHandler,
|
||||
request: InPeersRequest,
|
||||
) -> Result<InPeersResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_net_stats(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetNetStatsRequest,
|
||||
) -> Result<GetNetStatsResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_outs(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetOutsRequest,
|
||||
) -> Result<GetOutsResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn update(
|
||||
state: CupratedRpcHandler,
|
||||
request: UpdateRequest,
|
||||
) -> Result<UpdateResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn pop_blocks(
|
||||
state: CupratedRpcHandler,
|
||||
request: PopBlocksRequest,
|
||||
) -> Result<PopBlocksResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_transaction_pool_hashes(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetTransactionPoolHashesRequest,
|
||||
) -> Result<GetTransactionPoolHashesResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_public_nodes(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetPublicNodesRequest,
|
||||
) -> Result<GetPublicNodesResponse, Error> {
|
||||
todo!()
|
||||
}
|
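Every handler above is still a `todo!()` stub, but the dispatch shape is already fixed: `map_request` matches the request enum once and forwards to a typed `async fn`. Below is a minimal, self-contained sketch of that pattern using hypothetical `Request`/`Response`/`State` types (not the real `cuprate_rpc_types` ones):

// Hypothetical stand-ins for the real request/response/state types.
struct HeightRequest;
struct HeightResponse(u64);

enum Request {
    Height(HeightRequest),
}

enum Response {
    Height(HeightResponse),
}

#[derive(Clone)]
struct State;

// A typed handler, analogous to `get_height` above.
async fn height(_state: State, _request: HeightRequest) -> Result<HeightResponse, &'static str> {
    Ok(HeightResponse(42))
}

// Match once, delegate to the typed handler, wrap the result back up.
async fn map_request(state: State, request: Request) -> Result<Response, &'static str> {
    Ok(match request {
        Request::Height(r) => Response::Height(height(state, r).await?),
    })
}

#[tokio::main]
async fn main() {
    let Response::Height(h) = map_request(State, Request::Height(HeightRequest))
        .await
        .unwrap();
    assert_eq!(h.0, 42);
}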
19
binaries/cuprated/src/rpc/request.rs
Normal file
@@ -0,0 +1,19 @@

//! Convenience functions for requests/responses.
//!
//! This module implements many methods for
//! [`CupratedRpcHandler`](crate::rpc::CupratedRpcHandler)
//! that are simple wrappers around the request/response API provided
//! by the multiple [`tower::Service`]s.
//!
//! These exist to prevent noise like `unreachable!()`
//! from being everywhere in the actual handler functions.
//!
//! Each module implements methods for a specific API, e.g.
//! the [`blockchain`] module contains methods for the
//! blockchain database [`tower::Service`] API.

mod address_book;
mod blockchain;
mod blockchain_context;
mod blockchain_manager;
mod txpool;
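All of the wrapper modules listed above follow one shape: drive the `tower::Service` to readiness, send the request, destructure the single response variant that request is expected to produce, and keep the `unreachable!()` in one place. A runnable sketch of that shape against a toy service (the `Req`/`Resp` types are hypothetical, not Cuprate's):

use tower::{Service, ServiceExt};

enum Req {
    Count,
}

enum Resp {
    Count(usize),
    Ok,
}

// The wrapper: callers get a plain `usize`, not a `Resp` to destructure.
async fn count<S>(svc: &mut S) -> Result<usize, S::Error>
where
    S: Service<Req, Response = Resp>,
{
    let Resp::Count(n) = svc.ready().await?.call(Req::Count).await? else {
        // The service contract says `Req::Count` is answered with `Resp::Count`.
        unreachable!();
    };

    Ok(n)
}

#[tokio::main]
async fn main() {
    let mut svc = tower::service_fn(|_req: Req| async {
        Ok::<_, std::convert::Infallible>(Resp::Count(3))
    });
    assert_eq!(count(&mut svc).await.unwrap(), 3);
}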
168
binaries/cuprated/src/rpc/request/address_book.rs
Normal file
@@ -0,0 +1,168 @@

//! Functions for [`AddressBookRequest`] & [`AddressBookResponse`].

use std::convert::Infallible;

use anyhow::{anyhow, Error};
use tower::ServiceExt;

use cuprate_helper::cast::usize_to_u64;
use cuprate_p2p_core::{
    services::{AddressBookRequest, AddressBookResponse},
    types::{BanState, ConnectionId},
    AddressBook, NetworkZone,
};
use cuprate_pruning::PruningSeed;
use cuprate_rpc_types::misc::{ConnectionInfo, Span};

use crate::rpc::constants::FIELD_NOT_SUPPORTED;

// FIXME: use `anyhow::Error` over `tower::BoxError` in address book.

/// [`AddressBookRequest::PeerlistSize`]
pub(crate) async fn peerlist_size<Z: NetworkZone>(
    address_book: &mut impl AddressBook<Z>,
) -> Result<(u64, u64), Error> {
    let AddressBookResponse::PeerlistSize { white, grey } = address_book
        .ready()
        .await
        .map_err(|e| anyhow!(e))?
        .call(AddressBookRequest::PeerlistSize)
        .await
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

    Ok((usize_to_u64(white), usize_to_u64(grey)))
}

/// [`AddressBookRequest::ConnectionInfo`]
pub(crate) async fn connection_info<Z: NetworkZone>(
    address_book: &mut impl AddressBook<Z>,
) -> Result<Vec<ConnectionInfo>, Error> {
    let AddressBookResponse::ConnectionInfo(vec) = address_book
        .ready()
        .await
        .map_err(|e| anyhow!(e))?
        .call(AddressBookRequest::ConnectionInfo)
        .await
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

    // FIXME: impl this map somewhere instead of inline.
    let vec = vec
        .into_iter()
        .map(|info| {
            let (ip, port) = match info.socket_addr {
                Some(socket) => (socket.ip().to_string(), socket.port().to_string()),
                None => (String::new(), String::new()),
            };

            ConnectionInfo {
                address: info.address.to_string(),
                address_type: info.address_type,
                avg_download: info.avg_download,
                avg_upload: info.avg_upload,
                connection_id: String::from(ConnectionId::DEFAULT_STR),
                current_download: info.current_download,
                current_upload: info.current_upload,
                height: info.height,
                host: info.host,
                incoming: info.incoming,
                ip,
                live_time: info.live_time,
                localhost: info.localhost,
                local_ip: info.local_ip,
                peer_id: hex::encode(info.peer_id.to_ne_bytes()),
                port,
                pruning_seed: info.pruning_seed.compress(),
                recv_count: info.recv_count,
                recv_idle_time: info.recv_idle_time,
                rpc_credits_per_hash: info.rpc_credits_per_hash,
                rpc_port: info.rpc_port,
                send_count: info.send_count,
                send_idle_time: info.send_idle_time,
                state: info.state,
                support_flags: info.support_flags,
            }
        })
        .collect();

    Ok(vec)
}

/// [`AddressBookRequest::ConnectionCount`]
pub(crate) async fn connection_count<Z: NetworkZone>(
    address_book: &mut impl AddressBook<Z>,
) -> Result<(u64, u64), Error> {
    let AddressBookResponse::ConnectionCount { incoming, outgoing } = address_book
        .ready()
        .await
        .map_err(|e| anyhow!(e))?
        .call(AddressBookRequest::ConnectionCount)
        .await
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

    Ok((usize_to_u64(incoming), usize_to_u64(outgoing)))
}

/// [`AddressBookRequest::SetBan`]
pub(crate) async fn set_ban<Z: NetworkZone>(
    address_book: &mut impl AddressBook<Z>,
    set_ban: cuprate_p2p_core::types::SetBan<Z::Addr>,
) -> Result<(), Error> {
    let AddressBookResponse::Ok = address_book
        .ready()
        .await
        .map_err(|e| anyhow!(e))?
        .call(AddressBookRequest::SetBan(set_ban))
        .await
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

    Ok(())
}

/// [`AddressBookRequest::GetBan`]
pub(crate) async fn get_ban<Z: NetworkZone>(
    address_book: &mut impl AddressBook<Z>,
    peer: Z::Addr,
) -> Result<Option<std::time::Instant>, Error> {
    let AddressBookResponse::GetBan { unban_instant } = address_book
        .ready()
        .await
        .map_err(|e| anyhow!(e))?
        .call(AddressBookRequest::GetBan(peer))
        .await
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

    Ok(unban_instant)
}

/// [`AddressBookRequest::GetBans`]
pub(crate) async fn get_bans<Z: NetworkZone>(
    address_book: &mut impl AddressBook<Z>,
) -> Result<Vec<BanState<Z::Addr>>, Error> {
    let AddressBookResponse::GetBans(bans) = address_book
        .ready()
        .await
        .map_err(|e| anyhow!(e))?
        .call(AddressBookRequest::GetBans)
        .await
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

    Ok(bans)
}
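One detail worth noting in `connection_info` above: peers without a socket address (e.g. over anonymity networks) are rendered as empty `ip`/`port` strings rather than treated as an error. The same mapping in isolation:

use std::net::SocketAddr;

fn main() {
    let addrs = [Some("203.0.113.1:18080".parse::<SocketAddr>().unwrap()), None];

    for socket_addr in addrs {
        // Same match as the inline closure in `connection_info` above.
        let (ip, port) = match socket_addr {
            Some(socket) => (socket.ip().to_string(), socket.port().to_string()),
            None => (String::new(), String::new()),
        };
        println!("ip={ip:?} port={port:?}");
    }
}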
377
binaries/cuprated/src/rpc/request/blockchain.rs
Normal file
@@ -0,0 +1,377 @@

//! Functions for [`BlockchainReadRequest`].

use std::{
    collections::{BTreeMap, HashMap, HashSet},
    ops::Range,
};

use anyhow::Error;
use monero_serai::block::Block;
use tower::{Service, ServiceExt};

use cuprate_blockchain::{service::BlockchainReadHandle, types::AltChainInfo};
use cuprate_helper::cast::{u64_to_usize, usize_to_u64};
use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainResponse},
    Chain, ChainInfo, CoinbaseTxSum, ExtendedBlockHeader, HardFork, MinerData,
    OutputHistogramEntry, OutputHistogramInput, OutputOnChain,
};

/// [`BlockchainReadRequest::Block`].
pub(crate) async fn block(
    blockchain_read: &mut BlockchainReadHandle,
    height: u64,
) -> Result<Block, Error> {
    let BlockchainResponse::Block(block) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::Block {
            height: u64_to_usize(height),
        })
        .await?
    else {
        unreachable!();
    };

    Ok(block)
}

/// [`BlockchainReadRequest::BlockByHash`].
pub(crate) async fn block_by_hash(
    blockchain_read: &mut BlockchainReadHandle,
    hash: [u8; 32],
) -> Result<Block, Error> {
    let BlockchainResponse::Block(block) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::BlockByHash(hash))
        .await?
    else {
        unreachable!();
    };

    Ok(block)
}

/// [`BlockchainReadRequest::BlockExtendedHeader`].
pub(crate) async fn block_extended_header(
    blockchain_read: &mut BlockchainReadHandle,
    height: u64,
) -> Result<ExtendedBlockHeader, Error> {
    let BlockchainResponse::BlockExtendedHeader(header) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::BlockExtendedHeader(u64_to_usize(
            height,
        )))
        .await?
    else {
        unreachable!();
    };

    Ok(header)
}

/// [`BlockchainReadRequest::BlockHash`].
pub(crate) async fn block_hash(
    blockchain_read: &mut BlockchainReadHandle,
    height: u64,
    chain: Chain,
) -> Result<[u8; 32], Error> {
    let BlockchainResponse::BlockHash(hash) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::BlockHash(
            u64_to_usize(height),
            chain,
        ))
        .await?
    else {
        unreachable!();
    };

    Ok(hash)
}

/// [`BlockchainReadRequest::FindBlock`].
pub(crate) async fn find_block(
    blockchain_read: &mut BlockchainReadHandle,
    block_hash: [u8; 32],
) -> Result<Option<(Chain, usize)>, Error> {
    let BlockchainResponse::FindBlock(option) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::FindBlock(block_hash))
        .await?
    else {
        unreachable!();
    };

    Ok(option)
}

/// [`BlockchainReadRequest::FilterUnknownHashes`].
pub(crate) async fn filter_unknown_hashes(
    blockchain_read: &mut BlockchainReadHandle,
    block_hashes: HashSet<[u8; 32]>,
) -> Result<HashSet<[u8; 32]>, Error> {
    let BlockchainResponse::FilterUnknownHashes(output) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::FilterUnknownHashes(block_hashes))
        .await?
    else {
        unreachable!();
    };

    Ok(output)
}

/// [`BlockchainReadRequest::BlockExtendedHeaderInRange`]
pub(crate) async fn block_extended_header_in_range(
    blockchain_read: &mut BlockchainReadHandle,
    range: Range<usize>,
    chain: Chain,
) -> Result<Vec<ExtendedBlockHeader>, Error> {
    let BlockchainResponse::BlockExtendedHeaderInRange(output) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::BlockExtendedHeaderInRange(
            range, chain,
        ))
        .await?
    else {
        unreachable!();
    };

    Ok(output)
}

/// [`BlockchainReadRequest::ChainHeight`].
pub(crate) async fn chain_height(
    blockchain_read: &mut BlockchainReadHandle,
) -> Result<(u64, [u8; 32]), Error> {
    let BlockchainResponse::ChainHeight(height, hash) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::ChainHeight)
        .await?
    else {
        unreachable!();
    };

    Ok((usize_to_u64(height), hash))
}

/// [`BlockchainReadRequest::GeneratedCoins`].
pub(crate) async fn generated_coins(
    blockchain_read: &mut BlockchainReadHandle,
    block_height: u64,
) -> Result<u64, Error> {
    let BlockchainResponse::GeneratedCoins(generated_coins) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::GeneratedCoins(u64_to_usize(
            block_height,
        )))
        .await?
    else {
        unreachable!();
    };

    Ok(generated_coins)
}

/// [`BlockchainReadRequest::Outputs`]
pub(crate) async fn outputs(
    blockchain_read: &mut BlockchainReadHandle,
    outputs: HashMap<u64, HashSet<u64>>,
) -> Result<HashMap<u64, HashMap<u64, OutputOnChain>>, Error> {
    let BlockchainResponse::Outputs(outputs) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::Outputs(outputs))
        .await?
    else {
        unreachable!();
    };

    Ok(outputs)
}

/// [`BlockchainReadRequest::NumberOutputsWithAmount`]
pub(crate) async fn number_outputs_with_amount(
    blockchain_read: &mut BlockchainReadHandle,
    output_amounts: Vec<u64>,
) -> Result<HashMap<u64, usize>, Error> {
    let BlockchainResponse::NumberOutputsWithAmount(map) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::NumberOutputsWithAmount(
            output_amounts,
        ))
        .await?
    else {
        unreachable!();
    };

    Ok(map)
}

/// [`BlockchainReadRequest::KeyImagesSpent`]
pub(crate) async fn key_images_spent(
    blockchain_read: &mut BlockchainReadHandle,
    key_images: HashSet<[u8; 32]>,
) -> Result<bool, Error> {
    let BlockchainResponse::KeyImagesSpent(is_spent) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::KeyImagesSpent(key_images))
        .await?
    else {
        unreachable!();
    };

    Ok(is_spent)
}

/// [`BlockchainReadRequest::CompactChainHistory`]
pub(crate) async fn compact_chain_history(
    blockchain_read: &mut BlockchainReadHandle,
) -> Result<(Vec<[u8; 32]>, u128), Error> {
    let BlockchainResponse::CompactChainHistory {
        block_ids,
        cumulative_difficulty,
    } = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::CompactChainHistory)
        .await?
    else {
        unreachable!();
    };

    Ok((block_ids, cumulative_difficulty))
}

/// [`BlockchainReadRequest::FindFirstUnknown`]
pub(crate) async fn find_first_unknown(
    blockchain_read: &mut BlockchainReadHandle,
    hashes: Vec<[u8; 32]>,
) -> Result<Option<(usize, u64)>, Error> {
    let BlockchainResponse::FindFirstUnknown(resp) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::FindFirstUnknown(hashes))
        .await?
    else {
        unreachable!();
    };

    Ok(resp.map(|(index, height)| (index, usize_to_u64(height))))
}

/// [`BlockchainReadRequest::TotalTxCount`]
pub(crate) async fn total_tx_count(
    blockchain_read: &mut BlockchainReadHandle,
) -> Result<u64, Error> {
    let BlockchainResponse::TotalTxCount(tx_count) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::TotalTxCount)
        .await?
    else {
        unreachable!();
    };

    Ok(usize_to_u64(tx_count))
}

/// [`BlockchainReadRequest::DatabaseSize`]
pub(crate) async fn database_size(
    blockchain_read: &mut BlockchainReadHandle,
) -> Result<(u64, u64), Error> {
    let BlockchainResponse::DatabaseSize {
        database_size,
        free_space,
    } = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::DatabaseSize)
        .await?
    else {
        unreachable!();
    };

    Ok((database_size, free_space))
}

/// [`BlockchainReadRequest::OutputHistogram`]
pub(crate) async fn output_histogram(
    blockchain_read: &mut BlockchainReadHandle,
    input: OutputHistogramInput,
) -> Result<Vec<OutputHistogramEntry>, Error> {
    let BlockchainResponse::OutputHistogram(histogram) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::OutputHistogram(input))
        .await?
    else {
        unreachable!();
    };

    Ok(histogram)
}

/// [`BlockchainReadRequest::CoinbaseTxSum`]
pub(crate) async fn coinbase_tx_sum(
    blockchain_read: &mut BlockchainReadHandle,
    height: u64,
    count: u64,
) -> Result<CoinbaseTxSum, Error> {
    let BlockchainResponse::CoinbaseTxSum(sum) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::CoinbaseTxSum {
            height: u64_to_usize(height),
            count,
        })
        .await?
    else {
        unreachable!();
    };

    Ok(sum)
}

/// [`BlockchainReadRequest::AltChains`]
pub(crate) async fn alt_chains(
    blockchain_read: &mut BlockchainReadHandle,
) -> Result<Vec<ChainInfo>, Error> {
    let BlockchainResponse::AltChains(vec) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::AltChains)
        .await?
    else {
        unreachable!();
    };

    Ok(vec)
}

/// [`BlockchainReadRequest::AltChainCount`]
pub(crate) async fn alt_chain_count(
    blockchain_read: &mut BlockchainReadHandle,
) -> Result<u64, Error> {
    let BlockchainResponse::AltChainCount(count) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::AltChainCount)
        .await?
    else {
        unreachable!();
    };

    Ok(usize_to_u64(count))
}
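These wrappers lean on `cuprate_helper::cast::{u64_to_usize, usize_to_u64}` to bridge the database's `usize` indexes and the RPC types' `u64` fields. A rough sketch of what such helpers could look like; the real implementations live in `cuprate_helper` and may differ:

// Assumption: a 64-bit target, where both casts are lossless.
// (A real helper should enforce this, e.g. with a compile-time check.)
pub const fn u64_to_usize(u: u64) -> usize {
    u as usize
}

pub const fn usize_to_u64(u: usize) -> u64 {
    u as u64
}

fn main() {
    assert_eq!(usize_to_u64(u64_to_usize(42)), 42);
}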
99
binaries/cuprated/src/rpc/request/blockchain_context.rs
Normal file
@@ -0,0 +1,99 @@

//! Functions for [`BlockChainContextRequest`] and [`BlockChainContextResponse`].

use std::convert::Infallible;

use anyhow::{anyhow, Error};
use monero_serai::block::Block;
use tower::{Service, ServiceExt};

use cuprate_consensus_context::{
    BlockChainContext, BlockChainContextRequest, BlockChainContextResponse,
    BlockChainContextService,
};
use cuprate_helper::cast::u64_to_usize;
use cuprate_types::{FeeEstimate, HardFork, HardForkInfo};

// FIXME: use `anyhow::Error` over `tower::BoxError` in blockchain context.

/// [`BlockChainContextRequest::Context`].
pub(crate) async fn context(
    blockchain_context: &mut BlockChainContextService,
) -> Result<BlockChainContext, Error> {
    let BlockChainContextResponse::Context(context) = blockchain_context
        .ready()
        .await
        .map_err(|e| anyhow!(e))?
        .call(BlockChainContextRequest::Context)
        .await
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

    Ok(context)
}

/// [`BlockChainContextRequest::HardForkInfo`].
pub(crate) async fn hard_fork_info(
    blockchain_context: &mut BlockChainContextService,
    hard_fork: HardFork,
) -> Result<HardForkInfo, Error> {
    let BlockChainContextResponse::HardForkInfo(hf_info) = blockchain_context
        .ready()
        .await
        .map_err(|e| anyhow!(e))?
        .call(BlockChainContextRequest::HardForkInfo(hard_fork))
        .await
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

    Ok(hf_info)
}

/// [`BlockChainContextRequest::FeeEstimate`].
pub(crate) async fn fee_estimate(
    blockchain_context: &mut BlockChainContextService,
    grace_blocks: u64,
) -> Result<FeeEstimate, Error> {
    let BlockChainContextResponse::FeeEstimate(fee) = blockchain_context
        .ready()
        .await
        .map_err(|e| anyhow!(e))?
        .call(BlockChainContextRequest::FeeEstimate { grace_blocks })
        .await
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

    Ok(fee)
}

/// [`BlockChainContextRequest::CalculatePow`]
pub(crate) async fn calculate_pow(
    blockchain_context: &mut BlockChainContextService,
    hardfork: HardFork,
    height: u64,
    block: Box<Block>,
    seed_hash: [u8; 32],
) -> Result<[u8; 32], Error> {
    let BlockChainContextResponse::CalculatePow(hash) = blockchain_context
        .ready()
        .await
        .map_err(|e| anyhow!(e))?
        .call(BlockChainContextRequest::CalculatePow {
            hardfork,
            height: u64_to_usize(height),
            block,
            seed_hash,
        })
        .await
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

    Ok(hash)
}
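The repeated `.map_err(|e| anyhow!(e))` in this module (and the `FIXME`s about it) exists because `tower::BoxError` is a boxed trait object, for which `anyhow::Error` has no direct `From` conversion, so `?` alone cannot convert it. Reduced to its essence:

use anyhow::{anyhow, Error};

// `tower::BoxError` is an alias for this type.
type BoxError = Box<dyn std::error::Error + Send + Sync>;

fn into_anyhow(e: BoxError) -> Error {
    // `anyhow!` accepts anything `Display + Debug + Send + Sync + 'static`,
    // which is why the wrappers use it instead of a plain `?`.
    anyhow!(e)
}

fn main() {
    let err = into_anyhow("service dropped".into());
    println!("{err}");
}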
221
binaries/cuprated/src/rpc/request/blockchain_manager.rs
Normal file
@@ -0,0 +1,221 @@

//! Functions for [`BlockchainManagerRequest`] & [`BlockchainManagerResponse`].

use anyhow::Error;
use monero_serai::block::Block;
use tower::{Service, ServiceExt};

use cuprate_helper::cast::{u64_to_usize, usize_to_u64};
use cuprate_p2p_core::{types::ConnectionId, NetworkZone};
use cuprate_pruning::PruningSeed;
use cuprate_rpc_types::misc::Span;
use cuprate_types::{AddAuxPow, AuxPow, HardFork};

use crate::rpc::{
    constants::FIELD_NOT_SUPPORTED,
    handler::{BlockchainManagerHandle, BlockchainManagerRequest, BlockchainManagerResponse},
};

/// [`BlockchainManagerRequest::PopBlocks`]
pub(crate) async fn pop_blocks(
    blockchain_manager: &mut BlockchainManagerHandle,
    amount: u64,
) -> Result<u64, Error> {
    let BlockchainManagerResponse::PopBlocks { new_height } = blockchain_manager
        .ready()
        .await?
        .call(BlockchainManagerRequest::PopBlocks {
            amount: u64_to_usize(amount),
        })
        .await?
    else {
        unreachable!();
    };

    Ok(usize_to_u64(new_height))
}

/// [`BlockchainManagerRequest::Prune`]
pub(crate) async fn prune(
    blockchain_manager: &mut BlockchainManagerHandle,
) -> Result<PruningSeed, Error> {
    let BlockchainManagerResponse::Prune(seed) = blockchain_manager
        .ready()
        .await?
        .call(BlockchainManagerRequest::Prune)
        .await?
    else {
        unreachable!();
    };

    Ok(seed)
}

/// [`BlockchainManagerRequest::Pruned`]
pub(crate) async fn pruned(
    blockchain_manager: &mut BlockchainManagerHandle,
) -> Result<bool, Error> {
    let BlockchainManagerResponse::Pruned(pruned) = blockchain_manager
        .ready()
        .await?
        .call(BlockchainManagerRequest::Pruned)
        .await?
    else {
        unreachable!();
    };

    Ok(pruned)
}

/// [`BlockchainManagerRequest::RelayBlock`]
pub(crate) async fn relay_block(
    blockchain_manager: &mut BlockchainManagerHandle,
    block: Block,
) -> Result<(), Error> {
    let BlockchainManagerResponse::Ok = blockchain_manager
        .ready()
        .await?
        .call(BlockchainManagerRequest::RelayBlock(block))
        .await?
    else {
        unreachable!();
    };

    Ok(())
}

/// [`BlockchainManagerRequest::Syncing`]
pub(crate) async fn syncing(
    blockchain_manager: &mut BlockchainManagerHandle,
) -> Result<bool, Error> {
    let BlockchainManagerResponse::Syncing(syncing) = blockchain_manager
        .ready()
        .await?
        .call(BlockchainManagerRequest::Syncing)
        .await?
    else {
        unreachable!();
    };

    Ok(syncing)
}

/// [`BlockchainManagerRequest::Synced`]
pub(crate) async fn synced(
    blockchain_manager: &mut BlockchainManagerHandle,
) -> Result<bool, Error> {
    let BlockchainManagerResponse::Synced(syncing) = blockchain_manager
        .ready()
        .await?
        .call(BlockchainManagerRequest::Synced)
        .await?
    else {
        unreachable!();
    };

    Ok(syncing)
}

/// [`BlockchainManagerRequest::Target`]
pub(crate) async fn target(
    blockchain_manager: &mut BlockchainManagerHandle,
) -> Result<std::time::Duration, Error> {
    let BlockchainManagerResponse::Target(target) = blockchain_manager
        .ready()
        .await?
        .call(BlockchainManagerRequest::Target)
        .await?
    else {
        unreachable!();
    };

    Ok(target)
}

/// [`BlockchainManagerRequest::TargetHeight`]
pub(crate) async fn target_height(
    blockchain_manager: &mut BlockchainManagerHandle,
) -> Result<u64, Error> {
    let BlockchainManagerResponse::TargetHeight { height } = blockchain_manager
        .ready()
        .await?
        .call(BlockchainManagerRequest::TargetHeight)
        .await?
    else {
        unreachable!();
    };

    Ok(usize_to_u64(height))
}

/// [`BlockchainManagerRequest::GenerateBlocks`]
pub(crate) async fn generate_blocks(
    blockchain_manager: &mut BlockchainManagerHandle,
    amount_of_blocks: u64,
    prev_block: [u8; 32],
    starting_nonce: u32,
    wallet_address: String,
) -> Result<(Vec<[u8; 32]>, u64), Error> {
    let BlockchainManagerResponse::GenerateBlocks { blocks, height } = blockchain_manager
        .ready()
        .await?
        .call(BlockchainManagerRequest::GenerateBlocks {
            amount_of_blocks,
            prev_block,
            starting_nonce,
            wallet_address,
        })
        .await?
    else {
        unreachable!();
    };

    Ok((blocks, usize_to_u64(height)))
}

/// [`BlockchainManagerRequest::Spans`]
pub(crate) async fn spans<Z: NetworkZone>(
    blockchain_manager: &mut BlockchainManagerHandle,
) -> Result<Vec<Span>, Error> {
    // let BlockchainManagerResponse::Spans(vec) = blockchain_manager
    //     .ready()
    //     .await?
    //     .call(BlockchainManagerRequest::Spans)
    //     .await?
    // else {
    //     unreachable!();
    // };

    let vec: Vec<cuprate_p2p_core::types::Span<Z::Addr>> = todo!();

    // FIXME: impl this map somewhere instead of inline.
    let vec = vec
        .into_iter()
        .map(|span| Span {
            connection_id: String::from(ConnectionId::DEFAULT_STR),
            nblocks: span.nblocks,
            rate: span.rate,
            remote_address: span.remote_address.to_string(),
            size: span.size,
            speed: span.speed,
            start_block_height: span.start_block_height,
        })
        .collect();

    Ok(vec)
}

/// [`BlockchainManagerRequest::NextNeededPruningSeed`]
pub(crate) async fn next_needed_pruning_seed(
    blockchain_manager: &mut BlockchainManagerHandle,
) -> Result<PruningSeed, Error> {
    let BlockchainManagerResponse::NextNeededPruningSeed(seed) = blockchain_manager
        .ready()
        .await?
        .call(BlockchainManagerRequest::NextNeededPruningSeed)
        .await?
    else {
        unreachable!();
    };

    Ok(seed)
}
72
binaries/cuprated/src/rpc/request/txpool.rs
Normal file
@@ -0,0 +1,72 @@

//! Functions for [`TxpoolReadRequest`].

use std::convert::Infallible;

use anyhow::{anyhow, Error};
use tower::{Service, ServiceExt};

use cuprate_helper::cast::usize_to_u64;
use cuprate_txpool::{
    service::{
        interface::{TxpoolReadRequest, TxpoolReadResponse},
        TxpoolReadHandle,
    },
    TxEntry,
};

// FIXME: use `anyhow::Error` over `tower::BoxError` in txpool.

/// [`TxpoolReadRequest::Backlog`]
pub(crate) async fn backlog(txpool_read: &mut TxpoolReadHandle) -> Result<Vec<TxEntry>, Error> {
    let TxpoolReadResponse::Backlog(tx_entries) = txpool_read
        .ready()
        .await
        .map_err(|e| anyhow!(e))?
        .call(TxpoolReadRequest::Backlog)
        .await
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

    Ok(tx_entries)
}

/// [`TxpoolReadRequest::Size`]
pub(crate) async fn size(
    txpool_read: &mut TxpoolReadHandle,
    include_sensitive_txs: bool,
) -> Result<u64, Error> {
    let TxpoolReadResponse::Size(size) = txpool_read
        .ready()
        .await
        .map_err(|e| anyhow!(e))?
        .call(TxpoolReadRequest::Size {
            include_sensitive_txs,
        })
        .await
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

    Ok(usize_to_u64(size))
}

/// TODO
pub(crate) async fn flush(
    txpool_manager: &mut Infallible,
    tx_hashes: Vec<[u8; 32]>,
) -> Result<(), Error> {
    todo!();
    Ok(())
}

/// TODO
pub(crate) async fn relay(
    txpool_manager: &mut Infallible,
    tx_hashes: Vec<[u8; 32]>,
) -> Result<(), Error> {
    todo!();
    Ok(())
}
12
binaries/cuprated/src/signals.rs
Normal file
@@ -0,0 +1,12 @@

//! Signals for Cuprate state used throughout the binary.

use tokio::sync::RwLock;

/// Reorg lock.
///
/// A [`RwLock`] where a write lock is taken during a reorg and a read lock can be taken
/// for any operation which must complete without a reorg happening.
///
/// Currently, the only operation that needs to take a read lock is adding txs to the tx-pool;
/// this can potentially be removed in the future, see: <https://github.com/Cuprate/cuprate/issues/305>
pub static REORG_LOCK: RwLock<()> = RwLock::const_new(());
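A sketch of how this lock is intended to be used; the read side mirrors what `txpool::incoming_tx` does below, while the write side stands in for a reorg in the blockchain manager:

use tokio::sync::RwLock;

static REORG_LOCK: RwLock<()> = RwLock::const_new(());

// Many of these can run concurrently; a reorg cannot start while any guard is held.
async fn add_txs_to_pool() {
    let _guard = REORG_LOCK.read().await;
    // ... verify and insert txs ...
}

// Exclusive: waits for all read guards to drop, and blocks new ones.
async fn handle_reorg() {
    let _guard = REORG_LOCK.write().await;
    // ... pop blocks and switch chains ...
}

#[tokio::main]
async fn main() {
    add_txs_to_pool().await;
    handle_reorg().await;
}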
53
binaries/cuprated/src/statics.rs
Normal file
@@ -0,0 +1,53 @@

//! Global `static`s used throughout `cuprated`.

use std::{
    sync::LazyLock,
    time::{SystemTime, UNIX_EPOCH},
};

/// Define all the `static`s that should always be initialized early on.
///
/// This wraps all `static`s inside a `LazyLock` and generates
/// a [`init_lazylock_statics`] function that must/should be
/// used by `main()` early on.
macro_rules! define_init_lazylock_statics {
    ($(
        $( #[$attr:meta] )*
        $name:ident: $t:ty = $init_fn:expr_2021;
    )*) => {
        /// Initialize global static `LazyLock` data.
        pub fn init_lazylock_statics() {
            $(
                LazyLock::force(&$name);
            )*
        }

        $(
            $(#[$attr])*
            pub static $name: LazyLock<$t> = LazyLock::new(|| $init_fn);
        )*
    };
}

define_init_lazylock_statics! {
    /// The start time of `cuprated`.
    START_INSTANT: SystemTime = SystemTime::now();

    /// Start time of `cuprated` as a UNIX timestamp.
    START_INSTANT_UNIX: u64 = START_INSTANT
        .duration_since(UNIX_EPOCH)
        .expect("Failed to set `cuprated` startup time.")
        .as_secs();
}

#[cfg(test)]
mod test {
    use super::*;

    /// Sanity check for startup UNIX time.
    #[test]
    fn start_instant_unix() {
        // Fri Sep 27 01:07:13 AM UTC 2024
        assert!(*START_INSTANT_UNIX > 1727399233);
    }
}
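For reference, the `define_init_lazylock_statics!` invocation above expands to roughly the following (hand-expanded, so minor details are approximate):

use std::{
    sync::LazyLock,
    time::{SystemTime, UNIX_EPOCH},
};

/// Initialize global static `LazyLock` data.
pub fn init_lazylock_statics() {
    LazyLock::force(&START_INSTANT);
    LazyLock::force(&START_INSTANT_UNIX);
}

/// The start time of `cuprated`.
pub static START_INSTANT: LazyLock<SystemTime> = LazyLock::new(|| SystemTime::now());

/// Start time of `cuprated` as a UNIX timestamp.
pub static START_INSTANT_UNIX: LazyLock<u64> = LazyLock::new(|| {
    START_INSTANT
        .duration_since(UNIX_EPOCH)
        .expect("Failed to set `cuprated` startup time.")
        .as_secs()
});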
15
binaries/cuprated/src/txpool.rs
Normal file
@@ -0,0 +1,15 @@

//! Transaction Pool
//!
//! Handles initiating the tx-pool, providing the preprocessor required for the dandelion pool.
use cuprate_consensus::BlockChainContextService;
use cuprate_p2p::NetworkInterface;
use cuprate_p2p_core::ClearNet;
use cuprate_txpool::service::{TxpoolReadHandle, TxpoolWriteHandle};

use crate::blockchain::ConcreteTxVerifierService;

mod dandelion;
mod incoming_tx;
mod txs_being_handled;

pub use incoming_tx::{IncomingTxError, IncomingTxHandler, IncomingTxs};
65
binaries/cuprated/src/txpool/dandelion.rs
Normal file
@@ -0,0 +1,65 @@

use std::time::Duration;

use cuprate_dandelion_tower::{
    pool::DandelionPoolService, DandelionConfig, DandelionRouter, Graph,
};
use cuprate_p2p::NetworkInterface;
use cuprate_p2p_core::ClearNet;
use cuprate_txpool::service::{TxpoolReadHandle, TxpoolWriteHandle};

use crate::{
    p2p::CrossNetworkInternalPeerId,
    txpool::incoming_tx::{DandelionTx, TxId},
};

mod diffuse_service;
mod stem_service;
mod tx_store;

/// The configuration used for [`cuprate_dandelion_tower`].
///
/// TODO: should we expose this to users of cuprated? probably not.
const DANDELION_CONFIG: DandelionConfig = DandelionConfig {
    time_between_hop: Duration::from_millis(175),
    epoch_duration: Duration::from_secs(10 * 60),
    fluff_probability: 0.12,
    graph: Graph::FourRegular,
};

/// A [`DandelionRouter`] with all generic types defined.
type ConcreteDandelionRouter = DandelionRouter<
    stem_service::OutboundPeerStream,
    diffuse_service::DiffuseService,
    CrossNetworkInternalPeerId,
    stem_service::StemPeerService<ClearNet>,
    DandelionTx,
>;

/// Starts the dandelion pool manager task and returns a handle to send txs to broadcast.
pub fn start_dandelion_pool_manager(
    router: ConcreteDandelionRouter,
    txpool_read_handle: TxpoolReadHandle,
    txpool_write_handle: TxpoolWriteHandle,
) -> DandelionPoolService<DandelionTx, TxId, CrossNetworkInternalPeerId> {
    cuprate_dandelion_tower::pool::start_dandelion_pool_manager(
        // TODO: make this constant configurable?
        32,
        router,
        tx_store::TxStoreService {
            txpool_read_handle,
            txpool_write_handle,
        },
        DANDELION_CONFIG,
    )
}

/// Creates a [`DandelionRouter`] from a [`NetworkInterface`].
pub fn dandelion_router(clear_net: NetworkInterface<ClearNet>) -> ConcreteDandelionRouter {
    DandelionRouter::new(
        diffuse_service::DiffuseService {
            clear_net_broadcast_service: clear_net.broadcast_svc(),
        },
        stem_service::OutboundPeerStream::new(clear_net),
        DANDELION_CONFIG,
    )
}
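As a back-of-envelope check on `DANDELION_CONFIG`: if each stem hop independently fluffs with probability `fluff_probability`, the stem length is geometrically distributed, so the configured values imply roughly the numbers below (illustrative arithmetic, not part of the crate):

fn main() {
    // Values from `DANDELION_CONFIG` above.
    let fluff_probability = 0.12_f64;
    let time_between_hop_ms = 175.0_f64;

    // Mean of a geometric distribution with success probability p is 1/p.
    let expected_hops = 1.0 / fluff_probability;
    let expected_stem_ms = expected_hops * time_between_hop_ms;

    println!("expected stem: ~{expected_hops:.1} hops, ~{expected_stem_ms:.0} ms");
    // expected stem: ~8.3 hops, ~1458 ms
}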
44
binaries/cuprated/src/txpool/dandelion/diffuse_service.rs
Normal file
@@ -0,0 +1,44 @@

use std::{
    future::{ready, Ready},
    task::{Context, Poll},
};

use futures::FutureExt;
use tower::Service;

use cuprate_dandelion_tower::traits::DiffuseRequest;
use cuprate_p2p::{BroadcastRequest, BroadcastSvc};
use cuprate_p2p_core::ClearNet;

use crate::txpool::dandelion::DandelionTx;

/// The dandelion diffusion service.
pub struct DiffuseService {
    pub clear_net_broadcast_service: BroadcastSvc<ClearNet>,
}

impl Service<DiffuseRequest<DandelionTx>> for DiffuseService {
    type Response = ();
    type Error = tower::BoxError;
    type Future = Ready<Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.clear_net_broadcast_service
            .poll_ready(cx)
            .map_err(Into::into)
    }

    fn call(&mut self, req: DiffuseRequest<DandelionTx>) -> Self::Future {
        // TODO: the dandelion crate should pass along where we got the tx from.
        let Ok(()) = self
            .clear_net_broadcast_service
            .call(BroadcastRequest::Transaction {
                tx_bytes: req.0 .0,
                direction: None,
                received_from: None,
            })
            .into_inner();

        ready(Ok(()))
    }
}
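`DiffuseService` can use `Ready` as its future type because the broadcast call completes synchronously, avoiding a boxed future per diffused tx. The same trick in miniature:

use std::{
    future::{ready, Ready},
    task::{Context, Poll},
};

use tower::Service;

struct Echo;

impl Service<u32> for Echo {
    type Response = u32;
    type Error = std::convert::Infallible;
    // No allocation: the response is already known when `call` returns.
    type Future = Ready<Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: u32) -> Self::Future {
        ready(Ok(req))
    }
}

#[tokio::main]
async fn main() {
    use tower::ServiceExt;
    let out = Echo.oneshot(7).await.unwrap();
    assert_eq!(out, 7);
}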
105
binaries/cuprated/src/txpool/dandelion/stem_service.rs
Normal file
@@ -0,0 +1,105 @@

use std::{
    future::Future,
    pin::Pin,
    task::{ready, Context, Poll},
};

use bytes::Bytes;
use futures::{future::BoxFuture, FutureExt, Stream};
use tower::Service;

use cuprate_dandelion_tower::{traits::StemRequest, OutboundPeer};
use cuprate_p2p::{ClientDropGuard, NetworkInterface, PeerSetRequest, PeerSetResponse};
use cuprate_p2p_core::{
    client::{Client, InternalPeerID},
    ClearNet, NetworkZone, PeerRequest, ProtocolRequest,
};
use cuprate_wire::protocol::NewTransactions;

use crate::{p2p::CrossNetworkInternalPeerId, txpool::dandelion::DandelionTx};

/// The dandelion outbound peer stream.
pub struct OutboundPeerStream {
    clear_net: NetworkInterface<ClearNet>,
    state: OutboundPeerStreamState,
}

impl OutboundPeerStream {
    pub const fn new(clear_net: NetworkInterface<ClearNet>) -> Self {
        Self {
            clear_net,
            state: OutboundPeerStreamState::Standby,
        }
    }
}

impl Stream for OutboundPeerStream {
    type Item = Result<
        OutboundPeer<CrossNetworkInternalPeerId, StemPeerService<ClearNet>>,
        tower::BoxError,
    >;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        loop {
            match &mut self.state {
                OutboundPeerStreamState::Standby => {
                    let peer_set = self.clear_net.peer_set();
                    let res = ready!(peer_set.poll_ready(cx));

                    self.state = OutboundPeerStreamState::AwaitingPeer(
                        peer_set.call(PeerSetRequest::StemPeer).boxed(),
                    );
                }
                OutboundPeerStreamState::AwaitingPeer(fut) => {
                    let res = ready!(fut.poll_unpin(cx));

                    return Poll::Ready(Some(res.map(|res| {
                        let PeerSetResponse::StemPeer(stem_peer) = res else {
                            unreachable!()
                        };

                        match stem_peer {
                            Some(peer) => OutboundPeer::Peer(
                                CrossNetworkInternalPeerId::ClearNet(peer.info.id),
                                StemPeerService(peer),
                            ),
                            None => OutboundPeer::Exhausted,
                        }
                    })));
                }
            }
        }
    }
}

/// The state of the [`OutboundPeerStream`].
enum OutboundPeerStreamState {
    /// Standby state.
    Standby,
    /// Awaiting a response from the peer-set.
    AwaitingPeer(BoxFuture<'static, Result<PeerSetResponse<ClearNet>, tower::BoxError>>),
}

/// The stem service, used to send stem txs.
pub struct StemPeerService<N: NetworkZone>(ClientDropGuard<N>);

impl<N: NetworkZone> Service<StemRequest<DandelionTx>> for StemPeerService<N> {
    type Response = <Client<N> as Service<PeerRequest>>::Response;
    type Error = tower::BoxError;
    type Future = <Client<N> as Service<PeerRequest>>::Future;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.0.poll_ready(cx)
    }

    fn call(&mut self, req: StemRequest<DandelionTx>) -> Self::Future {
        self.0
            .call(PeerRequest::Protocol(ProtocolRequest::NewTransactions(
                NewTransactions {
                    txs: vec![req.0 .0],
                    dandelionpp_fluff: false,
                    padding: Bytes::new(),
                },
            )))
    }
}
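`OutboundPeerStream` above is a hand-rolled state machine: `poll_next` loops between a `Standby` state that fires off a peer-set request and an `AwaitingPeer` state that polls the boxed future. The skeleton of that pattern, stripped of the networking:

use std::{
    pin::Pin,
    task::{ready, Context, Poll},
};

use futures::{future::BoxFuture, FutureExt, Stream, StreamExt};

enum State {
    Standby,
    Awaiting(BoxFuture<'static, u32>),
}

struct MachineStream {
    state: State,
}

impl Stream for MachineStream {
    type Item = u32;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        loop {
            match &mut self.state {
                State::Standby => {
                    // Start the async work, then loop to poll it immediately.
                    self.state = State::Awaiting(async { 42 }.boxed());
                }
                State::Awaiting(fut) => {
                    let item = ready!(fut.poll_unpin(cx));
                    self.state = State::Standby;
                    return Poll::Ready(Some(item));
                }
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let mut stream = MachineStream { state: State::Standby };
    assert_eq!(stream.next().await, Some(42));
}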
74
binaries/cuprated/src/txpool/dandelion/tx_store.rs
Normal file
@@ -0,0 +1,74 @@

use std::task::{Context, Poll};

use bytes::Bytes;
use futures::{future::BoxFuture, FutureExt};
use tower::{Service, ServiceExt};

use cuprate_dandelion_tower::{
    traits::{TxStoreRequest, TxStoreResponse},
    State,
};
use cuprate_database::RuntimeError;
use cuprate_txpool::service::{
    interface::{TxpoolReadRequest, TxpoolReadResponse, TxpoolWriteRequest},
    TxpoolReadHandle, TxpoolWriteHandle,
};

use super::{DandelionTx, TxId};

/// The dandelion tx-store service.
///
/// This is just mapping the interface [`cuprate_dandelion_tower`] wants to what [`cuprate_txpool`] provides.
pub struct TxStoreService {
    pub txpool_read_handle: TxpoolReadHandle,
    pub txpool_write_handle: TxpoolWriteHandle,
}

impl Service<TxStoreRequest<TxId>> for TxStoreService {
    type Response = TxStoreResponse<DandelionTx>;
    type Error = tower::BoxError;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: TxStoreRequest<TxId>) -> Self::Future {
        match req {
            TxStoreRequest::Get(tx_id) => self
                .txpool_read_handle
                .clone()
                .oneshot(TxpoolReadRequest::TxBlob(tx_id))
                .map(|res| match res {
                    Ok(TxpoolReadResponse::TxBlob {
                        tx_blob,
                        state_stem,
                    }) => {
                        let state = if state_stem {
                            State::Stem
                        } else {
                            State::Fluff
                        };

                        Ok(TxStoreResponse::Transaction(Some((
                            DandelionTx(Bytes::from(tx_blob)),
                            state,
                        ))))
                    }
                    Err(RuntimeError::KeyNotFound) => Ok(TxStoreResponse::Transaction(None)),
                    Err(e) => Err(e.into()),
                    Ok(_) => unreachable!(),
                })
                .boxed(),
            TxStoreRequest::Promote(tx_id) => self
                .txpool_write_handle
                .clone()
                .oneshot(TxpoolWriteRequest::Promote(tx_id))
                .map(|res| match res {
                    Ok(_) | Err(RuntimeError::KeyNotFound) => Ok(TxStoreResponse::Ok),
                    Err(e) => Err(e.into()),
                })
                .boxed(),
        }
    }
}
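`TxStoreService` clones a handle and uses `ServiceExt::oneshot` per request, which bundles `ready()` and a single `call()` into one owned future; that is what lets `call` return a `'static` boxed future without borrowing `self`. In isolation:

use tower::{service_fn, ServiceExt};

#[tokio::main]
async fn main() {
    let svc = service_fn(|x: u32| async move { Ok::<_, std::convert::Infallible>(x + 1) });

    // `oneshot` takes the (cheaply cloned) service by value, drives it to
    // readiness, then performs exactly one call.
    let out = svc.oneshot(41).await.unwrap();
    assert_eq!(out, 42);
}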
384
binaries/cuprated/src/txpool/incoming_tx.rs
Normal file
@@ -0,0 +1,384 @@

use std::{
    collections::HashSet,
    sync::Arc,
    task::{Context, Poll},
};

use bytes::Bytes;
use futures::{future::BoxFuture, FutureExt};
use monero_serai::transaction::Transaction;
use tower::{Service, ServiceExt};

use cuprate_consensus::{
    transactions::new_tx_verification_data, BlockChainContextRequest, BlockChainContextResponse,
    BlockChainContextService, ExtendedConsensusError, VerifyTxRequest,
};
use cuprate_dandelion_tower::{
    pool::{DandelionPoolService, IncomingTxBuilder},
    State, TxState,
};
use cuprate_helper::asynch::rayon_spawn_async;
use cuprate_p2p::NetworkInterface;
use cuprate_p2p_core::ClearNet;
use cuprate_txpool::{
    service::{
        interface::{
            TxpoolReadRequest, TxpoolReadResponse, TxpoolWriteRequest, TxpoolWriteResponse,
        },
        TxpoolReadHandle, TxpoolWriteHandle,
    },
    transaction_blob_hash,
};
use cuprate_types::TransactionVerificationData;

use crate::{
    blockchain::ConcreteTxVerifierService,
    constants::PANIC_CRITICAL_SERVICE_ERROR,
    p2p::CrossNetworkInternalPeerId,
    signals::REORG_LOCK,
    txpool::{
        dandelion,
        txs_being_handled::{TxsBeingHandled, TxsBeingHandledLocally},
    },
};

/// An error that can happen handling an incoming tx.
#[derive(Debug, thiserror::Error)]
pub enum IncomingTxError {
    #[error("Error parsing tx: {0}")]
    Parse(std::io::Error),
    #[error(transparent)]
    Consensus(ExtendedConsensusError),
    #[error("Duplicate tx in message")]
    DuplicateTransaction,
}

/// Incoming transactions.
pub struct IncomingTxs {
    /// The raw bytes of the transactions.
    pub txs: Vec<Bytes>,
    /// The routing state of the transactions.
    pub state: TxState<CrossNetworkInternalPeerId>,
}

/// The transaction type used for dandelion++.
#[derive(Clone)]
pub struct DandelionTx(pub Bytes);

/// A transaction ID/hash.
pub(super) type TxId = [u8; 32];

/// The service that handles incoming transaction pool transactions.
///
/// This service handles everything including verifying the tx, adding it to the pool and routing it to other nodes.
#[derive(Clone)]
pub struct IncomingTxHandler {
    /// A store of txs currently being handled in incoming tx requests.
    pub(super) txs_being_handled: TxsBeingHandled,
    /// The blockchain context cache.
    pub(super) blockchain_context_cache: BlockChainContextService,
    /// The dandelion txpool manager.
    pub(super) dandelion_pool_manager:
        DandelionPoolService<DandelionTx, TxId, CrossNetworkInternalPeerId>,
    /// The transaction verifier service.
    pub(super) tx_verifier_service: ConcreteTxVerifierService,
    /// The txpool write handle.
    pub(super) txpool_write_handle: TxpoolWriteHandle,
    /// The txpool read handle.
    pub(super) txpool_read_handle: TxpoolReadHandle,
}

impl IncomingTxHandler {
    /// Initialize the [`IncomingTxHandler`].
    #[expect(clippy::significant_drop_tightening)]
    pub fn init(
        clear_net: NetworkInterface<ClearNet>,
        txpool_write_handle: TxpoolWriteHandle,
        txpool_read_handle: TxpoolReadHandle,
        blockchain_context_cache: BlockChainContextService,
        tx_verifier_service: ConcreteTxVerifierService,
    ) -> Self {
        let dandelion_router = dandelion::dandelion_router(clear_net);

        let dandelion_pool_manager = dandelion::start_dandelion_pool_manager(
            dandelion_router,
            txpool_read_handle.clone(),
            txpool_write_handle.clone(),
        );

        Self {
            txs_being_handled: TxsBeingHandled::new(),
            blockchain_context_cache,
            dandelion_pool_manager,
            tx_verifier_service,
            txpool_write_handle,
            txpool_read_handle,
        }
    }
}

impl Service<IncomingTxs> for IncomingTxHandler {
    type Response = ();
    type Error = IncomingTxError;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: IncomingTxs) -> Self::Future {
        handle_incoming_txs(
            req,
            self.txs_being_handled.clone(),
            self.blockchain_context_cache.clone(),
            self.tx_verifier_service.clone(),
            self.txpool_write_handle.clone(),
            self.txpool_read_handle.clone(),
            self.dandelion_pool_manager.clone(),
        )
        .boxed()
    }
}

/// Handles the incoming txs.
async fn handle_incoming_txs(
    IncomingTxs { txs, state }: IncomingTxs,
    txs_being_handled: TxsBeingHandled,
    mut blockchain_context_cache: BlockChainContextService,
    mut tx_verifier_service: ConcreteTxVerifierService,
    mut txpool_write_handle: TxpoolWriteHandle,
    mut txpool_read_handle: TxpoolReadHandle,
    mut dandelion_pool_manager: DandelionPoolService<DandelionTx, TxId, CrossNetworkInternalPeerId>,
) -> Result<(), IncomingTxError> {
    let _reorg_guard = REORG_LOCK.read().await;

    let (txs, stem_pool_txs, txs_being_handled_guard) =
        prepare_incoming_txs(txs, txs_being_handled, &mut txpool_read_handle).await?;

    let BlockChainContextResponse::Context(context) = blockchain_context_cache
        .ready()
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
        .call(BlockChainContextRequest::Context)
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
    else {
        unreachable!()
    };

    let context = context.unchecked_blockchain_context();

    tx_verifier_service
        .ready()
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
        .call(VerifyTxRequest::Prepped {
            txs: txs.clone(),
            current_chain_height: context.chain_height,
            top_hash: context.top_hash,
            time_for_time_lock: context.current_adjusted_timestamp_for_time_lock(),
            hf: context.current_hf,
        })
        .await
        .map_err(IncomingTxError::Consensus)?;

    for tx in txs {
        handle_valid_tx(
            tx,
            state.clone(),
            &mut txpool_write_handle,
            &mut dandelion_pool_manager,
        )
        .await;
    }

    // Re-relay any txs we got in the block that were already in our stem pool.
    for stem_tx in stem_pool_txs {
        rerelay_stem_tx(
            &stem_tx,
            state.clone(),
            &mut txpool_read_handle,
            &mut dandelion_pool_manager,
        )
        .await;
    }

    Ok(())
}

/// Prepares the incoming transactions for verification.
///
/// This will filter out all transactions already in the pool or txs already being handled in another request.
///
/// Returns in order:
/// - The [`TransactionVerificationData`] for all the txs we did not already have
/// - The IDs of the transactions in the incoming message that are in our stem-pool
/// - A [`TxsBeingHandledLocally`] guard that prevents verifying the same tx at the same time across 2 tasks.
async fn prepare_incoming_txs(
    tx_blobs: Vec<Bytes>,
    txs_being_handled: TxsBeingHandled,
    txpool_read_handle: &mut TxpoolReadHandle,
) -> Result<
    (
        Vec<Arc<TransactionVerificationData>>,
        Vec<TxId>,
        TxsBeingHandledLocally,
    ),
    IncomingTxError,
> {
    let mut tx_blob_hashes = HashSet::new();
    let mut txs_being_handled_locally = txs_being_handled.local_tracker();

    // Compute the blob hash for each tx and filter out the txs currently being handled by another incoming tx batch.
    let txs = tx_blobs
        .into_iter()
        .filter_map(|tx_blob| {
            let tx_blob_hash = transaction_blob_hash(&tx_blob);

            // If a duplicate is in here the incoming tx batch contained the same tx twice.
            if !tx_blob_hashes.insert(tx_blob_hash) {
                return Some(Err(IncomingTxError::DuplicateTransaction));
            }

            // If a duplicate is here it is being handled in another batch.
            if !txs_being_handled_locally.try_add_tx(tx_blob_hash) {
                return None;
            }

            Some(Ok((tx_blob_hash, tx_blob)))
        })
        .collect::<Result<Vec<_>, _>>()?;

    // Filter the txs already in the txpool out.
    // This will leave the txs already in the pool in [`TxBeingHandledLocally`] but that shouldn't be an issue.
    let TxpoolReadResponse::FilterKnownTxBlobHashes {
        unknown_blob_hashes,
        stem_pool_hashes,
    } = txpool_read_handle
        .ready()
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
||||
.call(TxpoolReadRequest::FilterKnownTxBlobHashes(tx_blob_hashes))
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
// Now prepare the txs for verification.
|
||||
rayon_spawn_async(move || {
|
||||
let txs = txs
|
||||
.into_iter()
|
||||
.filter_map(|(tx_blob_hash, tx_blob)| {
|
||||
if unknown_blob_hashes.contains(&tx_blob_hash) {
|
||||
Some(tx_blob)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.map(|bytes| {
|
||||
let tx = Transaction::read(&mut bytes.as_ref()).map_err(IncomingTxError::Parse)?;
|
||||
|
||||
let tx = new_tx_verification_data(tx)
|
||||
.map_err(|e| IncomingTxError::Consensus(e.into()))?;
|
||||
|
||||
Ok(Arc::new(tx))
|
||||
})
|
||||
.collect::<Result<Vec<_>, IncomingTxError>>()?;
|
||||
|
||||
Ok((txs, stem_pool_hashes, txs_being_handled_locally))
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
/// Handle a verified tx.
|
||||
///
|
||||
/// This will add the tx to the txpool and route it to the network.
|
||||
async fn handle_valid_tx(
|
||||
tx: Arc<TransactionVerificationData>,
|
||||
state: TxState<CrossNetworkInternalPeerId>,
|
||||
txpool_write_handle: &mut TxpoolWriteHandle,
|
||||
dandelion_pool_manager: &mut DandelionPoolService<
|
||||
DandelionTx,
|
||||
TxId,
|
||||
CrossNetworkInternalPeerId,
|
||||
>,
|
||||
) {
|
||||
let incoming_tx =
|
||||
IncomingTxBuilder::new(DandelionTx(Bytes::copy_from_slice(&tx.tx_blob)), tx.tx_hash);
|
||||
|
||||
let TxpoolWriteResponse::AddTransaction(double_spend) = txpool_write_handle
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(TxpoolWriteRequest::AddTransaction {
|
||||
tx,
|
||||
state_stem: state.is_stem_stage(),
|
||||
})
|
||||
.await
|
||||
.expect("TODO")
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
// TODO: track double spends to quickly ignore them from their blob hash.
|
||||
if let Some(tx_hash) = double_spend {
|
||||
return;
|
||||
};
|
||||
|
||||
// TODO: There is a race condition possible if a tx and block come in at the same time: <https://github.com/Cuprate/cuprate/issues/314>.
|
||||
|
||||
let incoming_tx = incoming_tx
|
||||
.with_routing_state(state)
|
||||
.with_state_in_db(None)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
dandelion_pool_manager
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(incoming_tx)
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR);
|
||||
}
|
||||
|
||||
/// Re-relay a tx that was already in our stem pool.
|
||||
async fn rerelay_stem_tx(
|
||||
tx_hash: &TxId,
|
||||
state: TxState<CrossNetworkInternalPeerId>,
|
||||
txpool_read_handle: &mut TxpoolReadHandle,
|
||||
dandelion_pool_manager: &mut DandelionPoolService<
|
||||
DandelionTx,
|
||||
TxId,
|
||||
CrossNetworkInternalPeerId,
|
||||
>,
|
||||
) {
|
||||
let Ok(TxpoolReadResponse::TxBlob { tx_blob, .. }) = txpool_read_handle
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(TxpoolReadRequest::TxBlob(*tx_hash))
|
||||
.await
|
||||
else {
|
||||
// The tx could have been dropped from the pool.
|
||||
return;
|
||||
};
|
||||
|
||||
let incoming_tx =
|
||||
IncomingTxBuilder::new(DandelionTx(Bytes::copy_from_slice(&tx_blob)), *tx_hash);
|
||||
|
||||
let incoming_tx = incoming_tx
|
||||
.with_routing_state(state)
|
||||
.with_state_in_db(Some(State::Stem))
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
dandelion_pool_manager
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(incoming_tx)
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR);
|
||||
}
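
The handler above is used wherever a `tower::Service` is expected. As a minimal sketch (hypothetical caller code, not part of this diff; it assumes the `TxState::Local` variant from `cuprate-dandelion-tower` for a tx submitted by this node), a caller drives it like so:

```rust
use tower::{Service, ServiceExt};

// `handler` comes from `IncomingTxHandler::init(...)`; `tx_blob` is a
// serialized transaction, e.g. received over RPC.
async fn submit_tx(
    mut handler: IncomingTxHandler,
    tx_blob: Bytes,
) -> Result<(), IncomingTxError> {
    let req = IncomingTxs {
        txs: vec![tx_blob],
        // `Local` marks a tx originating from this node, so the
        // dandelion++ pool will stem-route it before fluffing.
        state: TxState::Local,
    };

    // The usual tower pattern: wait for readiness, then call.
    handler.ready().await?.call(req).await
}
```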

53
binaries/cuprated/src/txpool/txs_being_handled.rs
Normal file

@ -0,0 +1,53 @@
use std::sync::Arc;

use dashmap::DashSet;

/// A set of txs currently being handled, shared between instances of the incoming tx handler.
#[derive(Clone)]
pub struct TxsBeingHandled(Arc<DashSet<[u8; 32]>>);

impl TxsBeingHandled {
    /// Create a new [`TxsBeingHandled`].
    pub fn new() -> Self {
        Self(Arc::new(DashSet::new()))
    }

    /// Create a new [`TxsBeingHandledLocally`] that will keep track of txs being handled in a request.
    pub fn local_tracker(&self) -> TxsBeingHandledLocally {
        TxsBeingHandledLocally {
            txs_being_handled: self.clone(),
            txs: vec![],
        }
    }
}

/// A tracker of txs being handled in a single request. This will add the txs to the global [`TxsBeingHandled`]
/// tracker as well.
///
/// When this is dropped the txs will be removed from [`TxsBeingHandled`].
pub struct TxsBeingHandledLocally {
    txs_being_handled: TxsBeingHandled,
    txs: Vec<[u8; 32]>,
}

impl TxsBeingHandledLocally {
    /// Try to add a tx to the map from its [`transaction_blob_hash`](cuprate_txpool::transaction_blob_hash).
    ///
    /// Returns `true` if the tx was added and `false` if another task is already handling this tx.
    pub fn try_add_tx(&mut self, tx_blob_hash: [u8; 32]) -> bool {
        if !self.txs_being_handled.0.insert(tx_blob_hash) {
            return false;
        }

        self.txs.push(tx_blob_hash);
        true
    }
}

impl Drop for TxsBeingHandledLocally {
    fn drop(&mut self) {
        for hash in &self.txs {
            self.txs_being_handled.0.remove(hash);
        }
    }
}
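
A small demonstration (not part of the diff) of the claim/release semantics that `prepare_incoming_txs` relies on: a blob hash can be claimed by only one local tracker at a time, and dropping a tracker releases everything it claimed.

```rust
fn tracker_demo() {
    let shared = TxsBeingHandled::new();

    let mut batch_a = shared.local_tracker();
    let mut batch_b = shared.local_tracker();

    let hash = [1_u8; 32];

    assert!(batch_a.try_add_tx(hash));  // Batch A claims the tx.
    assert!(!batch_b.try_add_tx(hash)); // Batch B sees it is already taken.

    drop(batch_a); // `Drop` removes A's hashes from the shared `DashSet`...
    assert!(batch_b.try_add_tx(hash)); // ...so the tx can be claimed again.
}
```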

@ -1,4 +1,4 @@
## Cuprate's architecture (implementation) book
## Cuprate's architecture book
This book documents Cuprate's architecture and implementation.

See:

@ -1,19 +1,17 @@
[book]
authors = ["hinto-janai"]
authors = ["Cuprate Contributors"]
language = "en"
multilingual = false
src = "src"
title = "Cuprate Architecture"
git-repository-url = "https://github.com/Cuprate/architecture-book"

# TODO: fix after importing real files.
#
# [preprocessor.last-changed]
# command = "mdbook-last-changed"
# renderer = ["html"]
#
# [output.html]
# default-theme = "ayu"
# preferred-dark-theme = "ayu"
# git-repository-url = "https://github.com/hinto-janai/cuprate-architecture"
# additional-css = ["last-changed.css"]
[preprocessor.last-changed]
command = "mdbook-last-changed"
renderer = ["html"]

[output.html]
default-theme = "ayu"
preferred-dark-theme = "ayu"
git-repository-url = "https://github.com/Cuprate/architecture-book"
additional-css = ["last-changed.css"]

7
books/architecture/last-changed.css
Normal file

@ -0,0 +1,7 @@
footer {
    font-size: 0.8em;
    text-align: center;
    border-top: 1px solid;
    margin-top: 4%;
    padding: 5px 0;
}

@ -1,3 +1,177 @@
# Summary

- [TODO](todo.md)
[Cuprate Architecture](cuprate-architecture.md)
[🟡 Foreword](foreword.md)

---

- [🟠 Intro](intro/intro.md)
    - [🟡 Who this book is for](intro/who-this-book-is-for.md)
    - [🔴 Required knowledge](intro/required-knowledge.md)
    - [🔴 How to use this book](intro/how-to-use-this-book.md)

---

- [⚪️ Bird's eye view](birds-eye-view/intro.md)
    - [⚪️ Map](birds-eye-view/map.md)
    - [⚪️ Components](birds-eye-view/components.md)

---

- [⚪️ Formats, protocols, types](formats-protocols-types/intro.md)
    - [⚪️ monero_serai](formats-protocols-types/monero-serai.md)
    - [⚪️ cuprate_types](formats-protocols-types/cuprate-types.md)
    - [⚪️ cuprate_helper](formats-protocols-types/cuprate-helper.md)
    - [⚪️ Epee](formats-protocols-types/epee.md)
    - [⚪️ Levin](formats-protocols-types/levin.md)

---

- [🟢 Storage](storage/intro.md)
    - [🟢 Database abstraction](storage/db/intro.md)
        - [🟢 Abstraction](storage/db/abstraction/intro.md)
            - [🟢 Backend](storage/db/abstraction/backend.md)
            - [🟢 ConcreteEnv](storage/db/abstraction/concrete_env.md)
            - [🟢 Trait](storage/db/abstraction/trait.md)
        - [🟢 Syncing](storage/db/syncing.md)
        - [🟢 Resizing](storage/db/resizing.md)
        - [🟢 (De)serialization](storage/db/serde.md)
        - [🟢 Known issues and tradeoffs](storage/db/issues/intro.md)
            - [🟢 Abstracting backends](storage/db/issues/traits.md)
            - [🟢 Hot-swap](storage/db/issues/hot-swap.md)
            - [🟢 Unaligned bytes](storage/db/issues/unaligned.md)
            - [🟢 Endianness](storage/db/issues/endian.md)
            - [🟢 Multimap](storage/db/issues/multimap.md)
    - [🟢 Common behavior](storage/common/intro.md)
        - [🟢 Types](storage/common/types.md)
        - [🟢 `ops`](storage/common/ops.md)
        - [🟢 `tower::Service`](storage/common/service/intro.md)
            - [🟢 Initialization](storage/common/service/initialization.md)
            - [🟢 Requests](storage/common/service/requests.md)
            - [🟢 Responses](storage/common/service/responses.md)
            - [🟢 Resizing](storage/common/service/resizing.md)
            - [🟢 Thread model](storage/common/service/thread-model.md)
            - [🟢 Shutdown](storage/common/service/shutdown.md)
    - [🟢 Blockchain](storage/blockchain/intro.md)
        - [🟢 Schema](storage/blockchain/schema/intro.md)
            - [🟢 Tables](storage/blockchain/schema/tables.md)
            - [🟢 Multimap tables](storage/blockchain/schema/multimap.md)
    - [⚪️ Transaction pool](storage/txpool/intro.md)
    - [⚪️ Pruning](storage/pruning/intro.md)

---

- [🟢 RPC](rpc/intro.md)
    - [🟡 JSON-RPC 2.0](rpc/json-rpc.md)
    - [🟢 The types](rpc/types/intro.md)
        - [🟢 Misc types](rpc/types/misc-types.md)
        - [🟢 Base RPC types](rpc/types/base-types.md)
        - [🟢 The type generator macro](rpc/types/macro.md)
        - [🟢 Metadata](rpc/types/metadata.md)
        - [🟡 (De)serialization](rpc/types/deserialization.md)
    - [🟢 The interface](rpc/interface.md)
    - [🔴 The handler](rpc/handler/intro.md)
    - [🔴 The server](rpc/server/intro.md)
    - [🟢 Differences with `monerod`](rpc/differences/intro.md)
        - [🟢 JSON field ordering](rpc/differences/json-field-ordering.md)
        - [🟢 JSON formatting](rpc/differences/json-formatting.md)
        - [🟢 JSON strictness](rpc/differences/json-strictness.md)
        - [🟡 JSON-RPC strictness](rpc/differences/json-rpc-strictness.md)
        - [🟡 HTTP methods](rpc/differences/http-methods.md)
        - [🟡 RPC payment](rpc/differences/rpc-payment.md)
        - [🟢 Custom strings](rpc/differences/custom-strings.md)
        - [🔴 Unsupported RPC calls](rpc/differences/unsupported-rpc-calls.md)
        - [🔴 RPC calls with different behavior](rpc/differences/rpc-calls-with-different-behavior.md)

---

- [⚪️ ZMQ](zmq/intro.md)
    - [⚪️ TODO](zmq/todo.md)

---

- [⚪️ Consensus](consensus/intro.md)
    - [⚪️ Verifier](consensus/verifier.md)
    - [⚪️ TODO](consensus/todo.md)

---

- [⚪️ Networking](networking/intro.md)
    - [⚪️ P2P](networking/p2p.md)
    - [⚪️ Dandelion++](networking/dandelion.md)
    - [⚪️ Proxy](networking/proxy.md)
    - [⚪️ Tor](networking/tor.md)
    - [⚪️ i2p](networking/i2p.md)
    - [⚪️ IPv4/IPv6](networking/ipv4-ipv6.md)

---

- [🔴 Instrumentation](instrumentation/intro.md)
    - [⚪️ Logging](instrumentation/logging.md)
    - [⚪️ Data collection](instrumentation/data-collection.md)

---

- [⚪️ Binary](binary/intro.md)
    - [⚪️ CLI](binary/cli.md)
    - [⚪️ Config](binary/config.md)
    - [⚪️ Logging](binary/logging.md)

---

- [⚪️ Resources](resources/intro.md)
    - [⚪️ File system](resources/fs/intro.md)
        - [🟡 Index of PATHs](resources/fs/paths.md)
    - [⚪️ Sockets](resources/sockets/index.md)
        - [🔴 Index of ports](resources/sockets/ports.md)
    - [⚪️ Memory](resources/memory.md)
    - [🟡 Concurrency and parallelism](resources/cap/intro.md)
        - [⚪️ Map](resources/cap/map.md)
        - [⚪️ The RPC server](resources/cap/the-rpc-server.md)
        - [⚪️ The database](resources/cap/the-database.md)
        - [⚪️ The block downloader](resources/cap/the-block-downloader.md)
        - [⚪️ The verifier](resources/cap/the-verifier.md)
        - [⚪️ Thread exit](resources/cap/thread-exit.md)
        - [🔴 Index of threads](resources/cap/threads.md)

---

- [⚪️ External Monero libraries](external-monero-libraries/intro.md)
    - [⚪️ Cryptonight](external-monero-libraries/cryptonight.md)
    - [🔴 RandomX](external-monero-libraries/randomx.md)
    - [🔴 monero_serai](external-monero-libraries/monero_serai.md)

---

- [🟢 Benchmarking](benchmarking/intro.md)
    - [🟢 Criterion](benchmarking/criterion/intro.md)
        - [🟢 Creating](benchmarking/criterion/creating.md)
        - [🟢 Running](benchmarking/criterion/running.md)
    - [🟢 `cuprate-benchmark`](benchmarking/cuprate/intro.md)
        - [🟢 Creating](benchmarking/cuprate/creating.md)
        - [🟢 Running](benchmarking/cuprate/running.md)

---

- [⚪️ Testing](testing/intro.md)
    - [⚪️ Monero data](testing/monero-data.md)
    - [⚪️ RPC client](testing/rpc-client.md)
    - [⚪️ Spawning `monerod`](testing/spawning-monerod.md)
- [⚪️ Known issues and tradeoffs](known-issues-and-tradeoffs/intro.md)
    - [⚪️ Networking](known-issues-and-tradeoffs/networking.md)
    - [⚪️ RPC](known-issues-and-tradeoffs/rpc.md)
    - [⚪️ Storage](known-issues-and-tradeoffs/storage.md)

---

- [🟢 Monero oddities](oddities/intro.md)
    - [🟡 Little-endian IPv4 addresses](oddities/le-ipv4.md)

---

- [⚪️ Appendix](appendix/intro.md)
    - [🟢 Crates](appendix/crates.md)
    - [🔴 Contributing](appendix/contributing.md)
    - [🔴 Build targets](appendix/build-targets.md)
    - [🔴 Protocol book](appendix/protocol-book.md)
    - [⚪️ User book](appendix/user-book.md)

7
books/architecture/src/appendix/build-targets.md
Normal file

@ -0,0 +1,7 @@
# Build targets
- x86
- ARM64
- Windows
- Linux
- macOS
- FreeBSD(?)

2
books/architecture/src/appendix/contributing.md
Normal file

@ -0,0 +1,2 @@
# Contributing
<https://github.com/Cuprate/cuprate/blob/main/CONTRIBUTING.md>

78
books/architecture/src/appendix/crates.md
Normal file

@ -0,0 +1,78 @@
# Crates
This is an index of the in-house crates that Cuprate uses and maintains.

They are categorized into groups.

Crate documentation for each crate can be found by clicking the crate name or by visiting <https://doc.cuprate.org>. Documentation can also be built manually by running this at the root of the `cuprate` repository:
```bash
cargo doc --package $CRATE
```
For example, this will generate and open `cuprate-blockchain` documentation:
```bash
cargo doc --open --package cuprate-blockchain
```

## Consensus
| Crate | In-tree path | Purpose |
|-------|--------------|---------|
| [`cuprate-consensus`](https://doc.cuprate.org/cuprate_consensus) | [`consensus/`](https://github.com/Cuprate/cuprate/tree/main/consensus) | TODO
| [`cuprate-consensus-context`](https://doc.cuprate.org/cuprate_consensus_context) | [`consensus/context/`](https://github.com/Cuprate/cuprate/tree/main/consensus/context) | TODO
| [`cuprate-consensus-rules`](https://doc.cuprate.org/cuprate_consensus_rules) | [`consensus/rules/`](https://github.com/Cuprate/cuprate/tree/main/consensus/rules) | TODO
| [`cuprate-fast-sync`](https://doc.cuprate.org/cuprate_fast_sync) | [`consensus/fast-sync/`](https://github.com/Cuprate/cuprate/tree/main/consensus/fast-sync) | Fast block synchronization

## Networking
| Crate | In-tree path | Purpose |
|-------|--------------|---------|
| [`cuprate-epee-encoding`](https://doc.cuprate.org/cuprate_epee_encoding) | [`net/epee-encoding/`](https://github.com/Cuprate/cuprate/tree/main/net/epee-encoding) | Epee (de)serialization
| [`cuprate-fixed-bytes`](https://doc.cuprate.org/cuprate_fixed_bytes) | [`net/fixed-bytes/`](https://github.com/Cuprate/cuprate/tree/main/net/fixed-bytes) | Fixed byte containers backed by `byte::Byte`
| [`cuprate-levin`](https://doc.cuprate.org/cuprate_levin) | [`net/levin/`](https://github.com/Cuprate/cuprate/tree/main/net/levin) | Levin bucket protocol implementation
| [`cuprate-wire`](https://doc.cuprate.org/cuprate_wire) | [`net/wire/`](https://github.com/Cuprate/cuprate/tree/main/net/wire) | TODO

## P2P
| Crate | In-tree path | Purpose |
|-------|--------------|---------|
| [`cuprate-address-book`](https://doc.cuprate.org/cuprate_address_book) | [`p2p/address-book/`](https://github.com/Cuprate/cuprate/tree/main/p2p/address-book) | TODO
| [`cuprate-async-buffer`](https://doc.cuprate.org/cuprate_async_buffer) | [`p2p/async-buffer/`](https://github.com/Cuprate/cuprate/tree/main/p2p/async-buffer) | A bounded SPSC, FIFO, asynchronous buffer that supports arbitrary weights for values
| [`cuprate-dandelion-tower`](https://doc.cuprate.org/cuprate_dandelion_tower) | [`p2p/dandelion-tower/`](https://github.com/Cuprate/cuprate/tree/main/p2p/dandelion-tower) | TODO
| [`cuprate-p2p`](https://doc.cuprate.org/cuprate_p2p) | [`p2p/p2p/`](https://github.com/Cuprate/cuprate/tree/main/p2p/p2p) | TODO
| [`cuprate-p2p-bucket`](https://doc.cuprate.org/cuprate_p2p_bucket) | [`p2p/bucket/`](https://github.com/Cuprate/cuprate/tree/main/p2p/bucket) | A collection data structure discriminating its items into "buckets" of limited size
| [`cuprate-p2p-core`](https://doc.cuprate.org/cuprate_p2p_core) | [`p2p/p2p-core/`](https://github.com/Cuprate/cuprate/tree/main/p2p/p2p-core) | TODO

## Storage
| Crate | In-tree path | Purpose |
|-------|--------------|---------|
| [`cuprate-blockchain`](https://doc.cuprate.org/cuprate_blockchain) | [`storage/blockchain/`](https://github.com/Cuprate/cuprate/tree/main/storage/blockchain) | Blockchain database built on-top of `cuprate-database` & `cuprate-database-service`
| [`cuprate-database`](https://doc.cuprate.org/cuprate_database) | [`storage/database/`](https://github.com/Cuprate/cuprate/tree/main/storage/database) | Pure database abstraction
| [`cuprate-database-service`](https://doc.cuprate.org/cuprate_database_service) | [`storage/database-service/`](https://github.com/Cuprate/cuprate/tree/main/storage/database-service) | `tower::Service` + thread-pool abstraction built on-top of `cuprate-database`
| [`cuprate-txpool`](https://doc.cuprate.org/cuprate_txpool) | [`storage/txpool/`](https://github.com/Cuprate/cuprate/tree/main/storage/txpool) | Transaction pool database built on-top of `cuprate-database` & `cuprate-database-service`

## RPC
| Crate | In-tree path | Purpose |
|-------|--------------|---------|
| [`cuprate-json-rpc`](https://doc.cuprate.org/cuprate_json_rpc) | [`rpc/json-rpc/`](https://github.com/Cuprate/cuprate/tree/main/rpc/json-rpc) | JSON-RPC 2.0 implementation
| [`cuprate-rpc-types`](https://doc.cuprate.org/cuprate_rpc_types) | [`rpc/types/`](https://github.com/Cuprate/cuprate/tree/main/rpc/types) | Monero RPC types and traits
| [`cuprate-rpc-interface`](https://doc.cuprate.org/cuprate_rpc_interface) | [`rpc/interface/`](https://github.com/Cuprate/cuprate/tree/main/rpc/interface) | RPC interface & routing
| [`cuprate-rpc-handler`](https://doc.cuprate.org/cuprate_rpc_handler) | [`rpc/handler/`](https://github.com/Cuprate/cuprate/tree/main/rpc/handler) | RPC inner handlers

## ZMQ
| Crate | In-tree path | Purpose |
|-------|--------------|---------|
| [`cuprate-zmq-types`](https://doc.cuprate.org/cuprate_zmq_types) | [`zmq/types/`](https://github.com/Cuprate/cuprate/tree/main/zmq/types) | Message types for ZMQ Pub/Sub interface

## 1-off crates
| Crate | In-tree path | Purpose |
|-------|--------------|---------|
| [`cuprate-constants`](https://doc.cuprate.org/cuprate_constants) | [`constants/`](https://github.com/Cuprate/cuprate/tree/main/constants) | Shared `const/static` data across Cuprate
| [`cuprate-cryptonight`](https://doc.cuprate.org/cuprate_cryptonight) | [`cryptonight/`](https://github.com/Cuprate/cuprate/tree/main/cryptonight) | CryptoNight hash functions
| [`cuprate-pruning`](https://doc.cuprate.org/cuprate_pruning) | [`pruning/`](https://github.com/Cuprate/cuprate/tree/main/pruning) | Monero pruning logic/types
| [`cuprate-helper`](https://doc.cuprate.org/cuprate_helper) | [`helper/`](https://github.com/Cuprate/cuprate/tree/main/helper) | Kitchen-sink helper crate for Cuprate
| [`cuprate-test-utils`](https://doc.cuprate.org/cuprate_test_utils) | [`test-utils/`](https://github.com/Cuprate/cuprate/tree/main/test-utils) | Testing utilities for Cuprate
| [`cuprate-types`](https://doc.cuprate.org/cuprate_types) | [`types/`](https://github.com/Cuprate/cuprate/tree/main/types) | Shared types across Cuprate

## Benchmarks
| Crate | In-tree path | Purpose |
|-------|--------------|---------|
| [`cuprate-benchmark`](https://doc.cuprate.org/cuprate_benchmark) | [`benches/benchmark/bin/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin) | Cuprate benchmarking binary
| [`cuprate-benchmark-lib`](https://doc.cuprate.org/cuprate_benchmark_lib) | [`benches/benchmark/lib/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib) | Cuprate benchmarking library
| `cuprate-benchmark-*` | [`benches/benchmark/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/) | Benchmark for a Cuprate crate that uses `cuprate-benchmark`
| `cuprate-criterion-*` | [`benches/criterion/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) | Benchmark for a Cuprate crate that uses [Criterion](https://bheisler.github.io/criterion.rs/book)

1
books/architecture/src/appendix/intro.md
Normal file

@ -0,0 +1 @@
# Appendix

2
books/architecture/src/appendix/protocol-book.md
Normal file

@ -0,0 +1,2 @@
# Protocol book
<https://monero-book.cuprate.org>

1
books/architecture/src/appendix/user-book.md
Normal file

@ -0,0 +1 @@
# ⚪️ User book

21
books/architecture/src/benchmarking/criterion/creating.md
Normal file

@ -0,0 +1,21 @@
# Creating
Creating a new Criterion-based benchmarking crate for one of Cuprate's crates is relatively simple, although it requires knowledge of how to use Criterion first:

1. Read the `Getting Started` section of <https://bheisler.github.io/criterion.rs/book>
2. Copy [`benches/criterion/example`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/example) as a base
3. Get started; a minimal sketch of a benchmark file is shown below
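
To make step 3 concrete, here is a minimal sketch of what such a crate's benchmark file can look like. The `fibonacci` function is a hypothetical stand-in for whatever Cuprate code is being measured; the Criterion calls (`bench_function`, `criterion_group!`, `criterion_main!`) are the standard ones from the Criterion book.

```rust
use criterion::{black_box, criterion_group, criterion_main, Criterion};

// Hypothetical stand-in for the Cuprate function under test.
fn fibonacci(n: u64) -> u64 {
    (1..=n).fold((0, 1), |(a, b), _| (b, a + b)).0
}

// Each benchmark times one function via `Criterion::bench_function`.
fn bench_fibonacci(c: &mut Criterion) {
    c.bench_function("fibonacci 20", |b| b.iter(|| fibonacci(black_box(20))));
}

// These macros generate the `main` that `cargo bench` runs.
criterion_group!(benches, bench_fibonacci);
criterion_main!(benches);
```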

## Naming
New benchmark crates using Criterion should:
- Be in [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/)
- Be in the `cuprate-criterion-$CRATE_NAME` format

For a real example, see:
[`cuprate-criterion-json-rpc`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/cuprate-json-rpc).

## Workspace
Finally, make sure to add the benchmark crate to the workspace
[`Cargo.toml`](https://github.com/Cuprate/cuprate/blob/main/Cargo.toml) file.

Your benchmark is now ready to be run.

4
books/architecture/src/benchmarking/criterion/intro.md
Normal file

@ -0,0 +1,4 @@
# Criterion
Each sub-directory in [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) is a crate that uses [Criterion](https://bheisler.github.io/criterion.rs/book) for timing single functions and/or groups of functions.

They are generally small in scope.
15
books/architecture/src/benchmarking/criterion/running.md
Normal file

@ -0,0 +1,15 @@
# Running
To run all Criterion benchmarks, run this from the repository root:
```bash
cargo bench
```

To run specific package(s), use:
```bash
cargo bench --package $CRITERION_BENCHMARK_CRATE_NAME
```

For example:
```bash
cargo bench --package cuprate-criterion-json-rpc
```
57
books/architecture/src/benchmarking/cuprate/creating.md
Normal file

@ -0,0 +1,57 @@
# Creating
New benchmarks are plugged into `cuprate-benchmark` by:
1. Implementing `cuprate_benchmark_lib::Benchmark`
1. Registering the benchmark in the `cuprate_benchmark` binary

See [`benches/benchmark/example`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/example)
for an example.

## Creating the benchmark crate
Before plugging into `cuprate-benchmark`, your actual benchmark crate must be created:

1. Create a new crate inside `benches/benchmark` (consider copying `benches/benchmark/example` as a base)
1. Pull in `cuprate_benchmark_lib` as a dependency
1. Create a benchmark
1. Implement `cuprate_benchmark_lib::Benchmark`

New benchmark crates using `cuprate-benchmark` should:
- Be in [`benches/benchmark/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/)
- Be in the `cuprate-benchmark-$CRATE_NAME` format

For a real example, see:
[`cuprate-benchmark-database`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/cuprate-database).

## `cuprate_benchmark_lib::Benchmark`
This is the trait that standardizes all benchmarks run under `cuprate-benchmark`.

It must be implemented by your benchmarking crate.

See the `cuprate-benchmark-lib` crate documentation for a user guide: <https://doc.cuprate.org/cuprate_benchmark_lib>.

## Adding a feature to `cuprate-benchmark`
After your benchmark's behavior is defined, it must be registered
in the binary that is actually run: `cuprate-benchmark`.

If your benchmark is new, add a new crate feature to [`cuprate-benchmark`'s Cargo.toml file](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin/Cargo.toml) with an optional dependency to your benchmarking crate.

Please remember to edit the feature table in the
[`README.md`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin/README.md) as well!

## Adding to `cuprate-benchmark`'s `main()`
After adding your crate's feature, add a conditional line to the `main()` function that runs the benchmark if the feature is enabled.

For example, if your crate's name is `egg`:
```rust
cfg_if! {
    if #[cfg(feature = "egg")] {
        run::run_benchmark::<cuprate_benchmark_egg::Benchmark>(&mut timings);
    }
}
```

## Workspace
Finally, make sure to add the benchmark crate to the workspace
[`Cargo.toml`](https://github.com/Cuprate/cuprate/blob/main/Cargo.toml) file.

Your benchmark is now ready to be run.

37
books/architecture/src/benchmarking/cuprate/intro.md
Normal file

@ -0,0 +1,37 @@
# cuprate-benchmark
Cuprate has 2 custom crates for general benchmarking:
- `cuprate-benchmark`; the binary crate that is actually run
- `cuprate-benchmark-lib`; the library that other crates hook into

The abstract purpose of `cuprate-benchmark` is very simple:
1. Set-up the benchmark
1. Start timer
1. Run benchmark
1. Output data

`cuprate-benchmark` runs the benchmarks found in [`benches/benchmark/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark).

`cuprate-benchmark-lib` defines the `Benchmark` trait that all
benchmark crates implement to "plug-in" to the benchmarking harness.

## Diagram
A diagram displaying the relation between `cuprate-benchmark` and related crates.

```
                    ┌─────────────────────┐
                    │  cuprate_benchmark  │
                    │ (actual binary run) │
                    └──────────┬──────────┘
            ┌──────────────────┴───────────────────┐
            │         cuprate_benchmark_lib        │
            │ ┌───────────────────────────────────┐│
            │ │          trait Benchmark          ││
            │ └───────────────────────────────────┘│
            └──────────────────┬───────────────────┘
┌───────────────────────────┐  │   ┌───────────────────────────┐
│ cuprate_benchmark_example ├──┼───┤    cuprate_benchmark_*    │
└───────────────────────────┘  │   └───────────────────────────┘
┌───────────────────────────┐  │   ┌───────────────────────────┐
│    cuprate_benchmark_*    ├──┴───┤    cuprate_benchmark_*    │
└───────────────────────────┘      └───────────────────────────┘
```
16
books/architecture/src/benchmarking/cuprate/running.md
Normal file

@ -0,0 +1,16 @@
# Running
`cuprate-benchmark` benchmarks are run with this command:
```bash
cargo run --release --package cuprate-benchmark --features $BENCHMARK_CRATE_FEATURE
```

For example, to run the example benchmark:
```bash
cargo run --release --package cuprate-benchmark --features example
```

Use the `all` feature to run all benchmarks:
```bash
# Run all benchmarks
cargo run --release --package cuprate-benchmark --features all
```
22
books/architecture/src/benchmarking/intro.md
Normal file

@ -0,0 +1,22 @@
# Benchmarking
Cuprate has 2 types of benchmarks:
- [Criterion](https://bheisler.github.io/criterion.rs/book/user_guide/advanced_configuration.html) benchmarks
- `cuprate-benchmark` benchmarks

Criterion is used for micro benchmarks; they time single functions or groups of functions, and are generally small in scope.

`cuprate-benchmark` and [`cuprate-benchmark-lib`](https://doc.cuprate.org/cuprate_benchmark_lib) are custom in-house crates Cuprate uses for macro benchmarks; these test sub-systems, sections of a sub-system, or otherwise larger or more complicated code that isn't well-suited for micro benchmarks.

## File layout and purpose
All benchmarking-related files are in the [`benches/`](https://github.com/Cuprate/cuprate/tree/main/benches) folder.

This directory is organized like so:

| Directory | Purpose |
|-------------------------------|---------|
| [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) | Criterion (micro) benchmarks
| `benches/criterion/cuprate-*` | Criterion benchmarks for the crate with the same name
| [`benches/benchmark/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark) | Cuprate's custom benchmarking files
| [`benches/benchmark/bin`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin) | The `cuprate-benchmark` crate; the actual binary that is run, linking all benchmarks
| [`benches/benchmark/lib`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib) | The `cuprate-benchmark-lib` crate; the benchmarking framework all benchmarks plug into
| `benches/benchmark/cuprate-*` | `cuprate-benchmark` benchmarks for the crate with the same name
1
books/architecture/src/binary/cli.md
Normal file

@ -0,0 +1 @@
# ⚪️ CLI

1
books/architecture/src/binary/config.md
Normal file

@ -0,0 +1 @@
# ⚪️ Config

1
books/architecture/src/binary/intro.md
Normal file

@ -0,0 +1 @@
# ⚪️ Binary
Some files were not shown because too many files have changed in this diff.