mirror of
https://github.com/Cuprate/cuprate.git
synced 2025-01-22 10:44:36 +00:00
Merge branch 'main' into hack
This commit is contained in:
commit
95c0d8b4bf
539 changed files with 32900 additions and 22176 deletions
4
.github/labeler.yml
vendored
4
.github/labeler.yml
vendored
|
@ -56,6 +56,10 @@ A-cryptonight:
|
|||
- changed-files:
|
||||
- any-glob-to-any-file: cryptonight/**
|
||||
|
||||
A-constants:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: constants/**
|
||||
|
||||
A-storage:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: storage/**
|
||||
|
|
40
.github/workflows/architecture-book.yml
vendored
Normal file
40
.github/workflows/architecture-book.yml
vendored
Normal file
|
@ -0,0 +1,40 @@
|
|||
# This action attempts to build the architecture book, if changed.
|
||||
|
||||
name: Architecture mdBook
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ['main']
|
||||
paths: ['books/architecture/**']
|
||||
pull_request:
|
||||
paths: ['books/architecture/**']
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
# Version of `mdbook` to install.
|
||||
MDBOOK_VERSION: 0.4.36
|
||||
# Version of `mdbook-last-changed` to install.
|
||||
# <https://github.com/badboy/mdbook-last-changed>.
|
||||
MDBOOK_LAST_CHANGED_VERSION: 0.1.4
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin/mdbook
|
||||
~/.cargo/bin/mdbook-last-changed
|
||||
key: architecture-book
|
||||
|
||||
- name: Install mdBook
|
||||
run: |
|
||||
cargo install --locked --version ${MDBOOK_VERSION} mdbook || echo "mdbook already exists"
|
||||
cargo install --locked --version ${MDBOOK_LAST_CHANGED_VERSION} mdbook-last-changed || echo "mdbook-last-changed already exists"
|
||||
|
||||
- name: Build
|
||||
run: mdbook build books/architecture
|
1
.github/workflows/audit.yml
vendored
1
.github/workflows/audit.yml
vendored
|
@ -7,6 +7,7 @@ on:
|
|||
paths:
|
||||
- '**/Cargo.toml'
|
||||
- '**/Cargo.lock'
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
|
1
.github/workflows/deny.yml
vendored
1
.github/workflows/deny.yml
vendored
|
@ -7,6 +7,7 @@ on:
|
|||
paths:
|
||||
- '**/Cargo.toml'
|
||||
- '**/Cargo.lock'
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
|
74
.github/workflows/doc.yml
vendored
Normal file
74
.github/workflows/doc.yml
vendored
Normal file
|
@ -0,0 +1,74 @@
|
|||
# This builds `cargo doc` and uploads it to the repo's GitHub Pages.
|
||||
|
||||
name: Doc
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "main" ] # Only deploy if `main` changes.
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
# Show colored output in CI.
|
||||
CARGO_TERM_COLOR: always
|
||||
# Generate an index page.
|
||||
RUSTDOCFLAGS: '--cfg docsrs --show-type-layout --enable-index-page -Zunstable-options'
|
||||
|
||||
jobs:
|
||||
# Build documentation.
|
||||
build:
|
||||
# FIXME: how to build and merge Windows + macOS docs
|
||||
# with Linux's? Similar to the OS toggle on docs.rs.
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
# Nightly required for some `cargo doc` settings.
|
||||
toolchain: nightly
|
||||
|
||||
- name: Cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
# Don't cache actual doc files, just build files.
|
||||
# This is so that removed crates don't show up.
|
||||
path: target/debug
|
||||
key: doc
|
||||
|
||||
# Packages other than `Boost` used by `Monero` are listed here.
|
||||
# https://github.com/monero-project/monero/blob/c444a7e002036e834bfb4c68f04a121ce1af5825/.github/workflows/build.yml#L71
|
||||
|
||||
- name: Install dependencies (Linux)
|
||||
run: sudo apt install -y libboost-dev
|
||||
|
||||
- name: Documentation
|
||||
run: cargo +nightly doc --workspace --all-features
|
||||
|
||||
- name: Upload documentation
|
||||
uses: actions/upload-pages-artifact@v3
|
||||
with:
|
||||
path: target/doc/
|
||||
|
||||
# Deployment job.
|
||||
deploy:
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
|
||||
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
|
||||
permissions:
|
||||
contents: read
|
||||
pages: write
|
||||
id-token: write
|
||||
|
||||
steps:
|
||||
- name: Deploy to GitHub Pages
|
||||
id: deployment
|
||||
uses: actions/deploy-pages@v4
|
40
.github/workflows/monero-book.yml
vendored
Normal file
40
.github/workflows/monero-book.yml
vendored
Normal file
|
@ -0,0 +1,40 @@
|
|||
# This action attempts to build the Monero book, if changed.
|
||||
|
||||
name: Monero mdBook
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ['main']
|
||||
paths: ['books/protocol/**']
|
||||
pull_request:
|
||||
paths: ['books/protocol/**']
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
# Version of `mdbook` to install.
|
||||
MDBOOK_VERSION: 0.4.36
|
||||
# Version of `mdbook-svgbob` to install.
|
||||
# <https://github.com/boozook/mdbook-svgbob>.
|
||||
MDBOOK_SVGBOB_VERSION: 0.2.1
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin/mdbook
|
||||
~/.cargo/bin/mdbook-svgbob
|
||||
key: monero-book
|
||||
|
||||
- name: Install mdBook
|
||||
run: |
|
||||
cargo install --locked --version ${MDBOOK_VERSION} mdbook || echo "mdbook already exists"
|
||||
cargo install --locked --version ${MDBOOK_SVGBOB_VERSION} mdbook-svgbob || echo "mdbook-svgbob already exists"
|
||||
|
||||
- name: Build
|
||||
run: mdbook build books/protocol
|
40
.github/workflows/user-book.yml
vendored
Normal file
40
.github/workflows/user-book.yml
vendored
Normal file
|
@ -0,0 +1,40 @@
|
|||
# This action attempts to build the user book, if changed.
|
||||
|
||||
name: User mdBook
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ['main']
|
||||
paths: ['books/user/**']
|
||||
pull_request:
|
||||
paths: ['books/user/**']
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
# Version of `mdbook` to install.
|
||||
MDBOOK_VERSION: 0.4.36
|
||||
# Version of `mdbook-last-changed` to install.
|
||||
# <https://github.com/badboy/mdbook-last-changed>.
|
||||
MDBOOK_LAST_CHANGED_VERSION: 0.1.4
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin/mdbook
|
||||
~/.cargo/bin/mdbook-last-changed
|
||||
key: user-book
|
||||
|
||||
- name: Install mdBook
|
||||
run: |
|
||||
cargo install --locked --version ${MDBOOK_VERSION} mdbook || echo "mdbook already exists"
|
||||
cargo install --locked --version ${MDBOOK_LAST_CHANGED_VERSION} mdbook-last-changed || echo "mdbook-last-changed already exists"
|
||||
|
||||
- name: Build
|
||||
run: mdbook build books/user
|
|
@ -219,9 +219,9 @@ The description of pull requests should generally follow the template laid out i
|
|||
If your pull request is long and/or has sections that need clarifying, consider leaving a review on your own PR with comments explaining the changes.
|
||||
|
||||
## 5. Documentation
|
||||
Cuprate's crates (libraries) have inline documentation.
|
||||
Cuprate's crates (libraries) have inline documentation, they are published from the `main` branch at https://doc.cuprate.org.
|
||||
|
||||
These can be built and viewed using the `cargo` tool. For example, to build and view a specific crate's documentation, run the following command at the repository's root:
|
||||
Documentation can be built and viewed using the `cargo` tool. For example, to build and view a specific crate's documentation, run the following command at the repository's root:
|
||||
```bash
|
||||
cargo doc --open --package $CRATE
|
||||
```
|
||||
|
|
1747
Cargo.lock
generated
1747
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
316
Cargo.toml
316
Cargo.toml
|
@ -2,7 +2,10 @@
|
|||
resolver = "2"
|
||||
|
||||
members = [
|
||||
"binaries/cuprated",
|
||||
"constants",
|
||||
"consensus",
|
||||
"consensus/context",
|
||||
"consensus/fast-sync",
|
||||
"consensus/rules",
|
||||
"cryptonight",
|
||||
|
@ -17,6 +20,7 @@ members = [
|
|||
"p2p/async-buffer",
|
||||
"p2p/address-book",
|
||||
"storage/blockchain",
|
||||
"storage/service",
|
||||
"storage/txpool",
|
||||
"storage/database",
|
||||
"pruning",
|
||||
|
@ -46,50 +50,82 @@ opt-level = 1
|
|||
opt-level = 3
|
||||
|
||||
[workspace.dependencies]
|
||||
async-trait = { version = "0.1.74", default-features = false }
|
||||
bitflags = { version = "2.4.2", default-features = false }
|
||||
borsh = { version = "1.2.1", default-features = false }
|
||||
bytemuck = { version = "1.14.3", default-features = false }
|
||||
bytes = { version = "1.5.0", default-features = false }
|
||||
# Cuprate members
|
||||
cuprate-fast-sync = { path = "consensus/fast-sync" ,default-features = false}
|
||||
cuprate-consensus-rules = { path = "consensus/rules" ,default-features = false}
|
||||
cuprate-constants = { path = "constants" ,default-features = false}
|
||||
cuprate-consensus = { path = "consensus" ,default-features = false}
|
||||
cuprate-consensus-context = { path = "consensus/context" ,default-features = false}
|
||||
cuprate-cryptonight = { path = "cryptonight" ,default-features = false}
|
||||
cuprate-helper = { path = "helper" ,default-features = false}
|
||||
cuprate-epee-encoding = { path = "net/epee-encoding" ,default-features = false}
|
||||
cuprate-fixed-bytes = { path = "net/fixed-bytes" ,default-features = false}
|
||||
cuprate-levin = { path = "net/levin" ,default-features = false}
|
||||
cuprate-wire = { path = "net/wire" ,default-features = false}
|
||||
cuprate-p2p = { path = "p2p/p2p" ,default-features = false}
|
||||
cuprate-p2p-core = { path = "p2p/p2p-core" ,default-features = false}
|
||||
cuprate-dandelion-tower = { path = "p2p/dandelion-tower" ,default-features = false}
|
||||
cuprate-async-buffer = { path = "p2p/async-buffer" ,default-features = false}
|
||||
cuprate-address-book = { path = "p2p/address-book" ,default-features = false}
|
||||
cuprate-blockchain = { path = "storage/blockchain" ,default-features = false}
|
||||
cuprate-database = { path = "storage/database" ,default-features = false}
|
||||
cuprate-database-service = { path = "storage/service" ,default-features = false}
|
||||
cuprate-txpool = { path = "storage/txpool" ,default-features = false}
|
||||
cuprate-pruning = { path = "pruning" ,default-features = false}
|
||||
cuprate-test-utils = { path = "test-utils" ,default-features = false}
|
||||
cuprate-types = { path = "types" ,default-features = false}
|
||||
cuprate-json-rpc = { path = "rpc/json-rpc" ,default-features = false}
|
||||
cuprate-rpc-types = { path = "rpc/types" ,default-features = false}
|
||||
cuprate-rpc-interface = { path = "rpc/interface" ,default-features = false}
|
||||
|
||||
# External dependencies
|
||||
anyhow = { version = "1.0.89", default-features = false }
|
||||
async-trait = { version = "0.1.82", default-features = false }
|
||||
bitflags = { version = "2.6.0", default-features = false }
|
||||
borsh = { version = "1.5.1", default-features = false }
|
||||
bytemuck = { version = "1.18.0", default-features = false }
|
||||
bytes = { version = "1.7.2", default-features = false }
|
||||
cfg-if = { version = "1.0.0", default-features = false }
|
||||
clap = { version = "4.4.7", default-features = false }
|
||||
chrono = { version = "0.4.31", default-features = false }
|
||||
clap = { version = "4.5.17", default-features = false }
|
||||
chrono = { version = "0.4.38", default-features = false }
|
||||
crypto-bigint = { version = "0.5.5", default-features = false }
|
||||
crossbeam = { version = "0.8.4", default-features = false }
|
||||
const_format = { version = "0.2.33", default-features = false }
|
||||
curve25519-dalek = { version = "4.1.3", default-features = false }
|
||||
dalek-ff-group = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
|
||||
dashmap = { version = "5.5.3", default-features = false }
|
||||
dirs = { version = "5.0.1", default-features = false }
|
||||
futures = { version = "0.3.29", default-features = false }
|
||||
futures = { version = "0.3.30", default-features = false }
|
||||
hex = { version = "0.4.3", default-features = false }
|
||||
hex-literal = { version = "0.4", default-features = false }
|
||||
indexmap = { version = "2.2.5", default-features = false }
|
||||
monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
|
||||
multiexp = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
|
||||
paste = { version = "1.0.14", default-features = false }
|
||||
pin-project = { version = "1.1.3", default-features = false }
|
||||
indexmap = { version = "2.5.0", default-features = false }
|
||||
monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce", default-features = false }
|
||||
paste = { version = "1.0.15", default-features = false }
|
||||
pin-project = { version = "1.1.5", default-features = false }
|
||||
randomx-rs = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "0028464", default-features = false }
|
||||
rand = { version = "0.8.5", default-features = false }
|
||||
rand_distr = { version = "0.4.3", default-features = false }
|
||||
rayon = { version = "1.9.0", default-features = false }
|
||||
serde_bytes = { version = "0.11.12", default-features = false }
|
||||
serde_json = { version = "1.0.108", default-features = false }
|
||||
serde = { version = "1.0.190", default-features = false }
|
||||
thiserror = { version = "1.0.50", default-features = false }
|
||||
thread_local = { version = "1.1.7", default-features = false }
|
||||
tokio-util = { version = "0.7.10", default-features = false }
|
||||
tokio-stream = { version = "0.1.14", default-features = false }
|
||||
tokio = { version = "1.33.0", default-features = false }
|
||||
tower = { version = "0.4.13", default-features = false }
|
||||
tracing-subscriber = { version = "0.3.17", default-features = false }
|
||||
rayon = { version = "1.10.0", default-features = false }
|
||||
serde_bytes = { version = "0.11.15", default-features = false }
|
||||
serde_json = { version = "1.0.128", default-features = false }
|
||||
serde = { version = "1.0.210", default-features = false }
|
||||
strum = { version = "0.26.3", default-features = false }
|
||||
thiserror = { version = "1.0.63", default-features = false }
|
||||
thread_local = { version = "1.1.8", default-features = false }
|
||||
tokio-util = { version = "0.7.12", default-features = false }
|
||||
tokio-stream = { version = "0.1.16", default-features = false }
|
||||
tokio = { version = "1.40.0", default-features = false }
|
||||
tower = { git = "https://github.com/Cuprate/tower.git", rev = "6c7faf0", default-features = false } # <https://github.com/tower-rs/tower/pull/796>
|
||||
tracing-subscriber = { version = "0.3.18", default-features = false }
|
||||
tracing = { version = "0.1.40", default-features = false }
|
||||
|
||||
## workspace.dev-dependencies
|
||||
tempfile = { version = "3" }
|
||||
pretty_assertions = { version = "1.4.0" }
|
||||
proptest = { version = "1" }
|
||||
proptest-derive = { version = "0.4.0" }
|
||||
tokio-test = { version = "0.4.4" }
|
||||
monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" }
|
||||
monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" }
|
||||
tempfile = { version = "3" }
|
||||
pretty_assertions = { version = "1.4.1" }
|
||||
proptest = { version = "1" }
|
||||
proptest-derive = { version = "0.4.0" }
|
||||
tokio-test = { version = "0.4.4" }
|
||||
|
||||
## TODO:
|
||||
## Potential dependencies.
|
||||
|
@ -101,7 +137,219 @@ tokio-test = { version = "0.4.4" }
|
|||
# regex = { version = "1.10.2" } # Regular expressions | https://github.com/rust-lang/regex
|
||||
# ryu = { version = "1.0.15" } # Fast float to string formatting | https://github.com/dtolnay/ryu
|
||||
|
||||
# Maybe one day.
|
||||
# disk = { version = "*" } # (De)serialization to/from disk with various file formats | https://github.com/hinto-janai/disk
|
||||
# readable = { version = "*" } # Stack-based string formatting utilities | https://github.com/hinto-janai/readable
|
||||
# json-rpc = { git = "https://github.com/hinto-janai/json-rpc" } # JSON-RPC 2.0 types
|
||||
# Lints: cold, warm, hot: <https://github.com/Cuprate/cuprate/issues/131>
|
||||
[workspace.lints.clippy]
|
||||
# Cold
|
||||
borrow_as_ptr = "deny"
|
||||
case_sensitive_file_extension_comparisons = "deny"
|
||||
cast_lossless = "deny"
|
||||
cast_ptr_alignment = "deny"
|
||||
checked_conversions = "deny"
|
||||
cloned_instead_of_copied = "deny"
|
||||
const_is_empty = "deny"
|
||||
doc_lazy_continuation = "deny"
|
||||
doc_link_with_quotes = "deny"
|
||||
duplicated_attributes = "deny"
|
||||
empty_enum = "deny"
|
||||
enum_glob_use = "deny"
|
||||
expl_impl_clone_on_copy = "deny"
|
||||
explicit_into_iter_loop = "deny"
|
||||
filter_map_next = "deny"
|
||||
flat_map_option = "deny"
|
||||
from_iter_instead_of_collect = "deny"
|
||||
if_not_else = "deny"
|
||||
ignored_unit_patterns = "deny"
|
||||
inconsistent_struct_constructor = "deny"
|
||||
index_refutable_slice = "deny"
|
||||
inefficient_to_string = "deny"
|
||||
invalid_upcast_comparisons = "deny"
|
||||
iter_filter_is_ok = "deny"
|
||||
iter_filter_is_some = "deny"
|
||||
implicit_clone = "deny"
|
||||
legacy_numeric_constants = "deny"
|
||||
manual_c_str_literals = "deny"
|
||||
manual_pattern_char_comparison = "deny"
|
||||
manual_instant_elapsed = "deny"
|
||||
manual_inspect = "deny"
|
||||
manual_is_variant_and = "deny"
|
||||
manual_let_else = "deny"
|
||||
manual_ok_or = "deny"
|
||||
manual_string_new = "deny"
|
||||
manual_unwrap_or_default = "deny"
|
||||
map_unwrap_or = "deny"
|
||||
match_bool = "deny"
|
||||
match_same_arms = "deny"
|
||||
match_wildcard_for_single_variants = "deny"
|
||||
mismatching_type_param_order = "deny"
|
||||
missing_transmute_annotations = "deny"
|
||||
mut_mut = "deny"
|
||||
needless_bitwise_bool = "deny"
|
||||
needless_character_iteration = "deny"
|
||||
needless_continue = "deny"
|
||||
needless_for_each = "deny"
|
||||
needless_maybe_sized = "deny"
|
||||
needless_raw_string_hashes = "deny"
|
||||
no_effect_underscore_binding = "deny"
|
||||
no_mangle_with_rust_abi = "deny"
|
||||
option_as_ref_cloned = "deny"
|
||||
option_option = "deny"
|
||||
ptr_as_ptr = "deny"
|
||||
ptr_cast_constness = "deny"
|
||||
pub_underscore_fields = "deny"
|
||||
redundant_closure_for_method_calls = "deny"
|
||||
ref_as_ptr = "deny"
|
||||
ref_option_ref = "deny"
|
||||
same_functions_in_if_condition = "deny"
|
||||
semicolon_if_nothing_returned = "deny"
|
||||
trivially_copy_pass_by_ref = "deny"
|
||||
uninlined_format_args = "deny"
|
||||
unnecessary_join = "deny"
|
||||
unnested_or_patterns = "deny"
|
||||
unused_async = "deny"
|
||||
unused_self = "deny"
|
||||
used_underscore_binding = "deny"
|
||||
zero_sized_map_values = "deny"
|
||||
as_ptr_cast_mut = "deny"
|
||||
clear_with_drain = "deny"
|
||||
collection_is_never_read = "deny"
|
||||
debug_assert_with_mut_call = "deny"
|
||||
derive_partial_eq_without_eq = "deny"
|
||||
empty_line_after_doc_comments = "deny"
|
||||
empty_line_after_outer_attr = "deny"
|
||||
equatable_if_let = "deny"
|
||||
iter_on_empty_collections = "deny"
|
||||
iter_on_single_items = "deny"
|
||||
iter_with_drain = "deny"
|
||||
needless_collect = "deny"
|
||||
needless_pass_by_ref_mut = "deny"
|
||||
negative_feature_names = "deny"
|
||||
non_send_fields_in_send_ty = "deny"
|
||||
nonstandard_macro_braces = "deny"
|
||||
path_buf_push_overwrite = "deny"
|
||||
read_zero_byte_vec = "deny"
|
||||
redundant_clone = "deny"
|
||||
redundant_feature_names = "deny"
|
||||
trailing_empty_array = "deny"
|
||||
trait_duplication_in_bounds = "deny"
|
||||
type_repetition_in_bounds = "deny"
|
||||
uninhabited_references = "deny"
|
||||
unnecessary_struct_initialization = "deny"
|
||||
unused_peekable = "deny"
|
||||
unused_rounding = "deny"
|
||||
use_self = "deny"
|
||||
useless_let_if_seq = "deny"
|
||||
wildcard_dependencies = "deny"
|
||||
unseparated_literal_suffix = "deny"
|
||||
unnecessary_safety_doc = "deny"
|
||||
unnecessary_safety_comment = "deny"
|
||||
unnecessary_self_imports = "deny"
|
||||
string_to_string = "deny"
|
||||
rest_pat_in_fully_bound_structs = "deny"
|
||||
redundant_type_annotations = "deny"
|
||||
infinite_loop = "deny"
|
||||
zero_repeat_side_effects = "deny"
|
||||
|
||||
# Warm
|
||||
cast_possible_truncation = "deny"
|
||||
cast_possible_wrap = "deny"
|
||||
cast_precision_loss = "deny"
|
||||
cast_sign_loss = "deny"
|
||||
copy_iterator = "deny"
|
||||
doc_markdown = "deny"
|
||||
explicit_deref_methods = "deny"
|
||||
explicit_iter_loop = "deny"
|
||||
float_cmp = "deny"
|
||||
fn_params_excessive_bools = "deny"
|
||||
into_iter_without_iter = "deny"
|
||||
iter_without_into_iter = "deny"
|
||||
iter_not_returning_iterator = "deny"
|
||||
large_digit_groups = "deny"
|
||||
large_types_passed_by_value = "deny"
|
||||
manual_assert = "deny"
|
||||
maybe_infinite_iter = "deny"
|
||||
missing_fields_in_debug = "deny"
|
||||
needless_pass_by_value = "deny"
|
||||
range_minus_one = "deny"
|
||||
range_plus_one = "deny"
|
||||
redundant_else = "deny"
|
||||
ref_binding_to_reference = "deny"
|
||||
return_self_not_must_use = "deny"
|
||||
single_match_else = "deny"
|
||||
string_add_assign = "deny"
|
||||
transmute_ptr_to_ptr = "deny"
|
||||
unchecked_duration_subtraction = "deny"
|
||||
unnecessary_box_returns = "deny"
|
||||
unnecessary_wraps = "deny"
|
||||
branches_sharing_code = "deny"
|
||||
fallible_impl_from = "deny"
|
||||
missing_const_for_fn = "deny"
|
||||
significant_drop_in_scrutinee = "deny"
|
||||
significant_drop_tightening = "deny"
|
||||
try_err = "deny"
|
||||
lossy_float_literal = "deny"
|
||||
let_underscore_must_use = "deny"
|
||||
iter_over_hash_type = "deny"
|
||||
get_unwrap = "deny"
|
||||
error_impl_error = "deny"
|
||||
empty_structs_with_brackets = "deny"
|
||||
empty_enum_variants_with_brackets = "deny"
|
||||
empty_drop = "deny"
|
||||
clone_on_ref_ptr = "deny"
|
||||
upper_case_acronyms = "deny"
|
||||
allow_attributes = "deny"
|
||||
|
||||
# Hot
|
||||
# inline_always = "deny"
|
||||
# large_futures = "deny"
|
||||
# large_stack_arrays = "deny"
|
||||
# linkedlist = "deny"
|
||||
# missing_errors_doc = "deny"
|
||||
# missing_panics_doc = "deny"
|
||||
# should_panic_without_expect = "deny"
|
||||
# similar_names = "deny"
|
||||
# too_many_lines = "deny"
|
||||
# unreadable_literal = "deny"
|
||||
# wildcard_imports = "deny"
|
||||
# allow_attributes_without_reason = "deny"
|
||||
# missing_assert_message = "deny"
|
||||
# missing_docs_in_private_items = "deny"
|
||||
undocumented_unsafe_blocks = "deny"
|
||||
# multiple_unsafe_ops_per_block = "deny"
|
||||
# single_char_lifetime_names = "deny"
|
||||
# wildcard_enum_match_arm = "deny"
|
||||
|
||||
[workspace.lints.rust]
|
||||
# Cold
|
||||
future_incompatible = { level = "deny", priority = -1 }
|
||||
nonstandard_style = { level = "deny", priority = -1 }
|
||||
absolute_paths_not_starting_with_crate = "deny"
|
||||
explicit_outlives_requirements = "deny"
|
||||
keyword_idents_2018 = "deny"
|
||||
keyword_idents_2024 = "deny"
|
||||
missing_abi = "deny"
|
||||
non_ascii_idents = "deny"
|
||||
non_local_definitions = "deny"
|
||||
redundant_lifetimes = "deny"
|
||||
single_use_lifetimes = "deny"
|
||||
trivial_casts = "deny"
|
||||
trivial_numeric_casts = "deny"
|
||||
unsafe_op_in_unsafe_fn = "deny"
|
||||
unused_crate_dependencies = "deny"
|
||||
unused_import_braces = "deny"
|
||||
unused_lifetimes = "deny"
|
||||
unused_macro_rules = "deny"
|
||||
ambiguous_glob_imports = "deny"
|
||||
unused_unsafe = "deny"
|
||||
|
||||
# Warm
|
||||
let_underscore = { level = "deny", priority = -1 }
|
||||
unreachable_pub = "deny"
|
||||
unused_qualifications = "deny"
|
||||
variant_size_differences = "deny"
|
||||
non_camel_case_types = "deny"
|
||||
|
||||
# Hot
|
||||
# unused_results = "deny"
|
||||
# non_exhaustive_omitted_patterns = "deny"
|
||||
# missing_docs = "deny"
|
||||
# missing_copy_implementations = "deny"
|
||||
|
|
|
@ -49,7 +49,7 @@ Cuprate maintains various documentation books:
|
|||
| [Monero's protocol book](https://monero-book.cuprate.org) | Documents the Monero protocol |
|
||||
| [Cuprate's user book](https://user.cuprate.org) | Practical user-guide for using `cuprated` |
|
||||
|
||||
For crate (library) documentation, see the `Documentation` section in [`CONTRIBUTING.md`](CONTRIBUTING.md).
|
||||
For crate (library) documentation, see: https://doc.cuprate.org. This site holds documentation for Cuprate's crates and all dependencies. All Cuprate crates start with `cuprate_`, for example: [`cuprate_database`](https://doc.cuprate.org/cuprate_database).
|
||||
|
||||
## Contributing
|
||||
|
||||
|
|
84
binaries/cuprated/Cargo.toml
Normal file
84
binaries/cuprated/Cargo.toml
Normal file
|
@ -0,0 +1,84 @@
|
|||
[package]
|
||||
name = "cuprated"
|
||||
version = "0.0.1"
|
||||
edition = "2021"
|
||||
description = "The Cuprate Monero Rust node."
|
||||
license = "AGPL-3.0-only"
|
||||
authors = ["Boog900", "hinto-janai", "SyntheticBird45"]
|
||||
repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated"
|
||||
|
||||
[dependencies]
|
||||
# TODO: after v1.0.0, remove unneeded dependencies.
|
||||
cuprate-consensus = { workspace = true }
|
||||
cuprate-fast-sync = { workspace = true }
|
||||
cuprate-consensus-context = { workspace = true }
|
||||
cuprate-consensus-rules = { workspace = true }
|
||||
cuprate-cryptonight = { workspace = true }
|
||||
cuprate-helper = { workspace = true }
|
||||
cuprate-epee-encoding = { workspace = true }
|
||||
cuprate-fixed-bytes = { workspace = true }
|
||||
cuprate-levin = { workspace = true }
|
||||
cuprate-wire = { workspace = true }
|
||||
cuprate-p2p = { workspace = true }
|
||||
cuprate-p2p-core = { workspace = true }
|
||||
cuprate-dandelion-tower = { workspace = true }
|
||||
cuprate-async-buffer = { workspace = true }
|
||||
cuprate-address-book = { workspace = true }
|
||||
cuprate-blockchain = { workspace = true, features = ["service"] }
|
||||
cuprate-database-service = { workspace = true }
|
||||
cuprate-txpool = { workspace = true }
|
||||
cuprate-database = { workspace = true }
|
||||
cuprate-pruning = { workspace = true }
|
||||
cuprate-test-utils = { workspace = true }
|
||||
cuprate-types = { workspace = true }
|
||||
cuprate-json-rpc = { workspace = true }
|
||||
cuprate-rpc-interface = { workspace = true }
|
||||
cuprate-rpc-types = { workspace = true }
|
||||
|
||||
# TODO: after v1.0.0, remove unneeded dependencies.
|
||||
anyhow = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
bitflags = { workspace = true }
|
||||
borsh = { workspace = true }
|
||||
bytemuck = { workspace = true }
|
||||
bytes = { workspace = true }
|
||||
cfg-if = { workspace = true }
|
||||
clap = { workspace = true, features = ["cargo"] }
|
||||
chrono = { workspace = true }
|
||||
crypto-bigint = { workspace = true }
|
||||
crossbeam = { workspace = true }
|
||||
curve25519-dalek = { workspace = true }
|
||||
const_format = { workspace = true }
|
||||
dashmap = { workspace = true }
|
||||
dirs = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
hex-literal = { workspace = true }
|
||||
indexmap = { workspace = true }
|
||||
monero-serai = { workspace = true }
|
||||
paste = { workspace = true }
|
||||
pin-project = { workspace = true }
|
||||
randomx-rs = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
rand_distr = { workspace = true }
|
||||
rayon = { workspace = true }
|
||||
serde_bytes = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
thread_local = { workspace = true }
|
||||
tokio-util = { workspace = true }
|
||||
tokio-stream = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
tower = { workspace = true }
|
||||
tracing-subscriber = { workspace = true, features = ["std", "fmt", "default"] }
|
||||
tracing = { workspace = true }
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[profile.dev]
|
||||
panic = "abort"
|
||||
|
||||
[profile.release]
|
||||
panic = "abort"
|
2
binaries/cuprated/README.md
Normal file
2
binaries/cuprated/README.md
Normal file
|
@ -0,0 +1,2 @@
|
|||
# `cuprated`
|
||||
TODO
|
101
binaries/cuprated/src/blockchain.rs
Normal file
101
binaries/cuprated/src/blockchain.rs
Normal file
|
@ -0,0 +1,101 @@
|
|||
//! Blockchain
|
||||
//!
|
||||
//! Contains the blockchain manager, syncer and an interface to mutate the blockchain.
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::FutureExt;
|
||||
use tokio::sync::{mpsc, Notify};
|
||||
use tower::{BoxError, Service, ServiceExt};
|
||||
|
||||
use cuprate_blockchain::service::{BlockchainReadHandle, BlockchainWriteHandle};
|
||||
use cuprate_consensus::{generate_genesis_block, BlockChainContextService, ContextConfig};
|
||||
use cuprate_cryptonight::cryptonight_hash_v0;
|
||||
use cuprate_p2p::{block_downloader::BlockDownloaderConfig, NetworkInterface};
|
||||
use cuprate_p2p_core::{ClearNet, Network};
|
||||
use cuprate_types::{
|
||||
blockchain::{BlockchainReadRequest, BlockchainWriteRequest},
|
||||
VerifiedBlockInformation,
|
||||
};
|
||||
|
||||
use crate::constants::PANIC_CRITICAL_SERVICE_ERROR;
|
||||
|
||||
mod chain_service;
|
||||
pub mod interface;
|
||||
mod manager;
|
||||
mod syncer;
|
||||
mod types;
|
||||
|
||||
use types::{
|
||||
ConcreteBlockVerifierService, ConcreteTxVerifierService, ConsensusBlockchainReadHandle,
|
||||
};
|
||||
|
||||
/// Checks if the genesis block is in the blockchain and adds it if not.
|
||||
pub async fn check_add_genesis(
|
||||
blockchain_read_handle: &mut BlockchainReadHandle,
|
||||
blockchain_write_handle: &mut BlockchainWriteHandle,
|
||||
network: Network,
|
||||
) {
|
||||
// Try to get the chain height, will fail if the genesis block is not in the DB.
|
||||
if blockchain_read_handle
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(BlockchainReadRequest::ChainHeight)
|
||||
.await
|
||||
.is_ok()
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
let genesis = generate_genesis_block(network);
|
||||
|
||||
assert_eq!(genesis.miner_transaction.prefix().outputs.len(), 1);
|
||||
assert!(genesis.transactions.is_empty());
|
||||
|
||||
blockchain_write_handle
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(BlockchainWriteRequest::WriteBlock(
|
||||
VerifiedBlockInformation {
|
||||
block_blob: genesis.serialize(),
|
||||
txs: vec![],
|
||||
block_hash: genesis.hash(),
|
||||
pow_hash: cryptonight_hash_v0(&genesis.serialize_pow_hash()),
|
||||
height: 0,
|
||||
generated_coins: genesis.miner_transaction.prefix().outputs[0]
|
||||
.amount
|
||||
.unwrap(),
|
||||
weight: genesis.miner_transaction.weight(),
|
||||
long_term_weight: genesis.miner_transaction.weight(),
|
||||
cumulative_difficulty: 1,
|
||||
block: genesis,
|
||||
},
|
||||
))
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR);
|
||||
}
|
||||
|
||||
/// Initializes the consensus services.
|
||||
pub async fn init_consensus(
|
||||
blockchain_read_handle: BlockchainReadHandle,
|
||||
context_config: ContextConfig,
|
||||
) -> Result<
|
||||
(
|
||||
ConcreteBlockVerifierService,
|
||||
ConcreteTxVerifierService,
|
||||
BlockChainContextService,
|
||||
),
|
||||
BoxError,
|
||||
> {
|
||||
let read_handle = ConsensusBlockchainReadHandle::new(blockchain_read_handle, BoxError::from);
|
||||
|
||||
let ctx_service =
|
||||
cuprate_consensus::initialize_blockchain_context(context_config, read_handle.clone())
|
||||
.await?;
|
||||
|
||||
let (block_verifier_svc, tx_verifier_svc) =
|
||||
cuprate_consensus::initialize_verifier(read_handle, ctx_service.clone());
|
||||
|
||||
Ok((block_verifier_svc, tx_verifier_svc, ctx_service))
|
||||
}
|
72
binaries/cuprated/src/blockchain/chain_service.rs
Normal file
72
binaries/cuprated/src/blockchain/chain_service.rs
Normal file
|
@ -0,0 +1,72 @@
|
|||
use std::task::{Context, Poll};
|
||||
|
||||
use futures::{future::BoxFuture, FutureExt, TryFutureExt};
|
||||
use tower::Service;
|
||||
|
||||
use cuprate_blockchain::service::BlockchainReadHandle;
|
||||
use cuprate_p2p::block_downloader::{ChainSvcRequest, ChainSvcResponse};
|
||||
use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse};
|
||||
|
||||
/// That service that allows retrieving the chain state to give to the P2P crates, so we can figure out
|
||||
/// what blocks we need.
|
||||
///
|
||||
/// This has a more minimal interface than [`BlockchainReadRequest`] to make using the p2p crates easier.
|
||||
#[derive(Clone)]
|
||||
pub struct ChainService(pub BlockchainReadHandle);
|
||||
|
||||
impl Service<ChainSvcRequest> for ChainService {
|
||||
type Response = ChainSvcResponse;
|
||||
type Error = tower::BoxError;
|
||||
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
|
||||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.0.poll_ready(cx).map_err(Into::into)
|
||||
}
|
||||
|
||||
fn call(&mut self, req: ChainSvcRequest) -> Self::Future {
|
||||
let map_res = |res: BlockchainResponse| match res {
|
||||
BlockchainResponse::CompactChainHistory {
|
||||
block_ids,
|
||||
cumulative_difficulty,
|
||||
} => ChainSvcResponse::CompactHistory {
|
||||
block_ids,
|
||||
cumulative_difficulty,
|
||||
},
|
||||
BlockchainResponse::FindFirstUnknown(res) => ChainSvcResponse::FindFirstUnknown(res),
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
match req {
|
||||
ChainSvcRequest::CompactHistory => self
|
||||
.0
|
||||
.call(BlockchainReadRequest::CompactChainHistory)
|
||||
.map_ok(map_res)
|
||||
.map_err(Into::into)
|
||||
.boxed(),
|
||||
ChainSvcRequest::FindFirstUnknown(req) => self
|
||||
.0
|
||||
.call(BlockchainReadRequest::FindFirstUnknown(req))
|
||||
.map_ok(map_res)
|
||||
.map_err(Into::into)
|
||||
.boxed(),
|
||||
ChainSvcRequest::CumulativeDifficulty => self
|
||||
.0
|
||||
.call(BlockchainReadRequest::CompactChainHistory)
|
||||
.map_ok(|res| {
|
||||
// TODO create a custom request instead of hijacking this one.
|
||||
// TODO: use the context cache.
|
||||
let BlockchainResponse::CompactChainHistory {
|
||||
cumulative_difficulty,
|
||||
..
|
||||
} = res
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
ChainSvcResponse::CumulativeDifficulty(cumulative_difficulty)
|
||||
})
|
||||
.map_err(Into::into)
|
||||
.boxed(),
|
||||
}
|
||||
}
|
||||
}
|
161
binaries/cuprated/src/blockchain/interface.rs
Normal file
161
binaries/cuprated/src/blockchain/interface.rs
Normal file
|
@ -0,0 +1,161 @@
|
|||
//! The blockchain manager interface.
|
||||
//!
|
||||
//! This module contains all the functions to mutate the blockchain's state in any way, through the
|
||||
//! blockchain manager.
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::{LazyLock, Mutex, OnceLock},
|
||||
};
|
||||
|
||||
use monero_serai::{block::Block, transaction::Transaction};
|
||||
use rayon::prelude::*;
|
||||
use tokio::sync::{mpsc, oneshot};
|
||||
use tower::{Service, ServiceExt};
|
||||
|
||||
use cuprate_blockchain::service::BlockchainReadHandle;
|
||||
use cuprate_consensus::transactions::new_tx_verification_data;
|
||||
use cuprate_helper::cast::usize_to_u64;
|
||||
use cuprate_types::{
|
||||
blockchain::{BlockchainReadRequest, BlockchainResponse},
|
||||
Chain,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
blockchain::manager::{BlockchainManagerCommand, IncomingBlockOk},
|
||||
constants::PANIC_CRITICAL_SERVICE_ERROR,
|
||||
};
|
||||
|
||||
/// The channel used to send [`BlockchainManagerCommand`]s to the blockchain manager.
|
||||
///
|
||||
/// This channel is initialized in [`init_blockchain_manager`](super::manager::init_blockchain_manager), the functions
|
||||
/// in this file document what happens if this is not initialized when they are called.
|
||||
pub(super) static COMMAND_TX: OnceLock<mpsc::Sender<BlockchainManagerCommand>> = OnceLock::new();
|
||||
|
||||
/// An error that can be returned from [`handle_incoming_block`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum IncomingBlockError {
|
||||
/// Some transactions in the block were unknown.
|
||||
///
|
||||
/// The inner values are the block hash and the indexes of the missing txs in the block.
|
||||
#[error("Unknown transactions in block.")]
|
||||
UnknownTransactions([u8; 32], Vec<u64>),
|
||||
/// We are missing the block's parent.
|
||||
#[error("The block has an unknown parent.")]
|
||||
Orphan,
|
||||
/// The block was invalid.
|
||||
#[error(transparent)]
|
||||
InvalidBlock(anyhow::Error),
|
||||
}
|
||||
|
||||
/// Try to add a new block to the blockchain.
|
||||
///
|
||||
/// On success returns [`IncomingBlockOk`].
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// This function will return an error if:
|
||||
/// - the block was invalid
|
||||
/// - we are missing transactions
|
||||
/// - the block's parent is unknown
|
||||
pub async fn handle_incoming_block(
|
||||
block: Block,
|
||||
given_txs: Vec<Transaction>,
|
||||
blockchain_read_handle: &mut BlockchainReadHandle,
|
||||
) -> Result<IncomingBlockOk, IncomingBlockError> {
|
||||
/// A [`HashSet`] of block hashes that the blockchain manager is currently handling.
|
||||
///
|
||||
/// This lock prevents sending the same block to the blockchain manager from multiple connections
|
||||
/// before one of them actually gets added to the chain, allowing peers to do other things.
|
||||
///
|
||||
/// This is used over something like a dashmap as we expect a lot of collisions in a short amount of
|
||||
/// time for new blocks, so we would lose the benefit of sharded locks. A dashmap is made up of `RwLocks`
|
||||
/// which are also more expensive than `Mutex`s.
|
||||
static BLOCKS_BEING_HANDLED: LazyLock<Mutex<HashSet<[u8; 32]>>> =
|
||||
LazyLock::new(|| Mutex::new(HashSet::new()));
|
||||
// FIXME: we should look in the tx-pool for txs when that is ready.
|
||||
|
||||
if !block_exists(block.header.previous, blockchain_read_handle)
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
{
|
||||
return Err(IncomingBlockError::Orphan);
|
||||
}
|
||||
|
||||
let block_hash = block.hash();
|
||||
|
||||
if block_exists(block_hash, blockchain_read_handle)
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
{
|
||||
return Ok(IncomingBlockOk::AlreadyHave);
|
||||
}
|
||||
|
||||
// TODO: remove this when we have a working tx-pool.
|
||||
if given_txs.len() != block.transactions.len() {
|
||||
return Err(IncomingBlockError::UnknownTransactions(
|
||||
block_hash,
|
||||
(0..usize_to_u64(block.transactions.len())).collect(),
|
||||
));
|
||||
}
|
||||
|
||||
// TODO: check we actually got given the right txs.
|
||||
let prepped_txs = given_txs
|
||||
.into_par_iter()
|
||||
.map(|tx| {
|
||||
let tx = new_tx_verification_data(tx)?;
|
||||
Ok((tx.tx_hash, tx))
|
||||
})
|
||||
.collect::<Result<_, anyhow::Error>>()
|
||||
.map_err(IncomingBlockError::InvalidBlock)?;
|
||||
|
||||
let Some(incoming_block_tx) = COMMAND_TX.get() else {
|
||||
// We could still be starting up the blockchain manager.
|
||||
return Ok(IncomingBlockOk::NotReady);
|
||||
};
|
||||
|
||||
// Add the blocks hash to the blocks being handled.
|
||||
if !BLOCKS_BEING_HANDLED.lock().unwrap().insert(block_hash) {
|
||||
// If another place is already adding this block then we can stop.
|
||||
return Ok(IncomingBlockOk::AlreadyHave);
|
||||
}
|
||||
|
||||
// From this point on we MUST not early return without removing the block hash from `BLOCKS_BEING_HANDLED`.
|
||||
|
||||
let (response_tx, response_rx) = oneshot::channel();
|
||||
|
||||
incoming_block_tx
|
||||
.send(BlockchainManagerCommand::AddBlock {
|
||||
block,
|
||||
prepped_txs,
|
||||
response_tx,
|
||||
})
|
||||
.await
|
||||
.expect("TODO: don't actually panic here, an err means we are shutting down");
|
||||
|
||||
let res = response_rx
|
||||
.await
|
||||
.expect("The blockchain manager will always respond")
|
||||
.map_err(IncomingBlockError::InvalidBlock);
|
||||
|
||||
// Remove the block hash from the blocks being handled.
|
||||
BLOCKS_BEING_HANDLED.lock().unwrap().remove(&block_hash);
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
/// Check if we have a block with the given hash.
|
||||
async fn block_exists(
|
||||
block_hash: [u8; 32],
|
||||
blockchain_read_handle: &mut BlockchainReadHandle,
|
||||
) -> Result<bool, anyhow::Error> {
|
||||
let BlockchainResponse::FindBlock(chain) = blockchain_read_handle
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::FindBlock(block_hash))
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(chain.is_some())
|
||||
}
|
144
binaries/cuprated/src/blockchain/manager.rs
Normal file
144
binaries/cuprated/src/blockchain/manager.rs
Normal file
|
@ -0,0 +1,144 @@
|
|||
use std::{collections::HashMap, sync::Arc};
|
||||
|
||||
use futures::StreamExt;
|
||||
use monero_serai::block::Block;
|
||||
use tokio::sync::{mpsc, oneshot, Notify};
|
||||
use tower::{Service, ServiceExt};
|
||||
use tracing::error;
|
||||
|
||||
use cuprate_blockchain::service::{BlockchainReadHandle, BlockchainWriteHandle};
|
||||
use cuprate_consensus::{
|
||||
BlockChainContextRequest, BlockChainContextResponse, BlockChainContextService,
|
||||
BlockVerifierService, ExtendedConsensusError, TxVerifierService, VerifyBlockRequest,
|
||||
VerifyBlockResponse, VerifyTxRequest, VerifyTxResponse,
|
||||
};
|
||||
use cuprate_consensus_context::RawBlockChainContext;
|
||||
use cuprate_p2p::{
|
||||
block_downloader::{BlockBatch, BlockDownloaderConfig},
|
||||
BroadcastSvc, NetworkInterface,
|
||||
};
|
||||
use cuprate_p2p_core::ClearNet;
|
||||
use cuprate_types::{
|
||||
blockchain::{BlockchainReadRequest, BlockchainResponse},
|
||||
Chain, TransactionVerificationData,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
blockchain::{
|
||||
chain_service::ChainService,
|
||||
interface::COMMAND_TX,
|
||||
syncer,
|
||||
types::{ConcreteBlockVerifierService, ConsensusBlockchainReadHandle},
|
||||
},
|
||||
constants::PANIC_CRITICAL_SERVICE_ERROR,
|
||||
};
|
||||
|
||||
mod commands;
|
||||
mod handler;
|
||||
|
||||
pub use commands::{BlockchainManagerCommand, IncomingBlockOk};
|
||||
|
||||
/// Initialize the blockchain manager.
|
||||
///
|
||||
/// This function sets up the [`BlockchainManager`] and the [`syncer`] so that the functions in [`interface`](super::interface)
|
||||
/// can be called.
|
||||
pub async fn init_blockchain_manager(
|
||||
clearnet_interface: NetworkInterface<ClearNet>,
|
||||
blockchain_write_handle: BlockchainWriteHandle,
|
||||
blockchain_read_handle: BlockchainReadHandle,
|
||||
mut blockchain_context_service: BlockChainContextService,
|
||||
block_verifier_service: ConcreteBlockVerifierService,
|
||||
block_downloader_config: BlockDownloaderConfig,
|
||||
) {
|
||||
// TODO: find good values for these size limits
|
||||
let (batch_tx, batch_rx) = mpsc::channel(1);
|
||||
let stop_current_block_downloader = Arc::new(Notify::new());
|
||||
let (command_tx, command_rx) = mpsc::channel(3);
|
||||
|
||||
COMMAND_TX.set(command_tx).unwrap();
|
||||
|
||||
tokio::spawn(syncer::syncer(
|
||||
blockchain_context_service.clone(),
|
||||
ChainService(blockchain_read_handle.clone()),
|
||||
clearnet_interface.clone(),
|
||||
batch_tx,
|
||||
Arc::clone(&stop_current_block_downloader),
|
||||
block_downloader_config,
|
||||
));
|
||||
|
||||
let BlockChainContextResponse::Context(blockchain_context) = blockchain_context_service
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(BlockChainContextRequest::Context)
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
let manager = BlockchainManager {
|
||||
blockchain_write_handle,
|
||||
blockchain_read_handle,
|
||||
blockchain_context_service,
|
||||
cached_blockchain_context: blockchain_context.unchecked_blockchain_context().clone(),
|
||||
block_verifier_service,
|
||||
stop_current_block_downloader,
|
||||
broadcast_svc: clearnet_interface.broadcast_svc(),
|
||||
};
|
||||
|
||||
tokio::spawn(manager.run(batch_rx, command_rx));
|
||||
}
|
||||
|
||||
/// The blockchain manager.
|
||||
///
|
||||
/// This handles all mutation of the blockchain, anything that changes the state of the blockchain must
|
||||
/// go through this.
|
||||
///
|
||||
/// Other parts of Cuprate can interface with this by using the functions in [`interface`](super::interface).
|
||||
pub struct BlockchainManager {
|
||||
/// The [`BlockchainWriteHandle`], this is the _only_ part of Cuprate where a [`BlockchainWriteHandle`]
|
||||
/// is held.
|
||||
blockchain_write_handle: BlockchainWriteHandle,
|
||||
/// A [`BlockchainReadHandle`].
|
||||
blockchain_read_handle: BlockchainReadHandle,
|
||||
// TODO: Improve the API of the cache service.
|
||||
// TODO: rename the cache service -> `BlockchainContextService`.
|
||||
/// The blockchain context cache, this caches the current state of the blockchain to quickly calculate/retrieve
|
||||
/// values without needing to go to a [`BlockchainReadHandle`].
|
||||
blockchain_context_service: BlockChainContextService,
|
||||
/// A cached context representing the current state.
|
||||
cached_blockchain_context: RawBlockChainContext,
|
||||
/// The block verifier service, to verify incoming blocks.
|
||||
block_verifier_service: ConcreteBlockVerifierService,
|
||||
/// A [`Notify`] to tell the [syncer](syncer::syncer) that we want to cancel this current download
|
||||
/// attempt.
|
||||
stop_current_block_downloader: Arc<Notify>,
|
||||
/// The broadcast service, to broadcast new blocks.
|
||||
broadcast_svc: BroadcastSvc<ClearNet>,
|
||||
}
|
||||
|
||||
impl BlockchainManager {
|
||||
/// The [`BlockchainManager`] task.
|
||||
pub async fn run(
|
||||
mut self,
|
||||
mut block_batch_rx: mpsc::Receiver<BlockBatch>,
|
||||
mut command_rx: mpsc::Receiver<BlockchainManagerCommand>,
|
||||
) {
|
||||
loop {
|
||||
tokio::select! {
|
||||
Some(batch) = block_batch_rx.recv() => {
|
||||
self.handle_incoming_block_batch(
|
||||
batch,
|
||||
).await;
|
||||
}
|
||||
Some(incoming_command) = command_rx.recv() => {
|
||||
self.handle_command(incoming_command).await;
|
||||
}
|
||||
else => {
|
||||
todo!("TODO: exit the BC manager")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
32
binaries/cuprated/src/blockchain/manager/commands.rs
Normal file
32
binaries/cuprated/src/blockchain/manager/commands.rs
Normal file
|
@ -0,0 +1,32 @@
|
|||
//! This module contains the commands for the blockchain manager.
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monero_serai::block::Block;
|
||||
use tokio::sync::oneshot;
|
||||
|
||||
use cuprate_types::TransactionVerificationData;
|
||||
|
||||
/// The blockchain manager commands.
|
||||
pub enum BlockchainManagerCommand {
|
||||
/// Attempt to add a new block to the blockchain.
|
||||
AddBlock {
|
||||
/// The [`Block`] to add.
|
||||
block: Block,
|
||||
/// All the transactions defined in [`Block::transactions`].
|
||||
prepped_txs: HashMap<[u8; 32], TransactionVerificationData>,
|
||||
/// The channel to send the response down.
|
||||
response_tx: oneshot::Sender<Result<IncomingBlockOk, anyhow::Error>>,
|
||||
},
|
||||
}
|
||||
|
||||
/// The [`Ok`] response for an incoming block.
|
||||
pub enum IncomingBlockOk {
|
||||
/// The block was added to the main-chain.
|
||||
AddedToMainChain,
|
||||
/// The blockchain manager is not ready yet.
|
||||
NotReady,
|
||||
/// The block was added to an alt-chain.
|
||||
AddedToAltChain,
|
||||
/// We already have the block.
|
||||
AlreadyHave,
|
||||
}
|
484
binaries/cuprated/src/blockchain/manager/handler.rs
Normal file
484
binaries/cuprated/src/blockchain/manager/handler.rs
Normal file
|
@ -0,0 +1,484 @@
|
|||
//! The blockchain manager handler functions.
|
||||
use bytes::Bytes;
|
||||
use futures::{TryFutureExt, TryStreamExt};
|
||||
use monero_serai::{block::Block, transaction::Transaction};
|
||||
use rayon::prelude::*;
|
||||
use std::ops::ControlFlow;
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
use tower::{Service, ServiceExt};
|
||||
use tracing::info;
|
||||
|
||||
use cuprate_blockchain::service::{BlockchainReadHandle, BlockchainWriteHandle};
|
||||
use cuprate_consensus::{
|
||||
block::PreparedBlock, transactions::new_tx_verification_data, BlockChainContextRequest,
|
||||
BlockChainContextResponse, BlockVerifierService, ExtendedConsensusError, VerifyBlockRequest,
|
||||
VerifyBlockResponse, VerifyTxRequest, VerifyTxResponse,
|
||||
};
|
||||
use cuprate_consensus_context::NewBlockData;
|
||||
use cuprate_helper::cast::usize_to_u64;
|
||||
use cuprate_p2p::{block_downloader::BlockBatch, constants::LONG_BAN, BroadcastRequest};
|
||||
use cuprate_types::{
|
||||
blockchain::{BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest},
|
||||
AltBlockInformation, HardFork, TransactionVerificationData, VerifiedBlockInformation,
|
||||
};
|
||||
|
||||
use crate::blockchain::manager::commands::IncomingBlockOk;
|
||||
use crate::{
|
||||
blockchain::{
|
||||
manager::commands::BlockchainManagerCommand, types::ConsensusBlockchainReadHandle,
|
||||
},
|
||||
constants::PANIC_CRITICAL_SERVICE_ERROR,
|
||||
signals::REORG_LOCK,
|
||||
};
|
||||
|
||||
impl super::BlockchainManager {
|
||||
/// Handle an incoming command from another part of Cuprate.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if any internal service returns an unexpected error that we cannot
|
||||
/// recover from.
|
||||
pub async fn handle_command(&mut self, command: BlockchainManagerCommand) {
|
||||
match command {
|
||||
BlockchainManagerCommand::AddBlock {
|
||||
block,
|
||||
prepped_txs,
|
||||
response_tx,
|
||||
} => {
|
||||
let res = self.handle_incoming_block(block, prepped_txs).await;
|
||||
|
||||
drop(response_tx.send(res));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Broadcast a valid block to the network.
|
||||
async fn broadcast_block(&mut self, block_bytes: Bytes, blockchain_height: usize) {
|
||||
self.broadcast_svc
|
||||
.ready()
|
||||
.await
|
||||
.expect("Broadcast service is Infallible.")
|
||||
.call(BroadcastRequest::Block {
|
||||
block_bytes,
|
||||
current_blockchain_height: usize_to_u64(blockchain_height),
|
||||
})
|
||||
.await
|
||||
.expect("Broadcast service is Infallible.");
|
||||
}
|
||||
|
||||
/// Handle an incoming [`Block`].
|
||||
///
|
||||
/// This function will route to [`Self::handle_incoming_alt_block`] if the block does not follow
|
||||
/// the top of the main chain.
|
||||
///
|
||||
/// Otherwise, this function will validate and add the block to the main chain.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if any internal service returns an unexpected error that we cannot
|
||||
/// recover from.
|
||||
pub async fn handle_incoming_block(
|
||||
&mut self,
|
||||
block: Block,
|
||||
prepared_txs: HashMap<[u8; 32], TransactionVerificationData>,
|
||||
) -> Result<IncomingBlockOk, anyhow::Error> {
|
||||
if block.header.previous != self.cached_blockchain_context.top_hash {
|
||||
self.handle_incoming_alt_block(block, prepared_txs).await?;
|
||||
return Ok(IncomingBlockOk::AddedToAltChain);
|
||||
}
|
||||
|
||||
let VerifyBlockResponse::MainChain(verified_block) = self
|
||||
.block_verifier_service
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(VerifyBlockRequest::MainChain {
|
||||
block,
|
||||
prepared_txs,
|
||||
})
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
let block_blob = Bytes::copy_from_slice(&verified_block.block_blob);
|
||||
self.add_valid_block_to_main_chain(verified_block).await;
|
||||
|
||||
self.broadcast_block(block_blob, self.cached_blockchain_context.chain_height)
|
||||
.await;
|
||||
|
||||
Ok(IncomingBlockOk::AddedToMainChain)
|
||||
}
|
||||
|
||||
/// Handle an incoming [`BlockBatch`].
|
||||
///
|
||||
/// This function will route to [`Self::handle_incoming_block_batch_main_chain`] or [`Self::handle_incoming_block_batch_alt_chain`]
|
||||
/// depending on if the first block in the batch follows from the top of our chain.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if the batch is empty or if any internal service returns an unexpected
|
||||
/// error that we cannot recover from or if the incoming batch contains no blocks.
|
||||
pub async fn handle_incoming_block_batch(&mut self, batch: BlockBatch) {
|
||||
let (first_block, _) = batch
|
||||
.blocks
|
||||
.first()
|
||||
.expect("Block batch should not be empty");
|
||||
|
||||
if first_block.header.previous == self.cached_blockchain_context.top_hash {
|
||||
self.handle_incoming_block_batch_main_chain(batch).await;
|
||||
} else {
|
||||
self.handle_incoming_block_batch_alt_chain(batch).await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Handles an incoming [`BlockBatch`] that follows the main chain.
|
||||
///
|
||||
/// This function will handle validating the blocks in the batch and adding them to the blockchain
|
||||
/// database and context cache.
|
||||
///
|
||||
/// This function will also handle banning the peer and canceling the block downloader if the
|
||||
/// block is invalid.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if any internal service returns an unexpected error that we cannot
|
||||
/// recover from or if the incoming batch contains no blocks.
|
||||
async fn handle_incoming_block_batch_main_chain(&mut self, batch: BlockBatch) {
|
||||
info!(
|
||||
"Handling batch to main chain height: {}",
|
||||
batch.blocks.first().unwrap().0.number().unwrap()
|
||||
);
|
||||
|
||||
let batch_prep_res = self
|
||||
.block_verifier_service
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(VerifyBlockRequest::MainChainBatchPrepareBlocks {
|
||||
blocks: batch.blocks,
|
||||
})
|
||||
.await;
|
||||
|
||||
let prepped_blocks = match batch_prep_res {
|
||||
Ok(VerifyBlockResponse::MainChainBatchPrepped(prepped_blocks)) => prepped_blocks,
|
||||
Err(_) => {
|
||||
batch.peer_handle.ban_peer(LONG_BAN);
|
||||
self.stop_current_block_downloader.notify_one();
|
||||
return;
|
||||
}
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
for (block, txs) in prepped_blocks {
|
||||
let verify_res = self
|
||||
.block_verifier_service
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(VerifyBlockRequest::MainChainPrepped { block, txs })
|
||||
.await;
|
||||
|
||||
let verified_block = match verify_res {
|
||||
Ok(VerifyBlockResponse::MainChain(verified_block)) => verified_block,
|
||||
Err(_) => {
|
||||
batch.peer_handle.ban_peer(LONG_BAN);
|
||||
self.stop_current_block_downloader.notify_one();
|
||||
return;
|
||||
}
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
self.add_valid_block_to_main_chain(verified_block).await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Handles an incoming [`BlockBatch`] that does not follow the main-chain.
|
||||
///
|
||||
/// This function will handle validating the alt-blocks to add them to our cache and reorging the
|
||||
/// chain if the alt-chain has a higher cumulative difficulty.
|
||||
///
|
||||
/// This function will also handle banning the peer and canceling the block downloader if the
|
||||
/// alt block is invalid or if a reorg fails.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if any internal service returns an unexpected error that we cannot
|
||||
/// recover from.
|
||||
async fn handle_incoming_block_batch_alt_chain(&mut self, mut batch: BlockBatch) {
|
||||
// TODO: this needs testing (this whole section does but alt-blocks specifically).
|
||||
|
||||
let mut blocks = batch.blocks.into_iter();
|
||||
|
||||
while let Some((block, txs)) = blocks.next() {
|
||||
// async blocks work as try blocks.
|
||||
let res = async {
|
||||
let txs = txs
|
||||
.into_par_iter()
|
||||
.map(|tx| {
|
||||
let tx = new_tx_verification_data(tx)?;
|
||||
Ok((tx.tx_hash, tx))
|
||||
})
|
||||
.collect::<Result<_, anyhow::Error>>()?;
|
||||
|
||||
let reorged = self.handle_incoming_alt_block(block, txs).await?;
|
||||
|
||||
Ok::<_, anyhow::Error>(reorged)
|
||||
}
|
||||
.await;
|
||||
|
||||
match res {
|
||||
Err(e) => {
|
||||
batch.peer_handle.ban_peer(LONG_BAN);
|
||||
self.stop_current_block_downloader.notify_one();
|
||||
return;
|
||||
}
|
||||
Ok(AddAltBlock::Reorged) => {
|
||||
// Collect the remaining blocks and add them to the main chain instead.
|
||||
batch.blocks = blocks.collect();
|
||||
self.handle_incoming_block_batch_main_chain(batch).await;
|
||||
return;
|
||||
}
|
||||
// continue adding alt blocks.
|
||||
Ok(AddAltBlock::Cached) => (),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Handles an incoming alt [`Block`].
|
||||
///
|
||||
/// This function will do some pre-validation of the alt block, then if the cumulative difficulty
|
||||
/// of the alt chain is higher than the main chain it will attempt a reorg otherwise it will add
|
||||
/// the alt block to the alt block cache.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// This will return an [`Err`] if:
|
||||
/// - The alt block was invalid.
|
||||
/// - An attempt to reorg the chain failed.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if any internal service returns an unexpected error that we cannot
|
||||
/// recover from.
|
||||
async fn handle_incoming_alt_block(
|
||||
&mut self,
|
||||
block: Block,
|
||||
prepared_txs: HashMap<[u8; 32], TransactionVerificationData>,
|
||||
) -> Result<AddAltBlock, anyhow::Error> {
|
||||
let VerifyBlockResponse::AltChain(alt_block_info) = self
|
||||
.block_verifier_service
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(VerifyBlockRequest::AltChain {
|
||||
block,
|
||||
prepared_txs,
|
||||
})
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
// TODO: check in consensus crate if alt block with this hash already exists.
|
||||
|
||||
// If this alt chain
|
||||
if alt_block_info.cumulative_difficulty
|
||||
> self.cached_blockchain_context.cumulative_difficulty
|
||||
{
|
||||
self.try_do_reorg(alt_block_info).await?;
|
||||
return Ok(AddAltBlock::Reorged);
|
||||
}
|
||||
|
||||
self.blockchain_write_handle
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(BlockchainWriteRequest::WriteAltBlock(alt_block_info))
|
||||
.await?;
|
||||
|
||||
Ok(AddAltBlock::Cached)
|
||||
}
|
||||
|
||||
/// Attempt a re-org with the given top block of the alt-chain.
|
||||
///
|
||||
/// This function will take a write lock on [`REORG_LOCK`] and then set up the blockchain database
|
||||
/// and context cache to verify the alt-chain. It will then attempt to verify and add each block
|
||||
/// in the alt-chain to the main-chain. Releasing the lock on [`REORG_LOCK`] when finished.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// This function will return an [`Err`] if the re-org was unsuccessful, if this happens the chain
|
||||
/// will be returned back into its state it was at when then function was called.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if any internal service returns an unexpected error that we cannot
|
||||
/// recover from.
|
||||
async fn try_do_reorg(
|
||||
&mut self,
|
||||
top_alt_block: AltBlockInformation,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let _guard = REORG_LOCK.write().await;
|
||||
|
||||
let BlockchainResponse::AltBlocksInChain(mut alt_blocks) = self
|
||||
.blockchain_read_handle
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(BlockchainReadRequest::AltBlocksInChain(
|
||||
top_alt_block.chain_id,
|
||||
))
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
alt_blocks.push(top_alt_block);
|
||||
|
||||
let split_height = alt_blocks[0].height;
|
||||
let current_main_chain_height = self.cached_blockchain_context.chain_height;
|
||||
|
||||
let BlockchainResponse::PopBlocks(old_main_chain_id) = self
|
||||
.blockchain_write_handle
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(BlockchainWriteRequest::PopBlocks(
|
||||
current_main_chain_height - split_height + 1,
|
||||
))
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
self.blockchain_context_service
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(BlockChainContextRequest::PopBlocks {
|
||||
numb_blocks: current_main_chain_height - split_height + 1,
|
||||
})
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR);
|
||||
|
||||
let reorg_res = self.verify_add_alt_blocks_to_main_chain(alt_blocks).await;
|
||||
|
||||
match reorg_res {
|
||||
Ok(()) => Ok(()),
|
||||
Err(e) => {
|
||||
todo!("Reverse reorg")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Verify and add a list of [`AltBlockInformation`]s to the main-chain.
|
||||
///
|
||||
/// This function assumes the first [`AltBlockInformation`] is the next block in the blockchain
|
||||
/// for the blockchain database and the context cache, or in other words that the blockchain database
|
||||
/// and context cache have already had the top blocks popped to where the alt-chain meets the main-chain.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// This function will return an [`Err`] if the alt-blocks were invalid, in this case the re-org should
|
||||
/// be aborted and the chain should be returned to its previous state.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if any internal service returns an unexpected error that we cannot
|
||||
/// recover from.
|
||||
async fn verify_add_alt_blocks_to_main_chain(
|
||||
&mut self,
|
||||
alt_blocks: Vec<AltBlockInformation>,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
for mut alt_block in alt_blocks {
|
||||
let prepped_txs = alt_block
|
||||
.txs
|
||||
.drain(..)
|
||||
.map(|tx| Ok(Arc::new(tx.try_into()?)))
|
||||
.collect::<Result<_, anyhow::Error>>()?;
|
||||
|
||||
let prepped_block = PreparedBlock::new_alt_block(alt_block)?;
|
||||
|
||||
let VerifyBlockResponse::MainChain(verified_block) = self
|
||||
.block_verifier_service
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(VerifyBlockRequest::MainChainPrepped {
|
||||
block: prepped_block,
|
||||
txs: prepped_txs,
|
||||
})
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
self.add_valid_block_to_main_chain(verified_block).await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Adds a [`VerifiedBlockInformation`] to the main-chain.
|
||||
///
|
||||
/// This function will update the blockchain database and the context cache, it will also
|
||||
/// update [`Self::cached_blockchain_context`].
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if any internal service returns an unexpected error that we cannot
|
||||
/// recover from.
|
||||
pub async fn add_valid_block_to_main_chain(
|
||||
&mut self,
|
||||
verified_block: VerifiedBlockInformation,
|
||||
) {
|
||||
self.blockchain_context_service
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(BlockChainContextRequest::Update(NewBlockData {
|
||||
block_hash: verified_block.block_hash,
|
||||
height: verified_block.height,
|
||||
timestamp: verified_block.block.header.timestamp,
|
||||
weight: verified_block.weight,
|
||||
long_term_weight: verified_block.long_term_weight,
|
||||
generated_coins: verified_block.generated_coins,
|
||||
vote: HardFork::from_vote(verified_block.block.header.hardfork_signal),
|
||||
cumulative_difficulty: verified_block.cumulative_difficulty,
|
||||
}))
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR);
|
||||
|
||||
self.blockchain_write_handle
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(BlockchainWriteRequest::WriteBlock(verified_block))
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR);
|
||||
|
||||
let BlockChainContextResponse::Context(blockchain_context) = self
|
||||
.blockchain_context_service
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(BlockChainContextRequest::Context)
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
self.cached_blockchain_context = blockchain_context.unchecked_blockchain_context().clone();
|
||||
}
|
||||
}
|
||||
|
||||
/// Outcome of successfully processing an alt-block.
enum AddAltBlock {
    /// The alt-block was stored in the alt-block cache.
    Cached,
    /// The chain was re-orged onto the alt-chain containing this block.
    Reorged,
}
|
143
binaries/cuprated/src/blockchain/syncer.rs
Normal file
143
binaries/cuprated/src/blockchain/syncer.rs
Normal file
|
@ -0,0 +1,143 @@
|
|||
// FIXME: This whole module is not great and should be rewritten when the PeerSet is made.
|
||||
use std::{pin::pin, sync::Arc, time::Duration};
|
||||
|
||||
use futures::StreamExt;
|
||||
use tokio::time::interval;
|
||||
use tokio::{
|
||||
sync::{mpsc, Notify},
|
||||
time::sleep,
|
||||
};
|
||||
use tower::{Service, ServiceExt};
|
||||
use tracing::instrument;
|
||||
|
||||
use cuprate_consensus::{BlockChainContext, BlockChainContextRequest, BlockChainContextResponse};
|
||||
use cuprate_p2p::{
|
||||
block_downloader::{BlockBatch, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse},
|
||||
NetworkInterface,
|
||||
};
|
||||
use cuprate_p2p_core::ClearNet;
|
||||
|
||||
const CHECK_SYNC_FREQUENCY: Duration = Duration::from_secs(30);
|
||||
|
||||
/// An error returned from the [`syncer`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum SyncerError {
|
||||
#[error("Incoming block channel closed.")]
|
||||
IncomingBlockChannelClosed,
|
||||
#[error("One of our services returned an error: {0}.")]
|
||||
ServiceError(#[from] tower::BoxError),
|
||||
}
|
||||
|
||||
/// The syncer tasks that makes sure we are fully synchronised with our connected peers.
|
||||
#[expect(
|
||||
clippy::significant_drop_tightening,
|
||||
reason = "Client pool which will be removed"
|
||||
)]
|
||||
#[instrument(level = "debug", skip_all)]
|
||||
pub async fn syncer<C, CN>(
|
||||
mut context_svc: C,
|
||||
our_chain: CN,
|
||||
clearnet_interface: NetworkInterface<ClearNet>,
|
||||
incoming_block_batch_tx: mpsc::Sender<BlockBatch>,
|
||||
stop_current_block_downloader: Arc<Notify>,
|
||||
block_downloader_config: BlockDownloaderConfig,
|
||||
) -> Result<(), SyncerError>
|
||||
where
|
||||
C: Service<
|
||||
BlockChainContextRequest,
|
||||
Response = BlockChainContextResponse,
|
||||
Error = tower::BoxError,
|
||||
>,
|
||||
C::Future: Send + 'static,
|
||||
CN: Service<ChainSvcRequest, Response = ChainSvcResponse, Error = tower::BoxError>
|
||||
+ Clone
|
||||
+ Send
|
||||
+ 'static,
|
||||
CN::Future: Send + 'static,
|
||||
{
|
||||
tracing::info!("Starting blockchain syncer");
|
||||
|
||||
let mut check_sync_interval = interval(CHECK_SYNC_FREQUENCY);
|
||||
|
||||
let BlockChainContextResponse::Context(mut blockchain_ctx) = context_svc
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockChainContextRequest::Context)
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
let client_pool = clearnet_interface.client_pool();
|
||||
|
||||
tracing::debug!("Waiting for new sync info in top sync channel");
|
||||
|
||||
loop {
|
||||
check_sync_interval.tick().await;
|
||||
|
||||
tracing::trace!("Checking connected peers to see if we are behind",);
|
||||
|
||||
check_update_blockchain_context(&mut context_svc, &mut blockchain_ctx).await?;
|
||||
let raw_blockchain_context = blockchain_ctx.unchecked_blockchain_context();
|
||||
|
||||
if !client_pool.contains_client_with_more_cumulative_difficulty(
|
||||
raw_blockchain_context.cumulative_difficulty,
|
||||
) {
|
||||
continue;
|
||||
}
|
||||
|
||||
tracing::debug!(
|
||||
"We are behind peers claimed cumulative difficulty, starting block downloader"
|
||||
);
|
||||
let mut block_batch_stream =
|
||||
clearnet_interface.block_downloader(our_chain.clone(), block_downloader_config);
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
() = stop_current_block_downloader.notified() => {
|
||||
tracing::info!("Stopping block downloader");
|
||||
break;
|
||||
}
|
||||
batch = block_batch_stream.next() => {
|
||||
let Some(batch) = batch else {
|
||||
break;
|
||||
};
|
||||
|
||||
tracing::debug!("Got batch, len: {}", batch.blocks.len());
|
||||
if incoming_block_batch_tx.send(batch).await.is_err() {
|
||||
return Err(SyncerError::IncomingBlockChannelClosed);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks if we should update the given [`BlockChainContext`] and updates it if needed.
|
||||
async fn check_update_blockchain_context<C>(
|
||||
context_svc: C,
|
||||
old_context: &mut BlockChainContext,
|
||||
) -> Result<(), tower::BoxError>
|
||||
where
|
||||
C: Service<
|
||||
BlockChainContextRequest,
|
||||
Response = BlockChainContextResponse,
|
||||
Error = tower::BoxError,
|
||||
>,
|
||||
C::Future: Send + 'static,
|
||||
{
|
||||
if old_context.blockchain_context().is_ok() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let BlockChainContextResponse::Context(ctx) = context_svc
|
||||
.oneshot(BlockChainContextRequest::Context)
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
*old_context = ctx;
|
||||
|
||||
Ok(())
|
||||
}
|
24
binaries/cuprated/src/blockchain/types.rs
Normal file
24
binaries/cuprated/src/blockchain/types.rs
Normal file
|
@ -0,0 +1,24 @@
|
|||
use std::task::{Context, Poll};
|
||||
|
||||
use futures::future::BoxFuture;
|
||||
use futures::{FutureExt, TryFutureExt};
|
||||
use tower::{util::MapErr, Service};
|
||||
|
||||
use cuprate_blockchain::{cuprate_database::RuntimeError, service::BlockchainReadHandle};
|
||||
use cuprate_consensus::{BlockChainContextService, BlockVerifierService, TxVerifierService};
|
||||
use cuprate_p2p::block_downloader::{ChainSvcRequest, ChainSvcResponse};
|
||||
use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse};
|
||||
|
||||
/// The [`BlockVerifierService`] with all generic types defined.
|
||||
pub type ConcreteBlockVerifierService = BlockVerifierService<
|
||||
BlockChainContextService,
|
||||
ConcreteTxVerifierService,
|
||||
ConsensusBlockchainReadHandle,
|
||||
>;
|
||||
|
||||
/// The [`TxVerifierService`] with all generic types defined.
|
||||
pub type ConcreteTxVerifierService = TxVerifierService<ConsensusBlockchainReadHandle>;
|
||||
|
||||
/// The [`BlockchainReadHandle`] with the [`tower::Service::Error`] mapped to conform to what the consensus crate requires.
|
||||
pub type ConsensusBlockchainReadHandle =
|
||||
MapErr<BlockchainReadHandle, fn(RuntimeError) -> tower::BoxError>;
|
1
binaries/cuprated/src/config.rs
Normal file
1
binaries/cuprated/src/config.rs
Normal file
|
@ -0,0 +1 @@
|
|||
//! `cuprated` configuration.
|
38
binaries/cuprated/src/constants.rs
Normal file
38
binaries/cuprated/src/constants.rs
Normal file
|
@ -0,0 +1,38 @@
|
|||
//! General constants used throughout `cuprated`.
|
||||
|
||||
use const_format::formatcp;
|
||||
|
||||
/// `cuprated`'s semantic version (`MAJOR.MINOR.PATCH`) as string.
|
||||
pub const VERSION: &str = clap::crate_version!();
|
||||
|
||||
/// [`VERSION`] + the build type.
|
||||
///
|
||||
/// If a debug build, the suffix is `-debug`, else it is `-release`.
|
||||
pub const VERSION_BUILD: &str = if cfg!(debug_assertions) {
|
||||
formatcp!("{VERSION}-debug")
|
||||
} else {
|
||||
formatcp!("{VERSION}-release")
|
||||
};
|
||||
|
||||
/// The panic message used when cuprated encounters a critical service error.
|
||||
pub const PANIC_CRITICAL_SERVICE_ERROR: &str =
|
||||
"A service critical to Cuprate's function returned an unexpected error.";
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn version() {
|
||||
assert_eq!(VERSION, "0.0.1");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn version_build() {
|
||||
if cfg!(debug_assertions) {
|
||||
assert_eq!(VERSION_BUILD, "0.0.1-debug");
|
||||
} else {
|
||||
assert_eq!(VERSION_BUILD, "0.0.1-release");
|
||||
}
|
||||
}
|
||||
}
|
30
binaries/cuprated/src/main.rs
Normal file
30
binaries/cuprated/src/main.rs
Normal file
|
@ -0,0 +1,30 @@
|
|||
#![doc = include_str!("../README.md")]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
#![allow(
|
||||
unused_imports,
|
||||
unreachable_pub,
|
||||
unreachable_code,
|
||||
unused_crate_dependencies,
|
||||
dead_code,
|
||||
unused_variables,
|
||||
clippy::needless_pass_by_value,
|
||||
clippy::unused_async,
|
||||
reason = "TODO: remove after v1.0.0"
|
||||
)]
|
||||
|
||||
mod blockchain;
|
||||
mod config;
|
||||
mod constants;
|
||||
mod p2p;
|
||||
mod rpc;
|
||||
mod signals;
|
||||
mod statics;
|
||||
mod txpool;
|
||||
|
||||
fn main() {
|
||||
// Initialize global static `LazyLock` data.
|
||||
statics::init_lazylock_statics();
|
||||
|
||||
// TODO: everything else.
|
||||
todo!()
|
||||
}
|
5
binaries/cuprated/src/p2p.rs
Normal file
5
binaries/cuprated/src/p2p.rs
Normal file
|
@ -0,0 +1,5 @@
|
|||
//! P2P
|
||||
//!
|
||||
//! Will handle initiating the P2P and contains a protocol request handler.
|
||||
|
||||
pub mod request_handler;
|
1
binaries/cuprated/src/p2p/request_handler.rs
Normal file
1
binaries/cuprated/src/p2p/request_handler.rs
Normal file
|
@ -0,0 +1 @@
|
|||
|
11
binaries/cuprated/src/rpc.rs
Normal file
11
binaries/cuprated/src/rpc.rs
Normal file
|
@ -0,0 +1,11 @@
|
|||
//! RPC
|
||||
//!
|
||||
//! Will contain the code to initiate the RPC and a request handler.
|
||||
|
||||
mod bin;
|
||||
mod handler;
|
||||
mod json;
|
||||
mod other;
|
||||
mod request;
|
||||
|
||||
pub use handler::CupratedRpcHandler;
|
85
binaries/cuprated/src/rpc/bin.rs
Normal file
85
binaries/cuprated/src/rpc/bin.rs
Normal file
|
@ -0,0 +1,85 @@
|
|||
use anyhow::Error;
|
||||
|
||||
use cuprate_rpc_types::{
|
||||
bin::{
|
||||
BinRequest, BinResponse, GetBlocksByHeightRequest, GetBlocksByHeightResponse,
|
||||
GetBlocksRequest, GetBlocksResponse, GetHashesRequest, GetHashesResponse,
|
||||
GetOutputIndexesRequest, GetOutputIndexesResponse, GetOutsRequest, GetOutsResponse,
|
||||
GetTransactionPoolHashesRequest, GetTransactionPoolHashesResponse,
|
||||
},
|
||||
json::{GetOutputDistributionRequest, GetOutputDistributionResponse},
|
||||
};
|
||||
|
||||
use crate::rpc::CupratedRpcHandler;
|
||||
|
||||
/// Map a [`BinRequest`] to the function that will lead to a [`BinResponse`].
|
||||
pub(super) async fn map_request(
|
||||
state: CupratedRpcHandler,
|
||||
request: BinRequest,
|
||||
) -> Result<BinResponse, Error> {
|
||||
use BinRequest as Req;
|
||||
use BinResponse as Resp;
|
||||
|
||||
Ok(match request {
|
||||
Req::GetBlocks(r) => Resp::GetBlocks(get_blocks(state, r).await?),
|
||||
Req::GetBlocksByHeight(r) => Resp::GetBlocksByHeight(get_blocks_by_height(state, r).await?),
|
||||
Req::GetHashes(r) => Resp::GetHashes(get_hashes(state, r).await?),
|
||||
Req::GetOutputIndexes(r) => Resp::GetOutputIndexes(get_output_indexes(state, r).await?),
|
||||
Req::GetOuts(r) => Resp::GetOuts(get_outs(state, r).await?),
|
||||
Req::GetTransactionPoolHashes(r) => {
|
||||
Resp::GetTransactionPoolHashes(get_transaction_pool_hashes(state, r).await?)
|
||||
}
|
||||
Req::GetOutputDistribution(r) => {
|
||||
Resp::GetOutputDistribution(get_output_distribution(state, r).await?)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_blocks(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetBlocksRequest,
|
||||
) -> Result<GetBlocksResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_blocks_by_height(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetBlocksByHeightRequest,
|
||||
) -> Result<GetBlocksByHeightResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_hashes(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetHashesRequest,
|
||||
) -> Result<GetHashesResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_output_indexes(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetOutputIndexesRequest,
|
||||
) -> Result<GetOutputIndexesResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_outs(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetOutsRequest,
|
||||
) -> Result<GetOutsResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_transaction_pool_hashes(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetTransactionPoolHashesRequest,
|
||||
) -> Result<GetTransactionPoolHashesResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_output_distribution(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetOutputDistributionRequest,
|
||||
) -> Result<GetOutputDistributionResponse, Error> {
|
||||
todo!()
|
||||
}
|
183
binaries/cuprated/src/rpc/handler.rs
Normal file
183
binaries/cuprated/src/rpc/handler.rs
Normal file
|
@ -0,0 +1,183 @@
|
|||
//! Dummy implementation of [`RpcHandler`].
|
||||
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use anyhow::Error;
|
||||
use futures::future::BoxFuture;
|
||||
use monero_serai::block::Block;
|
||||
use tower::Service;
|
||||
|
||||
use cuprate_blockchain::service::{BlockchainReadHandle, BlockchainWriteHandle};
|
||||
use cuprate_rpc_interface::RpcHandler;
|
||||
use cuprate_rpc_types::{
|
||||
bin::{BinRequest, BinResponse},
|
||||
json::{JsonRpcRequest, JsonRpcResponse},
|
||||
other::{OtherRequest, OtherResponse},
|
||||
};
|
||||
use cuprate_txpool::service::{TxpoolReadHandle, TxpoolWriteHandle};
|
||||
|
||||
use crate::rpc::{bin, json, other};
|
||||
|
||||
/// TODO: use real type when public.
|
||||
#[derive(Clone)]
|
||||
#[expect(clippy::large_enum_variant)]
|
||||
pub enum BlockchainManagerRequest {
|
||||
/// Pop blocks off the top of the blockchain.
|
||||
///
|
||||
/// Input is the amount of blocks to pop.
|
||||
PopBlocks { amount: usize },
|
||||
|
||||
/// Start pruning the blockchain.
|
||||
Prune,
|
||||
|
||||
/// Is the blockchain pruned?
|
||||
Pruned,
|
||||
|
||||
/// Relay a block to the network.
|
||||
RelayBlock(Block),
|
||||
|
||||
/// Is the blockchain in the middle of syncing?
|
||||
///
|
||||
/// This returning `false` does not necessarily
|
||||
/// mean [`BlockchainManagerRequest::Synced`] will
|
||||
/// return `true`, for example, if the network has been
|
||||
/// cut off and we have no peers, this will return `false`,
|
||||
/// however, [`BlockchainManagerRequest::Synced`] may return
|
||||
/// `true` if the latest known chain tip is equal to our height.
|
||||
Syncing,
|
||||
|
||||
/// Is the blockchain fully synced?
|
||||
Synced,
|
||||
|
||||
/// Current target block time.
|
||||
Target,
|
||||
|
||||
/// The height of the next block in the chain.
|
||||
TargetHeight,
|
||||
}
|
||||
|
||||
/// TODO: use real type when public.
|
||||
#[derive(Clone)]
|
||||
pub enum BlockchainManagerResponse {
|
||||
/// General OK response.
|
||||
///
|
||||
/// Response to:
|
||||
/// - [`BlockchainManagerRequest::Prune`]
|
||||
/// - [`BlockchainManagerRequest::RelayBlock`]
|
||||
Ok,
|
||||
|
||||
/// Response to [`BlockchainManagerRequest::PopBlocks`]
|
||||
PopBlocks { new_height: usize },
|
||||
|
||||
/// Response to [`BlockchainManagerRequest::Pruned`]
|
||||
Pruned(bool),
|
||||
|
||||
/// Response to [`BlockchainManagerRequest::Syncing`]
|
||||
Syncing(bool),
|
||||
|
||||
/// Response to [`BlockchainManagerRequest::Synced`]
|
||||
Synced(bool),
|
||||
|
||||
/// Response to [`BlockchainManagerRequest::Target`]
|
||||
Target(std::time::Duration),
|
||||
|
||||
/// Response to [`BlockchainManagerRequest::TargetHeight`]
|
||||
TargetHeight { height: usize },
|
||||
}
|
||||
|
||||
/// TODO: use real type when public.
|
||||
pub type BlockchainManagerHandle = cuprate_database_service::DatabaseReadService<
|
||||
BlockchainManagerRequest,
|
||||
BlockchainManagerResponse,
|
||||
>;
|
||||
|
||||
/// TODO
|
||||
#[derive(Clone)]
|
||||
pub struct CupratedRpcHandler {
|
||||
/// Should this RPC server be [restricted](RpcHandler::restricted)?
|
||||
///
|
||||
/// This is not `pub` on purpose, as it should not be mutated after [`Self::new`].
|
||||
restricted: bool,
|
||||
|
||||
/// Read handle to the blockchain database.
|
||||
pub blockchain_read: BlockchainReadHandle,
|
||||
|
||||
/// Handle to the blockchain manager.
|
||||
pub blockchain_manager: BlockchainManagerHandle,
|
||||
|
||||
/// Read handle to the transaction pool database.
|
||||
pub txpool_read: TxpoolReadHandle,
|
||||
|
||||
/// TODO: handle to txpool service.
|
||||
pub txpool_manager: std::convert::Infallible,
|
||||
}
|
||||
|
||||
impl CupratedRpcHandler {
|
||||
/// Create a new [`Self`].
|
||||
pub const fn new(
|
||||
restricted: bool,
|
||||
blockchain_read: BlockchainReadHandle,
|
||||
blockchain_manager: BlockchainManagerHandle,
|
||||
txpool_read: TxpoolReadHandle,
|
||||
txpool_manager: std::convert::Infallible,
|
||||
) -> Self {
|
||||
Self {
|
||||
restricted,
|
||||
blockchain_read,
|
||||
blockchain_manager,
|
||||
txpool_read,
|
||||
txpool_manager,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RpcHandler for CupratedRpcHandler {
|
||||
fn restricted(&self) -> bool {
|
||||
self.restricted
|
||||
}
|
||||
}
|
||||
|
||||
impl Service<JsonRpcRequest> for CupratedRpcHandler {
|
||||
type Response = JsonRpcResponse;
|
||||
type Error = Error;
|
||||
type Future = BoxFuture<'static, Result<JsonRpcResponse, Error>>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, request: JsonRpcRequest) -> Self::Future {
|
||||
let state = self.clone();
|
||||
Box::pin(json::map_request(state, request))
|
||||
}
|
||||
}
|
||||
|
||||
impl Service<BinRequest> for CupratedRpcHandler {
|
||||
type Response = BinResponse;
|
||||
type Error = Error;
|
||||
type Future = BoxFuture<'static, Result<BinResponse, Error>>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, request: BinRequest) -> Self::Future {
|
||||
let state = self.clone();
|
||||
Box::pin(bin::map_request(state, request))
|
||||
}
|
||||
}
|
||||
|
||||
impl Service<OtherRequest> for CupratedRpcHandler {
|
||||
type Response = OtherResponse;
|
||||
type Error = Error;
|
||||
type Future = BoxFuture<'static, Result<OtherResponse, Error>>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, request: OtherRequest) -> Self::Future {
|
||||
let state = self.clone();
|
||||
Box::pin(other::map_request(state, request))
|
||||
}
|
||||
}
|
294
binaries/cuprated/src/rpc/json.rs
Normal file
294
binaries/cuprated/src/rpc/json.rs
Normal file
|
@ -0,0 +1,294 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Error;
|
||||
use tower::ServiceExt;
|
||||
|
||||
use cuprate_rpc_types::json::{
|
||||
AddAuxPowRequest, AddAuxPowResponse, BannedRequest, BannedResponse, CalcPowRequest,
|
||||
CalcPowResponse, FlushCacheRequest, FlushCacheResponse, FlushTransactionPoolRequest,
|
||||
FlushTransactionPoolResponse, GenerateBlocksRequest, GenerateBlocksResponse,
|
||||
GetAlternateChainsRequest, GetAlternateChainsResponse, GetBansRequest, GetBansResponse,
|
||||
GetBlockCountRequest, GetBlockCountResponse, GetBlockHeaderByHashRequest,
|
||||
GetBlockHeaderByHashResponse, GetBlockHeaderByHeightRequest, GetBlockHeaderByHeightResponse,
|
||||
GetBlockHeadersRangeRequest, GetBlockHeadersRangeResponse, GetBlockRequest, GetBlockResponse,
|
||||
GetCoinbaseTxSumRequest, GetCoinbaseTxSumResponse, GetConnectionsRequest,
|
||||
GetConnectionsResponse, GetFeeEstimateRequest, GetFeeEstimateResponse, GetInfoRequest,
|
||||
GetInfoResponse, GetLastBlockHeaderRequest, GetLastBlockHeaderResponse, GetMinerDataRequest,
|
||||
GetMinerDataResponse, GetOutputHistogramRequest, GetOutputHistogramResponse,
|
||||
GetTransactionPoolBacklogRequest, GetTransactionPoolBacklogResponse, GetTxIdsLooseRequest,
|
||||
GetTxIdsLooseResponse, GetVersionRequest, GetVersionResponse, HardForkInfoRequest,
|
||||
HardForkInfoResponse, JsonRpcRequest, JsonRpcResponse, OnGetBlockHashRequest,
|
||||
OnGetBlockHashResponse, PruneBlockchainRequest, PruneBlockchainResponse, RelayTxRequest,
|
||||
RelayTxResponse, SetBansRequest, SetBansResponse, SubmitBlockRequest, SubmitBlockResponse,
|
||||
SyncInfoRequest, SyncInfoResponse,
|
||||
};
|
||||
|
||||
use crate::rpc::CupratedRpcHandler;
|
||||
|
||||
/// Map a [`JsonRpcRequest`] to the function that will lead to a [`JsonRpcResponse`].
|
||||
pub(super) async fn map_request(
|
||||
state: CupratedRpcHandler,
|
||||
request: JsonRpcRequest,
|
||||
) -> Result<JsonRpcResponse, Error> {
|
||||
use JsonRpcRequest as Req;
|
||||
use JsonRpcResponse as Resp;
|
||||
|
||||
Ok(match request {
|
||||
Req::GetBlockCount(r) => Resp::GetBlockCount(get_block_count(state, r).await?),
|
||||
Req::OnGetBlockHash(r) => Resp::OnGetBlockHash(on_get_block_hash(state, r).await?),
|
||||
Req::SubmitBlock(r) => Resp::SubmitBlock(submit_block(state, r).await?),
|
||||
Req::GenerateBlocks(r) => Resp::GenerateBlocks(generate_blocks(state, r).await?),
|
||||
Req::GetLastBlockHeader(r) => {
|
||||
Resp::GetLastBlockHeader(get_last_block_header(state, r).await?)
|
||||
}
|
||||
Req::GetBlockHeaderByHash(r) => {
|
||||
Resp::GetBlockHeaderByHash(get_block_header_by_hash(state, r).await?)
|
||||
}
|
||||
Req::GetBlockHeaderByHeight(r) => {
|
||||
Resp::GetBlockHeaderByHeight(get_block_header_by_height(state, r).await?)
|
||||
}
|
||||
Req::GetBlockHeadersRange(r) => {
|
||||
Resp::GetBlockHeadersRange(get_block_headers_range(state, r).await?)
|
||||
}
|
||||
Req::GetBlock(r) => Resp::GetBlock(get_block(state, r).await?),
|
||||
Req::GetConnections(r) => Resp::GetConnections(get_connections(state, r).await?),
|
||||
Req::GetInfo(r) => Resp::GetInfo(get_info(state, r).await?),
|
||||
Req::HardForkInfo(r) => Resp::HardForkInfo(hard_fork_info(state, r).await?),
|
||||
Req::SetBans(r) => Resp::SetBans(set_bans(state, r).await?),
|
||||
Req::GetBans(r) => Resp::GetBans(get_bans(state, r).await?),
|
||||
Req::Banned(r) => Resp::Banned(banned(state, r).await?),
|
||||
Req::FlushTransactionPool(r) => {
|
||||
Resp::FlushTransactionPool(flush_transaction_pool(state, r).await?)
|
||||
}
|
||||
Req::GetOutputHistogram(r) => {
|
||||
Resp::GetOutputHistogram(get_output_histogram(state, r).await?)
|
||||
}
|
||||
Req::GetCoinbaseTxSum(r) => Resp::GetCoinbaseTxSum(get_coinbase_tx_sum(state, r).await?),
|
||||
Req::GetVersion(r) => Resp::GetVersion(get_version(state, r).await?),
|
||||
Req::GetFeeEstimate(r) => Resp::GetFeeEstimate(get_fee_estimate(state, r).await?),
|
||||
Req::GetAlternateChains(r) => {
|
||||
Resp::GetAlternateChains(get_alternate_chains(state, r).await?)
|
||||
}
|
||||
Req::RelayTx(r) => Resp::RelayTx(relay_tx(state, r).await?),
|
||||
Req::SyncInfo(r) => Resp::SyncInfo(sync_info(state, r).await?),
|
||||
Req::GetTransactionPoolBacklog(r) => {
|
||||
Resp::GetTransactionPoolBacklog(get_transaction_pool_backlog(state, r).await?)
|
||||
}
|
||||
Req::GetMinerData(r) => Resp::GetMinerData(get_miner_data(state, r).await?),
|
||||
Req::PruneBlockchain(r) => Resp::PruneBlockchain(prune_blockchain(state, r).await?),
|
||||
Req::CalcPow(r) => Resp::CalcPow(calc_pow(state, r).await?),
|
||||
Req::FlushCache(r) => Resp::FlushCache(flush_cache(state, r).await?),
|
||||
Req::AddAuxPow(r) => Resp::AddAuxPow(add_aux_pow(state, r).await?),
|
||||
Req::GetTxIdsLoose(r) => Resp::GetTxIdsLoose(get_tx_ids_loose(state, r).await?),
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_block_count(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetBlockCountRequest,
|
||||
) -> Result<GetBlockCountResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn on_get_block_hash(
|
||||
state: CupratedRpcHandler,
|
||||
request: OnGetBlockHashRequest,
|
||||
) -> Result<OnGetBlockHashResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn submit_block(
|
||||
state: CupratedRpcHandler,
|
||||
request: SubmitBlockRequest,
|
||||
) -> Result<SubmitBlockResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn generate_blocks(
|
||||
state: CupratedRpcHandler,
|
||||
request: GenerateBlocksRequest,
|
||||
) -> Result<GenerateBlocksResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_last_block_header(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetLastBlockHeaderRequest,
|
||||
) -> Result<GetLastBlockHeaderResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_block_header_by_hash(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetBlockHeaderByHashRequest,
|
||||
) -> Result<GetBlockHeaderByHashResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_block_header_by_height(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetBlockHeaderByHeightRequest,
|
||||
) -> Result<GetBlockHeaderByHeightResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_block_headers_range(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetBlockHeadersRangeRequest,
|
||||
) -> Result<GetBlockHeadersRangeResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_block(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetBlockRequest,
|
||||
) -> Result<GetBlockResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_connections(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetConnectionsRequest,
|
||||
) -> Result<GetConnectionsResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_info(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetInfoRequest,
|
||||
) -> Result<GetInfoResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn hard_fork_info(
|
||||
state: CupratedRpcHandler,
|
||||
request: HardForkInfoRequest,
|
||||
) -> Result<HardForkInfoResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn set_bans(
|
||||
state: CupratedRpcHandler,
|
||||
request: SetBansRequest,
|
||||
) -> Result<SetBansResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_bans(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetBansRequest,
|
||||
) -> Result<GetBansResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn banned(
|
||||
state: CupratedRpcHandler,
|
||||
request: BannedRequest,
|
||||
) -> Result<BannedResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn flush_transaction_pool(
|
||||
state: CupratedRpcHandler,
|
||||
request: FlushTransactionPoolRequest,
|
||||
) -> Result<FlushTransactionPoolResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_output_histogram(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetOutputHistogramRequest,
|
||||
) -> Result<GetOutputHistogramResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_coinbase_tx_sum(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetCoinbaseTxSumRequest,
|
||||
) -> Result<GetCoinbaseTxSumResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_version(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetVersionRequest,
|
||||
) -> Result<GetVersionResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_fee_estimate(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetFeeEstimateRequest,
|
||||
) -> Result<GetFeeEstimateResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_alternate_chains(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetAlternateChainsRequest,
|
||||
) -> Result<GetAlternateChainsResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn relay_tx(
|
||||
state: CupratedRpcHandler,
|
||||
request: RelayTxRequest,
|
||||
) -> Result<RelayTxResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn sync_info(
|
||||
state: CupratedRpcHandler,
|
||||
request: SyncInfoRequest,
|
||||
) -> Result<SyncInfoResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_transaction_pool_backlog(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetTransactionPoolBacklogRequest,
|
||||
) -> Result<GetTransactionPoolBacklogResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_miner_data(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetMinerDataRequest,
|
||||
) -> Result<GetMinerDataResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn prune_blockchain(
|
||||
state: CupratedRpcHandler,
|
||||
request: PruneBlockchainRequest,
|
||||
) -> Result<PruneBlockchainResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn calc_pow(
|
||||
state: CupratedRpcHandler,
|
||||
request: CalcPowRequest,
|
||||
) -> Result<CalcPowResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn flush_cache(
|
||||
state: CupratedRpcHandler,
|
||||
request: FlushCacheRequest,
|
||||
) -> Result<FlushCacheResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn add_aux_pow(
|
||||
state: CupratedRpcHandler,
|
||||
request: AddAuxPowRequest,
|
||||
) -> Result<AddAuxPowResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_tx_ids_loose(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetTxIdsLooseRequest,
|
||||
) -> Result<GetTxIdsLooseResponse, Error> {
|
||||
todo!()
|
||||
}
|
260
binaries/cuprated/src/rpc/other.rs
Normal file
260
binaries/cuprated/src/rpc/other.rs
Normal file
|
@ -0,0 +1,260 @@
|
|||
use anyhow::Error;
|
||||
|
||||
use cuprate_rpc_types::other::{
|
||||
GetAltBlocksHashesRequest, GetAltBlocksHashesResponse, GetHeightRequest, GetHeightResponse,
|
||||
GetLimitRequest, GetLimitResponse, GetNetStatsRequest, GetNetStatsResponse, GetOutsRequest,
|
||||
GetOutsResponse, GetPeerListRequest, GetPeerListResponse, GetPublicNodesRequest,
|
||||
GetPublicNodesResponse, GetTransactionPoolHashesRequest, GetTransactionPoolHashesResponse,
|
||||
GetTransactionPoolRequest, GetTransactionPoolResponse, GetTransactionPoolStatsRequest,
|
||||
GetTransactionPoolStatsResponse, GetTransactionsRequest, GetTransactionsResponse,
|
||||
InPeersRequest, InPeersResponse, IsKeyImageSpentRequest, IsKeyImageSpentResponse,
|
||||
MiningStatusRequest, MiningStatusResponse, OtherRequest, OtherResponse, OutPeersRequest,
|
||||
OutPeersResponse, PopBlocksRequest, PopBlocksResponse, SaveBcRequest, SaveBcResponse,
|
||||
SendRawTransactionRequest, SendRawTransactionResponse, SetBootstrapDaemonRequest,
|
||||
SetBootstrapDaemonResponse, SetLimitRequest, SetLimitResponse, SetLogCategoriesRequest,
|
||||
SetLogCategoriesResponse, SetLogHashRateRequest, SetLogHashRateResponse, SetLogLevelRequest,
|
||||
SetLogLevelResponse, StartMiningRequest, StartMiningResponse, StopDaemonRequest,
|
||||
StopDaemonResponse, StopMiningRequest, StopMiningResponse, UpdateRequest, UpdateResponse,
|
||||
};
|
||||
|
||||
use crate::rpc::CupratedRpcHandler;
|
||||
|
||||
/// Map a [`OtherRequest`] to the function that will lead to a [`OtherResponse`].
|
||||
pub(super) async fn map_request(
|
||||
state: CupratedRpcHandler,
|
||||
request: OtherRequest,
|
||||
) -> Result<OtherResponse, Error> {
|
||||
use OtherRequest as Req;
|
||||
use OtherResponse as Resp;
|
||||
|
||||
Ok(match request {
|
||||
Req::GetHeight(r) => Resp::GetHeight(get_height(state, r).await?),
|
||||
Req::GetTransactions(r) => Resp::GetTransactions(get_transactions(state, r).await?),
|
||||
Req::GetAltBlocksHashes(r) => {
|
||||
Resp::GetAltBlocksHashes(get_alt_blocks_hashes(state, r).await?)
|
||||
}
|
||||
Req::IsKeyImageSpent(r) => Resp::IsKeyImageSpent(is_key_image_spent(state, r).await?),
|
||||
Req::SendRawTransaction(r) => {
|
||||
Resp::SendRawTransaction(send_raw_transaction(state, r).await?)
|
||||
}
|
||||
Req::StartMining(r) => Resp::StartMining(start_mining(state, r).await?),
|
||||
Req::StopMining(r) => Resp::StopMining(stop_mining(state, r).await?),
|
||||
Req::MiningStatus(r) => Resp::MiningStatus(mining_status(state, r).await?),
|
||||
Req::SaveBc(r) => Resp::SaveBc(save_bc(state, r).await?),
|
||||
Req::GetPeerList(r) => Resp::GetPeerList(get_peer_list(state, r).await?),
|
||||
Req::SetLogHashRate(r) => Resp::SetLogHashRate(set_log_hash_rate(state, r).await?),
|
||||
Req::SetLogLevel(r) => Resp::SetLogLevel(set_log_level(state, r).await?),
|
||||
Req::SetLogCategories(r) => Resp::SetLogCategories(set_log_categories(state, r).await?),
|
||||
Req::SetBootstrapDaemon(r) => {
|
||||
Resp::SetBootstrapDaemon(set_bootstrap_daemon(state, r).await?)
|
||||
}
|
||||
Req::GetTransactionPool(r) => {
|
||||
Resp::GetTransactionPool(get_transaction_pool(state, r).await?)
|
||||
}
|
||||
Req::GetTransactionPoolStats(r) => {
|
||||
Resp::GetTransactionPoolStats(get_transaction_pool_stats(state, r).await?)
|
||||
}
|
||||
Req::StopDaemon(r) => Resp::StopDaemon(stop_daemon(state, r).await?),
|
||||
Req::GetLimit(r) => Resp::GetLimit(get_limit(state, r).await?),
|
||||
Req::SetLimit(r) => Resp::SetLimit(set_limit(state, r).await?),
|
||||
Req::OutPeers(r) => Resp::OutPeers(out_peers(state, r).await?),
|
||||
Req::InPeers(r) => Resp::InPeers(in_peers(state, r).await?),
|
||||
Req::GetNetStats(r) => Resp::GetNetStats(get_net_stats(state, r).await?),
|
||||
Req::GetOuts(r) => Resp::GetOuts(get_outs(state, r).await?),
|
||||
Req::Update(r) => Resp::Update(update(state, r).await?),
|
||||
Req::PopBlocks(r) => Resp::PopBlocks(pop_blocks(state, r).await?),
|
||||
Req::GetTransactionPoolHashes(r) => {
|
||||
Resp::GetTransactionPoolHashes(get_transaction_pool_hashes(state, r).await?)
|
||||
}
|
||||
Req::GetPublicNodes(r) => Resp::GetPublicNodes(get_public_nodes(state, r).await?),
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_height(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetHeightRequest,
|
||||
) -> Result<GetHeightResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_transactions(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetTransactionsRequest,
|
||||
) -> Result<GetTransactionsResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_alt_blocks_hashes(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetAltBlocksHashesRequest,
|
||||
) -> Result<GetAltBlocksHashesResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn is_key_image_spent(
|
||||
state: CupratedRpcHandler,
|
||||
request: IsKeyImageSpentRequest,
|
||||
) -> Result<IsKeyImageSpentResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn send_raw_transaction(
|
||||
state: CupratedRpcHandler,
|
||||
request: SendRawTransactionRequest,
|
||||
) -> Result<SendRawTransactionResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn start_mining(
|
||||
state: CupratedRpcHandler,
|
||||
request: StartMiningRequest,
|
||||
) -> Result<StartMiningResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn stop_mining(
|
||||
state: CupratedRpcHandler,
|
||||
request: StopMiningRequest,
|
||||
) -> Result<StopMiningResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn mining_status(
|
||||
state: CupratedRpcHandler,
|
||||
request: MiningStatusRequest,
|
||||
) -> Result<MiningStatusResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn save_bc(
|
||||
state: CupratedRpcHandler,
|
||||
request: SaveBcRequest,
|
||||
) -> Result<SaveBcResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_peer_list(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetPeerListRequest,
|
||||
) -> Result<GetPeerListResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn set_log_hash_rate(
|
||||
state: CupratedRpcHandler,
|
||||
request: SetLogHashRateRequest,
|
||||
) -> Result<SetLogHashRateResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn set_log_level(
|
||||
state: CupratedRpcHandler,
|
||||
request: SetLogLevelRequest,
|
||||
) -> Result<SetLogLevelResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn set_log_categories(
|
||||
state: CupratedRpcHandler,
|
||||
request: SetLogCategoriesRequest,
|
||||
) -> Result<SetLogCategoriesResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn set_bootstrap_daemon(
|
||||
state: CupratedRpcHandler,
|
||||
request: SetBootstrapDaemonRequest,
|
||||
) -> Result<SetBootstrapDaemonResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_transaction_pool(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetTransactionPoolRequest,
|
||||
) -> Result<GetTransactionPoolResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_transaction_pool_stats(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetTransactionPoolStatsRequest,
|
||||
) -> Result<GetTransactionPoolStatsResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn stop_daemon(
|
||||
state: CupratedRpcHandler,
|
||||
request: StopDaemonRequest,
|
||||
) -> Result<StopDaemonResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_limit(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetLimitRequest,
|
||||
) -> Result<GetLimitResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn set_limit(
|
||||
state: CupratedRpcHandler,
|
||||
request: SetLimitRequest,
|
||||
) -> Result<SetLimitResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn out_peers(
|
||||
state: CupratedRpcHandler,
|
||||
request: OutPeersRequest,
|
||||
) -> Result<OutPeersResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn in_peers(
|
||||
state: CupratedRpcHandler,
|
||||
request: InPeersRequest,
|
||||
) -> Result<InPeersResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_net_stats(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetNetStatsRequest,
|
||||
) -> Result<GetNetStatsResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_outs(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetOutsRequest,
|
||||
) -> Result<GetOutsResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn update(
|
||||
state: CupratedRpcHandler,
|
||||
request: UpdateRequest,
|
||||
) -> Result<UpdateResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn pop_blocks(
|
||||
state: CupratedRpcHandler,
|
||||
request: PopBlocksRequest,
|
||||
) -> Result<PopBlocksResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_transaction_pool_hashes(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetTransactionPoolHashesRequest,
|
||||
) -> Result<GetTransactionPoolHashesResponse, Error> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_public_nodes(
|
||||
state: CupratedRpcHandler,
|
||||
request: GetPublicNodesRequest,
|
||||
) -> Result<GetPublicNodesResponse, Error> {
|
||||
todo!()
|
||||
}
|
19
binaries/cuprated/src/rpc/request.rs
Normal file
19
binaries/cuprated/src/rpc/request.rs
Normal file
|
@ -0,0 +1,19 @@
|
|||
//! Convenience functions for requests/responses.
|
||||
//!
|
||||
//! This module implements many methods for
|
||||
//! [`CupratedRpcHandler`](crate::rpc::CupratedRpcHandler)
|
||||
//! that are simple wrappers around the request/response API provided
|
||||
//! by the multiple [`tower::Service`]s.
|
||||
//!
|
||||
//! These exist to prevent noise like `unreachable!()`
|
||||
//! from being everywhere in the actual handler functions.
|
||||
//!
|
||||
//! Each module implements methods for a specific API, e.g.
|
||||
//! the [`blockchain`] modules contains methods for the
|
||||
//! blockchain database [`tower::Service`] API.
|
||||
|
||||
mod address_book;
|
||||
mod blockchain;
|
||||
mod blockchain_context;
|
||||
mod blockchain_manager;
|
||||
mod txpool;
|
104
binaries/cuprated/src/rpc/request/address_book.rs
Normal file
104
binaries/cuprated/src/rpc/request/address_book.rs
Normal file
|
@ -0,0 +1,104 @@
|
|||
//! Functions for TODO: doc enum message.
|
||||
|
||||
use std::convert::Infallible;
|
||||
|
||||
use anyhow::Error;
|
||||
use tower::ServiceExt;
|
||||
|
||||
use cuprate_helper::cast::usize_to_u64;
|
||||
use cuprate_p2p_core::{
|
||||
services::{AddressBookRequest, AddressBookResponse},
|
||||
AddressBook, NetworkZone,
|
||||
};
|
||||
|
||||
/// [`AddressBookRequest::PeerlistSize`]
|
||||
pub(super) async fn peerlist_size<Z: NetworkZone>(
|
||||
address_book: &mut impl AddressBook<Z>,
|
||||
) -> Result<(u64, u64), Error> {
|
||||
let AddressBookResponse::PeerlistSize { white, grey } = address_book
|
||||
.ready()
|
||||
.await
|
||||
.expect("TODO")
|
||||
.call(AddressBookRequest::PeerlistSize)
|
||||
.await
|
||||
.expect("TODO")
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok((usize_to_u64(white), usize_to_u64(grey)))
|
||||
}
|
||||
|
||||
/// [`AddressBookRequest::ConnectionCount`]
|
||||
pub(super) async fn connection_count<Z: NetworkZone>(
|
||||
address_book: &mut impl AddressBook<Z>,
|
||||
) -> Result<(u64, u64), Error> {
|
||||
let AddressBookResponse::ConnectionCount { incoming, outgoing } = address_book
|
||||
.ready()
|
||||
.await
|
||||
.expect("TODO")
|
||||
.call(AddressBookRequest::ConnectionCount)
|
||||
.await
|
||||
.expect("TODO")
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok((usize_to_u64(incoming), usize_to_u64(outgoing)))
|
||||
}
|
||||
|
||||
/// [`AddressBookRequest::SetBan`]
|
||||
pub(super) async fn set_ban<Z: NetworkZone>(
|
||||
address_book: &mut impl AddressBook<Z>,
|
||||
peer: cuprate_p2p_core::ban::SetBan<Z::Addr>,
|
||||
) -> Result<(), Error> {
|
||||
let AddressBookResponse::Ok = address_book
|
||||
.ready()
|
||||
.await
|
||||
.expect("TODO")
|
||||
.call(AddressBookRequest::SetBan(peer))
|
||||
.await
|
||||
.expect("TODO")
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// [`AddressBookRequest::GetBan`]
|
||||
pub(super) async fn get_ban<Z: NetworkZone>(
|
||||
address_book: &mut impl AddressBook<Z>,
|
||||
peer: Z::Addr,
|
||||
) -> Result<Option<std::time::Instant>, Error> {
|
||||
let AddressBookResponse::GetBan { unban_instant } = address_book
|
||||
.ready()
|
||||
.await
|
||||
.expect("TODO")
|
||||
.call(AddressBookRequest::GetBan(peer))
|
||||
.await
|
||||
.expect("TODO")
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(unban_instant)
|
||||
}
|
||||
|
||||
/// [`AddressBookRequest::GetBans`]
|
||||
pub(super) async fn get_bans<Z: NetworkZone>(
|
||||
address_book: &mut impl AddressBook<Z>,
|
||||
) -> Result<(), Error> {
|
||||
let AddressBookResponse::GetBans(bans) = address_book
|
||||
.ready()
|
||||
.await
|
||||
.expect("TODO")
|
||||
.call(AddressBookRequest::GetBans)
|
||||
.await
|
||||
.expect("TODO")
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(todo!())
|
||||
}
|
308
binaries/cuprated/src/rpc/request/blockchain.rs
Normal file
308
binaries/cuprated/src/rpc/request/blockchain.rs
Normal file
|
@ -0,0 +1,308 @@
|
|||
//! Functions for [`BlockchainReadRequest`].
|
||||
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
ops::Range,
|
||||
};
|
||||
|
||||
use anyhow::Error;
|
||||
use cuprate_blockchain::service::BlockchainReadHandle;
|
||||
use tower::{Service, ServiceExt};
|
||||
|
||||
use cuprate_helper::cast::{u64_to_usize, usize_to_u64};
|
||||
use cuprate_types::{
|
||||
blockchain::{BlockchainReadRequest, BlockchainResponse},
|
||||
Chain, CoinbaseTxSum, ExtendedBlockHeader, MinerData, OutputHistogramEntry,
|
||||
OutputHistogramInput, OutputOnChain,
|
||||
};
|
||||
|
||||
/// [`BlockchainReadRequest::BlockExtendedHeader`].
|
||||
pub(super) async fn block_extended_header(
|
||||
mut blockchain_read: BlockchainReadHandle,
|
||||
height: u64,
|
||||
) -> Result<ExtendedBlockHeader, Error> {
|
||||
let BlockchainResponse::BlockExtendedHeader(header) = blockchain_read
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::BlockExtendedHeader(u64_to_usize(
|
||||
height,
|
||||
)))
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(header)
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::BlockHash`].
|
||||
pub(super) async fn block_hash(
|
||||
mut blockchain_read: BlockchainReadHandle,
|
||||
height: u64,
|
||||
chain: Chain,
|
||||
) -> Result<[u8; 32], Error> {
|
||||
let BlockchainResponse::BlockHash(hash) = blockchain_read
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::BlockHash(
|
||||
u64_to_usize(height),
|
||||
chain,
|
||||
))
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(hash)
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::FindBlock`].
|
||||
pub(super) async fn find_block(
|
||||
mut blockchain_read: BlockchainReadHandle,
|
||||
block_hash: [u8; 32],
|
||||
) -> Result<Option<(Chain, usize)>, Error> {
|
||||
let BlockchainResponse::FindBlock(option) = blockchain_read
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::FindBlock(block_hash))
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(option)
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::FilterUnknownHashes`].
|
||||
pub(super) async fn filter_unknown_hashes(
|
||||
mut blockchain_read: BlockchainReadHandle,
|
||||
block_hashes: HashSet<[u8; 32]>,
|
||||
) -> Result<HashSet<[u8; 32]>, Error> {
|
||||
let BlockchainResponse::FilterUnknownHashes(output) = blockchain_read
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::FilterUnknownHashes(block_hashes))
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::BlockExtendedHeaderInRange`]
|
||||
pub(super) async fn block_extended_header_in_range(
|
||||
mut blockchain_read: BlockchainReadHandle,
|
||||
range: Range<usize>,
|
||||
chain: Chain,
|
||||
) -> Result<Vec<ExtendedBlockHeader>, Error> {
|
||||
let BlockchainResponse::BlockExtendedHeaderInRange(output) = blockchain_read
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::BlockExtendedHeaderInRange(
|
||||
range, chain,
|
||||
))
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::ChainHeight`].
|
||||
pub(super) async fn chain_height(
|
||||
mut blockchain_read: BlockchainReadHandle,
|
||||
) -> Result<(u64, [u8; 32]), Error> {
|
||||
let BlockchainResponse::ChainHeight(height, hash) = blockchain_read
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::ChainHeight)
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok((usize_to_u64(height), hash))
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::GeneratedCoins`].
|
||||
pub(super) async fn generated_coins(
|
||||
mut blockchain_read: BlockchainReadHandle,
|
||||
block_height: u64,
|
||||
) -> Result<u64, Error> {
|
||||
let BlockchainResponse::GeneratedCoins(generated_coins) = blockchain_read
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::GeneratedCoins(u64_to_usize(
|
||||
block_height,
|
||||
)))
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(generated_coins)
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::Outputs`]
|
||||
pub(super) async fn outputs(
|
||||
mut blockchain_read: BlockchainReadHandle,
|
||||
outputs: HashMap<u64, HashSet<u64>>,
|
||||
) -> Result<HashMap<u64, HashMap<u64, OutputOnChain>>, Error> {
|
||||
let BlockchainResponse::Outputs(outputs) = blockchain_read
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::Outputs(outputs))
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(outputs)
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::NumberOutputsWithAmount`]
|
||||
pub(super) async fn number_outputs_with_amount(
|
||||
mut blockchain_read: BlockchainReadHandle,
|
||||
output_amounts: Vec<u64>,
|
||||
) -> Result<HashMap<u64, usize>, Error> {
|
||||
let BlockchainResponse::NumberOutputsWithAmount(map) = blockchain_read
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::NumberOutputsWithAmount(
|
||||
output_amounts,
|
||||
))
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(map)
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::KeyImagesSpent`]
|
||||
pub(super) async fn key_images_spent(
|
||||
mut blockchain_read: BlockchainReadHandle,
|
||||
key_images: HashSet<[u8; 32]>,
|
||||
) -> Result<bool, Error> {
|
||||
let BlockchainResponse::KeyImagesSpent(is_spent) = blockchain_read
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::KeyImagesSpent(key_images))
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(is_spent)
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::CompactChainHistory`]
|
||||
pub(super) async fn compact_chain_history(
|
||||
mut blockchain_read: BlockchainReadHandle,
|
||||
) -> Result<(Vec<[u8; 32]>, u128), Error> {
|
||||
let BlockchainResponse::CompactChainHistory {
|
||||
block_ids,
|
||||
cumulative_difficulty,
|
||||
} = blockchain_read
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::CompactChainHistory)
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok((block_ids, cumulative_difficulty))
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::FindFirstUnknown`]
|
||||
pub(super) async fn find_first_unknown(
|
||||
mut blockchain_read: BlockchainReadHandle,
|
||||
hashes: Vec<[u8; 32]>,
|
||||
) -> Result<Option<(usize, u64)>, Error> {
|
||||
let BlockchainResponse::FindFirstUnknown(resp) = blockchain_read
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::FindFirstUnknown(hashes))
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(resp.map(|(index, height)| (index, usize_to_u64(height))))
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::TotalTxCount`]
|
||||
pub(super) async fn total_tx_count(
|
||||
mut blockchain_read: BlockchainReadHandle,
|
||||
) -> Result<u64, Error> {
|
||||
let BlockchainResponse::TotalTxCount(tx_count) = blockchain_read
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::TotalTxCount)
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(usize_to_u64(tx_count))
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::DatabaseSize`]
|
||||
pub(super) async fn database_size(
|
||||
mut blockchain_read: BlockchainReadHandle,
|
||||
) -> Result<(u64, u64), Error> {
|
||||
let BlockchainResponse::DatabaseSize {
|
||||
database_size,
|
||||
free_space,
|
||||
} = blockchain_read
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::DatabaseSize)
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok((database_size, free_space))
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::OutputHistogram`]
|
||||
pub(super) async fn output_histogram(
|
||||
mut blockchain_read: BlockchainReadHandle,
|
||||
input: OutputHistogramInput,
|
||||
) -> Result<Vec<OutputHistogramEntry>, Error> {
|
||||
let BlockchainResponse::OutputHistogram(histogram) = blockchain_read
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::OutputHistogram(input))
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(histogram)
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::CoinbaseTxSum`]
|
||||
pub(super) async fn coinbase_tx_sum(
|
||||
mut blockchain_read: BlockchainReadHandle,
|
||||
height: u64,
|
||||
count: u64,
|
||||
) -> Result<CoinbaseTxSum, Error> {
|
||||
let BlockchainResponse::CoinbaseTxSum(sum) = blockchain_read
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainReadRequest::CoinbaseTxSum {
|
||||
height: u64_to_usize(height),
|
||||
count,
|
||||
})
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(sum)
|
||||
}
|
69
binaries/cuprated/src/rpc/request/blockchain_context.rs
Normal file
69
binaries/cuprated/src/rpc/request/blockchain_context.rs
Normal file
|
@ -0,0 +1,69 @@
|
|||
//! Functions for [`BlockChainContextRequest`] and [`BlockChainContextResponse`].
|
||||
|
||||
use std::convert::Infallible;
|
||||
|
||||
use anyhow::Error;
|
||||
use tower::{Service, ServiceExt};
|
||||
|
||||
use cuprate_consensus_context::{
|
||||
BlockChainContext, BlockChainContextRequest, BlockChainContextResponse,
|
||||
BlockChainContextService,
|
||||
};
|
||||
use cuprate_types::{FeeEstimate, HardFork, HardForkInfo};
|
||||
|
||||
/// [`BlockChainContextRequest::Context`].
|
||||
pub(super) async fn context(
|
||||
service: &mut BlockChainContextService,
|
||||
height: u64,
|
||||
) -> Result<BlockChainContext, Error> {
|
||||
let BlockChainContextResponse::Context(context) = service
|
||||
.ready()
|
||||
.await
|
||||
.expect("TODO")
|
||||
.call(BlockChainContextRequest::Context)
|
||||
.await
|
||||
.expect("TODO")
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(context)
|
||||
}
|
||||
|
||||
/// [`BlockChainContextRequest::HardForkInfo`].
|
||||
pub(super) async fn hard_fork_info(
|
||||
service: &mut BlockChainContextService,
|
||||
hard_fork: HardFork,
|
||||
) -> Result<HardForkInfo, Error> {
|
||||
let BlockChainContextResponse::HardForkInfo(hf_info) = service
|
||||
.ready()
|
||||
.await
|
||||
.expect("TODO")
|
||||
.call(BlockChainContextRequest::HardForkInfo(hard_fork))
|
||||
.await
|
||||
.expect("TODO")
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(hf_info)
|
||||
}
|
||||
|
||||
/// [`BlockChainContextRequest::FeeEstimate`].
|
||||
pub(super) async fn fee_estimate(
|
||||
service: &mut BlockChainContextService,
|
||||
grace_blocks: u64,
|
||||
) -> Result<FeeEstimate, Error> {
|
||||
let BlockChainContextResponse::FeeEstimate(fee) = service
|
||||
.ready()
|
||||
.await
|
||||
.expect("TODO")
|
||||
.call(BlockChainContextRequest::FeeEstimate { grace_blocks })
|
||||
.await
|
||||
.expect("TODO")
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(fee)
|
||||
}
|
141
binaries/cuprated/src/rpc/request/blockchain_manager.rs
Normal file
141
binaries/cuprated/src/rpc/request/blockchain_manager.rs
Normal file
|
@ -0,0 +1,141 @@
|
|||
//! Functions for [`BlockchainManagerRequest`] & [`BlockchainManagerResponse`].
|
||||
|
||||
use anyhow::Error;
|
||||
use monero_serai::block::Block;
|
||||
use tower::{Service, ServiceExt};
|
||||
|
||||
use cuprate_helper::cast::{u64_to_usize, usize_to_u64};
|
||||
|
||||
use crate::rpc::handler::{
|
||||
BlockchainManagerHandle, BlockchainManagerRequest, BlockchainManagerResponse,
|
||||
};
|
||||
|
||||
/// [`BlockchainManagerRequest::PopBlocks`]
|
||||
pub(super) async fn pop_blocks(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
amount: u64,
|
||||
) -> Result<u64, Error> {
|
||||
let BlockchainManagerResponse::PopBlocks { new_height } = blockchain_manager
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainManagerRequest::PopBlocks {
|
||||
amount: u64_to_usize(amount),
|
||||
})
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(usize_to_u64(new_height))
|
||||
}
|
||||
|
||||
/// [`BlockchainManagerRequest::Prune`]
|
||||
pub(super) async fn prune(blockchain_manager: &mut BlockchainManagerHandle) -> Result<(), Error> {
|
||||
let BlockchainManagerResponse::Ok = blockchain_manager
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainManagerRequest::Prune)
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// [`BlockchainManagerRequest::Pruned`]
|
||||
pub(super) async fn pruned(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
) -> Result<bool, Error> {
|
||||
let BlockchainManagerResponse::Pruned(pruned) = blockchain_manager
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainManagerRequest::Pruned)
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(pruned)
|
||||
}
|
||||
|
||||
/// [`BlockchainManagerRequest::RelayBlock`]
|
||||
pub(super) async fn relay_block(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
block: Block,
|
||||
) -> Result<(), Error> {
|
||||
let BlockchainManagerResponse::Ok = blockchain_manager
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainManagerRequest::RelayBlock(block))
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// [`BlockchainManagerRequest::Syncing`]
|
||||
pub(super) async fn syncing(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
) -> Result<bool, Error> {
|
||||
let BlockchainManagerResponse::Syncing(syncing) = blockchain_manager
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainManagerRequest::Syncing)
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(syncing)
|
||||
}
|
||||
|
||||
/// [`BlockchainManagerRequest::Synced`]
|
||||
pub(super) async fn synced(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
) -> Result<bool, Error> {
|
||||
let BlockchainManagerResponse::Synced(syncing) = blockchain_manager
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainManagerRequest::Synced)
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(syncing)
|
||||
}
|
||||
|
||||
/// [`BlockchainManagerRequest::Target`]
|
||||
pub(super) async fn target(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
) -> Result<std::time::Duration, Error> {
|
||||
let BlockchainManagerResponse::Target(target) = blockchain_manager
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainManagerRequest::Target)
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(target)
|
||||
}
|
||||
|
||||
/// [`BlockchainManagerRequest::TargetHeight`]
|
||||
pub(super) async fn target_height(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
) -> Result<u64, Error> {
|
||||
let BlockchainManagerResponse::TargetHeight { height } = blockchain_manager
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainManagerRequest::TargetHeight)
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(usize_to_u64(height))
|
||||
}
|
57
binaries/cuprated/src/rpc/request/txpool.rs
Normal file
57
binaries/cuprated/src/rpc/request/txpool.rs
Normal file
|
@ -0,0 +1,57 @@
|
|||
//! Functions for [`TxpoolReadRequest`].
|
||||
|
||||
use std::convert::Infallible;
|
||||
|
||||
use anyhow::Error;
|
||||
use tower::{Service, ServiceExt};
|
||||
|
||||
use cuprate_helper::cast::usize_to_u64;
|
||||
use cuprate_txpool::{
|
||||
service::{
|
||||
interface::{TxpoolReadRequest, TxpoolReadResponse},
|
||||
TxpoolReadHandle,
|
||||
},
|
||||
TxEntry,
|
||||
};
|
||||
|
||||
/// [`TxpoolReadRequest::Backlog`]
|
||||
pub(super) async fn backlog(txpool_read: &mut TxpoolReadHandle) -> Result<Vec<TxEntry>, Error> {
|
||||
let TxpoolReadResponse::Backlog(tx_entries) = txpool_read
|
||||
.ready()
|
||||
.await
|
||||
.expect("TODO")
|
||||
.call(TxpoolReadRequest::Backlog)
|
||||
.await
|
||||
.expect("TODO")
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(tx_entries)
|
||||
}
|
||||
|
||||
/// [`TxpoolReadRequest::Size`]
|
||||
pub(super) async fn size(txpool_read: &mut TxpoolReadHandle) -> Result<u64, Error> {
|
||||
let TxpoolReadResponse::Size(size) = txpool_read
|
||||
.ready()
|
||||
.await
|
||||
.expect("TODO")
|
||||
.call(TxpoolReadRequest::Size)
|
||||
.await
|
||||
.expect("TODO")
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(usize_to_u64(size))
|
||||
}
|
||||
|
||||
/// TODO
|
||||
#[expect(clippy::needless_pass_by_ref_mut, reason = "TODO: remove after impl")]
|
||||
pub(super) async fn flush(
|
||||
txpool_read: &mut TxpoolReadHandle,
|
||||
tx_hashes: Vec<[u8; 32]>,
|
||||
) -> Result<(), Error> {
|
||||
todo!();
|
||||
Ok(())
|
||||
}
|
12
binaries/cuprated/src/signals.rs
Normal file
12
binaries/cuprated/src/signals.rs
Normal file
|
@ -0,0 +1,12 @@
|
|||
//! Signals for Cuprate state used throughout the binary.
|
||||
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
/// Reorg lock.
|
||||
///
|
||||
/// A [`RwLock`] where a write lock is taken during a reorg and a read lock can be taken
|
||||
/// for any operation which must complete without a reorg happening.
|
||||
///
|
||||
/// Currently, the only operation that needs to take a read lock is adding txs to the tx-pool,
|
||||
/// this can potentially be removed in the future, see: <https://github.com/Cuprate/cuprate/issues/305>
|
||||
pub static REORG_LOCK: RwLock<()> = RwLock::const_new(());
|
53
binaries/cuprated/src/statics.rs
Normal file
53
binaries/cuprated/src/statics.rs
Normal file
|
@ -0,0 +1,53 @@
|
|||
//! Global `static`s used throughout `cuprated`.
|
||||
|
||||
use std::{
|
||||
sync::{atomic::AtomicU64, LazyLock},
|
||||
time::{SystemTime, UNIX_EPOCH},
|
||||
};
|
||||
|
||||
/// Define all the `static`s that should be always be initialized early on.
|
||||
///
|
||||
/// This wraps all `static`s inside a `LazyLock` and generates
|
||||
/// a [`init_lazylock_statics`] function that must/should be
|
||||
/// used by `main()` early on.
|
||||
macro_rules! define_init_lazylock_statics {
|
||||
($(
|
||||
$( #[$attr:meta] )*
|
||||
$name:ident: $t:ty = $init_fn:expr;
|
||||
)*) => {
|
||||
/// Initialize global static `LazyLock` data.
|
||||
pub fn init_lazylock_statics() {
|
||||
$(
|
||||
LazyLock::force(&$name);
|
||||
)*
|
||||
}
|
||||
|
||||
$(
|
||||
$(#[$attr])*
|
||||
pub static $name: LazyLock<$t> = LazyLock::new(|| $init_fn);
|
||||
)*
|
||||
};
|
||||
}
|
||||
|
||||
define_init_lazylock_statics! {
|
||||
/// The start time of `cuprated`.
|
||||
START_INSTANT: SystemTime = SystemTime::now();
|
||||
|
||||
/// Start time of `cuprated` as a UNIX timestamp.
|
||||
START_INSTANT_UNIX: u64 = START_INSTANT
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("Failed to set `cuprated` startup time.")
|
||||
.as_secs();
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
/// Sanity check for startup UNIX time.
|
||||
#[test]
|
||||
fn start_instant_unix() {
|
||||
// Fri Sep 27 01:07:13 AM UTC 2024
|
||||
assert!(*START_INSTANT_UNIX > 1727399233);
|
||||
}
|
||||
}
|
3
binaries/cuprated/src/txpool.rs
Normal file
3
binaries/cuprated/src/txpool.rs
Normal file
|
@ -0,0 +1,3 @@
|
|||
//! Transaction Pool
|
||||
//!
|
||||
//! Will handle initiating the tx-pool, providing the preprocessor required for the dandelion pool.
|
|
@ -1,4 +1,4 @@
|
|||
## Cuprate's architecture (implementation) book
|
||||
## Cuprate's architecture book
|
||||
This book documents Cuprate's architecture and implementation.
|
||||
|
||||
See:
|
||||
|
|
|
@ -1,19 +1,17 @@
|
|||
[book]
|
||||
authors = ["hinto-janai"]
|
||||
authors = ["Cuprate Contributors"]
|
||||
language = "en"
|
||||
multilingual = false
|
||||
src = "src"
|
||||
title = "Cuprate Architecture"
|
||||
git-repository-url = "https://github.com/Cuprate/architecture-book"
|
||||
|
||||
# TODO: fix after importing real files.
|
||||
#
|
||||
# [preprocessor.last-changed]
|
||||
# command = "mdbook-last-changed"
|
||||
# renderer = ["html"]
|
||||
#
|
||||
# [output.html]
|
||||
# default-theme = "ayu"
|
||||
# preferred-dark-theme = "ayu"
|
||||
# git-repository-url = "https://github.com/hinto-janai/cuprate-architecture"
|
||||
# additional-css = ["last-changed.css"]
|
||||
[preprocessor.last-changed]
|
||||
command = "mdbook-last-changed"
|
||||
renderer = ["html"]
|
||||
|
||||
[output.html]
|
||||
default-theme = "ayu"
|
||||
preferred-dark-theme = "ayu"
|
||||
git-repository-url = "https://github.com/Cuprate/architecture-book"
|
||||
additional-css = ["last-changed.css"]
|
||||
|
|
7
books/architecture/last-changed.css
Normal file
7
books/architecture/last-changed.css
Normal file
|
@ -0,0 +1,7 @@
|
|||
footer {
|
||||
font-size: 0.8em;
|
||||
text-align: center;
|
||||
border-top: 1px solid;
|
||||
margin-top: 4%;
|
||||
padding: 5px 0;
|
||||
}
|
|
@ -1,3 +1,165 @@
|
|||
# Summary
|
||||
|
||||
- [TODO](todo.md)
|
||||
[Cuprate Architecture](cuprate-architecture.md)
|
||||
[🟡 Foreword](foreword.md)
|
||||
|
||||
---
|
||||
|
||||
- [🟠 Intro](intro/intro.md)
|
||||
- [🟡 Who this book is for](intro/who-this-book-is-for.md)
|
||||
- [🔴 Required knowledge](intro/required-knowledge.md)
|
||||
- [🔴 How to use this book](intro/how-to-use-this-book.md)
|
||||
|
||||
---
|
||||
|
||||
- [⚪️ Bird's eye view](birds-eye-view/intro.md)
|
||||
- [⚪️ Map](birds-eye-view/map.md)
|
||||
- [⚪️ Components](birds-eye-view/components.md)
|
||||
|
||||
---
|
||||
|
||||
- [⚪️ Formats, protocols, types](formats-protocols-types/intro.md)
|
||||
- [⚪️ monero_serai](formats-protocols-types/monero-serai.md)
|
||||
- [⚪️ cuprate_types](formats-protocols-types/cuprate-types.md)
|
||||
- [⚪️ cuprate_helper](formats-protocols-types/cuprate-helper.md)
|
||||
- [⚪️ Epee](formats-protocols-types/epee.md)
|
||||
- [⚪️ Levin](formats-protocols-types/levin.md)
|
||||
|
||||
---
|
||||
|
||||
- [🟢 Storage](storage/intro.md)
|
||||
- [🟢 Database abstraction](storage/db/intro.md)
|
||||
- [🟢 Abstraction](storage/db/abstraction/intro.md)
|
||||
- [🟢 Backend](storage/db/abstraction/backend.md)
|
||||
- [🟢 ConcreteEnv](storage/db/abstraction/concrete_env.md)
|
||||
- [🟢 Trait](storage/db/abstraction/trait.md)
|
||||
- [🟢 Syncing](storage/db/syncing.md)
|
||||
- [🟢 Resizing](storage/db/resizing.md)
|
||||
- [🟢 (De)serialization](storage/db/serde.md)
|
||||
- [🟢 Known issues and tradeoffs](storage/db/issues/intro.md)
|
||||
- [🟢 Abstracting backends](storage/db/issues/traits.md)
|
||||
- [🟢 Hot-swap](storage/db/issues/hot-swap.md)
|
||||
- [🟢 Unaligned bytes](storage/db/issues/unaligned.md)
|
||||
- [🟢 Endianness](storage/db/issues/endian.md)
|
||||
- [🟢 Multimap](storage/db/issues/multimap.md)
|
||||
- [🟢 Common behavior](storage/common/intro.md)
|
||||
- [🟢 Types](storage/common/types.md)
|
||||
- [🟢 `ops`](storage/common/ops.md)
|
||||
- [🟢 `tower::Service`](storage/common/service/intro.md)
|
||||
- [🟢 Initialization](storage/common/service/initialization.md)
|
||||
- [🟢 Requests](storage/common/service/requests.md)
|
||||
- [🟢 Responses](storage/common/service/responses.md)
|
||||
- [🟢 Resizing](storage/common/service/resizing.md)
|
||||
- [🟢 Thread model](storage/common/service/thread-model.md)
|
||||
- [🟢 Shutdown](storage/common/service/shutdown.md)
|
||||
- [🟢 Blockchain](storage/blockchain/intro.md)
|
||||
- [🟢 Schema](storage/blockchain/schema/intro.md)
|
||||
- [🟢 Tables](storage/blockchain/schema/tables.md)
|
||||
- [🟢 Multimap tables](storage/blockchain/schema/multimap.md)
|
||||
- [⚪️ Transaction pool](storage/txpool/intro.md)
|
||||
- [⚪️ Pruning](storage/pruning/intro.md)
|
||||
|
||||
---
|
||||
|
||||
- [🟢 RPC](rpc/intro.md)
|
||||
- [🟡 JSON-RPC 2.0](rpc/json-rpc.md)
|
||||
- [🟢 The types](rpc/types/intro.md)
|
||||
- [🟢 Misc types](rpc/types/misc-types.md)
|
||||
- [🟢 Base RPC types](rpc/types/base-types.md)
|
||||
- [🟢 The type generator macro](rpc/types/macro.md)
|
||||
- [🟢 Metadata](rpc/types/metadata.md)
|
||||
- [🟡 (De)serialization](rpc/types/deserialization.md)
|
||||
- [🟢 The interface](rpc/interface.md)
|
||||
- [🔴 The handler](rpc/handler/intro.md)
|
||||
- [🔴 The server](rpc/server/intro.md)
|
||||
- [🟢 Differences with `monerod`](rpc/differences/intro.md)
|
||||
- [🟢 JSON field ordering](rpc/differences/json-field-ordering.md)
|
||||
- [🟢 JSON formatting](rpc/differences/json-formatting.md)
|
||||
- [🟢 JSON strictness](rpc/differences/json-strictness.md)
|
||||
- [🟡 JSON-RPC strictness](rpc/differences/json-rpc-strictness.md)
|
||||
- [🟡 HTTP methods](rpc/differences/http-methods.md)
|
||||
- [🟡 RPC payment](rpc/differences/rpc-payment.md)
|
||||
- [🟢 Custom strings](rpc/differences/custom-strings.md)
|
||||
- [🔴 Unsupported RPC calls](rpc/differences/unsupported-rpc-calls.md)
|
||||
- [🔴 RPC calls with different behavior](rpc/differences/rpc-calls-with-different-behavior.md)
|
||||
|
||||
---
|
||||
|
||||
- [⚪️ ZMQ](zmq/intro.md)
|
||||
- [⚪️ TODO](zmq/todo.md)
|
||||
|
||||
---
|
||||
|
||||
- [⚪️ Consensus](consensus/intro.md)
|
||||
- [⚪️ Verifier](consensus/verifier.md)
|
||||
- [⚪️ TODO](consensus/todo.md)
|
||||
|
||||
---
|
||||
|
||||
- [⚪️ Networking](networking/intro.md)
|
||||
- [⚪️ P2P](networking/p2p.md)
|
||||
- [⚪️ Dandelion++](networking/dandelion.md)
|
||||
- [⚪️ Proxy](networking/proxy.md)
|
||||
- [⚪️ Tor](networking/tor.md)
|
||||
- [⚪️ i2p](networking/i2p.md)
|
||||
- [⚪️ IPv4/IPv6](networking/ipv4-ipv6.md)
|
||||
|
||||
---
|
||||
|
||||
- [🔴 Instrumentation](instrumentation/intro.md)
|
||||
- [⚪️ Logging](instrumentation/logging.md)
|
||||
- [⚪️ Data collection](instrumentation/data-collection.md)
|
||||
|
||||
---
|
||||
|
||||
- [⚪️ Binary](binary/intro.md)
|
||||
- [⚪️ CLI](binary/cli.md)
|
||||
- [⚪️ Config](binary/config.md)
|
||||
- [⚪️ Logging](binary/logging.md)
|
||||
|
||||
---
|
||||
|
||||
- [⚪️ Resources](resources/intro.md)
|
||||
- [⚪️ File system](resources/fs/intro.md)
|
||||
- [🟡 Index of PATHs](resources/fs/paths.md)
|
||||
- [⚪️ Sockets](resources/sockets/index.md)
|
||||
- [🔴 Index of ports](resources/sockets/ports.md)
|
||||
- [⚪️ Memory](resources/memory.md)
|
||||
- [🟡 Concurrency and parallelism](resources/cap/intro.md)
|
||||
- [⚪️ Map](resources/cap/map.md)
|
||||
- [⚪️ The RPC server](resources/cap/the-rpc-server.md)
|
||||
- [⚪️ The database](resources/cap/the-database.md)
|
||||
- [⚪️ The block downloader](resources/cap/the-block-downloader.md)
|
||||
- [⚪️ The verifier](resources/cap/the-verifier.md)
|
||||
- [⚪️ Thread exit](resources/cap/thread-exit.md)
|
||||
- [🔴 Index of threads](resources/cap/threads.md)
|
||||
|
||||
---
|
||||
|
||||
- [⚪️ External Monero libraries](external-monero-libraries/intro.md)
|
||||
- [⚪️ Cryptonight](external-monero-libraries/cryptonight.md)
|
||||
- [🔴 RandomX](external-monero-libraries/randomx.md)
|
||||
- [🔴 monero_serai](external-monero-libraries/monero_serai.md)
|
||||
|
||||
---
|
||||
|
||||
- [⚪️ Benchmarking](benchmarking/intro.md)
|
||||
- [⚪️ Criterion](benchmarking/criterion.md)
|
||||
- [⚪️ Harness](benchmarking/harness.md)
|
||||
- [⚪️ Testing](testing/intro.md)
|
||||
- [⚪️ Monero data](testing/monero-data.md)
|
||||
- [⚪️ RPC client](testing/rpc-client.md)
|
||||
- [⚪️ Spawning `monerod`](testing/spawning-monerod.md)
|
||||
- [⚪️ Known issues and tradeoffs](known-issues-and-tradeoffs/intro.md)
|
||||
- [⚪️ Networking](known-issues-and-tradeoffs/networking.md)
|
||||
- [⚪️ RPC](known-issues-and-tradeoffs/rpc.md)
|
||||
- [⚪️ Storage](known-issues-and-tradeoffs/storage.md)
|
||||
|
||||
---
|
||||
|
||||
- [⚪️ Appendix](appendix/intro.md)
|
||||
- [🟢 Crates](appendix/crates.md)
|
||||
- [🔴 Contributing](appendix/contributing.md)
|
||||
- [🔴 Build targets](appendix/build-targets.md)
|
||||
- [🔴 Protocol book](appendix/protocol-book.md)
|
||||
- [⚪️ User book](appendix/user-book.md)
|
7
books/architecture/src/appendix/build-targets.md
Normal file
7
books/architecture/src/appendix/build-targets.md
Normal file
|
@ -0,0 +1,7 @@
|
|||
# Build targets
|
||||
- x86
|
||||
- ARM64
|
||||
- Windows
|
||||
- Linux
|
||||
- macOS
|
||||
- FreeBSD(?)
|
2
books/architecture/src/appendix/contributing.md
Normal file
2
books/architecture/src/appendix/contributing.md
Normal file
|
@ -0,0 +1,2 @@
|
|||
# Contributing
|
||||
<https://github.com/Cuprate/cuprate/blob/main/CONTRIBUTING.md>
|
64
books/architecture/src/appendix/crates.md
Normal file
64
books/architecture/src/appendix/crates.md
Normal file
|
@ -0,0 +1,64 @@
|
|||
# Crates
|
||||
This is an index of all of Cuprate's in-house crates it uses and maintains.
|
||||
|
||||
They are categorized into groups.
|
||||
|
||||
Crate documentation for each crate can be found by clicking the crate name or by visiting <https://doc.cuprate.org>. Documentation can also be built manually by running this at the root of the `cuprate` repository:
|
||||
```bash
|
||||
cargo doc --package $CRATE
|
||||
```
|
||||
For example, this will generate and open `cuprate-blockchain` documentation:
|
||||
```bash
|
||||
cargo doc --open --package cuprate-blockchain
|
||||
```
|
||||
|
||||
## Consensus
|
||||
| Crate | In-tree path | Purpose |
|
||||
|-------|--------------|---------|
|
||||
| [`cuprate-consensus`](https://doc.cuprate.org/cuprate_consensus) | [`consensus/`](https://github.com/Cuprate/cuprate/tree/main/consensus) | TODO
|
||||
| [`cuprate-consensus-context`](https://doc.cuprate.org/cuprate_consensus_context) | [`consensus/context/`](https://github.com/Cuprate/cuprate/tree/main/consensus/context) | TODO
|
||||
| [`cuprate-consensus-rules`](https://doc.cuprate.org/cuprate_consensus_rules) | [`consensus/rules/`](https://github.com/Cuprate/cuprate/tree/main/consensus/rules) | TODO
|
||||
| [`cuprate-fast-sync`](https://doc.cuprate.org/cuprate_fast_sync) | [`consensus/fast-sync/`](https://github.com/Cuprate/cuprate/tree/main/consensus/fast-sync) | Fast block synchronization
|
||||
|
||||
## Networking
|
||||
| Crate | In-tree path | Purpose |
|
||||
|-------|--------------|---------|
|
||||
| [`cuprate-epee-encoding`](https://doc.cuprate.org/cuprate_epee_encoding) | [`net/epee-encoding/`](https://github.com/Cuprate/cuprate/tree/main/net/epee-encoding) | Epee (de)serialization
|
||||
| [`cuprate-fixed-bytes`](https://doc.cuprate.org/cuprate_fixed_bytes) | [`net/fixed-bytes/`](https://github.com/Cuprate/cuprate/tree/main/net/fixed-bytes) | Fixed byte containers backed by `byte::Byte`
|
||||
| [`cuprate-levin`](https://doc.cuprate.org/cuprate_levin) | [`net/levin/`](https://github.com/Cuprate/cuprate/tree/main/net/levin) | Levin bucket protocol implementation
|
||||
| [`cuprate-wire`](https://doc.cuprate.org/cuprate_wire) | [`net/wire/`](https://github.com/Cuprate/cuprate/tree/main/net/wire) | TODO
|
||||
|
||||
## P2P
|
||||
| Crate | In-tree path | Purpose |
|
||||
|-------|--------------|---------|
|
||||
| [`cuprate-address-book`](https://doc.cuprate.org/cuprate_address_book) | [`p2p/address-book/`](https://github.com/Cuprate/cuprate/tree/main/p2p/address-book) | TODO
|
||||
| [`cuprate-async-buffer`](https://doc.cuprate.org/cuprate_async_buffer) | [`p2p/async-buffer/`](https://github.com/Cuprate/cuprate/tree/main/p2p/async-buffer) | A bounded SPSC, FIFO, asynchronous buffer that supports arbitrary weights for values
|
||||
| [`cuprate-dandelion-tower`](https://doc.cuprate.org/cuprate_dandelion_tower) | [`p2p/dandelion-tower/`](https://github.com/Cuprate/cuprate/tree/main/p2p/dandelion-tower) | TODO
|
||||
| [`cuprate-p2p`](https://doc.cuprate.org/cuprate_p2p) | [`p2p/p2p/`](https://github.com/Cuprate/cuprate/tree/main/p2p/p2p) | TODO
|
||||
| [`cuprate-p2p-core`](https://doc.cuprate.org/cuprate_p2p_core) | [`p2p/p2p-core/`](https://github.com/Cuprate/cuprate/tree/main/p2p/p2p-core) | TODO
|
||||
|
||||
## Storage
|
||||
| Crate | In-tree path | Purpose |
|
||||
|-------|--------------|---------|
|
||||
| [`cuprate-blockchain`](https://doc.cuprate.org/cuprate_blockchain) | [`storage/blockchain/`](https://github.com/Cuprate/cuprate/tree/main/storage/blockchain) | Blockchain database built on-top of `cuprate-database` & `cuprate-database-service`
|
||||
| [`cuprate-database`](https://doc.cuprate.org/cuprate_database) | [`storage/database/`](https://github.com/Cuprate/cuprate/tree/main/storage/database) | Pure database abstraction
|
||||
| [`cuprate-database-service`](https://doc.cuprate.org/cuprate_database_service) | [`storage/database-service/`](https://github.com/Cuprate/cuprate/tree/main/storage/database-service) | `tower::Service` + thread-pool abstraction built on-top of `cuprate-database`
|
||||
| [`cuprate-txpool`](https://doc.cuprate.org/cuprate_txpool) | [`storage/txpool/`](https://github.com/Cuprate/cuprate/tree/main/storage/txpool) | Transaction pool database built on-top of `cuprate-database` & `cuprate-database-service`
|
||||
|
||||
## RPC
|
||||
| Crate | In-tree path | Purpose |
|
||||
|-------|--------------|---------|
|
||||
| [`cuprate-json-rpc`](https://doc.cuprate.org/cuprate_json_rpc) | [`rpc/json-rpc/`](https://github.com/Cuprate/cuprate/tree/main/rpc/json-rpc) | JSON-RPC 2.0 implementation
|
||||
| [`cuprate-rpc-types`](https://doc.cuprate.org/cuprate_rpc_types) | [`rpc/types/`](https://github.com/Cuprate/cuprate/tree/main/rpc/types) | Monero RPC types and traits
|
||||
| [`cuprate-rpc-interface`](https://doc.cuprate.org/cuprate_rpc_interface) | [`rpc/interface/`](https://github.com/Cuprate/cuprate/tree/main/rpc/interface) | RPC interface & routing
|
||||
| [`cuprate-rpc-handler`](https://doc.cuprate.org/cuprate_rpc_handler) | [`rpc/handler/`](https://github.com/Cuprate/cuprate/tree/main/rpc/handler) | RPC inner handlers
|
||||
|
||||
## 1-off crates
|
||||
| Crate | In-tree path | Purpose |
|
||||
|-------|--------------|---------|
|
||||
| [`cuprate-constants`](https://doc.cuprate.org/cuprate_constants) | [`constants/`](https://github.com/Cuprate/cuprate/tree/main/constants) | Shared `const/static` data across Cuprate
|
||||
| [`cuprate-cryptonight`](https://doc.cuprate.org/cuprate_cryptonight) | [`cryptonight/`](https://github.com/Cuprate/cuprate/tree/main/cryptonight) | CryptoNight hash functions
|
||||
| [`cuprate-pruning`](https://doc.cuprate.org/cuprate_pruning) | [`pruning/`](https://github.com/Cuprate/cuprate/tree/main/pruning) | Monero pruning logic/types
|
||||
| [`cuprate-helper`](https://doc.cuprate.org/cuprate_helper) | [`helper/`](https://github.com/Cuprate/cuprate/tree/main/helper) | Kitchen-sink helper crate for Cuprate
|
||||
| [`cuprate-test-utils`](https://doc.cuprate.org/cuprate_test_utils) | [`test-utils/`](https://github.com/Cuprate/cuprate/tree/main/test-utils) | Testing utilities for Cuprate
|
||||
| [`cuprate-types`](https://doc.cuprate.org/cuprate_types) | [`types/`](https://github.com/Cuprate/cuprate/tree/main/types) | Shared types across Cuprate
|
1
books/architecture/src/appendix/intro.md
Normal file
1
books/architecture/src/appendix/intro.md
Normal file
|
@ -0,0 +1 @@
|
|||
# Appendix
|
2
books/architecture/src/appendix/protocol-book.md
Normal file
2
books/architecture/src/appendix/protocol-book.md
Normal file
|
@ -0,0 +1,2 @@
|
|||
# Protocol book
|
||||
<https://monero-book.cuprate.org>
|
1
books/architecture/src/appendix/user-book.md
Normal file
1
books/architecture/src/appendix/user-book.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ User book
|
1
books/architecture/src/benchmarking/criterion.md
Normal file
1
books/architecture/src/benchmarking/criterion.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Criterion
|
1
books/architecture/src/benchmarking/harness.md
Normal file
1
books/architecture/src/benchmarking/harness.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Harness
|
1
books/architecture/src/benchmarking/intro.md
Normal file
1
books/architecture/src/benchmarking/intro.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Benchmarking
|
1
books/architecture/src/binary/cli.md
Normal file
1
books/architecture/src/binary/cli.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ CLI
|
1
books/architecture/src/binary/config.md
Normal file
1
books/architecture/src/binary/config.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Config
|
1
books/architecture/src/binary/intro.md
Normal file
1
books/architecture/src/binary/intro.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Binary
|
1
books/architecture/src/binary/logging.md
Normal file
1
books/architecture/src/binary/logging.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Logging
|
1
books/architecture/src/birds-eye-view/components.md
Normal file
1
books/architecture/src/birds-eye-view/components.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Components
|
1
books/architecture/src/birds-eye-view/intro.md
Normal file
1
books/architecture/src/birds-eye-view/intro.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Bird's eye view
|
1
books/architecture/src/birds-eye-view/map.md
Normal file
1
books/architecture/src/birds-eye-view/map.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Map
|
1
books/architecture/src/consensus/intro.md
Normal file
1
books/architecture/src/consensus/intro.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Consensus
|
1
books/architecture/src/consensus/todo.md
Normal file
1
books/architecture/src/consensus/todo.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ TODO
|
1
books/architecture/src/consensus/verifier.md
Normal file
1
books/architecture/src/consensus/verifier.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Verifier
|
22
books/architecture/src/cuprate-architecture.md
Normal file
22
books/architecture/src/cuprate-architecture.md
Normal file
|
@ -0,0 +1,22 @@
|
|||
# Cuprate Architecture
|
||||
WIP
|
||||
|
||||
[Cuprate](https://github.com/Cuprate/cuprate)'s architecture book.
|
||||
|
||||
Sections are notated with colors indicating how complete they are:
|
||||
|
||||
| Color | Meaning |
|
||||
|-------|---------|
|
||||
| ⚪️ | Empty
|
||||
| 🔴 | Severely lacking information
|
||||
| 🟠 | Lacking some information
|
||||
| 🟡 | Almost ready
|
||||
| 🟢 | OK
|
||||
|
||||
---
|
||||
|
||||
Continue to the next chapter by clicking the right `>` button, or by selecting it on the left side.
|
||||
|
||||
All chapters are viewable by clicking the top-left `☰` button.
|
||||
|
||||
The entire book can searched by clicking the top-left 🔍 button.
|
|
@ -0,0 +1 @@
|
|||
# ⚪️ Cryptonight
|
|
@ -0,0 +1 @@
|
|||
# ⚪️ External Monero libraries
|
|
@ -0,0 +1,2 @@
|
|||
# monero_serai
|
||||
<https://github.com/serai-dex/serai/tree/develop/coins/monero>
|
|
@ -0,0 +1,2 @@
|
|||
# RandomX
|
||||
<https://github.com/tari-project/randomx-rs>
|
36
books/architecture/src/foreword.md
Normal file
36
books/architecture/src/foreword.md
Normal file
|
@ -0,0 +1,36 @@
|
|||
# Foreword
|
||||
Monero[^1] is a large software project, coming in at 329k lines of C++, C, headers, and make files.[^2] It is directly responsible for 2.6 billion dollars worth of value.[^3] It has had over 400 contributors, more if counting unnamed contributions.[^4] It has over 10,000 node operators and a large active userbase.[^5]
|
||||
|
||||
The project wasn't always this big, but somewhere in the midst of contributors coming and going, various features being added, bugs being fixed, and celebrated cryptography being implemented - there was an aspect that was lost by the project that it could not easily gain again: **maintainability**.
|
||||
|
||||
Within large and complicated software projects, there is an important transfer of knowledge that must occur for long-term survival. Much like an organism that must eventually pass the torch onto the next generation, projects must do the same for future contributors.
|
||||
|
||||
However, newcomers often lack experience, past contributors might not be around, and current maintainers may be too busy. For whatever reason, this transfer of knowledge is not always smooth.
|
||||
|
||||
There is a solution to this problem: **documentation**.
|
||||
|
||||
The activity of writing the what, where, why, and how of the solutions to technical problems can be done in an author's lonesome.
|
||||
|
||||
The activity of reading these ideas can be done by future readers at any time without permission.
|
||||
|
||||
These readers may be new prospective contributors, it may be the current maintainers, it may be researchers, it may be users of various scale. Whoever it may be, documentation acts as the link between the past and present; a bottle of wisdom thrown into the river of time for future participants to open.
|
||||
|
||||
This book is the manifestation of this will, for Cuprate[^6], an alternative Monero node. It documents Cuprate's implementation from head-to-toe such that in the case of a contributor's untimely disappearance, the project can continue.
|
||||
|
||||
People come and go, documentation is forever.
|
||||
|
||||
— hinto-janai
|
||||
|
||||
---
|
||||
|
||||
[^1]: [`monero-project/monero`](https://github.com/monero-project/monero)
|
||||
|
||||
[^2]: `git ls-files | grep "\.cpp$\|\.h$\|\.c$\|CMake" | xargs cat | wc -l` on [`cc73fe7`](https://github.com/monero-project/monero/tree/cc73fe71162d564ffda8e549b79a350bca53c454)
|
||||
|
||||
[^3]: 2024-05-24: $143.55 USD * 18,151,608 XMR = $2,605,663,258
|
||||
|
||||
[^4]: `git log --all --pretty="%an" | sort -u | wc -l` on [`cc73fe7`](https://github.com/monero-project/monero/tree/cc73fe71162d564ffda8e549b79a350bca53c454)
|
||||
|
||||
[^5]: <https://monero.fail/map>
|
||||
|
||||
[^6]: <https://github.com/Cuprate/cuprate>
|
|
@ -0,0 +1 @@
|
|||
# ⚪️ cuprate_helper
|
|
@ -0,0 +1 @@
|
|||
# ⚪️ cuprate_types
|
1
books/architecture/src/formats-protocols-types/epee.md
Normal file
1
books/architecture/src/formats-protocols-types/epee.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Epee
|
1
books/architecture/src/formats-protocols-types/intro.md
Normal file
1
books/architecture/src/formats-protocols-types/intro.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Formats, protocols, types
|
1
books/architecture/src/formats-protocols-types/levin.md
Normal file
1
books/architecture/src/formats-protocols-types/levin.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Levin
|
|
@ -0,0 +1 @@
|
|||
# ⚪️ monero_serai
|
|
@ -0,0 +1 @@
|
|||
# ⚪️ Data collection
|
2
books/architecture/src/instrumentation/intro.md
Normal file
2
books/architecture/src/instrumentation/intro.md
Normal file
|
@ -0,0 +1,2 @@
|
|||
# Instrumentation
|
||||
Cuprate is built with [instrumentation](https://en.wikipedia.org/wiki/Instrumentation) in mind.
|
1
books/architecture/src/instrumentation/logging.md
Normal file
1
books/architecture/src/instrumentation/logging.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Logging
|
5
books/architecture/src/intro/how-to-use-this-book.md
Normal file
5
books/architecture/src/intro/how-to-use-this-book.md
Normal file
|
@ -0,0 +1,5 @@
|
|||
# How to use this book
|
||||
|
||||
## Maintainers
|
||||
## Contributors
|
||||
## Researchers
|
15
books/architecture/src/intro/intro.md
Normal file
15
books/architecture/src/intro/intro.md
Normal file
|
@ -0,0 +1,15 @@
|
|||
# Intro
|
||||
[Cuprate](https://github.com/Cuprate/cuprate) is an alternative [Monero](https://getmonero.org) node implementation.
|
||||
|
||||
This book describes Cuprate's architecture, ranging from small things like database pruning to larger meta-components like the networking stack.
|
||||
|
||||
A brief overview of some aspects covered within this book:
|
||||
- Component designs
|
||||
- Implementation details
|
||||
- File location and purpose
|
||||
- Design decisions and tradeoffs
|
||||
- Things in relation to `monerod`
|
||||
- Dependency usage
|
||||
|
||||
## Source code
|
||||
The source files for this book can be found on at: <https://github.com/Cuprate/architecture-book>.
|
28
books/architecture/src/intro/required-knowledge.md
Normal file
28
books/architecture/src/intro/required-knowledge.md
Normal file
|
@ -0,0 +1,28 @@
|
|||
# Required knowledge
|
||||
|
||||
## General
|
||||
- Rust
|
||||
- Monero
|
||||
- System design
|
||||
|
||||
## Components
|
||||
### Storage
|
||||
- Embedded databases
|
||||
- LMDB
|
||||
- redb
|
||||
|
||||
### RPC
|
||||
- `axum`
|
||||
- `tower`
|
||||
- `async`
|
||||
- JSON-RPC 2.0
|
||||
- Epee
|
||||
|
||||
### Networking
|
||||
- `tower`
|
||||
- `tokio`
|
||||
- `async`
|
||||
- Levin
|
||||
|
||||
### Instrumentation
|
||||
- `tracing`
|
31
books/architecture/src/intro/who-this-book-is-for.md
Normal file
31
books/architecture/src/intro/who-this-book-is-for.md
Normal file
|
@ -0,0 +1,31 @@
|
|||
# Who this book is for
|
||||
|
||||
## Maintainers
|
||||
As mentioned in [`Foreword`](../foreword.md), the group of people that benefit from this book's value the most by far are the current and future Cuprate maintainers.
|
||||
|
||||
Cuprate's system design is documented in this book such that if you were ever to build it again from scratch, you would have an excellent guide on how to do such, and also where improvements could be made.
|
||||
|
||||
Practically, what that means for maintainers is that it acts as _the_ reference. During maintenance, it is quite valuable to have a book that contains condensed knowledge on the behavior of components, or how certain code works, or why it was built a certain way.
|
||||
|
||||
## Contributors
|
||||
Contributors also have access to the inner-workings of Cuprate via this book, which helps when making larger contributions.
|
||||
|
||||
Design decisions and implementation details notated in this book helps answer questions such as:
|
||||
- Why is it done this way?
|
||||
- Why can it _not_ be done this way?
|
||||
- Were other methods attempted?
|
||||
|
||||
Cuprate's testing and benchmarking suites, unknown to new contributors, are also documented within this book.
|
||||
|
||||
## Researchers
|
||||
This book contains the why, where, and how of the _implementation_ of formal research.
|
||||
|
||||
Although it is an informal specification, this book still acts as a more accessible overview of Cuprate compared to examining the codebase itself.
|
||||
|
||||
## Operators & users
|
||||
This book is not a practical guide for using Cuprate itself.
|
||||
|
||||
For configuration, data collection (also important for researchers), and other practical usage, see [Cuprate's user book](https://user.cuprate.org).
|
||||
|
||||
## Observers
|
||||
Anyone curious enough is free to learn the inner-workings of Cuprate via this book, and maybe even contribute someday.
|
|
@ -0,0 +1 @@
|
|||
# ⚪️ Known issues and tradeoffs
|
|
@ -0,0 +1 @@
|
|||
# ⚪️ Networking
|
1
books/architecture/src/known-issues-and-tradeoffs/rpc.md
Normal file
1
books/architecture/src/known-issues-and-tradeoffs/rpc.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ RPC
|
|
@ -0,0 +1 @@
|
|||
# ⚪️ Storage
|
1
books/architecture/src/networking/dandelion.md
Normal file
1
books/architecture/src/networking/dandelion.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Dandelion++
|
1
books/architecture/src/networking/i2p.md
Normal file
1
books/architecture/src/networking/i2p.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ i2p
|
1
books/architecture/src/networking/intro.md
Normal file
1
books/architecture/src/networking/intro.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Networking
|
1
books/architecture/src/networking/ipv4-ipv6.md
Normal file
1
books/architecture/src/networking/ipv4-ipv6.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ IPv4/IPv6
|
1
books/architecture/src/networking/p2p.md
Normal file
1
books/architecture/src/networking/p2p.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ P2P
|
1
books/architecture/src/networking/proxy.md
Normal file
1
books/architecture/src/networking/proxy.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Proxy
|
1
books/architecture/src/networking/tor.md
Normal file
1
books/architecture/src/networking/tor.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Tor
|
32
books/architecture/src/resources/cap/intro.md
Normal file
32
books/architecture/src/resources/cap/intro.md
Normal file
|
@ -0,0 +1,32 @@
|
|||
# Concurrency and parallelism
|
||||
It is incumbent upon software like Cuprate to take advantage of today's highly parallel hardware as much as practically possible.
|
||||
|
||||
With that said, programs must setup guardrails when operating in a concurrent and parallel manner, [for correctness and safety](https://en.wikipedia.org/wiki/Concurrency_(computer_science)).
|
||||
|
||||
There are "synchronization primitives" that help with this, common ones being:
|
||||
- [Locks](https://en.wikipedia.org/wiki/Lock_(computer_science))
|
||||
- [Channels](https://en.wikipedia.org/wiki/Channel_(programming))
|
||||
- [Atomics](https://en.wikipedia.org/wiki/Linearizability#Primitive_atomic_instructions)
|
||||
|
||||
These tools are relatively easy to use in isolation, but trickier to do so when considering the entire system. It is not uncommon for _the_ bottleneck to be the [poor orchastration](https://en.wikipedia.org/wiki/Starvation_(computer_science)) of these primitives.
|
||||
|
||||
## Analogy
|
||||
A common analogy for a parallel system is an intersection.
|
||||
|
||||
Like a parallel computer system, an intersection contains:
|
||||
1. **Parallelism:** multiple individual units that want to move around (cars, pedestrians, etc)
|
||||
1. **Synchronization primitives:** traffic lights, car lights, walk signals
|
||||
|
||||
In theory, the amount of "work" the units can do is only limited by the speed of the units themselves, but in practice, the slow cascading reaction speeds between all units, the frequent hiccups that can occur, and the synchronization primitives themselves become bottlenecks far before the maximum speed of any unit is reached.
|
||||
|
||||
A car that hogs the middle of the intersection on the wrong light is akin to a system thread holding onto a lock longer than it should be - it degrades total system output.
|
||||
|
||||
Unlike humans however, computer systems at least have the potential to move at lightning speeds, but only if the above synchronization primitives are used correctly.
|
||||
|
||||
## Goal
|
||||
To aid the long-term maintenance of highly concurrent and parallel code, this section documents:
|
||||
1. All system threads spawned and maintained
|
||||
1. All major sections where synchronization primitives are used
|
||||
1. The asynchronous behavior of some components
|
||||
|
||||
and how these compose together efficiently in Cuprate.
|
1
books/architecture/src/resources/cap/map.md
Normal file
1
books/architecture/src/resources/cap/map.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Map
|
|
@ -0,0 +1 @@
|
|||
# ⚪️ The block downloader
|
1
books/architecture/src/resources/cap/the-database.md
Normal file
1
books/architecture/src/resources/cap/the-database.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ The database
|
1
books/architecture/src/resources/cap/the-rpc-server.md
Normal file
1
books/architecture/src/resources/cap/the-rpc-server.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ The RPC server
|
1
books/architecture/src/resources/cap/the-verifier.md
Normal file
1
books/architecture/src/resources/cap/the-verifier.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ The verifier
|
1
books/architecture/src/resources/cap/thread-exit.md
Normal file
1
books/architecture/src/resources/cap/thread-exit.md
Normal file
|
@ -0,0 +1 @@
|
|||
# ⚪️ Thread exit
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue