mirror of
https://github.com/Cuprate/cuprate.git
synced 2025-03-12 09:29:11 +00:00
Merge branch 'block-downloader' into cuprated
This commit is contained in:
commit
d1288b141a
121 changed files with 7885 additions and 3472 deletions
4
.github/labeler.yml
vendored
4
.github/labeler.yml
vendored
|
@ -43,6 +43,10 @@ A-book-protocol:
|
|||
- changed-files:
|
||||
- any-glob-to-any-file: books/protocol/**
|
||||
|
||||
A-book-user:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: books/user/**
|
||||
|
||||
# Crate (sub-)directories.
|
||||
A-binaries:
|
||||
- changed-files:
|
||||
|
|
2
.github/workflows/audit.yml
vendored
2
.github/workflows/audit.yml
vendored
|
@ -18,7 +18,7 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Cache
|
||||
uses: actions/cache@v3.2.3
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo
|
||||
|
|
2
.github/workflows/ci.yml
vendored
2
.github/workflows/ci.yml
vendored
|
@ -83,7 +83,7 @@ jobs:
|
|||
components: clippy
|
||||
|
||||
- name: Cache
|
||||
uses: actions/cache@v3
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: target
|
||||
key: ${{ matrix.os }}
|
||||
|
|
2
.github/workflows/deny.yml
vendored
2
.github/workflows/deny.yml
vendored
|
@ -18,7 +18,7 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Cache
|
||||
uses: actions/cache@v3.2.3
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo
|
||||
|
|
1
.gitignore
vendored
1
.gitignore
vendored
|
@ -1,3 +1,4 @@
|
|||
target/
|
||||
.vscode
|
||||
monerod
|
||||
books/*/book
|
||||
|
|
|
@ -64,3 +64,8 @@ This section is primarily targeted at maintainers. Most contributors aren't able
|
|||
[I-]: https://github.com/Cuprate/cuprate/labels?q=I
|
||||
[O-]: https://github.com/Cuprate/cuprate/labels?q=O
|
||||
[P-]: https://github.com/Cuprate/cuprate/labels?q=P
|
||||
|
||||
## Books
|
||||
Cuprate has various documentation books whose source files live in [`books/`](https://github.com/Cuprate/cuprate/tree/main/books).
|
||||
|
||||
Please contribute if you found a mistake! The files are mostly [markdown](https://wikipedia.org/wiki/Markdown) files and can be easily edited. See the `books/` directory for more information.
|
||||
|
|
881
Cargo.lock
generated
881
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
12
Cargo.toml
12
Cargo.toml
|
@ -4,6 +4,7 @@ resolver = "2"
|
|||
members = [
|
||||
"binaries/cuprated",
|
||||
"consensus",
|
||||
"consensus/fast-sync",
|
||||
"consensus/rules",
|
||||
"cryptonight",
|
||||
"helper",
|
||||
|
@ -14,6 +15,7 @@ members = [
|
|||
"p2p/cuprate-p2p",
|
||||
"p2p/dandelion",
|
||||
"p2p/monero-p2p",
|
||||
"p2p/async-buffer",
|
||||
"p2p/address-book",
|
||||
"storage/cuprate-blockchain",
|
||||
"storage/cuprate-txpool",
|
||||
|
@ -21,6 +23,9 @@ members = [
|
|||
"pruning",
|
||||
"test-utils",
|
||||
"types",
|
||||
"rpc/json-rpc",
|
||||
"rpc/monero-rpc-types",
|
||||
"rpc/cuprate-rpc-interface",
|
||||
]
|
||||
|
||||
[profile.release]
|
||||
|
@ -53,15 +58,15 @@ chrono = { version = "0.4.31", default-features = false }
|
|||
crypto-bigint = { version = "0.5.5", default-features = false }
|
||||
crossbeam = { version = "0.8.4", default-features = false }
|
||||
curve25519-dalek = { version = "4.1.1", default-features = false }
|
||||
dalek-ff-group = { git = "https://github.com/Cuprate/serai.git", rev = "347d4cf", default-features = false }
|
||||
dalek-ff-group = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
|
||||
dashmap = { version = "5.5.3", default-features = false }
|
||||
dirs = { version = "5.0.1", default-features = false }
|
||||
futures = { version = "0.3.29", default-features = false }
|
||||
hex = { version = "0.4.3", default-features = false }
|
||||
hex-literal = { version = "0.4", default-features = false }
|
||||
indexmap = { version = "2.2.5", default-features = false }
|
||||
monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "347d4cf", default-features = false }
|
||||
multiexp = { git = "https://github.com/Cuprate/serai.git", rev = "347d4cf", default-features = false }
|
||||
monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
|
||||
multiexp = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
|
||||
paste = { version = "1.0.14", default-features = false }
|
||||
pin-project = { version = "1.1.3", default-features = false }
|
||||
randomx-rs = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "0028464", default-features = false }
|
||||
|
@ -86,7 +91,6 @@ pretty_assertions = { version = "1.4.0" }
|
|||
proptest = { version = "1" }
|
||||
proptest-derive = { version = "0.4.0" }
|
||||
|
||||
|
||||
## TODO:
|
||||
## Potential dependencies.
|
||||
# arc-swap = { version = "1.6.0" } # Atomically swappable Arc<T> | https://github.com/vorner/arc-swap
|
||||
|
|
|
@ -3,3 +3,6 @@ depending on the crate in question. Each crate declares their license in their
|
|||
`Cargo.toml`. Additionally, a full copy of both licenses are included in the
|
||||
root of this repository for reference. These copies should be provided with
|
||||
any distribution of a crate, as per the respective license's terms.
|
||||
|
||||
All documentation, including the books in the `books/` directory, is licensed
|
||||
under the MIT license.
|
|
@ -1 +1,29 @@
|
|||
# TODO
|
||||
## Books
|
||||
This directory contains the source files for Cuprate's various books.
|
||||
|
||||
The source files are edited here, and published in other repositories, see:
|
||||
- [Cuprate's architecture book](https://github.com/Cuprate/architecture-book)
|
||||
- [Cuprate's protocol book](https://github.com/Cuprate/monero-book)
|
||||
- [Cuprate's user book](https://github.com/Cuprate/user-book)
|
||||
|
||||
## Build tools
|
||||
Building the book(s) requires [Rust's cargo tool](https://doc.rust-lang.org/cargo/getting-started/installation.html) and [mdBook](https://github.com/rust-lang/mdBook).
|
||||
|
||||
After installing `cargo`, install `mdbook` with:
|
||||
```bash
|
||||
cargo install mdbook
|
||||
```
|
||||
|
||||
## Building
|
||||
To build a book, go into a book's directory and build:
|
||||
|
||||
```bash
|
||||
# This build Cuprate's user book.
|
||||
cd user/
|
||||
mdbook build
|
||||
```
|
||||
|
||||
The output will be in the `book` subdirectory (`user/book` for the above example). To open the book, you can open it in your web browser like so:
|
||||
```bash
|
||||
mdbook build --open
|
||||
```
|
||||
|
|
|
@ -1 +1,6 @@
|
|||
# TODO
|
||||
## Cuprate's architecture (implementation) book
|
||||
This book documents Cuprate's architecture and implementation.
|
||||
|
||||
See:
|
||||
- <https://architecture.cuprate.org>
|
||||
- <https://github.com/Cuprate/architecture-book>
|
||||
|
|
19
books/architecture/book.toml
Normal file
19
books/architecture/book.toml
Normal file
|
@ -0,0 +1,19 @@
|
|||
[book]
|
||||
authors = ["hinto-janai"]
|
||||
language = "en"
|
||||
multilingual = false
|
||||
src = "src"
|
||||
title = "Cuprate Architecture"
|
||||
git-repository-url = "https://github.com/Cuprate/architecture-book"
|
||||
|
||||
# TODO: fix after importing real files.
|
||||
#
|
||||
# [preprocessor.last-changed]
|
||||
# command = "mdbook-last-changed"
|
||||
# renderer = ["html"]
|
||||
#
|
||||
# [output.html]
|
||||
# default-theme = "ayu"
|
||||
# preferred-dark-theme = "ayu"
|
||||
# git-repository-url = "https://github.com/hinto-janai/cuprate-architecture"
|
||||
# additional-css = ["last-changed.css"]
|
3
books/architecture/src/SUMMARY.md
Normal file
3
books/architecture/src/SUMMARY.md
Normal file
|
@ -0,0 +1,3 @@
|
|||
# Summary
|
||||
|
||||
- [TODO](todo.md)
|
1
books/architecture/src/todo.md
Normal file
1
books/architecture/src/todo.md
Normal file
|
@ -0,0 +1 @@
|
|||
# TODO
|
|
@ -1 +1,6 @@
|
|||
# TODO
|
||||
## Cuprate's protocol book
|
||||
This book documents the Monero protocol.
|
||||
|
||||
See:
|
||||
- <https://monero-book.cuprate.org>
|
||||
- <https://github.com/Cuprate/monero-book>
|
||||
|
|
6
books/user/README.md
Normal file
6
books/user/README.md
Normal file
|
@ -0,0 +1,6 @@
|
|||
## Cuprate's user book
|
||||
This book is the end-user documentation for Cuprate, aka, "how to use `cuprated`".
|
||||
|
||||
See:
|
||||
- <https://user.cuprate.org>
|
||||
- <https://github.com/Cuprate/user-book>
|
19
books/user/book.toml
Normal file
19
books/user/book.toml
Normal file
|
@ -0,0 +1,19 @@
|
|||
[book]
|
||||
authors = ["hinto-janai"]
|
||||
language = "en"
|
||||
multilingual = false
|
||||
src = "src"
|
||||
title = "Cuprate's user book"
|
||||
git-repository-url = "https://github.com/Cuprate/user-book"
|
||||
|
||||
# TODO: fix after importing real files.
|
||||
#
|
||||
# [preprocessor.last-changed]
|
||||
# command = "mdbook-last-changed"
|
||||
# renderer = ["html"]
|
||||
#
|
||||
# [output.html]
|
||||
# default-theme = "ayu"
|
||||
# preferred-dark-theme = "ayu"
|
||||
# git-repository-url = "https://github.com/hinto-janai/cuprate-user"
|
||||
# additional-css = ["last-changed.css"]
|
3
books/user/src/SUMMARY.md
Normal file
3
books/user/src/SUMMARY.md
Normal file
|
@ -0,0 +1,3 @@
|
|||
# Summary
|
||||
|
||||
- [TODO](todo.md)
|
1
books/user/src/todo.md
Normal file
1
books/user/src/todo.md
Normal file
|
@ -0,0 +1 @@
|
|||
# TODO
|
|
@ -7,34 +7,15 @@ license = "MIT"
|
|||
authors = ["Boog900"]
|
||||
repository = "https://github.com/Cuprate/cuprate/tree/main/consensus"
|
||||
|
||||
[features]
|
||||
default = []
|
||||
binaries = [
|
||||
"tokio/rt-multi-thread",
|
||||
"tokio/macros",
|
||||
"tower/retry",
|
||||
"tower/balance",
|
||||
"tower/buffer",
|
||||
"tower/timeout",
|
||||
"monero-serai/http-rpc",
|
||||
"dep:tracing-subscriber",
|
||||
"dep:serde_json",
|
||||
"dep:serde",
|
||||
"dep:epee-encoding",
|
||||
"dep:monero-wire",
|
||||
"dep:borsh",
|
||||
"dep:dirs",
|
||||
"dep:clap"
|
||||
]
|
||||
|
||||
[dependencies]
|
||||
cuprate-helper = { path = "../helper", default-features = false, features = ["std", "asynch", "num"] }
|
||||
monero-consensus = {path = "./rules", features = ["rayon"]}
|
||||
cuprate-consensus-rules = { path = "./rules", features = ["rayon"] }
|
||||
cuprate-types = { path = "../types" }
|
||||
|
||||
thiserror = { workspace = true }
|
||||
tower = { workspace = true, features = ["util"] }
|
||||
tracing = { workspace = true, features = ["std", "attributes"] }
|
||||
futures = { workspace = true, features = ["std"] }
|
||||
futures = { workspace = true, features = ["std", "async-await"] }
|
||||
|
||||
randomx-rs = { workspace = true }
|
||||
monero-serai = { workspace = true, features = ["std"] }
|
||||
|
@ -47,23 +28,13 @@ thread_local = { workspace = true }
|
|||
tokio = { workspace = true, features = ["rt"] }
|
||||
tokio-util = { workspace = true }
|
||||
|
||||
hex = "0.4"
|
||||
|
||||
# used in binaries
|
||||
monero-wire = {path="../net/monero-wire", optional = true}
|
||||
epee-encoding = { path="../net/epee-encoding" , optional = true}
|
||||
serde_json = {version = "1", optional = true}
|
||||
serde = {version = "1", optional = true, features = ["derive"]}
|
||||
tracing-subscriber = {version = "0.3", optional = true}
|
||||
borsh = { workspace = true, optional = true}
|
||||
dirs = {version="5.0", optional = true}
|
||||
clap = { version = "4.4.8", optional = true, features = ["derive"] }
|
||||
# here to help cargo to pick a version - remove me
|
||||
syn = "2.0.37"
|
||||
|
||||
hex = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
monero-consensus = {path = "./rules", features = ["proptest"]}
|
||||
cuprate-test-utils = { path = "../test-utils" }
|
||||
cuprate-consensus-rules = {path = "./rules", features = ["proptest"]}
|
||||
|
||||
hex-literal = { workspace = true }
|
||||
|
||||
tokio = { workspace = true, features = ["rt-multi-thread", "macros"]}
|
||||
proptest = { workspace = true }
|
||||
|
|
|
@ -1,37 +1,14 @@
|
|||
# Consensus Rules
|
||||
|
||||
This folder contains 2 crates: `monero-consensus` (rules) and `cuprate-consensus`. `monero-consensus` contains the raw-rules
|
||||
and is built to be a more flexible library which requires the user to give the correct data and do minimal calculations, `cuprate-consensus`
|
||||
on the other hand contains multiple tower::Services that handle tx/ block verification as a whole with a `context` service that
|
||||
keeps track of blockchain state. `cuprate-consensus` uses `monero-consensus` internally.
|
||||
This folder contains 2 crates:
|
||||
- `cuprate-consensus-rules` (`rules/` directory)
|
||||
- `cuprate-consensus`
|
||||
|
||||
If you are looking to use monero consensus rules it's recommended you try to integrate `cuprate-consensus` and fall back to
|
||||
`monero-consensus` if you need more flexibility.
|
||||
`cuprate-consensus-rules` contains the raw-rules and is built to be a more flexible library which requires the user
|
||||
to give the correct data and do minimal calculations.
|
||||
|
||||
## scan_chain
|
||||
`cuprate-consensus` on the other hand contains multiple `tower::Service`s that handle transaction/block verification as a
|
||||
whole with a `context` service that keeps track of blockchain state. `cuprate-consensus` uses `cuprate-consensus-rules` internally.
|
||||
|
||||
`cuprate-consensus` contains a binary,`scan_chain`, which uses multiple RPC connections to scan the blockchain and verify it against the
|
||||
consensus rules. It keeps track of minimal data and uses the RPC connection to get blocks/transactions/outputs.
|
||||
|
||||
`scan_chain` was not built for wide usage, so you may find issues, if you do, open an issue in Cuprates issue tracker and or join our matrix
|
||||
room for help. `scan_chain` has only been verified on `x86_64-unknown-linux-gnu`.
|
||||
|
||||
`scan_chain` will take at least a day for stagenet and testnet and 6 for mainnet but expect it to be longer. If you are just looking to verify
|
||||
previous transactions it may be worth using `monerod` with `--fast-block-sync 0` this will probably be faster to complete and you will have a
|
||||
usable node at the end!
|
||||
|
||||
### How to run
|
||||
|
||||
First you will need to install Rust/Cargo: https://www.rust-lang.org/tools/install
|
||||
|
||||
Next you need to clone Cuprates git repo, enter the root of Cuprate, then run:
|
||||
|
||||
```
|
||||
cargo run --bin scan_chain -r
|
||||
```
|
||||
|
||||
If you want to pass in options you need to add `--` then the option(s), so to list the options do:
|
||||
|
||||
```
|
||||
cargo run --bin scan_chain -r -- --help
|
||||
```
|
||||
If you are looking to use Monero consensus rules it's recommended you try to integrate `cuprate-consensus` and fall back
|
||||
to `cuprate-consensus-rules` if you need more flexibility.
|
||||
|
|
23
consensus/fast-sync/Cargo.toml
Normal file
23
consensus/fast-sync/Cargo.toml
Normal file
|
@ -0,0 +1,23 @@
|
|||
[package]
|
||||
name = "cuprate-fast-sync"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
license = "MIT"
|
||||
|
||||
[[bin]]
|
||||
name = "cuprate-fast-sync-create-hashes"
|
||||
path = "src/create.rs"
|
||||
|
||||
[dependencies]
|
||||
clap = { workspace = true, features = ["derive", "std"] }
|
||||
cuprate-blockchain = { path = "../../storage/cuprate-blockchain" }
|
||||
cuprate-types = { path = "../../types" }
|
||||
hex.workspace = true
|
||||
hex-literal.workspace = true
|
||||
rayon.workspace = true
|
||||
sha3 = "0.10.8"
|
||||
tokio = { workspace = true, features = ["full"] }
|
||||
tower.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
tokio-test = "0.4.4"
|
87
consensus/fast-sync/src/create.rs
Normal file
87
consensus/fast-sync/src/create.rs
Normal file
|
@ -0,0 +1,87 @@
|
|||
use std::{fmt::Write, fs::write};
|
||||
|
||||
use clap::Parser;
|
||||
use tower::{Service, ServiceExt};
|
||||
|
||||
use cuprate_blockchain::{config::ConfigBuilder, service::DatabaseReadHandle, RuntimeError};
|
||||
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
|
||||
|
||||
use cuprate_fast_sync::{hash_of_hashes, BlockId, HashOfHashes};
|
||||
|
||||
const BATCH_SIZE: u64 = 512;
|
||||
|
||||
async fn read_batch(
|
||||
handle: &mut DatabaseReadHandle,
|
||||
height_from: u64,
|
||||
) -> Result<Vec<BlockId>, RuntimeError> {
|
||||
let mut block_ids = Vec::<BlockId>::with_capacity(BATCH_SIZE as usize);
|
||||
|
||||
for height in height_from..(height_from + BATCH_SIZE) {
|
||||
let request = BCReadRequest::BlockHash(height);
|
||||
let response_channel = handle.ready().await?.call(request);
|
||||
let response = response_channel.await?;
|
||||
|
||||
match response {
|
||||
BCResponse::BlockHash(block_id) => block_ids.push(block_id),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(block_ids)
|
||||
}
|
||||
|
||||
fn generate_hex(hashes: &[HashOfHashes]) -> String {
|
||||
let mut s = String::new();
|
||||
|
||||
writeln!(&mut s, "[").unwrap();
|
||||
|
||||
for hash in hashes {
|
||||
writeln!(&mut s, "\thex!(\"{}\"),", hex::encode(hash)).unwrap();
|
||||
}
|
||||
|
||||
writeln!(&mut s, "]").unwrap();
|
||||
|
||||
s
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(version, about, long_about = None)]
|
||||
struct Args {
|
||||
#[arg(short, long)]
|
||||
height: u64,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let args = Args::parse();
|
||||
let height_target = args.height;
|
||||
|
||||
let config = ConfigBuilder::new().build();
|
||||
|
||||
let (mut read_handle, _) = cuprate_blockchain::service::init(config).unwrap();
|
||||
|
||||
let mut hashes_of_hashes = Vec::new();
|
||||
|
||||
let mut height = 0u64;
|
||||
|
||||
while height < height_target {
|
||||
match read_batch(&mut read_handle, height).await {
|
||||
Ok(block_ids) => {
|
||||
let hash = hash_of_hashes(block_ids.as_slice());
|
||||
hashes_of_hashes.push(hash);
|
||||
}
|
||||
Err(_) => {
|
||||
println!("Failed to read next batch from database");
|
||||
break;
|
||||
}
|
||||
}
|
||||
height += BATCH_SIZE;
|
||||
}
|
||||
|
||||
drop(read_handle);
|
||||
|
||||
let generated = generate_hex(&hashes_of_hashes);
|
||||
write("src/data/hashes_of_hashes", generated).expect("Could not write file");
|
||||
|
||||
println!("Generated hashes up to block height {}", height);
|
||||
}
|
12
consensus/fast-sync/src/data/hashes_of_hashes
Normal file
12
consensus/fast-sync/src/data/hashes_of_hashes
Normal file
|
@ -0,0 +1,12 @@
|
|||
[
|
||||
hex!("1adffbaf832784406018009e07d3dc3a39da7edb6632523c119ed8acb32eb934"),
|
||||
hex!("ae960265e3398d04f3cd4f949ed13c2689424887c71c1441a03d900a9d3a777f"),
|
||||
hex!("938c72d267bbd3a17cdecbe02443d00012ee62d6e9f3524f5a914192110b1798"),
|
||||
hex!("de0c82e51549b6514b42a591fd5440dddb5cc0118ec461459a99017bf06a0a0a"),
|
||||
hex!("9a50f4586ec7e0fb58c6383048d3b334180235fd34bb714af20f1a3ebce4c911"),
|
||||
hex!("5a3942f9bb318d65997bf57c40e045d62e7edbe35f3dae57499c2c5554896543"),
|
||||
hex!("9dccee3b094cdd1b98e357c2c81bfcea798ea75efd94e67c6f5e86f428c5ec2c"),
|
||||
hex!("620397540d44f21c3c57c20e9d47c6aaf0b1bf4302a4d43e75f2e33edd1a4032"),
|
||||
hex!("ef6c612fb17bd70ac2ac69b2f85a421b138cc3a81daf622b077cb402dbf68377"),
|
||||
hex!("6815ecb2bd73a3ba5f20558bfe1b714c30d6892b290e0d6f6cbf18237cedf75a"),
|
||||
]
|
216
consensus/fast-sync/src/fast_sync.rs
Normal file
216
consensus/fast-sync/src/fast_sync.rs
Normal file
|
@ -0,0 +1,216 @@
|
|||
use std::{
|
||||
cmp,
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
#[allow(unused_imports)]
|
||||
use hex_literal::hex;
|
||||
use tower::Service;
|
||||
|
||||
use crate::{hash_of_hashes, BlockId, HashOfHashes};
|
||||
#[cfg(not(test))]
|
||||
static HASHES_OF_HASHES: &[HashOfHashes] = &include!("./data/hashes_of_hashes");
|
||||
|
||||
#[cfg(not(test))]
|
||||
const BATCH_SIZE: usize = 512;
|
||||
|
||||
#[cfg(test)]
|
||||
static HASHES_OF_HASHES: &[HashOfHashes] = &[
|
||||
hex!("3fdc9032c16d440f6c96be209c36d3d0e1aed61a2531490fe0ca475eb615c40a"),
|
||||
hex!("0102030405060708010203040506070801020304050607080102030405060708"),
|
||||
hex!("0102030405060708010203040506070801020304050607080102030405060708"),
|
||||
];
|
||||
|
||||
#[cfg(test)]
|
||||
const BATCH_SIZE: usize = 4;
|
||||
|
||||
#[inline]
|
||||
fn max_height() -> u64 {
|
||||
(HASHES_OF_HASHES.len() * BATCH_SIZE) as u64
|
||||
}
|
||||
|
||||
pub enum FastSyncRequest {
|
||||
ValidateHashes {
|
||||
start_height: u64,
|
||||
block_ids: Vec<BlockId>,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct ValidBlockId(BlockId);
|
||||
|
||||
fn valid_block_ids(block_ids: &[BlockId]) -> Vec<ValidBlockId> {
|
||||
block_ids.iter().map(|b| ValidBlockId(*b)).collect()
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum FastSyncResponse {
|
||||
ValidateHashes {
|
||||
validated_hashes: Vec<ValidBlockId>,
|
||||
unknown_hashes: Vec<BlockId>,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum FastSyncError {
|
||||
InvalidStartHeight, // start_height not a multiple of BATCH_SIZE
|
||||
Mismatch, // hash does not match
|
||||
NothingToDo, // no complete batch to check
|
||||
OutOfRange, // start_height too high
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub struct FastSyncService<C> {
|
||||
context_svc: C,
|
||||
}
|
||||
|
||||
impl<C> FastSyncService<C>
|
||||
where
|
||||
C: Service<FastSyncRequest, Response = FastSyncResponse, Error = FastSyncError>
|
||||
+ Clone
|
||||
+ Send
|
||||
+ 'static,
|
||||
{
|
||||
#[allow(dead_code)]
|
||||
pub(crate) fn new(context_svc: C) -> FastSyncService<C> {
|
||||
FastSyncService { context_svc }
|
||||
}
|
||||
}
|
||||
|
||||
impl<C> Service<FastSyncRequest> for FastSyncService<C>
|
||||
where
|
||||
C: Service<FastSyncRequest, Response = FastSyncResponse, Error = FastSyncError>
|
||||
+ Clone
|
||||
+ Send
|
||||
+ 'static,
|
||||
C::Future: Send + 'static,
|
||||
{
|
||||
type Response = FastSyncResponse;
|
||||
type Error = FastSyncError;
|
||||
type Future =
|
||||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: FastSyncRequest) -> Self::Future {
|
||||
Box::pin(async move {
|
||||
match req {
|
||||
FastSyncRequest::ValidateHashes {
|
||||
start_height,
|
||||
block_ids,
|
||||
} => validate_hashes(start_height, &block_ids).await,
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
async fn validate_hashes(
|
||||
start_height: u64,
|
||||
block_ids: &[BlockId],
|
||||
) -> Result<FastSyncResponse, FastSyncError> {
|
||||
if start_height as usize % BATCH_SIZE != 0 {
|
||||
return Err(FastSyncError::InvalidStartHeight);
|
||||
}
|
||||
|
||||
if start_height >= max_height() {
|
||||
return Err(FastSyncError::OutOfRange);
|
||||
}
|
||||
|
||||
let stop_height = start_height as usize + block_ids.len();
|
||||
|
||||
let batch_from = start_height as usize / BATCH_SIZE;
|
||||
let batch_to = cmp::min(stop_height / BATCH_SIZE, HASHES_OF_HASHES.len());
|
||||
let n_batches = batch_to - batch_from;
|
||||
|
||||
if n_batches == 0 {
|
||||
return Err(FastSyncError::NothingToDo);
|
||||
}
|
||||
|
||||
for i in 0..n_batches {
|
||||
let batch = &block_ids[BATCH_SIZE * i..BATCH_SIZE * (i + 1)];
|
||||
let actual = hash_of_hashes(batch);
|
||||
let expected = HASHES_OF_HASHES[batch_from + i];
|
||||
|
||||
if expected != actual {
|
||||
return Err(FastSyncError::Mismatch);
|
||||
}
|
||||
}
|
||||
|
||||
let validated_hashes = valid_block_ids(&block_ids[..n_batches * BATCH_SIZE]);
|
||||
let unknown_hashes = block_ids[n_batches * BATCH_SIZE..].to_vec();
|
||||
|
||||
Ok(FastSyncResponse::ValidateHashes {
|
||||
validated_hashes,
|
||||
unknown_hashes,
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tokio_test::block_on;
|
||||
|
||||
#[test]
|
||||
fn test_validate_hashes_errors() {
|
||||
let ids = [[1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32], [5u8; 32]];
|
||||
assert_eq!(
|
||||
block_on(validate_hashes(3, &[])),
|
||||
Err(FastSyncError::InvalidStartHeight)
|
||||
);
|
||||
assert_eq!(
|
||||
block_on(validate_hashes(3, &ids)),
|
||||
Err(FastSyncError::InvalidStartHeight)
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
block_on(validate_hashes(20, &[])),
|
||||
Err(FastSyncError::OutOfRange)
|
||||
);
|
||||
assert_eq!(
|
||||
block_on(validate_hashes(20, &ids)),
|
||||
Err(FastSyncError::OutOfRange)
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
block_on(validate_hashes(4, &[])),
|
||||
Err(FastSyncError::NothingToDo)
|
||||
);
|
||||
assert_eq!(
|
||||
block_on(validate_hashes(4, &ids[..3])),
|
||||
Err(FastSyncError::NothingToDo)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_validate_hashes_success() {
|
||||
let ids = [[1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32], [5u8; 32]];
|
||||
let validated_hashes = valid_block_ids(&ids[0..4]);
|
||||
let unknown_hashes = ids[4..].to_vec();
|
||||
assert_eq!(
|
||||
block_on(validate_hashes(0, &ids)),
|
||||
Ok(FastSyncResponse::ValidateHashes {
|
||||
validated_hashes,
|
||||
unknown_hashes
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_validate_hashes_mismatch() {
|
||||
let ids = [
|
||||
[1u8; 32], [2u8; 32], [3u8; 32], [5u8; 32], [1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32],
|
||||
];
|
||||
assert_eq!(
|
||||
block_on(validate_hashes(0, &ids)),
|
||||
Err(FastSyncError::Mismatch)
|
||||
);
|
||||
assert_eq!(
|
||||
block_on(validate_hashes(4, &ids)),
|
||||
Err(FastSyncError::Mismatch)
|
||||
);
|
||||
}
|
||||
}
|
4
consensus/fast-sync/src/lib.rs
Normal file
4
consensus/fast-sync/src/lib.rs
Normal file
|
@ -0,0 +1,4 @@
|
|||
pub mod fast_sync;
|
||||
pub mod util;
|
||||
|
||||
pub use util::{hash_of_hashes, BlockId, HashOfHashes};
|
8
consensus/fast-sync/src/util.rs
Normal file
8
consensus/fast-sync/src/util.rs
Normal file
|
@ -0,0 +1,8 @@
|
|||
use sha3::{Digest, Keccak256};
|
||||
|
||||
pub type BlockId = [u8; 32];
|
||||
pub type HashOfHashes = [u8; 32];
|
||||
|
||||
pub fn hash_of_hashes(hashes: &[BlockId]) -> HashOfHashes {
|
||||
Keccak256::digest(hashes.concat().as_slice()).into()
|
||||
}
|
|
@ -1,5 +1,5 @@
|
|||
[package]
|
||||
name = "monero-consensus"
|
||||
name = "cuprate-consensus-rules"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
license = "MIT"
|
||||
|
|
29
consensus/rules/src/batch_verifier.rs
Normal file
29
consensus/rules/src/batch_verifier.rs
Normal file
|
@ -0,0 +1,29 @@
|
|||
use multiexp::BatchVerifier as InternalBatchVerifier;
|
||||
|
||||
/// This trait represents a batch verifier.
|
||||
///
|
||||
/// A batch verifier is used to speed up verification by verifying multiple transactions together.
|
||||
///
|
||||
/// Not all proofs can be batched and at its core it's intended to verify a series of statements are
|
||||
/// each equivalent to zero.
|
||||
pub trait BatchVerifier {
|
||||
/// Queue a statement for batch verification.
|
||||
///
|
||||
/// # Panics
|
||||
/// This function may panic if `stmt` contains calls to `rayon`'s parallel iterators, e.g. `par_iter()`.
|
||||
// TODO: remove the panics by adding a generic API upstream.
|
||||
fn queue_statement<R>(
|
||||
&mut self,
|
||||
stmt: impl FnOnce(&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>) -> R,
|
||||
) -> R;
|
||||
}
|
||||
|
||||
// impl this for a single threaded batch verifier.
|
||||
impl BatchVerifier for &'_ mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint> {
|
||||
fn queue_statement<R>(
|
||||
&mut self,
|
||||
stmt: impl FnOnce(&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>) -> R,
|
||||
) -> R {
|
||||
stmt(self)
|
||||
}
|
||||
}
|
|
@ -1,3 +1,5 @@
|
|||
use std::collections::HashSet;
|
||||
|
||||
use crypto_bigint::{CheckedMul, U256};
|
||||
use monero_serai::block::Block;
|
||||
|
||||
|
@ -196,12 +198,13 @@ fn check_timestamp(block: &Block, median_timestamp: u64) -> Result<(), BlockErro
|
|||
///
|
||||
/// ref: <https://monero-book.cuprate.org/consensus_rules/blocks.html#no-duplicate-transactions>
|
||||
fn check_txs_unique(txs: &[[u8; 32]]) -> Result<(), BlockError> {
|
||||
txs.windows(2).try_for_each(|window| {
|
||||
if window[0] == window[1] {
|
||||
Err(BlockError::DuplicateTransaction)?;
|
||||
}
|
||||
let set = txs.iter().collect::<HashSet<_>>();
|
||||
|
||||
if set.len() == txs.len() {
|
||||
Ok(())
|
||||
})
|
||||
} else {
|
||||
Err(BlockError::DuplicateTransaction)
|
||||
}
|
||||
}
|
||||
|
||||
/// This struct contains the data needed to verify a block, implementers MUST make sure
|
||||
|
@ -275,3 +278,28 @@ pub fn check_block(
|
|||
|
||||
Ok((vote, generated_coins))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use proptest::{collection::vec, prelude::*};
|
||||
|
||||
use super::*;
|
||||
|
||||
proptest! {
|
||||
#[test]
|
||||
fn test_check_unique_txs(
|
||||
mut txs in vec(any::<[u8; 32]>(), 2..3000),
|
||||
duplicate in any::<[u8; 32]>(),
|
||||
dup_idx_1 in any::<usize>(),
|
||||
dup_idx_2 in any::<usize>(),
|
||||
) {
|
||||
|
||||
prop_assert!(check_txs_unique(&txs).is_ok());
|
||||
|
||||
txs.insert(dup_idx_1 % txs.len(), duplicate);
|
||||
txs.insert(dup_idx_2 % txs.len(), duplicate);
|
||||
|
||||
prop_assert!(check_txs_unique(&txs).is_err());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -163,6 +163,7 @@ impl HardFork {
|
|||
/// Returns the hard-fork for a blocks `major_version` field.
|
||||
///
|
||||
/// <https://monero-book.cuprate.org/consensus_rules/hardforks.html#blocks-version-and-vote>
|
||||
#[inline]
|
||||
pub fn from_version(version: u8) -> Result<HardFork, HardForkError> {
|
||||
Ok(match version {
|
||||
1 => HardFork::V1,
|
||||
|
@ -188,6 +189,7 @@ impl HardFork {
|
|||
/// Returns the hard-fork for a blocks `minor_version` (vote) field.
|
||||
///
|
||||
/// <https://monero-book.cuprate.org/consensus_rules/hardforks.html#blocks-version-and-vote>
|
||||
#[inline]
|
||||
pub fn from_vote(vote: u8) -> HardFork {
|
||||
if vote == 0 {
|
||||
// A vote of 0 is interpreted as 1 as that's what Monero used to default to.
|
||||
|
@ -197,6 +199,7 @@ impl HardFork {
|
|||
Self::from_version(vote).unwrap_or(HardFork::V16)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn from_block_header(header: &BlockHeader) -> Result<(HardFork, HardFork), HardForkError> {
|
||||
Ok((
|
||||
HardFork::from_version(header.major_version)?,
|
||||
|
|
|
@ -6,6 +6,12 @@ use crate::hard_forks::{HFVotes, HardFork, NUMB_OF_HARD_FORKS};
|
|||
|
||||
const TEST_WINDOW_SIZE: u64 = 25;
|
||||
|
||||
#[test]
|
||||
fn target_block_time() {
|
||||
assert_eq!(HardFork::V1.block_time().as_secs(), 60);
|
||||
assert_eq!(HardFork::V2.block_time().as_secs(), 120);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn next_hard_forks() {
|
||||
let mut prev = HardFork::V1;
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
pub mod batch_verifier;
|
||||
pub mod blocks;
|
||||
mod decomposed_amount;
|
||||
pub mod genesis;
|
||||
|
|
|
@ -207,3 +207,17 @@ pub fn check_miner_tx(
|
|||
|
||||
check_total_output_amt(total_outs, reward, total_fees, hf)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use proptest::prelude::*;
|
||||
|
||||
use super::*;
|
||||
|
||||
proptest! {
|
||||
#[test]
|
||||
fn tail_emission(generated_coins in any::<u64>(), hf in any::<HardFork>()) {
|
||||
prop_assert!(calculate_base_reward(generated_coins, &hf) >= MINIMUM_REWARD_PER_MIN * hf.block_time().as_secs() / 60)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,16 +1,19 @@
|
|||
use std::cmp::Ordering;
|
||||
|
||||
use monero_serai::ringct::RctType;
|
||||
use std::{cmp::Ordering, collections::HashSet, sync::Arc};
|
||||
|
||||
use monero_serai::transaction::{Input, Output, Timelock, Transaction};
|
||||
use multiexp::BatchVerifier;
|
||||
|
||||
use crate::{
|
||||
blocks::penalty_free_zone, check_point_canonically_encoded, is_decomposed_amount, HardFork,
|
||||
batch_verifier::BatchVerifier, blocks::penalty_free_zone, check_point_canonically_encoded,
|
||||
is_decomposed_amount, HardFork,
|
||||
};
|
||||
|
||||
mod contextual_data;
|
||||
mod ring_ct;
|
||||
mod ring_signatures;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
pub use contextual_data::*;
|
||||
pub use ring_ct::RingCTError;
|
||||
|
@ -57,7 +60,7 @@ pub enum TransactionError {
|
|||
#[error("The transaction inputs are not ordered.")]
|
||||
InputsAreNotOrdered,
|
||||
#[error("The transaction spends a decoy which is too young.")]
|
||||
OneOrMoreDecoysLocked,
|
||||
OneOrMoreRingMembersLocked,
|
||||
#[error("The transaction inputs overflow.")]
|
||||
InputsOverflow,
|
||||
#[error("The transaction has no inputs.")]
|
||||
|
@ -124,7 +127,7 @@ pub(crate) fn check_output_types(
|
|||
) -> Result<(), TransactionError> {
|
||||
if hf == &HardFork::V15 {
|
||||
for outs in outputs.windows(2) {
|
||||
if outs[0].view_tag.is_some() != outs[0].view_tag.is_some() {
|
||||
if outs[0].view_tag.is_some() != outs[1].view_tag.is_some() {
|
||||
return Err(TransactionError::OutputTypeInvalid);
|
||||
}
|
||||
}
|
||||
|
@ -213,7 +216,10 @@ fn check_number_of_outputs(
|
|||
}
|
||||
|
||||
match rct_type {
|
||||
RctType::Bulletproofs | RctType::BulletproofsCompactAmount | RctType::BulletproofsPlus => {
|
||||
RctType::Bulletproofs
|
||||
| RctType::BulletproofsCompactAmount
|
||||
| RctType::Clsag
|
||||
| RctType::BulletproofsPlus => {
|
||||
if outputs <= MAX_BULLETPROOFS_OUTPUTS {
|
||||
Ok(())
|
||||
} else {
|
||||
|
@ -247,7 +253,7 @@ fn check_outputs_semantics(
|
|||
/// Checks if an outputs unlock time has passed.
|
||||
///
|
||||
/// <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html>
|
||||
fn output_unlocked(
|
||||
pub fn output_unlocked(
|
||||
time_lock: &Timelock,
|
||||
current_chain_height: u64,
|
||||
current_time_lock_timestamp: u64,
|
||||
|
@ -272,7 +278,7 @@ fn check_block_time_lock(unlock_height: u64, current_chain_height: u64) -> bool
|
|||
unlock_height <= current_chain_height
|
||||
}
|
||||
|
||||
/// Returns if a locked output, which uses a block height, can be spend.
|
||||
/// Returns if a locked output, which uses a block height, can be spent.
|
||||
///
|
||||
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html#timestamp>
|
||||
fn check_timestamp_time_lock(
|
||||
|
@ -303,7 +309,7 @@ fn check_all_time_locks(
|
|||
hf,
|
||||
) {
|
||||
tracing::debug!("Transaction invalid: one or more inputs locked, lock: {time_lock:?}.");
|
||||
Err(TransactionError::OneOrMoreDecoysLocked)
|
||||
Err(TransactionError::OneOrMoreRingMembersLocked)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
|
@ -316,7 +322,7 @@ fn check_all_time_locks(
|
|||
///
|
||||
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#minimum-decoys>
|
||||
/// && <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#equal-number-of-decoys>
|
||||
fn check_decoy_info(decoy_info: &DecoyInfo, hf: &HardFork) -> Result<(), TransactionError> {
|
||||
pub fn check_decoy_info(decoy_info: &DecoyInfo, hf: &HardFork) -> Result<(), TransactionError> {
|
||||
if hf == &HardFork::V15 {
|
||||
// Hard-fork 15 allows both v14 and v16 rules
|
||||
return check_decoy_info(decoy_info, &HardFork::V14)
|
||||
|
@ -347,26 +353,16 @@ fn check_decoy_info(decoy_info: &DecoyInfo, hf: &HardFork) -> Result<(), Transac
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Checks the inputs key images for torsion and for duplicates in the spent_kis list.
|
||||
/// Checks the inputs key images for torsion.
|
||||
///
|
||||
/// The `spent_kis` parameter is not meant to be a complete list of key images, just a list of related transactions
|
||||
/// key images, for example transactions in a block. The chain will be checked for duplicates later.
|
||||
///
|
||||
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#unique-key-image>
|
||||
/// && <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#torsion-free-key-image>
|
||||
fn check_key_images(
|
||||
input: &Input,
|
||||
spent_kis: &mut HashSet<[u8; 32]>,
|
||||
) -> Result<(), TransactionError> {
|
||||
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#torsion-free-key-image>
|
||||
fn check_key_images(input: &Input) -> Result<(), TransactionError> {
|
||||
match input {
|
||||
Input::ToKey { key_image, .. } => {
|
||||
// this happens in monero-serai but we may as well duplicate the check.
|
||||
if !key_image.is_torsion_free() {
|
||||
return Err(TransactionError::KeyImageIsNotInPrimeSubGroup);
|
||||
}
|
||||
if !spent_kis.insert(key_image.compress().to_bytes()) {
|
||||
return Err(TransactionError::KeyImageSpent);
|
||||
}
|
||||
}
|
||||
_ => Err(TransactionError::IncorrectInputType)?,
|
||||
}
|
||||
|
@ -455,7 +451,7 @@ fn check_10_block_lock(
|
|||
tracing::debug!(
|
||||
"Transaction invalid: One or more ring members younger than 10 blocks."
|
||||
);
|
||||
Err(TransactionError::OneOrMoreDecoysLocked)
|
||||
Err(TransactionError::OneOrMoreRingMembersLocked)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
|
@ -510,23 +506,19 @@ fn check_inputs_semantics(inputs: &[Input], hf: &HardFork) -> Result<u64, Transa
|
|||
///
|
||||
/// Contextual rules are rules that require blockchain context to check.
|
||||
///
|
||||
/// This function does not check signatures.
|
||||
///
|
||||
/// The `spent_kis` parameter is not meant to be a complete list of key images, just a list of related transactions
|
||||
/// key images, for example transactions in a block. The chain should be checked for duplicates later.
|
||||
/// This function does not check signatures or for duplicate key-images.
|
||||
fn check_inputs_contextual(
|
||||
inputs: &[Input],
|
||||
tx_ring_members_info: &TxRingMembersInfo,
|
||||
current_chain_height: u64,
|
||||
hf: &HardFork,
|
||||
spent_kis: Arc<std::sync::Mutex<HashSet<[u8; 32]>>>,
|
||||
) -> Result<(), TransactionError> {
|
||||
// This rule is not contained in monero-core explicitly, but it is enforced by how Monero picks ring members.
|
||||
// When picking ring members monerod will only look in the DB at past blocks so an output has to be younger
|
||||
// than this transaction to be used in this tx.
|
||||
if tx_ring_members_info.youngest_used_out_height >= current_chain_height {
|
||||
tracing::debug!("Transaction invalid: One or more ring members too young.");
|
||||
Err(TransactionError::OneOrMoreDecoysLocked)?;
|
||||
Err(TransactionError::OneOrMoreRingMembersLocked)?;
|
||||
}
|
||||
|
||||
check_10_block_lock(
|
||||
|
@ -541,11 +533,9 @@ fn check_inputs_contextual(
|
|||
assert_eq!(hf, &HardFork::V1);
|
||||
}
|
||||
|
||||
let mut spent_kis_lock = spent_kis.lock().unwrap();
|
||||
for input in inputs {
|
||||
check_key_images(input, &mut spent_kis_lock)?;
|
||||
check_key_images(input)?;
|
||||
}
|
||||
drop(spent_kis_lock);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -608,7 +598,7 @@ fn transaction_weight_limit(hf: &HardFork) -> usize {
|
|||
/// - The tx-pool will use the current hard-fork
|
||||
/// - When syncing the hard-fork is in the block header.
|
||||
///
|
||||
/// To fully verify a transaction this must be accompanied with [`check_transaction_contextual`]
|
||||
/// To fully verify a transaction this must be accompanied by [`check_transaction_contextual`]
|
||||
///
|
||||
pub fn check_transaction_semantic(
|
||||
tx: &Transaction,
|
||||
|
@ -616,7 +606,7 @@ pub fn check_transaction_semantic(
|
|||
tx_weight: usize,
|
||||
tx_hash: &[u8; 32],
|
||||
hf: &HardFork,
|
||||
verifier: &mut BatchVerifier<(), dalek_ff_group::EdwardsPoint>,
|
||||
verifier: impl BatchVerifier,
|
||||
) -> Result<u64, TransactionError> {
|
||||
// <https://monero-book.cuprate.org/consensus_rules/transactions.html#transaction-size>
|
||||
if tx_blob_size > MAX_TX_BLOB_SIZE
|
||||
|
@ -655,9 +645,11 @@ pub fn check_transaction_semantic(
|
|||
|
||||
/// Checks the transaction is contextually valid.
|
||||
///
|
||||
/// To fully verify a transaction this must be accompanied with [`check_transaction_semantic`]
|
||||
/// To fully verify a transaction this must be accompanied by [`check_transaction_semantic`].
|
||||
///
|
||||
/// `current_time_lock_timestamp` must be: <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html#getting-the-current-time>
|
||||
/// This function also does _not_ check for duplicate key-images: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#unique-key-image>.
|
||||
///
|
||||
/// `current_time_lock_timestamp` must be: <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html#getting-the-current-time>.
|
||||
|
||||
pub fn check_transaction_contextual(
|
||||
tx: &Transaction,
|
||||
|
@ -665,7 +657,6 @@ pub fn check_transaction_contextual(
|
|||
current_chain_height: u64,
|
||||
current_time_lock_timestamp: u64,
|
||||
hf: &HardFork,
|
||||
spent_kis: Arc<std::sync::Mutex<HashSet<[u8; 32]>>>,
|
||||
) -> Result<(), TransactionError> {
|
||||
let tx_version = TxVersion::from_raw(tx.prefix.version)
|
||||
.ok_or(TransactionError::TransactionVersionInvalid)?;
|
||||
|
@ -675,7 +666,6 @@ pub fn check_transaction_contextual(
|
|||
tx_ring_members_info,
|
||||
current_chain_height,
|
||||
hf,
|
||||
spent_kis,
|
||||
)?;
|
||||
check_tx_version(&tx_ring_members_info.decoy_info, &tx_version, hf)?;
|
||||
|
||||
|
|
|
@ -6,22 +6,13 @@ use std::{
|
|||
use curve25519_dalek::EdwardsPoint;
|
||||
use monero_serai::transaction::{Input, Timelock};
|
||||
|
||||
use crate::{transactions::TransactionError, HardFork, TxVersion};
|
||||
|
||||
/// An already approved previous transaction output.
|
||||
#[derive(Debug)]
|
||||
pub struct OutputOnChain {
|
||||
pub height: u64,
|
||||
pub time_lock: Timelock,
|
||||
pub key: Option<EdwardsPoint>,
|
||||
pub commitment: EdwardsPoint,
|
||||
}
|
||||
use crate::{transactions::TransactionError, HardFork};
|
||||
|
||||
/// Gets the absolute offsets from the relative offsets.
|
||||
///
|
||||
/// This function will return an error if the relative offsets are empty.
|
||||
/// <https://cuprate.github.io/monero-book/consensus_rules/transactions.html#inputs-must-have-decoys>
|
||||
fn get_absolute_offsets(relative_offsets: &[u64]) -> Result<Vec<u64>, TransactionError> {
|
||||
pub fn get_absolute_offsets(relative_offsets: &[u64]) -> Result<Vec<u64>, TransactionError> {
|
||||
if relative_offsets.is_empty() {
|
||||
return Err(TransactionError::InputDoesNotHaveExpectedNumbDecoys);
|
||||
}
|
||||
|
@ -64,35 +55,6 @@ pub fn insert_ring_member_ids(
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Get the ring members for the inputs from the outputs on the chain.
|
||||
///
|
||||
/// Will error if `outputs` does not contain the outputs needed.
|
||||
pub fn get_ring_members_for_inputs<'a>(
|
||||
get_outputs: impl Fn(u64, u64) -> Option<&'a OutputOnChain>,
|
||||
inputs: &[Input],
|
||||
) -> Result<Vec<Vec<&'a OutputOnChain>>, TransactionError> {
|
||||
inputs
|
||||
.iter()
|
||||
.map(|inp| match inp {
|
||||
Input::ToKey {
|
||||
amount,
|
||||
key_offsets,
|
||||
..
|
||||
} => {
|
||||
let offsets = get_absolute_offsets(key_offsets)?;
|
||||
Ok(offsets
|
||||
.iter()
|
||||
.map(|offset| {
|
||||
get_outputs(amount.unwrap_or(0), *offset)
|
||||
.ok_or(TransactionError::RingMemberNotFoundOrInvalid)
|
||||
})
|
||||
.collect::<Result<_, TransactionError>>()?)
|
||||
}
|
||||
_ => Err(TransactionError::IncorrectInputType),
|
||||
})
|
||||
.collect::<Result<_, TransactionError>>()
|
||||
}
|
||||
|
||||
/// Represents the ring members of all the inputs.
|
||||
#[derive(Debug)]
|
||||
pub enum Rings {
|
||||
|
@ -102,46 +64,7 @@ pub enum Rings {
|
|||
RingCT(Vec<Vec<[EdwardsPoint; 2]>>),
|
||||
}
|
||||
|
||||
impl Rings {
|
||||
/// Builds the rings for the transaction inputs, from the given outputs.
|
||||
fn new(
|
||||
outputs: Vec<Vec<&OutputOnChain>>,
|
||||
tx_version: TxVersion,
|
||||
) -> Result<Rings, TransactionError> {
|
||||
Ok(match tx_version {
|
||||
TxVersion::RingSignatures => Rings::Legacy(
|
||||
outputs
|
||||
.into_iter()
|
||||
.map(|inp_outs| {
|
||||
inp_outs
|
||||
.into_iter()
|
||||
.map(|out| out.key.ok_or(TransactionError::RingMemberNotFoundOrInvalid))
|
||||
.collect::<Result<Vec<_>, TransactionError>>()
|
||||
})
|
||||
.collect::<Result<Vec<_>, TransactionError>>()?,
|
||||
),
|
||||
TxVersion::RingCT => Rings::RingCT(
|
||||
outputs
|
||||
.into_iter()
|
||||
.map(|inp_outs| {
|
||||
inp_outs
|
||||
.into_iter()
|
||||
.map(|out| {
|
||||
Ok([
|
||||
out.key
|
||||
.ok_or(TransactionError::RingMemberNotFoundOrInvalid)?,
|
||||
out.commitment,
|
||||
])
|
||||
})
|
||||
.collect::<Result<_, TransactionError>>()
|
||||
})
|
||||
.collect::<Result<_, _>>()?,
|
||||
),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Information on the outputs the transaction is is referencing for inputs (ring members).
|
||||
/// Information on the outputs the transaction is referencing for inputs (ring members).
|
||||
#[derive(Debug)]
|
||||
pub struct TxRingMembersInfo {
|
||||
pub rings: Rings,
|
||||
|
@ -149,49 +72,6 @@ pub struct TxRingMembersInfo {
|
|||
pub decoy_info: Option<DecoyInfo>,
|
||||
pub youngest_used_out_height: u64,
|
||||
pub time_locked_outs: Vec<Timelock>,
|
||||
pub hf: HardFork,
|
||||
}
|
||||
|
||||
impl TxRingMembersInfo {
|
||||
/// Construct a [`TxRingMembersInfo`] struct.
|
||||
///
|
||||
/// The used outs must be all the ring members used in the transactions inputs.
|
||||
pub fn new(
|
||||
used_outs: Vec<Vec<&OutputOnChain>>,
|
||||
decoy_info: Option<DecoyInfo>,
|
||||
tx_version: TxVersion,
|
||||
hf: HardFork,
|
||||
) -> Result<TxRingMembersInfo, TransactionError> {
|
||||
Ok(TxRingMembersInfo {
|
||||
youngest_used_out_height: used_outs
|
||||
.iter()
|
||||
.map(|inp_outs| {
|
||||
inp_outs
|
||||
.iter()
|
||||
// the output with the highest height is the youngest
|
||||
.map(|out| out.height)
|
||||
.max()
|
||||
.expect("Input must have ring members")
|
||||
})
|
||||
.max()
|
||||
.expect("Tx must have inputs"),
|
||||
time_locked_outs: used_outs
|
||||
.iter()
|
||||
.flat_map(|inp_outs| {
|
||||
inp_outs
|
||||
.iter()
|
||||
.filter_map(|out| match out.time_lock {
|
||||
Timelock::None => None,
|
||||
lock => Some(lock),
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
.collect(),
|
||||
hf,
|
||||
rings: Rings::new(used_outs, tx_version)?,
|
||||
decoy_info,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// A struct holding information about the inputs and their decoys. This data can vary by block so
|
||||
|
@ -202,7 +82,7 @@ impl TxRingMembersInfo {
|
|||
/// - The top block hash is the same as when this data was retrieved (the blockchain state is unchanged).
|
||||
///
|
||||
/// <https://cuprate.github.io/monero-book/consensus_rules/transactions/decoys.html>
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct DecoyInfo {
|
||||
/// The number of inputs that have enough outputs on the chain to mix with.
|
||||
pub mixable: usize,
|
||||
|
@ -229,7 +109,7 @@ impl DecoyInfo {
|
|||
///
|
||||
pub fn new(
|
||||
inputs: &[Input],
|
||||
outputs_with_amount: &HashMap<u64, usize>,
|
||||
outputs_with_amount: impl Fn(u64) -> usize,
|
||||
hf: &HardFork,
|
||||
) -> Result<DecoyInfo, TransactionError> {
|
||||
let mut min_decoys = usize::MAX;
|
||||
|
@ -247,9 +127,7 @@ impl DecoyInfo {
|
|||
..
|
||||
} => {
|
||||
if let Some(amount) = amount {
|
||||
let outs_with_amt = *outputs_with_amount
|
||||
.get(amount)
|
||||
.expect("outputs_with_amount does not include needed amount.");
|
||||
let outs_with_amt = outputs_with_amount(*amount);
|
||||
|
||||
// <https://cuprate.github.io/monero-book/consensus_rules/transactions/decoys.html#mixable-and-unmixable-inputs>
|
||||
if outs_with_amt <= minimum_decoys {
|
||||
|
|
|
@ -9,12 +9,11 @@ use monero_serai::{
|
|||
transaction::{Input, Transaction},
|
||||
H,
|
||||
};
|
||||
use multiexp::BatchVerifier;
|
||||
use rand::thread_rng;
|
||||
#[cfg(feature = "rayon")]
|
||||
use rayon::prelude::*;
|
||||
|
||||
use crate::{transactions::Rings, try_par_iter, HardFork};
|
||||
use crate::{batch_verifier::BatchVerifier, transactions::Rings, try_par_iter, HardFork};
|
||||
|
||||
/// This constant contains the IDs of 2 transactions that should be allowed after the fork the ringCT
|
||||
/// type they used should be banned.
|
||||
|
@ -91,7 +90,7 @@ fn simple_type_balances(rct_sig: &RctSignatures) -> Result<(), RingCTError> {
|
|||
/// <https://monero-book.cuprate.org/consensus_rules/ring_ct/bulletproofs+.html>
|
||||
fn check_output_range_proofs(
|
||||
rct_sig: &RctSignatures,
|
||||
verifier: &mut BatchVerifier<(), dalek_ff_group::EdwardsPoint>,
|
||||
mut verifier: impl BatchVerifier,
|
||||
) -> Result<(), RingCTError> {
|
||||
let commitments = &rct_sig.base.commitments;
|
||||
|
||||
|
@ -109,7 +108,9 @@ fn check_output_range_proofs(
|
|||
}),
|
||||
RctPrunable::MlsagBulletproofs { bulletproofs, .. }
|
||||
| RctPrunable::Clsag { bulletproofs, .. } => {
|
||||
if bulletproofs.batch_verify(&mut thread_rng(), verifier, (), commitments) {
|
||||
if verifier.queue_statement(|verifier| {
|
||||
bulletproofs.batch_verify(&mut thread_rng(), verifier, (), commitments)
|
||||
}) {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(RingCTError::BulletproofsRangeInvalid)
|
||||
|
@ -121,7 +122,7 @@ fn check_output_range_proofs(
|
|||
pub(crate) fn ring_ct_semantic_checks(
|
||||
tx: &Transaction,
|
||||
tx_hash: &[u8; 32],
|
||||
verifier: &mut BatchVerifier<(), dalek_ff_group::EdwardsPoint>,
|
||||
verifier: impl BatchVerifier,
|
||||
hf: &HardFork,
|
||||
) -> Result<(), RingCTError> {
|
||||
let rct_type = tx.rct_signatures.rct_type();
|
||||
|
@ -154,6 +155,13 @@ pub(crate) fn check_input_signatures(
|
|||
Err(RingCTError::RingInvalid)?;
|
||||
}
|
||||
|
||||
let pseudo_outs = match &rct_sig.prunable {
|
||||
RctPrunable::MlsagBulletproofs { pseudo_outs, .. }
|
||||
| RctPrunable::Clsag { pseudo_outs, .. } => pseudo_outs.as_slice(),
|
||||
RctPrunable::MlsagBorromean { .. } => rct_sig.base.pseudo_outs.as_slice(),
|
||||
RctPrunable::AggregateMlsagBorromean { .. } | RctPrunable::Null => &[],
|
||||
};
|
||||
|
||||
match &rct_sig.prunable {
|
||||
RctPrunable::Null => Err(RingCTError::TypeNotAllowed)?,
|
||||
RctPrunable::AggregateMlsagBorromean { mlsag, .. } => {
|
||||
|
@ -174,7 +182,7 @@ pub(crate) fn check_input_signatures(
|
|||
}
|
||||
RctPrunable::MlsagBorromean { mlsags, .. }
|
||||
| RctPrunable::MlsagBulletproofs { mlsags, .. } => try_par_iter(mlsags)
|
||||
.zip(&rct_sig.base.pseudo_outs)
|
||||
.zip(pseudo_outs)
|
||||
.zip(inputs)
|
||||
.zip(rings)
|
||||
.try_for_each(|(((mlsag, pseudo_out), input), ring)| {
|
||||
|
@ -189,7 +197,7 @@ pub(crate) fn check_input_signatures(
|
|||
)?)
|
||||
}),
|
||||
RctPrunable::Clsag { clsags, .. } => try_par_iter(clsags)
|
||||
.zip(&rct_sig.base.pseudo_outs)
|
||||
.zip(pseudo_outs)
|
||||
.zip(inputs)
|
||||
.zip(rings)
|
||||
.try_for_each(|(((clsags, pseudo_out), input), ring)| {
|
||||
|
|
298
consensus/rules/src/transactions/tests.rs
Normal file
298
consensus/rules/src/transactions/tests.rs
Normal file
|
@ -0,0 +1,298 @@
|
|||
use std::ops::Range;
|
||||
|
||||
use curve25519_dalek::{
|
||||
constants::{ED25519_BASEPOINT_POINT, EIGHT_TORSION},
|
||||
edwards::CompressedEdwardsY,
|
||||
EdwardsPoint,
|
||||
};
|
||||
use proptest::{collection::vec, prelude::*};
|
||||
|
||||
use monero_serai::transaction::Output;
|
||||
|
||||
use super::*;
|
||||
use crate::decomposed_amount::decomposed_amounts;
|
||||
|
||||
#[test]
|
||||
fn test_check_output_amount_v1() {
|
||||
for amount in decomposed_amounts() {
|
||||
assert!(check_output_amount_v1(*amount, &HardFork::V2).is_ok())
|
||||
}
|
||||
|
||||
proptest!(|(amount in any::<u64>().prop_filter("value_decomposed", |val| !is_decomposed_amount(val)))| {
|
||||
prop_assert!(check_output_amount_v1(amount, &HardFork::V2).is_err());
|
||||
prop_assert!(check_output_amount_v1(amount, &HardFork::V1).is_ok())
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sum_outputs() {
|
||||
let mut output_10 = Output {
|
||||
key: CompressedEdwardsY([0; 32]),
|
||||
amount: None,
|
||||
view_tag: None,
|
||||
};
|
||||
|
||||
output_10.amount = Some(10);
|
||||
|
||||
let mut outputs_20 = output_10.clone();
|
||||
outputs_20.amount = Some(20);
|
||||
|
||||
let outs = [output_10, outputs_20];
|
||||
|
||||
let sum = sum_outputs(&outs, &HardFork::V16, &TxVersion::RingSignatures).unwrap();
|
||||
assert_eq!(sum, 30);
|
||||
|
||||
assert!(sum_outputs(&outs, &HardFork::V16, &TxVersion::RingCT).is_err())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_decoy_info() {
|
||||
let decoy_info = DecoyInfo {
|
||||
mixable: 0,
|
||||
not_mixable: 0,
|
||||
min_decoys: minimum_decoys(&HardFork::V8),
|
||||
max_decoys: minimum_decoys(&HardFork::V8) + 1,
|
||||
};
|
||||
|
||||
assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_ok());
|
||||
assert!(check_decoy_info(&decoy_info, &HardFork::V16).is_err());
|
||||
|
||||
let mut decoy_info = DecoyInfo {
|
||||
mixable: 0,
|
||||
not_mixable: 0,
|
||||
min_decoys: minimum_decoys(&HardFork::V8) - 1,
|
||||
max_decoys: minimum_decoys(&HardFork::V8) + 1,
|
||||
};
|
||||
|
||||
assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_err());
|
||||
|
||||
decoy_info.not_mixable = 1;
|
||||
assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_ok());
|
||||
|
||||
decoy_info.mixable = 2;
|
||||
assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_err());
|
||||
|
||||
let mut decoy_info = DecoyInfo {
|
||||
mixable: 0,
|
||||
not_mixable: 0,
|
||||
min_decoys: minimum_decoys(&HardFork::V12),
|
||||
max_decoys: minimum_decoys(&HardFork::V12) + 1,
|
||||
};
|
||||
|
||||
assert!(check_decoy_info(&decoy_info, &HardFork::V12).is_err());
|
||||
|
||||
decoy_info.max_decoys = decoy_info.min_decoys;
|
||||
assert!(check_decoy_info(&decoy_info, &HardFork::V12).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_torsion_ki() {
|
||||
for &key_image in EIGHT_TORSION[1..].iter() {
|
||||
assert!(check_key_images(&Input::ToKey {
|
||||
key_image,
|
||||
amount: None,
|
||||
key_offsets: vec![],
|
||||
})
|
||||
.is_err())
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a strategy that resolves to a [`RctType`] that uses
|
||||
/// BPs(+).
|
||||
#[allow(unreachable_code)]
|
||||
#[allow(clippy::diverging_sub_expression)]
|
||||
fn bulletproof_rct_type() -> BoxedStrategy<RctType> {
|
||||
return prop_oneof![
|
||||
Just(RctType::Bulletproofs),
|
||||
Just(RctType::BulletproofsCompactAmount),
|
||||
Just(RctType::Clsag),
|
||||
Just(RctType::BulletproofsPlus),
|
||||
]
|
||||
.boxed();
|
||||
|
||||
// Here to make sure this is updated when needed.
|
||||
match unreachable!() {
|
||||
RctType::Null => {}
|
||||
RctType::MlsagAggregate => {}
|
||||
RctType::MlsagIndividual => {}
|
||||
RctType::Bulletproofs => {}
|
||||
RctType::BulletproofsCompactAmount => {}
|
||||
RctType::Clsag => {}
|
||||
RctType::BulletproofsPlus => {}
|
||||
};
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a valid prime-order point.
|
||||
fn random_point()(bytes in any::<[u8; 32]>()) -> EdwardsPoint {
|
||||
EdwardsPoint::mul_base_clamped(bytes)
|
||||
}
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a valid torsioned point.
|
||||
fn random_torsioned_point()(point in random_point(), torsion in 1..8_usize ) -> EdwardsPoint {
|
||||
point + curve25519_dalek::constants::EIGHT_TORSION[torsion]
|
||||
}
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a random [`Output`].
|
||||
///
|
||||
/// `key` is always valid.
|
||||
fn random_out(rct: bool, view_tagged: bool)(
|
||||
point in random_point(),
|
||||
amount in any::<u64>(),
|
||||
view_tag in any::<u8>(),
|
||||
) -> Output {
|
||||
Output {
|
||||
amount: if rct { None } else { Some(amount) },
|
||||
key: point.compress(),
|
||||
view_tag: if view_tagged { Some(view_tag) } else { None },
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a random [`Output`].
|
||||
///
|
||||
/// `key` is always valid but torsioned.
|
||||
fn random_torsioned_out(rct: bool, view_tagged: bool)(
|
||||
point in random_torsioned_point(),
|
||||
amount in any::<u64>(),
|
||||
view_tag in any::<u8>(),
|
||||
) -> Output {
|
||||
Output {
|
||||
amount: if rct { None } else { Some(amount) },
|
||||
key: point.compress(),
|
||||
view_tag: if view_tagged { Some(view_tag) } else { None },
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a [`HardFork`] in a specific range.
|
||||
fn hf_in_range(range: Range<u8>)(
|
||||
hf in range,
|
||||
) -> HardFork {
|
||||
HardFork::from_version(hf).unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a [`Timelock`] that is locked given a height and time.
|
||||
fn locked_timelock(height: u64, time_for_time_lock: u64)(
|
||||
timebased in any::<bool>(),
|
||||
lock_height in (height+1)..500_000_001,
|
||||
time_for_time_lock in (time_for_time_lock+121)..,
|
||||
) -> Timelock {
|
||||
if timebased || lock_height > 500_000_000 {
|
||||
Timelock::Time(time_for_time_lock)
|
||||
} else {
|
||||
Timelock::Block(usize::try_from(lock_height).unwrap())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a [`Timelock`] that is unlocked given a height and time.
|
||||
fn unlocked_timelock(height: u64, time_for_time_lock: u64)(
|
||||
ty in 0..3,
|
||||
lock_height in 0..(height+1),
|
||||
time_for_time_lock in 0..(time_for_time_lock+121),
|
||||
) -> Timelock {
|
||||
match ty {
|
||||
0 => Timelock::None,
|
||||
1 => Timelock::Time(time_for_time_lock),
|
||||
_ => Timelock::Block(usize::try_from(lock_height).unwrap())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
proptest! {
|
||||
#[test]
|
||||
fn test_check_output_keys(
|
||||
outs in vec(random_out(true, true), 0..16),
|
||||
torsioned_outs in vec(random_torsioned_out(false, true), 0..16)
|
||||
) {
|
||||
prop_assert!(check_output_keys(&outs).is_ok());
|
||||
prop_assert!(check_output_keys(&torsioned_outs).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn output_types(
|
||||
mut view_tag_outs in vec(random_out(true, true), 1..16),
|
||||
mut non_view_tag_outs in vec(random_out(true, false), 1..16),
|
||||
hf_no_view_tags in hf_in_range(1..14),
|
||||
hf_view_tags in hf_in_range(16..17),
|
||||
) {
|
||||
prop_assert!(check_output_types(&view_tag_outs, &hf_view_tags).is_ok());
|
||||
prop_assert!(check_output_types(&view_tag_outs, &hf_no_view_tags).is_err());
|
||||
|
||||
|
||||
prop_assert!(check_output_types(&non_view_tag_outs, &hf_no_view_tags).is_ok());
|
||||
prop_assert!(check_output_types(&non_view_tag_outs, &hf_view_tags).is_err());
|
||||
|
||||
prop_assert!(check_output_types(&non_view_tag_outs, &HardFork::V15).is_ok());
|
||||
prop_assert!(check_output_types(&view_tag_outs, &HardFork::V15).is_ok());
|
||||
view_tag_outs.append(&mut non_view_tag_outs);
|
||||
prop_assert!(check_output_types(&view_tag_outs, &HardFork::V15).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_valid_number_of_outputs(valid_numb_outs in 2..17_usize, rct_type in bulletproof_rct_type()) {
|
||||
prop_assert!(check_number_of_outputs(valid_numb_outs, &HardFork::V16, &TxVersion::RingCT, &rct_type).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_number_of_outputs(numb_outs in 17..usize::MAX, rct_type in bulletproof_rct_type()) {
|
||||
prop_assert!(check_number_of_outputs(numb_outs, &HardFork::V16, &TxVersion::RingCT, &rct_type).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_output_amount_v2(amt in 1..u64::MAX) {
|
||||
prop_assert!(check_output_amount_v2(amt).is_err());
|
||||
prop_assert!(check_output_amount_v2(0).is_ok())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_block_unlock_time(height in 1..u64::MAX) {
|
||||
prop_assert!(check_block_time_lock(height, height));
|
||||
prop_assert!(!check_block_time_lock(height, height - 1));
|
||||
prop_assert!(check_block_time_lock(height, height+1));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_timestamp_time_lock(timestamp in 500_000_001..u64::MAX) {
|
||||
prop_assert!(check_timestamp_time_lock(timestamp, timestamp - 120, &HardFork::V16));
|
||||
prop_assert!(!check_timestamp_time_lock(timestamp, timestamp - 121, &HardFork::V16));
|
||||
prop_assert!(check_timestamp_time_lock(timestamp, timestamp, &HardFork::V16));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_time_locks(
|
||||
mut locked_locks in vec(locked_timelock(5_000, 100_000_000), 1..50),
|
||||
mut unlocked_locks in vec(unlocked_timelock(5_000, 100_000_000), 1..50)
|
||||
) {
|
||||
assert!(check_all_time_locks(&locked_locks, 5_000, 100_000_000, &HardFork::V16).is_err());
|
||||
assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, &HardFork::V16).is_ok());
|
||||
|
||||
unlocked_locks.append(&mut locked_locks);
|
||||
assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, &HardFork::V16).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_input_has_decoys(key_offsets in vec(any::<u64>(), 1..10_000)) {
|
||||
assert!(check_input_has_decoys(&Input::ToKey {
|
||||
key_image: ED25519_BASEPOINT_POINT,
|
||||
amount: None,
|
||||
key_offsets,
|
||||
}).is_ok());
|
||||
|
||||
assert!(check_input_has_decoys(&Input::ToKey {
|
||||
key_image: ED25519_BASEPOINT_POINT,
|
||||
amount: None,
|
||||
key_offsets: vec![],
|
||||
}).is_err());
|
||||
}
|
||||
}
|
|
@ -1,14 +1,12 @@
|
|||
use std::cell::UnsafeCell;
|
||||
use std::{cell::RefCell, ops::DerefMut};
|
||||
|
||||
use multiexp::BatchVerifier as InternalBatchVerifier;
|
||||
use rayon::prelude::*;
|
||||
use thread_local::ThreadLocal;
|
||||
|
||||
use crate::ConsensusError;
|
||||
|
||||
/// A multi threaded batch verifier.
|
||||
/// A multithreaded batch verifier.
|
||||
pub struct MultiThreadedBatchVerifier {
|
||||
internal: ThreadLocal<UnsafeCell<InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>>>,
|
||||
internal: ThreadLocal<RefCell<InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>>>,
|
||||
}
|
||||
|
||||
impl MultiThreadedBatchVerifier {
|
||||
|
@ -19,29 +17,26 @@ impl MultiThreadedBatchVerifier {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn queue_statement<R>(
|
||||
&self,
|
||||
stmt: impl FnOnce(
|
||||
&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>,
|
||||
) -> Result<R, ConsensusError>,
|
||||
) -> Result<R, ConsensusError> {
|
||||
let verifier_cell = self
|
||||
.internal
|
||||
.get_or(|| UnsafeCell::new(InternalBatchVerifier::new(0)));
|
||||
// SAFETY: This is safe for 2 reasons:
|
||||
// 1. each thread gets a different batch verifier.
|
||||
// 2. only this function `queue_statement` will get the inner batch verifier, it's private.
|
||||
//
|
||||
// TODO: it's probably ok to just use RefCell
|
||||
stmt(unsafe { &mut *verifier_cell.get() })
|
||||
}
|
||||
|
||||
pub fn verify(self) -> bool {
|
||||
self.internal
|
||||
.into_iter()
|
||||
.map(UnsafeCell::into_inner)
|
||||
.map(RefCell::into_inner)
|
||||
.par_bridge()
|
||||
.find_any(|batch_verifier| !batch_verifier.verify_vartime())
|
||||
.is_none()
|
||||
}
|
||||
}
|
||||
|
||||
impl cuprate_consensus_rules::batch_verifier::BatchVerifier for &'_ MultiThreadedBatchVerifier {
|
||||
fn queue_statement<R>(
|
||||
&mut self,
|
||||
stmt: impl FnOnce(&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>) -> R,
|
||||
) -> R {
|
||||
let mut verifier = self
|
||||
.internal
|
||||
.get_or(|| RefCell::new(InternalBatchVerifier::new(32)))
|
||||
.borrow_mut();
|
||||
|
||||
stmt(verifier.deref_mut())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,374 +0,0 @@
|
|||
#[cfg(feature = "binaries")]
|
||||
mod bin {
|
||||
use std::{ops::Range, path::PathBuf, sync::Arc};
|
||||
|
||||
use clap::Parser;
|
||||
use futures::{channel::mpsc, SinkExt, StreamExt};
|
||||
use monero_serai::{block::Block, transaction::Transaction};
|
||||
use tokio::sync::RwLock;
|
||||
use tower::{Service, ServiceExt};
|
||||
use tracing::level_filters::LevelFilter;
|
||||
|
||||
use cuprate_helper::network::Network;
|
||||
|
||||
use cuprate_consensus::{
|
||||
context::{
|
||||
BlockChainContextRequest, BlockChainContextResponse, ContextConfig,
|
||||
UpdateBlockchainCacheData,
|
||||
},
|
||||
initialize_blockchain_context, initialize_verifier,
|
||||
rpc::{cache::ScanningCache, init_rpc_load_balancer, RpcConfig},
|
||||
Database, DatabaseRequest, DatabaseResponse, VerifiedBlockInformation, VerifyBlockRequest,
|
||||
VerifyBlockResponse,
|
||||
};
|
||||
|
||||
const MAX_BLOCKS_IN_RANGE: u64 = 500;
|
||||
const BATCHES_IN_REQUEST: u64 = 3;
|
||||
const MAX_BLOCKS_HEADERS_IN_RANGE: u64 = 1000;
|
||||
|
||||
/// Calls for a batch of blocks, returning the response and the time it took.
|
||||
async fn call_batch<D: Database>(
|
||||
range: Range<u64>,
|
||||
database: D,
|
||||
) -> Result<DatabaseResponse, tower::BoxError> {
|
||||
database
|
||||
.oneshot(DatabaseRequest::BlockBatchInRange(range))
|
||||
.await
|
||||
}
|
||||
|
||||
async fn update_cache_and_context<Ctx>(
|
||||
cache: &RwLock<ScanningCache>,
|
||||
context_updater: &mut Ctx,
|
||||
verified_block_info: VerifiedBlockInformation,
|
||||
) -> Result<(), tower::BoxError>
|
||||
where
|
||||
Ctx: tower::Service<
|
||||
BlockChainContextRequest,
|
||||
Response = BlockChainContextResponse,
|
||||
Error = tower::BoxError,
|
||||
>,
|
||||
{
|
||||
// add the new block to the cache
|
||||
cache.write().await.add_new_block_data(
|
||||
verified_block_info.generated_coins,
|
||||
&verified_block_info.block.miner_tx,
|
||||
&verified_block_info.txs,
|
||||
);
|
||||
// update the chain context svc with the new block
|
||||
context_updater
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockChainContextRequest::Update(
|
||||
UpdateBlockchainCacheData {
|
||||
new_top_hash: verified_block_info.block_hash,
|
||||
height: verified_block_info.height,
|
||||
timestamp: verified_block_info.block.header.timestamp,
|
||||
weight: verified_block_info.weight,
|
||||
long_term_weight: verified_block_info.long_term_weight,
|
||||
vote: verified_block_info.hf_vote,
|
||||
generated_coins: verified_block_info.generated_coins,
|
||||
cumulative_difficulty: verified_block_info.cumulative_difficulty,
|
||||
},
|
||||
))
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn call_blocks<D>(
|
||||
mut block_chan: mpsc::Sender<Vec<(Block, Vec<Transaction>)>>,
|
||||
start_height: u64,
|
||||
chain_height: u64,
|
||||
database: D,
|
||||
) -> Result<(), tower::BoxError>
|
||||
where
|
||||
D: Database + Clone + Send + Sync + 'static,
|
||||
D::Future: Send + 'static,
|
||||
{
|
||||
let mut next_fut = tokio::spawn(call_batch(
|
||||
start_height
|
||||
..(start_height + (MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST)).min(chain_height),
|
||||
database.clone(),
|
||||
));
|
||||
|
||||
for next_batch_start in (start_height..chain_height)
|
||||
.step_by((MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST) as usize)
|
||||
.skip(1)
|
||||
{
|
||||
// Call the next batch while we handle this batch.
|
||||
let current_fut = std::mem::replace(
|
||||
&mut next_fut,
|
||||
tokio::spawn(call_batch(
|
||||
next_batch_start
|
||||
..(next_batch_start + (MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST))
|
||||
.min(chain_height),
|
||||
database.clone(),
|
||||
)),
|
||||
);
|
||||
|
||||
let DatabaseResponse::BlockBatchInRange(blocks) = current_fut.await?? else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
|
||||
tracing::info!(
|
||||
"Got batch: {:?}, chain height: {}",
|
||||
(next_batch_start - (MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST))..(next_batch_start),
|
||||
chain_height
|
||||
);
|
||||
|
||||
block_chan.send(blocks).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn scan_chain<D>(
|
||||
cache: Arc<RwLock<ScanningCache>>,
|
||||
save_file: PathBuf,
|
||||
_rpc_config: Arc<std::sync::RwLock<RpcConfig>>,
|
||||
database: D,
|
||||
net: Network,
|
||||
) -> Result<(), tower::BoxError>
|
||||
where
|
||||
D: Database + Clone + Send + Sync + 'static,
|
||||
D::Future: Send + 'static,
|
||||
{
|
||||
tracing::info!("Beginning chain scan");
|
||||
|
||||
// TODO: when we implement all rules use the RPCs chain height, for now we don't check v2 txs.
|
||||
let chain_height = 3_152_725;
|
||||
|
||||
tracing::info!("scanning to chain height: {}", chain_height);
|
||||
|
||||
let config = match net {
|
||||
Network::Mainnet => ContextConfig::main_net(),
|
||||
Network::Stagenet => ContextConfig::stage_net(),
|
||||
Network::Testnet => ContextConfig::test_net(),
|
||||
};
|
||||
|
||||
let mut ctx_svc = initialize_blockchain_context(config, database.clone()).await?;
|
||||
|
||||
let (mut block_verifier, _) =
|
||||
initialize_verifier(database.clone(), ctx_svc.clone()).await?;
|
||||
|
||||
let start_height = cache.read().await.height;
|
||||
|
||||
let (block_tx, mut incoming_blocks) = mpsc::channel(3);
|
||||
|
||||
tokio::spawn(
|
||||
async move { call_blocks(block_tx, start_height, chain_height, database).await },
|
||||
);
|
||||
|
||||
while let Some(incoming_blocks) = incoming_blocks.next().await {
|
||||
let VerifyBlockResponse::MainChainBatchPrep(blocks, txs) = block_verifier
|
||||
.ready()
|
||||
.await?
|
||||
.call(VerifyBlockRequest::MainChainBatchPrep(incoming_blocks))
|
||||
.await?
|
||||
else {
|
||||
panic!()
|
||||
};
|
||||
|
||||
let mut height;
|
||||
for (block, txs) in blocks.into_iter().zip(txs) {
|
||||
let VerifyBlockResponse::MainChain(verified_block_info) = block_verifier
|
||||
.ready()
|
||||
.await?
|
||||
.call(VerifyBlockRequest::MainChainPrepared(block, txs))
|
||||
.await?
|
||||
else {
|
||||
panic!()
|
||||
};
|
||||
|
||||
height = verified_block_info.height;
|
||||
|
||||
if verified_block_info.height % 5000 == 0 {
|
||||
tracing::info!("saving cache to: {}", save_file.display());
|
||||
cache.write().await.save(&save_file).unwrap();
|
||||
}
|
||||
|
||||
update_cache_and_context(&cache, &mut ctx_svc, verified_block_info).await?;
|
||||
|
||||
if height % 200 == 0 {
|
||||
tracing::info!(
|
||||
"verified blocks: {:?}, chain height: {}",
|
||||
0..height,
|
||||
chain_height
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
struct Args {
|
||||
/// The log level, valid values:
|
||||
/// "off", "error", "warn", "info", "debug", "trace", or a number 0-5.
|
||||
#[arg(short, long, default_value = "info")]
|
||||
log_level: LevelFilter,
|
||||
/// The network we should scan, valid values:
|
||||
/// "mainnet", "testnet", "stagenet".
|
||||
#[arg(short, long, default_value = "mainnet")]
|
||||
network: String,
|
||||
/// A list of RPC nodes we should use.
|
||||
/// Example: <http://xmr-node.cakewallet.com:18081>
|
||||
#[arg(long)]
|
||||
rpc_nodes: Vec<String>,
|
||||
/// Stops the scanner from including the default list of nodes, this is not
|
||||
/// recommended unless you have sufficient self defined nodes with `rpc_nodes`
|
||||
#[arg(long)]
|
||||
dont_use_default_nodes: bool,
|
||||
/// The directory/ folder to save the scanning cache in.
|
||||
/// This will default to your user cache directory.
|
||||
#[arg(long)]
|
||||
cache_dir: Option<PathBuf>,
|
||||
}
|
||||
|
||||
pub async fn run() {
|
||||
let args = Args::parse();
|
||||
|
||||
if args.dont_use_default_nodes & args.rpc_nodes.is_empty() {
|
||||
panic!("Can't run scanner with no RPC nodes, see `--help` ")
|
||||
}
|
||||
|
||||
tracing_subscriber::fmt()
|
||||
.with_max_level(args.log_level)
|
||||
.init();
|
||||
|
||||
let network = match args.network.as_str() {
|
||||
"mainnet" => Network::Mainnet,
|
||||
"testnet" => Network::Testnet,
|
||||
"stagenet" => Network::Stagenet,
|
||||
_ => panic!("Invalid network, scanner currently only supports mainnet"),
|
||||
};
|
||||
|
||||
let mut file_for_cache = match args.cache_dir {
|
||||
Some(dir) => dir,
|
||||
None => dirs::cache_dir().unwrap(),
|
||||
};
|
||||
|
||||
match network {
|
||||
Network::Mainnet => file_for_cache.push("cuprate_rpc_scanning_cache.bin"),
|
||||
Network::Stagenet => file_for_cache.push("cuprate_rpc_scanning_cache_stage_net.bin"),
|
||||
Network::Testnet => file_for_cache.push("cuprate_rpc_scanning_cache_test_net.bin"),
|
||||
}
|
||||
|
||||
let mut urls = if args.dont_use_default_nodes {
|
||||
vec![]
|
||||
} else {
|
||||
match network {
|
||||
Network::Mainnet => vec![
|
||||
"http://xmr-node.cakewallet.com:18081".to_string(),
|
||||
"https://node.sethforprivacy.com".to_string(),
|
||||
// "http://nodex.monerujo.io:18081".to_string(),
|
||||
"http://nodes.hashvault.pro:18081".to_string(),
|
||||
"http://node.c3pool.com:18081".to_string(),
|
||||
"http://node.trocador.app:18089".to_string(),
|
||||
"http://xmr.lukas.services:18089".to_string(),
|
||||
"http://xmr-node-eu.cakewallet.com:18081".to_string(),
|
||||
"http://68.118.241.70:18089".to_string(),
|
||||
"http://145.239.97.211:18089".to_string(),
|
||||
//
|
||||
"http://xmr-node.cakewallet.com:18081".to_string(),
|
||||
"https://node.sethforprivacy.com".to_string(),
|
||||
// "http://nodex.monerujo.io:18081".to_string(),
|
||||
"http://nodes.hashvault.pro:18081".to_string(),
|
||||
"http://node.c3pool.com:18081".to_string(),
|
||||
"http://node.trocador.app:18089".to_string(),
|
||||
"http://xmr.lukas.services:18089".to_string(),
|
||||
"http://xmr-node-eu.cakewallet.com:18081".to_string(),
|
||||
"http://68.118.241.70:18089".to_string(),
|
||||
"http://145.239.97.211:18089".to_string(),
|
||||
],
|
||||
Network::Testnet => vec![
|
||||
"http://testnet.xmr-tw.org:28081".to_string(),
|
||||
"http://node3.monerodevs.org:28089".to_string(),
|
||||
"http://node.monerodevs.org:28089".to_string(),
|
||||
"http://125.229.105.12:28081".to_string(),
|
||||
"http://node2.monerodevs.org:28089".to_string(),
|
||||
"https://testnet.xmr.ditatompel.com".to_string(),
|
||||
"http://singapore.node.xmr.pm:28081".to_string(),
|
||||
//
|
||||
"http://testnet.xmr-tw.org:28081".to_string(),
|
||||
"http://node3.monerodevs.org:28089".to_string(),
|
||||
"http://node.monerodevs.org:28089".to_string(),
|
||||
"http://125.229.105.12:28081".to_string(),
|
||||
"http://node2.monerodevs.org:28089".to_string(),
|
||||
"https://testnet.xmr.ditatompel.com".to_string(),
|
||||
"http://singapore.node.xmr.pm:28081".to_string(),
|
||||
],
|
||||
Network::Stagenet => vec![
|
||||
"http://125.229.105.12:38081".to_string(),
|
||||
"http://90.189.159.23:38089".to_string(),
|
||||
"http://stagenet.xmr-tw.org:38081".to_string(),
|
||||
"http://node.monerodevs.org:38089".to_string(),
|
||||
"http://stagenet.community.rino.io:38081".to_string(),
|
||||
"http://node2.monerodevs.org:38089".to_string(),
|
||||
"http://node3.monerodevs.org:38089".to_string(),
|
||||
"http://singapore.node.xmr.pm:38081".to_string(),
|
||||
"https://stagenet.xmr.ditatompel.com".to_string(),
|
||||
"http://3.10.182.182:38081".to_string(),
|
||||
//
|
||||
"http://125.229.105.12:38081".to_string(),
|
||||
"http://90.189.159.23:38089".to_string(),
|
||||
"http://stagenet.xmr-tw.org:38081".to_string(),
|
||||
"http://node.monerodevs.org:38089".to_string(),
|
||||
"http://stagenet.community.rino.io:38081".to_string(),
|
||||
"http://node2.monerodevs.org:38089".to_string(),
|
||||
"http://node3.monerodevs.org:38089".to_string(),
|
||||
"http://singapore.node.xmr.pm:38081".to_string(),
|
||||
"https://stagenet.xmr.ditatompel.com".to_string(),
|
||||
"http://3.10.182.182:38081".to_string(),
|
||||
],
|
||||
}
|
||||
};
|
||||
|
||||
urls.extend(args.rpc_nodes.into_iter());
|
||||
|
||||
let rpc_config = RpcConfig::new(MAX_BLOCKS_IN_RANGE, MAX_BLOCKS_HEADERS_IN_RANGE);
|
||||
let rpc_config = Arc::new(std::sync::RwLock::new(rpc_config));
|
||||
|
||||
tracing::info!("Attempting to open cache at: {}", file_for_cache.display());
|
||||
let cache = match ScanningCache::load(&file_for_cache) {
|
||||
Ok(cache) => {
|
||||
tracing::info!("Reloaded from cache, chain height: {}", cache.height);
|
||||
Arc::new(RwLock::new(cache))
|
||||
}
|
||||
Err(_) => {
|
||||
tracing::warn!("Couldn't load from cache starting from scratch");
|
||||
let mut cache = ScanningCache::default();
|
||||
let genesis = monero_consensus::genesis::generate_genesis_block(&network);
|
||||
|
||||
let total_outs = genesis
|
||||
.miner_tx
|
||||
.prefix
|
||||
.outputs
|
||||
.iter()
|
||||
.map(|out| out.amount.unwrap_or(0))
|
||||
.sum::<u64>();
|
||||
|
||||
cache.add_new_block_data(total_outs, &genesis.miner_tx, &[]);
|
||||
Arc::new(RwLock::new(cache))
|
||||
}
|
||||
};
|
||||
|
||||
let rpc = init_rpc_load_balancer(urls, cache.clone(), rpc_config.clone());
|
||||
|
||||
scan_chain(cache, file_for_cache, rpc_config, rpc, network)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "binaries")]
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
bin::run().await
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "binaries"))]
|
||||
fn main() {
|
||||
panic!("must run with feature `binaries`")
|
||||
}
|
|
@ -1,12 +1,12 @@
|
|||
//! Block Verifier Service.
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
collections::HashMap,
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
sync::Arc,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use cuprate_helper::asynch::rayon_spawn_async;
|
||||
use futures::FutureExt;
|
||||
use monero_serai::{
|
||||
block::Block,
|
||||
|
@ -14,8 +14,9 @@ use monero_serai::{
|
|||
};
|
||||
use rayon::prelude::*;
|
||||
use tower::{Service, ServiceExt};
|
||||
use tracing::instrument;
|
||||
|
||||
use monero_consensus::{
|
||||
use cuprate_consensus_rules::{
|
||||
blocks::{
|
||||
calculate_pow_hash, check_block, check_block_pow, is_randomx_seed_height,
|
||||
randomx_seed_height, BlockError, RandomX,
|
||||
|
@ -23,35 +24,48 @@ use monero_consensus::{
|
|||
miner_tx::MinerTxError,
|
||||
ConsensusError, HardFork,
|
||||
};
|
||||
use cuprate_helper::asynch::rayon_spawn_async;
|
||||
use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
|
||||
|
||||
use crate::{
|
||||
context::{
|
||||
rx_vms::RandomXVM, BlockChainContextRequest, BlockChainContextResponse,
|
||||
RawBlockChainContext,
|
||||
},
|
||||
transactions::{
|
||||
batch_setup_txs, contextual_data, OutputCache, TransactionVerificationData,
|
||||
VerifyTxRequest, VerifyTxResponse,
|
||||
},
|
||||
transactions::{TransactionVerificationData, VerifyTxRequest, VerifyTxResponse},
|
||||
Database, ExtendedConsensusError,
|
||||
};
|
||||
|
||||
/// A pre-prepared block with all data needed to verify it, except the block's proof of work.
|
||||
#[derive(Debug)]
|
||||
pub struct PrePreparedBlockExPOW {
|
||||
pub struct PreparedBlockExPow {
|
||||
/// The block.
|
||||
pub block: Block,
|
||||
/// The serialised block's bytes.
|
||||
pub block_blob: Vec<u8>,
|
||||
|
||||
/// The block's hard-fork vote.
|
||||
pub hf_vote: HardFork,
|
||||
/// The block's hard-fork version.
|
||||
pub hf_version: HardFork,
|
||||
|
||||
/// The block's hash.
|
||||
pub block_hash: [u8; 32],
|
||||
/// The height of the block.
|
||||
pub height: u64,
|
||||
|
||||
/// The weight of the block's miner transaction.
|
||||
pub miner_tx_weight: usize,
|
||||
}
|
||||
|
||||
impl PrePreparedBlockExPOW {
|
||||
pub fn new(block: Block) -> Result<PrePreparedBlockExPOW, ConsensusError> {
|
||||
impl PreparedBlockExPow {
|
||||
/// Prepare a new block.
|
||||
///
|
||||
/// # Errors
|
||||
/// This errors if either the `block`'s:
|
||||
/// - Hard-fork values are invalid
|
||||
/// - Miner transaction is missing a miner input
|
||||
pub fn new(block: Block) -> Result<PreparedBlockExPow, ConsensusError> {
|
||||
let (hf_version, hf_vote) =
|
||||
HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?;
|
||||
|
||||
|
@ -61,7 +75,7 @@ impl PrePreparedBlockExPOW {
|
|||
)))?
|
||||
};
|
||||
|
||||
Ok(PrePreparedBlockExPOW {
|
||||
Ok(PreparedBlockExPow {
|
||||
block_blob: block.serialize(),
|
||||
hf_vote,
|
||||
hf_version,
|
||||
|
@ -75,31 +89,37 @@ impl PrePreparedBlockExPOW {
|
|||
}
|
||||
}
|
||||
|
||||
/// A pre-prepared block with all data needed to verify it.
|
||||
#[derive(Debug)]
|
||||
pub struct PrePreparedBlock {
|
||||
pub struct PreparedBlock {
|
||||
/// The block
|
||||
pub block: Block,
|
||||
/// The serialised blocks bytes
|
||||
pub block_blob: Vec<u8>,
|
||||
|
||||
/// The blocks hf vote
|
||||
pub hf_vote: HardFork,
|
||||
/// The blocks hf version
|
||||
pub hf_version: HardFork,
|
||||
|
||||
/// The blocks hash
|
||||
pub block_hash: [u8; 32],
|
||||
/// The blocks POW hash.
|
||||
pub pow_hash: [u8; 32],
|
||||
|
||||
/// The weight of the blocks miner transaction.
|
||||
pub miner_tx_weight: usize,
|
||||
}
|
||||
|
||||
impl PrePreparedBlock {
|
||||
pub fn new(block: Block) -> Result<PrePreparedBlock, ConsensusError> {
|
||||
struct DummyRX;
|
||||
|
||||
impl RandomX for DummyRX {
|
||||
type Error = ();
|
||||
fn calculate_hash(&self, _: &[u8]) -> Result<[u8; 32], Self::Error> {
|
||||
panic!("DummyRX cant calculate hash")
|
||||
}
|
||||
}
|
||||
|
||||
impl PreparedBlock {
|
||||
/// Creates a new [`PreparedBlock`].
|
||||
///
|
||||
/// The randomX VM must be Some if RX is needed or this will panic.
|
||||
/// The randomX VM must also be initialised with the correct seed.
|
||||
fn new<R: RandomX>(
|
||||
block: Block,
|
||||
randomx_vm: Option<&R>,
|
||||
) -> Result<PreparedBlock, ConsensusError> {
|
||||
let (hf_version, hf_vote) =
|
||||
HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?;
|
||||
|
||||
|
@ -109,35 +129,37 @@ impl PrePreparedBlock {
|
|||
)))?
|
||||
};
|
||||
|
||||
Ok(PrePreparedBlock {
|
||||
Ok(PreparedBlock {
|
||||
block_blob: block.serialize(),
|
||||
hf_vote,
|
||||
hf_version,
|
||||
|
||||
block_hash: block.hash(),
|
||||
|
||||
pow_hash: calculate_pow_hash::<DummyRX>(
|
||||
None,
|
||||
pow_hash: calculate_pow_hash(
|
||||
randomx_vm,
|
||||
&block.serialize_hashable(),
|
||||
*height,
|
||||
&hf_version,
|
||||
)?,
|
||||
|
||||
miner_tx_weight: block.miner_tx.weight(),
|
||||
block,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn new_rx<R: RandomX>(
|
||||
block: PrePreparedBlockExPOW,
|
||||
/// Creates a new [`PreparedBlock`] from a [`PreparedBlockExPow`].
|
||||
///
|
||||
/// This function will give an invalid PoW hash if `randomx_vm` is not initialised
|
||||
/// with the correct seed.
|
||||
///
|
||||
/// # Panics
|
||||
/// This function will panic if `randomx_vm` is
|
||||
/// [`None`] even though RandomX is needed.
|
||||
fn new_prepped<R: RandomX>(
|
||||
block: PreparedBlockExPow,
|
||||
randomx_vm: Option<&R>,
|
||||
) -> Result<PrePreparedBlock, ConsensusError> {
|
||||
let Some(Input::Gen(height)) = block.block.miner_tx.prefix.inputs.first() else {
|
||||
Err(ConsensusError::Block(BlockError::MinerTxError(
|
||||
MinerTxError::InputNotOfTypeGen,
|
||||
)))?
|
||||
};
|
||||
|
||||
Ok(PrePreparedBlock {
|
||||
) -> Result<PreparedBlock, ConsensusError> {
|
||||
Ok(PreparedBlock {
|
||||
block_blob: block.block_blob,
|
||||
hf_vote: block.hf_vote,
|
||||
hf_version: block.hf_version,
|
||||
|
@ -146,7 +168,7 @@ impl PrePreparedBlock {
|
|||
pow_hash: calculate_pow_hash(
|
||||
randomx_vm,
|
||||
&block.block.serialize_hashable(),
|
||||
*height,
|
||||
block.height,
|
||||
&block.hf_version,
|
||||
)?,
|
||||
|
||||
|
@ -156,45 +178,46 @@ impl PrePreparedBlock {
|
|||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VerifiedBlockInformation {
|
||||
pub block: Block,
|
||||
pub hf_vote: HardFork,
|
||||
pub txs: Vec<Arc<TransactionVerificationData>>,
|
||||
pub block_hash: [u8; 32],
|
||||
pub pow_hash: [u8; 32],
|
||||
pub height: u64,
|
||||
pub generated_coins: u64,
|
||||
pub weight: usize,
|
||||
pub long_term_weight: usize,
|
||||
pub cumulative_difficulty: u128,
|
||||
}
|
||||
|
||||
/// A request to verify a block.
|
||||
pub enum VerifyBlockRequest {
|
||||
MainChainBatchPrep(Vec<(Block, Vec<Transaction>)>),
|
||||
/// A request to verify a block.
|
||||
MainChain {
|
||||
block: Block,
|
||||
prepared_txs: Vec<Arc<TransactionVerificationData>>,
|
||||
txs: Vec<Transaction>,
|
||||
prepared_txs: HashMap<[u8; 32], TransactionVerificationData>,
|
||||
},
|
||||
/// Verifies a prepared block.
|
||||
MainChainPrepped {
|
||||
/// The already prepared block.
|
||||
block: PreparedBlock,
|
||||
/// The full list of transactions for this block, in the order given in `block`.
|
||||
txs: Vec<Arc<TransactionVerificationData>>,
|
||||
},
|
||||
/// Batch prepares a list of blocks and transactions for verification.
|
||||
MainChainBatchPrepareBlocks {
|
||||
/// The list of blocks and their transactions (not necessarily in the order given in the block).
|
||||
blocks: Vec<(Block, Vec<Transaction>)>,
|
||||
},
|
||||
MainChainPrepared(PrePreparedBlock, Vec<Arc<TransactionVerificationData>>),
|
||||
}
|
||||
|
||||
/// A response from a verify block request.
|
||||
#[allow(clippy::large_enum_variant)] // The largest variant is most common ([`MainChain`])
|
||||
pub enum VerifyBlockResponse {
|
||||
/// This block is valid.
|
||||
MainChain(VerifiedBlockInformation),
|
||||
MainChainBatchPrep(
|
||||
Vec<PrePreparedBlock>,
|
||||
Vec<Vec<Arc<TransactionVerificationData>>>,
|
||||
),
|
||||
/// A list of prepared blocks for verification, you should call [`VerifyBlockRequest::MainChainPrepped`] on each of the returned
|
||||
/// blocks to fully verify them.
|
||||
MainChainBatchPrepped(Vec<(PreparedBlock, Vec<Arc<TransactionVerificationData>>)>),
|
||||
}
|
||||
|
||||
// TODO: it is probably a bad idea for this to derive clone, if 2 places (RPC, P2P) receive valid but different blocks
|
||||
// then they will both get approved but only one should go to main chain.
|
||||
#[derive(Clone)]
|
||||
pub struct BlockVerifierService<C: Clone, TxV: Clone, D> {
|
||||
/// The block verifier service.
|
||||
pub struct BlockVerifierService<C, TxV, D> {
|
||||
/// The context service.
|
||||
context_svc: C,
|
||||
/// The tx verifier service.
|
||||
tx_verifier_svc: TxV,
|
||||
database: D,
|
||||
/// The database.
|
||||
// Not use yet but will be.
|
||||
_database: D,
|
||||
}
|
||||
|
||||
impl<C, TxV, D> BlockVerifierService<C, TxV, D>
|
||||
|
@ -210,7 +233,8 @@ where
|
|||
D: Database + Clone + Send + Sync + 'static,
|
||||
D::Future: Send + 'static,
|
||||
{
|
||||
pub fn new(
|
||||
/// Creates a new block verifier.
|
||||
pub(crate) fn new(
|
||||
context_svc: C,
|
||||
tx_verifier_svc: TxV,
|
||||
database: D,
|
||||
|
@ -218,7 +242,7 @@ where
|
|||
BlockVerifierService {
|
||||
context_svc,
|
||||
tx_verifier_svc,
|
||||
database,
|
||||
_database: database,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -255,41 +279,33 @@ where
|
|||
fn call(&mut self, req: VerifyBlockRequest) -> Self::Future {
|
||||
let context_svc = self.context_svc.clone();
|
||||
let tx_verifier_svc = self.tx_verifier_svc.clone();
|
||||
let database = self.database.clone();
|
||||
|
||||
async move {
|
||||
match req {
|
||||
VerifyBlockRequest::MainChain {
|
||||
block,
|
||||
prepared_txs,
|
||||
txs,
|
||||
} => {
|
||||
verify_main_chain_block(block, txs, prepared_txs, context_svc, tx_verifier_svc)
|
||||
verify_main_chain_block(block, prepared_txs, context_svc, tx_verifier_svc).await
|
||||
}
|
||||
VerifyBlockRequest::MainChainBatchPrepareBlocks { blocks } => {
|
||||
batch_prepare_main_chain_block(blocks, context_svc).await
|
||||
}
|
||||
VerifyBlockRequest::MainChainPrepped { block, txs } => {
|
||||
verify_prepped_main_chain_block(block, txs, context_svc, tx_verifier_svc, None)
|
||||
.await
|
||||
}
|
||||
VerifyBlockRequest::MainChainPrepared(prepped_block, txs) => {
|
||||
verify_main_chain_block_prepared(
|
||||
prepped_block,
|
||||
txs,
|
||||
context_svc,
|
||||
tx_verifier_svc,
|
||||
None,
|
||||
)
|
||||
.await
|
||||
}
|
||||
VerifyBlockRequest::MainChainBatchPrep(blocks) => {
|
||||
batch_verify_main_chain_block(blocks, context_svc, database).await
|
||||
}
|
||||
}
|
||||
}
|
||||
.boxed()
|
||||
}
|
||||
}
|
||||
|
||||
async fn batch_verify_main_chain_block<C, D>(
|
||||
/// Batch prepares a list of blocks for verification.
|
||||
#[instrument(level = "debug", name = "batch_prep_blocks", skip_all, fields(amt = blocks.len()))]
|
||||
async fn batch_prepare_main_chain_block<C>(
|
||||
blocks: Vec<(Block, Vec<Transaction>)>,
|
||||
mut context_svc: C,
|
||||
mut database: D,
|
||||
) -> Result<VerifyBlockResponse, ExtendedConsensusError>
|
||||
where
|
||||
C: Service<
|
||||
|
@ -299,38 +315,45 @@ where
|
|||
> + Send
|
||||
+ 'static,
|
||||
C::Future: Send + 'static,
|
||||
D: Database + Clone + Send + Sync + 'static,
|
||||
D::Future: Send + 'static,
|
||||
{
|
||||
let (blocks, txs): (Vec<_>, Vec<_>) = blocks.into_iter().unzip();
|
||||
|
||||
tracing::debug!("Calculating block hashes.");
|
||||
let blocks: Vec<PrePreparedBlockExPOW> = rayon_spawn_async(|| {
|
||||
let blocks: Vec<PreparedBlockExPow> = rayon_spawn_async(|| {
|
||||
blocks
|
||||
.into_iter()
|
||||
.map(PrePreparedBlockExPOW::new)
|
||||
.map(PreparedBlockExPow::new)
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
})
|
||||
.await?;
|
||||
|
||||
// A Vec of (timestamp, HF) for each block to calculate the expected difficulty for each block.
|
||||
let mut timestamps_hfs = Vec::with_capacity(blocks.len());
|
||||
let mut new_rx_vm = None;
|
||||
|
||||
tracing::debug!("Checking blocks follow each other.");
|
||||
|
||||
// For every block make sure they have the correct height and previous ID
|
||||
for window in blocks.windows(2) {
|
||||
if window[0].block_hash != window[1].block.header.previous
|
||||
|| window[0].height != window[1].height - 1
|
||||
let block_0 = &window[0];
|
||||
let block_1 = &window[1];
|
||||
|
||||
if block_0.block_hash != block_1.block.header.previous
|
||||
|| block_0.height != block_1.height - 1
|
||||
{
|
||||
tracing::debug!("Blocks do not follow each other, verification failed.");
|
||||
Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
|
||||
}
|
||||
|
||||
if is_randomx_seed_height(window[0].height) {
|
||||
new_rx_vm = Some((window[0].height, window[0].block_hash));
|
||||
// Cache any potential RX VM seeds as we may need them for future blocks in the batch.
|
||||
if is_randomx_seed_height(block_0.height) {
|
||||
new_rx_vm = Some((block_0.height, block_0.block_hash));
|
||||
}
|
||||
|
||||
timestamps_hfs.push((window[0].block.header.timestamp, window[0].hf_version))
|
||||
timestamps_hfs.push((block_0.block.header.timestamp, block_0.hf_version))
|
||||
}
|
||||
|
||||
tracing::debug!("getting blockchain context");
|
||||
// Get the current blockchain context.
|
||||
let BlockChainContextResponse::Context(checked_context) = context_svc
|
||||
.ready()
|
||||
.await?
|
||||
|
@ -341,6 +364,7 @@ where
|
|||
panic!("Context service returned wrong response!");
|
||||
};
|
||||
|
||||
// Calculate the expected difficulties for each block in the batch.
|
||||
let BlockChainContextResponse::BatchDifficulties(difficulties) = context_svc
|
||||
.ready()
|
||||
.await?
|
||||
|
@ -355,19 +379,28 @@ where
|
|||
|
||||
let context = checked_context.unchecked_blockchain_context().clone();
|
||||
|
||||
// Make sure the blocks follow the main chain.
|
||||
|
||||
if context.chain_height != blocks[0].height {
|
||||
tracing::debug!("Blocks do not follow main chain, verification failed.");
|
||||
|
||||
Err(ConsensusError::Block(BlockError::MinerTxError(
|
||||
MinerTxError::InputsHeightIncorrect,
|
||||
)))?;
|
||||
}
|
||||
|
||||
if context.top_hash != blocks[0].block.header.previous {
|
||||
tracing::debug!("Blocks do not follow main chain, verification failed.");
|
||||
|
||||
Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
|
||||
}
|
||||
|
||||
let mut rx_vms = context.rx_vms;
|
||||
|
||||
// If we have a RX seed in the batch calculate it.
|
||||
if let Some((new_vm_height, new_vm_seed)) = new_rx_vm {
|
||||
tracing::debug!("New randomX seed in batch, initialising VM");
|
||||
|
||||
let new_vm = rayon_spawn_async(move || {
|
||||
Arc::new(RandomXVM::new(&new_vm_seed).expect("RandomX VM gave an error on set up!"))
|
||||
})
|
||||
|
@ -386,175 +419,56 @@ where
|
|||
rx_vms.insert(new_vm_height, new_vm);
|
||||
}
|
||||
|
||||
tracing::debug!("Calculating PoW and prepping transaction");
|
||||
|
||||
let blocks = rayon_spawn_async(move || {
|
||||
blocks
|
||||
.into_par_iter()
|
||||
.zip(difficulties)
|
||||
.map(|(block, difficultly)| {
|
||||
.zip(txs)
|
||||
.map(|((block, difficultly), txs)| {
|
||||
// Calculate the PoW for the block.
|
||||
let height = block.height;
|
||||
let block = PrePreparedBlock::new_rx(
|
||||
let block = PreparedBlock::new_prepped(
|
||||
block,
|
||||
rx_vms.get(&randomx_seed_height(height)).map(AsRef::as_ref),
|
||||
)?;
|
||||
|
||||
check_block_pow(&block.pow_hash, difficultly)?;
|
||||
Ok(block)
|
||||
// Check the PoW
|
||||
check_block_pow(&block.pow_hash, difficultly).map_err(ConsensusError::Block)?;
|
||||
|
||||
// Now setup the txs.
|
||||
let mut txs = txs
|
||||
.into_par_iter()
|
||||
.map(|tx| {
|
||||
let tx = TransactionVerificationData::new(tx)?;
|
||||
Ok::<_, ConsensusError>((tx.tx_hash, tx))
|
||||
})
|
||||
.collect::<Result<HashMap<_, _>, _>>()?;
|
||||
|
||||
// Order the txs correctly.
|
||||
let mut ordered_txs = Vec::with_capacity(txs.len());
|
||||
|
||||
for tx_hash in &block.block.txs {
|
||||
let tx = txs
|
||||
.remove(tx_hash)
|
||||
.ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;
|
||||
ordered_txs.push(Arc::new(tx));
|
||||
}
|
||||
|
||||
Ok((block, ordered_txs))
|
||||
})
|
||||
.collect::<Result<Vec<_>, ConsensusError>>()
|
||||
.collect::<Result<Vec<_>, ExtendedConsensusError>>()
|
||||
})
|
||||
.await?;
|
||||
|
||||
let txs = batch_setup_txs(
|
||||
txs.into_iter()
|
||||
.zip(blocks.iter().map(|block| block.hf_version))
|
||||
.collect(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut complete_block_idx = 0;
|
||||
|
||||
let mut out_cache = OutputCache::new();
|
||||
|
||||
out_cache
|
||||
.extend_from_block(
|
||||
blocks
|
||||
.iter()
|
||||
.map(|block| &block.block)
|
||||
.zip(txs.iter().map(Vec::as_slice)),
|
||||
&mut database,
|
||||
)
|
||||
.await?;
|
||||
|
||||
for (idx, hf) in blocks
|
||||
.windows(2)
|
||||
.enumerate()
|
||||
.filter(|(_, block)| block[0].hf_version != blocks[1].hf_version)
|
||||
.map(|(i, block)| (i, &block[0].hf_version))
|
||||
{
|
||||
contextual_data::batch_fill_ring_member_info(
|
||||
txs.iter()
|
||||
.take(idx + 1)
|
||||
.skip(complete_block_idx)
|
||||
.flat_map(|txs| txs.iter()),
|
||||
hf,
|
||||
context.re_org_token.clone(),
|
||||
database.clone(),
|
||||
Some(&out_cache),
|
||||
)
|
||||
.await?;
|
||||
|
||||
complete_block_idx = idx + 1;
|
||||
}
|
||||
|
||||
if complete_block_idx != blocks.len() {
|
||||
contextual_data::batch_fill_ring_member_info(
|
||||
txs.iter()
|
||||
.skip(complete_block_idx)
|
||||
.flat_map(|txs| txs.iter()),
|
||||
&blocks.last().unwrap().hf_version,
|
||||
context.re_org_token.clone(),
|
||||
database.clone(),
|
||||
Some(&out_cache),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok(VerifyBlockResponse::MainChainBatchPrep(blocks, txs))
|
||||
}
|
||||
|
||||
async fn verify_main_chain_block_prepared<C, TxV>(
|
||||
prepped_block: PrePreparedBlock,
|
||||
txs: Vec<Arc<TransactionVerificationData>>,
|
||||
context_svc: C,
|
||||
tx_verifier_svc: TxV,
|
||||
context: Option<RawBlockChainContext>,
|
||||
) -> Result<VerifyBlockResponse, ExtendedConsensusError>
|
||||
where
|
||||
C: Service<
|
||||
BlockChainContextRequest,
|
||||
Response = BlockChainContextResponse,
|
||||
Error = tower::BoxError,
|
||||
> + Send
|
||||
+ 'static,
|
||||
C::Future: Send + 'static,
|
||||
TxV: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ExtendedConsensusError>,
|
||||
{
|
||||
let context = match context {
|
||||
Some(context) => context,
|
||||
None => {
|
||||
tracing::debug!("getting blockchain context");
|
||||
let BlockChainContextResponse::Context(checked_context) = context_svc
|
||||
.oneshot(BlockChainContextRequest::GetContext)
|
||||
.await
|
||||
.map_err(Into::<ExtendedConsensusError>::into)?
|
||||
else {
|
||||
panic!("Context service returned wrong response!");
|
||||
};
|
||||
|
||||
let context = checked_context.unchecked_blockchain_context().clone();
|
||||
|
||||
tracing::debug!("got blockchain context: {:?}", context);
|
||||
context
|
||||
}
|
||||
};
|
||||
|
||||
check_block_pow(&prepped_block.pow_hash, context.next_difficulty)
|
||||
.map_err(ConsensusError::Block)?;
|
||||
|
||||
// Check that the txs included are what we need and that there are not any extra.
|
||||
// Collecting into a HashSet could hide duplicates but we check Key Images are unique so someone would have to find
|
||||
// a hash collision to include duplicate txs here.
|
||||
let mut tx_hashes = txs.iter().map(|tx| &tx.tx_hash).collect::<HashSet<_>>();
|
||||
for tx_hash in &prepped_block.block.txs {
|
||||
if !tx_hashes.remove(tx_hash) {
|
||||
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
|
||||
}
|
||||
}
|
||||
if !tx_hashes.is_empty() {
|
||||
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
|
||||
}
|
||||
|
||||
tx_verifier_svc
|
||||
.oneshot(VerifyTxRequest::Block {
|
||||
txs: txs.clone(),
|
||||
current_chain_height: context.chain_height,
|
||||
time_for_time_lock: context.current_adjusted_timestamp_for_time_lock(),
|
||||
hf: context.current_hf,
|
||||
re_org_token: context.re_org_token.clone(),
|
||||
})
|
||||
.await?;
|
||||
|
||||
let block_weight =
|
||||
prepped_block.miner_tx_weight + txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
|
||||
let total_fees = txs.iter().map(|tx| tx.fee).sum::<u64>();
|
||||
|
||||
let (hf_vote, generated_coins) = check_block(
|
||||
&prepped_block.block,
|
||||
total_fees,
|
||||
block_weight,
|
||||
prepped_block.block_blob.len(),
|
||||
&context.context_to_verify_block,
|
||||
)
|
||||
.map_err(ConsensusError::Block)?;
|
||||
|
||||
Ok(VerifyBlockResponse::MainChain(VerifiedBlockInformation {
|
||||
block_hash: prepped_block.block_hash,
|
||||
block: prepped_block.block,
|
||||
txs,
|
||||
pow_hash: prepped_block.pow_hash,
|
||||
generated_coins,
|
||||
weight: block_weight,
|
||||
height: context.chain_height,
|
||||
long_term_weight: context.next_block_long_term_weight(block_weight),
|
||||
hf_vote,
|
||||
cumulative_difficulty: context.cumulative_difficulty + context.next_difficulty,
|
||||
}))
|
||||
Ok(VerifyBlockResponse::MainChainBatchPrepped(blocks))
|
||||
}
|
||||
|
||||
/// Verifies a prepared block.
|
||||
async fn verify_main_chain_block<C, TxV>(
|
||||
block: Block,
|
||||
txs: Vec<Transaction>,
|
||||
mut prepared_txs: Vec<Arc<TransactionVerificationData>>,
|
||||
mut txs: HashMap<[u8; 32], TransactionVerificationData>,
|
||||
mut context_svc: C,
|
||||
tx_verifier_svc: TxV,
|
||||
) -> Result<VerifyBlockResponse, ExtendedConsensusError>
|
||||
|
@ -568,13 +482,11 @@ where
|
|||
C::Future: Send + 'static,
|
||||
TxV: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ExtendedConsensusError>,
|
||||
{
|
||||
tracing::debug!("getting blockchain context");
|
||||
let BlockChainContextResponse::Context(checked_context) = context_svc
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockChainContextRequest::GetContext)
|
||||
.await
|
||||
.map_err(Into::<ExtendedConsensusError>::into)?
|
||||
.await?
|
||||
else {
|
||||
panic!("Context service returned wrong response!");
|
||||
};
|
||||
|
@ -582,26 +494,156 @@ where
|
|||
let context = checked_context.unchecked_blockchain_context().clone();
|
||||
tracing::debug!("got blockchain context: {:?}", context);
|
||||
|
||||
let rx_vms = context.rx_vms.clone();
|
||||
let prepped_block = rayon_spawn_async(move || {
|
||||
let prepped_block_ex_pow = PrePreparedBlockExPOW::new(block)?;
|
||||
let height = prepped_block_ex_pow.height;
|
||||
tracing::debug!(
|
||||
"Preparing block for verification, expected height: {}",
|
||||
context.chain_height
|
||||
);
|
||||
|
||||
PrePreparedBlock::new_rx(prepped_block_ex_pow, rx_vms.get(&height).map(AsRef::as_ref))
|
||||
// Set up the block and just pass it to [`verify_prepped_main_chain_block`]
|
||||
|
||||
let rx_vms = context.rx_vms.clone();
|
||||
|
||||
let height = context.chain_height;
|
||||
let prepped_block = rayon_spawn_async(move || {
|
||||
PreparedBlock::new(
|
||||
block,
|
||||
rx_vms.get(&randomx_seed_height(height)).map(AsRef::as_ref),
|
||||
)
|
||||
})
|
||||
.await?;
|
||||
|
||||
check_block_pow(&prepped_block.pow_hash, context.cumulative_difficulty)
|
||||
check_block_pow(&prepped_block.pow_hash, context.next_difficulty)
|
||||
.map_err(ConsensusError::Block)?;
|
||||
|
||||
prepared_txs.append(&mut batch_setup_txs(vec![(txs, context.current_hf)]).await?[0]);
|
||||
// Check that the txs included are what we need and that there are not any extra.
|
||||
|
||||
verify_main_chain_block_prepared(
|
||||
let mut ordered_txs = Vec::with_capacity(txs.len());
|
||||
|
||||
tracing::debug!("Ordering transactions for block.");
|
||||
|
||||
if !prepped_block.block.txs.is_empty() {
|
||||
for tx_hash in &prepped_block.block.txs {
|
||||
let tx = txs
|
||||
.remove(tx_hash)
|
||||
.ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;
|
||||
ordered_txs.push(Arc::new(tx));
|
||||
}
|
||||
drop(txs);
|
||||
}
|
||||
|
||||
verify_prepped_main_chain_block(
|
||||
prepped_block,
|
||||
prepared_txs,
|
||||
ordered_txs,
|
||||
context_svc,
|
||||
tx_verifier_svc,
|
||||
Some(context),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn verify_prepped_main_chain_block<C, TxV>(
|
||||
prepped_block: PreparedBlock,
|
||||
txs: Vec<Arc<TransactionVerificationData>>,
|
||||
context_svc: C,
|
||||
tx_verifier_svc: TxV,
|
||||
cached_context: Option<RawBlockChainContext>,
|
||||
) -> Result<VerifyBlockResponse, ExtendedConsensusError>
|
||||
where
|
||||
C: Service<
|
||||
BlockChainContextRequest,
|
||||
Response = BlockChainContextResponse,
|
||||
Error = tower::BoxError,
|
||||
> + Send
|
||||
+ 'static,
|
||||
C::Future: Send + 'static,
|
||||
TxV: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ExtendedConsensusError>,
|
||||
{
|
||||
let context = if let Some(context) = cached_context {
|
||||
context
|
||||
} else {
|
||||
let BlockChainContextResponse::Context(checked_context) = context_svc
|
||||
.oneshot(BlockChainContextRequest::GetContext)
|
||||
.await
|
||||
.map_err(Into::<ExtendedConsensusError>::into)?
|
||||
else {
|
||||
panic!("Context service returned wrong response!");
|
||||
};
|
||||
|
||||
let context = checked_context.unchecked_blockchain_context().clone();
|
||||
|
||||
tracing::debug!("got blockchain context: {context:?}");
|
||||
|
||||
context
|
||||
};
|
||||
|
||||
tracing::debug!("verifying block: {}", hex::encode(prepped_block.block_hash));
|
||||
|
||||
check_block_pow(&prepped_block.pow_hash, context.next_difficulty)
|
||||
.map_err(ConsensusError::Block)?;
|
||||
|
||||
if prepped_block.block.txs.len() != txs.len() {
|
||||
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
|
||||
}
|
||||
|
||||
if !prepped_block.block.txs.is_empty() {
|
||||
for (expected_tx_hash, tx) in prepped_block.block.txs.iter().zip(txs.iter()) {
|
||||
if expected_tx_hash != &tx.tx_hash {
|
||||
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
|
||||
}
|
||||
}
|
||||
|
||||
tx_verifier_svc
|
||||
.oneshot(VerifyTxRequest::Prepped {
|
||||
txs: txs.clone(),
|
||||
current_chain_height: context.chain_height,
|
||||
top_hash: context.top_hash,
|
||||
time_for_time_lock: context.current_adjusted_timestamp_for_time_lock(),
|
||||
hf: context.current_hf,
|
||||
})
|
||||
.await?;
|
||||
}
|
||||
|
||||
let block_weight =
|
||||
prepped_block.miner_tx_weight + txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
|
||||
let total_fees = txs.iter().map(|tx| tx.fee).sum::<u64>();
|
||||
|
||||
tracing::debug!("Verifying block header.");
|
||||
let (_, generated_coins) = check_block(
|
||||
&prepped_block.block,
|
||||
total_fees,
|
||||
block_weight,
|
||||
prepped_block.block_blob.len(),
|
||||
&context.context_to_verify_block,
|
||||
)
|
||||
.map_err(ConsensusError::Block)?;
|
||||
|
||||
Ok(VerifyBlockResponse::MainChain(VerifiedBlockInformation {
|
||||
block_hash: prepped_block.block_hash,
|
||||
block: prepped_block.block,
|
||||
block_blob: prepped_block.block_blob,
|
||||
txs: txs
|
||||
.into_iter()
|
||||
.map(|tx| {
|
||||
// Note: it would be possible for the transaction verification service to hold onto the tx after the call
|
||||
// if one of txs was invalid and the rest are still in rayon threads.
|
||||
let tx = Arc::into_inner(tx).expect(
|
||||
"Transaction verification service should not hold onto valid transactions.",
|
||||
);
|
||||
|
||||
VerifiedTransactionInformation {
|
||||
tx_blob: tx.tx_blob,
|
||||
tx_weight: tx.tx_weight,
|
||||
fee: tx.fee,
|
||||
tx_hash: tx.tx_hash,
|
||||
tx: tx.tx,
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
pow_hash: prepped_block.pow_hash,
|
||||
generated_coins,
|
||||
weight: block_weight,
|
||||
height: context.chain_height,
|
||||
long_term_weight: context.next_block_long_term_weight(block_weight),
|
||||
cumulative_difficulty: context.cumulative_difficulty + context.next_difficulty,
|
||||
}))
|
||||
}
|
||||
|
|
|
@ -4,32 +4,30 @@
|
|||
//! This is used during contextual validation, this does not have all the data for contextual validation
|
||||
//! (outputs) for that you will need a [`Database`].
|
||||
//!
|
||||
|
||||
use std::{
|
||||
cmp::min,
|
||||
collections::HashMap,
|
||||
future::Future,
|
||||
ops::DerefMut,
|
||||
pin::Pin,
|
||||
sync::Arc,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use futures::{
|
||||
lock::{Mutex, OwnedMutexGuard, OwnedMutexLockFuture},
|
||||
FutureExt,
|
||||
};
|
||||
use tower::{Service, ServiceExt};
|
||||
use futures::{channel::oneshot, FutureExt};
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_util::sync::PollSender;
|
||||
use tower::Service;
|
||||
|
||||
use monero_consensus::{blocks::ContextToVerifyBlock, current_unix_timestamp, HardFork};
|
||||
use cuprate_consensus_rules::{blocks::ContextToVerifyBlock, current_unix_timestamp, HardFork};
|
||||
|
||||
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError};
|
||||
use crate::{Database, ExtendedConsensusError};
|
||||
|
||||
pub(crate) mod difficulty;
|
||||
pub(crate) mod hardforks;
|
||||
pub(crate) mod rx_vms;
|
||||
pub(crate) mod weight;
|
||||
|
||||
mod task;
|
||||
mod tokens;
|
||||
|
||||
pub use difficulty::DifficultyCacheConfig;
|
||||
|
@ -40,13 +38,18 @@ pub use weight::BlockWeightsCacheConfig;
|
|||
|
||||
const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: u64 = 60;
|
||||
|
||||
/// Config for the context service.
|
||||
pub struct ContextConfig {
|
||||
/// Hard-forks config.
|
||||
pub hard_fork_cfg: HardForkConfig,
|
||||
/// Difficulty config.
|
||||
pub difficulty_cfg: DifficultyCacheConfig,
|
||||
/// Block weight config.
|
||||
pub weights_config: BlockWeightsCacheConfig,
|
||||
}
|
||||
|
||||
impl ContextConfig {
|
||||
/// Get the config for main-net.
|
||||
pub fn main_net() -> ContextConfig {
|
||||
ContextConfig {
|
||||
hard_fork_cfg: HardForkConfig::main_net(),
|
||||
|
@ -55,26 +58,33 @@ impl ContextConfig {
|
|||
}
|
||||
}
|
||||
|
||||
/// Get the config for stage-net.
|
||||
pub fn stage_net() -> ContextConfig {
|
||||
ContextConfig {
|
||||
hard_fork_cfg: HardForkConfig::stage_net(),
|
||||
// These 2 have the same config as main-net.
|
||||
difficulty_cfg: DifficultyCacheConfig::main_net(),
|
||||
weights_config: BlockWeightsCacheConfig::main_net(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the config for test-net.
|
||||
pub fn test_net() -> ContextConfig {
|
||||
ContextConfig {
|
||||
hard_fork_cfg: HardForkConfig::test_net(),
|
||||
// These 2 have the same config as main-net.
|
||||
difficulty_cfg: DifficultyCacheConfig::main_net(),
|
||||
weights_config: BlockWeightsCacheConfig::main_net(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Initialize the blockchain context service.
|
||||
///
|
||||
/// This function will request a lot of data from the database so it may take a while.
|
||||
pub async fn initialize_blockchain_context<D>(
|
||||
cfg: ContextConfig,
|
||||
mut database: D,
|
||||
database: D,
|
||||
) -> Result<
|
||||
impl Service<
|
||||
BlockChainContextRequest,
|
||||
|
@ -93,74 +103,16 @@ where
|
|||
D: Database + Clone + Send + Sync + 'static,
|
||||
D::Future: Send + 'static,
|
||||
{
|
||||
let ContextConfig {
|
||||
difficulty_cfg,
|
||||
weights_config,
|
||||
hard_fork_cfg,
|
||||
} = cfg;
|
||||
let context_task = task::ContextTask::init_context(cfg, database).await?;
|
||||
|
||||
tracing::debug!("Initialising blockchain context");
|
||||
// TODO: make buffer size configurable.
|
||||
let (tx, rx) = mpsc::channel(15);
|
||||
|
||||
let DatabaseResponse::ChainHeight(chain_height, top_block_hash) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(DatabaseRequest::ChainHeight)
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
tokio::spawn(context_task.run(rx));
|
||||
|
||||
let DatabaseResponse::GeneratedCoins(already_generated_coins) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(DatabaseRequest::GeneratedCoins)
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
|
||||
let db = database.clone();
|
||||
let hardfork_state_handle = tokio::spawn(async move {
|
||||
hardforks::HardForkState::init_from_chain_height(chain_height, hard_fork_cfg, db).await
|
||||
});
|
||||
|
||||
let db = database.clone();
|
||||
let difficulty_cache_handle = tokio::spawn(async move {
|
||||
difficulty::DifficultyCache::init_from_chain_height(chain_height, difficulty_cfg, db).await
|
||||
});
|
||||
|
||||
let db = database.clone();
|
||||
let weight_cache_handle = tokio::spawn(async move {
|
||||
weight::BlockWeightsCache::init_from_chain_height(chain_height, weights_config, db).await
|
||||
});
|
||||
|
||||
let hardfork_state = hardfork_state_handle.await.unwrap()?;
|
||||
let current_hf = hardfork_state.current_hardfork();
|
||||
|
||||
let db = database.clone();
|
||||
let rx_seed_handle = tokio::spawn(async move {
|
||||
rx_vms::RandomXVMCache::init_from_chain_height(chain_height, ¤t_hf, db).await
|
||||
});
|
||||
|
||||
let context_svc = BlockChainContextService {
|
||||
internal_blockchain_context: Arc::new(
|
||||
InternalBlockChainContext {
|
||||
current_validity_token: ValidityToken::new(),
|
||||
current_reorg_token: ReOrgToken::new(),
|
||||
difficulty_cache: difficulty_cache_handle.await.unwrap()?,
|
||||
weight_cache: weight_cache_handle.await.unwrap()?,
|
||||
rx_seed_cache: rx_seed_handle.await.unwrap()?,
|
||||
hardfork_state,
|
||||
chain_height,
|
||||
already_generated_coins,
|
||||
top_block_hash,
|
||||
}
|
||||
.into(),
|
||||
),
|
||||
lock_state: MutexLockState::Locked,
|
||||
};
|
||||
|
||||
Ok(context_svc)
|
||||
Ok(BlockChainContextService {
|
||||
channel: PollSender::new(tx),
|
||||
})
|
||||
}
|
||||
|
||||
/// Raw blockchain context, gotten from [`BlockChainContext`]. This data may turn invalid so is not ok to keep
|
||||
|
@ -169,12 +121,14 @@ where
|
|||
pub struct RawBlockChainContext {
|
||||
/// The current cumulative difficulty.
|
||||
pub cumulative_difficulty: u128,
|
||||
/// A token which is used to signal if a reorg has happened since creating the token.
|
||||
pub re_org_token: ReOrgToken,
|
||||
/// RandomX VMs, this maps seeds height to VM. Will definitely contain the VM required to calculate the current blocks
|
||||
/// POW hash (if a RX VM is required), may contain more.
|
||||
pub rx_vms: HashMap<u64, Arc<RandomXVM>>,
|
||||
/// Context to verify a block, as needed by [`cuprate-consensus-rules`]
|
||||
pub context_to_verify_block: ContextToVerifyBlock,
|
||||
/// The median long term block weight.
|
||||
median_long_term_weight: usize,
|
||||
/// The top blocks timestamp (will be [`None`] if the top block is the genesis).
|
||||
top_block_timestamp: Option<u64>,
|
||||
}
|
||||
|
||||
|
@ -188,7 +142,7 @@ impl std::ops::Deref for RawBlockChainContext {
|
|||
impl RawBlockChainContext {
|
||||
/// Returns the timestamp the should be used when checking locked outputs.
|
||||
///
|
||||
/// <https://cuprate.github.io/monero-book/consensus_rules/transactions/unlock_time.html#getting-the-current-time>
|
||||
/// ref: <https://cuprate.github.io/monero-book/consensus_rules/transactions/unlock_time.html#getting-the-current-time>
|
||||
pub fn current_adjusted_timestamp_for_time_lock(&self) -> u64 {
|
||||
if self.current_hf < HardFork::V13 || self.median_block_timestamp.is_none() {
|
||||
current_unix_timestamp()
|
||||
|
@ -208,14 +162,7 @@ impl RawBlockChainContext {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn block_blob_size_limit(&self) -> usize {
|
||||
self.effective_median_weight * 2 - 600
|
||||
}
|
||||
|
||||
pub fn block_weight_limit(&self) -> usize {
|
||||
self.median_weight_for_block_reward * 2
|
||||
}
|
||||
|
||||
/// Returns the next blocks long term weight from it's block weight.
|
||||
pub fn next_block_long_term_weight(&self, block_weight: usize) -> usize {
|
||||
weight::calculate_block_long_term_weight(
|
||||
&self.current_hf,
|
||||
|
@ -259,20 +206,31 @@ impl BlockChainContext {
|
|||
}
|
||||
}
|
||||
|
||||
/// Data needed from a new block to add it to the context cache.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct UpdateBlockchainCacheData {
|
||||
pub new_top_hash: [u8; 32],
|
||||
pub struct NewBlockData {
|
||||
/// The blocks hash.
|
||||
pub block_hash: [u8; 32],
|
||||
/// The blocks height.
|
||||
pub height: u64,
|
||||
/// The blocks timestamp.
|
||||
pub timestamp: u64,
|
||||
/// The blocks weight.
|
||||
pub weight: usize,
|
||||
/// long term weight of this block.
|
||||
pub long_term_weight: usize,
|
||||
/// The coins generated by this block.
|
||||
pub generated_coins: u64,
|
||||
/// The blocks hf vote.
|
||||
pub vote: HardFork,
|
||||
/// The cumulative difficulty of the chain.
|
||||
pub cumulative_difficulty: u128,
|
||||
}
|
||||
|
||||
/// A request to the blockchain context cache.
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum BlockChainContextRequest {
|
||||
/// Get the current blockchain context.
|
||||
GetContext,
|
||||
/// Get the next difficulties for these blocks.
|
||||
///
|
||||
|
@ -280,49 +238,30 @@ pub enum BlockChainContextRequest {
|
|||
///
|
||||
/// The number of difficulties returned will be one more than the number of timestamps/ hfs.
|
||||
BatchGetDifficulties(Vec<(u64, HardFork)>),
|
||||
/// Add a VM that has been created outside of the blockchain context service to the blockchain context.
|
||||
/// This is useful when batch calculating POW as you may need to create a new VM if you batch a lot of blocks together,
|
||||
/// it would be wasteful to then not give this VM to the context service to then use when it needs to init a VM with the same
|
||||
/// seed.
|
||||
///
|
||||
/// This should include the seed used to init this VM and the VM.
|
||||
NewRXVM(([u8; 32], Arc<RandomXVM>)),
|
||||
Update(UpdateBlockchainCacheData),
|
||||
/// A request to add a new block to the cache.
|
||||
Update(NewBlockData),
|
||||
}
|
||||
|
||||
pub enum BlockChainContextResponse {
|
||||
/// Blockchain context response.
|
||||
Context(BlockChainContext),
|
||||
/// A list of difficulties.
|
||||
BatchDifficulties(Vec<u128>),
|
||||
/// Ok response.
|
||||
Ok,
|
||||
}
|
||||
struct InternalBlockChainContext {
|
||||
/// A token used to invalidate previous contexts when a new
|
||||
/// block is added to the chain.
|
||||
current_validity_token: ValidityToken,
|
||||
/// A token which is used to signal a reorg has happened.
|
||||
current_reorg_token: ReOrgToken,
|
||||
|
||||
difficulty_cache: difficulty::DifficultyCache,
|
||||
weight_cache: weight::BlockWeightsCache,
|
||||
rx_seed_cache: rx_vms::RandomXVMCache,
|
||||
hardfork_state: hardforks::HardForkState,
|
||||
|
||||
chain_height: u64,
|
||||
top_block_hash: [u8; 32],
|
||||
already_generated_coins: u64,
|
||||
}
|
||||
|
||||
enum MutexLockState {
|
||||
Locked,
|
||||
Acquiring(OwnedMutexLockFuture<InternalBlockChainContext>),
|
||||
Acquired(OwnedMutexGuard<InternalBlockChainContext>),
|
||||
}
|
||||
/// The blockchain context service.
|
||||
#[derive(Clone)]
|
||||
pub struct BlockChainContextService {
|
||||
internal_blockchain_context: Arc<Mutex<InternalBlockChainContext>>,
|
||||
lock_state: MutexLockState,
|
||||
}
|
||||
|
||||
impl Clone for BlockChainContextService {
|
||||
fn clone(&self) -> Self {
|
||||
BlockChainContextService {
|
||||
internal_blockchain_context: self.internal_blockchain_context.clone(),
|
||||
lock_state: MutexLockState::Locked,
|
||||
}
|
||||
}
|
||||
channel: PollSender<task::ContextTaskRequest>,
|
||||
}
|
||||
|
||||
impl Service<BlockChainContextRequest> for BlockChainContextService {
|
||||
|
@ -332,111 +271,25 @@ impl Service<BlockChainContextRequest> for BlockChainContextService {
|
|||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
loop {
|
||||
match &mut self.lock_state {
|
||||
MutexLockState::Locked => {
|
||||
self.lock_state = MutexLockState::Acquiring(
|
||||
Arc::clone(&self.internal_blockchain_context).lock_owned(),
|
||||
)
|
||||
}
|
||||
MutexLockState::Acquiring(lock) => {
|
||||
self.lock_state = MutexLockState::Acquired(futures::ready!(lock.poll_unpin(cx)))
|
||||
}
|
||||
MutexLockState::Acquired(_) => return Poll::Ready(Ok(())),
|
||||
}
|
||||
}
|
||||
self.channel
|
||||
.poll_reserve(cx)
|
||||
.map_err(|_| "Context service channel closed".into())
|
||||
}
|
||||
|
||||
fn call(&mut self, req: BlockChainContextRequest) -> Self::Future {
|
||||
let MutexLockState::Acquired(mut internal_blockchain_context) =
|
||||
std::mem::replace(&mut self.lock_state, MutexLockState::Locked)
|
||||
else {
|
||||
panic!("poll_ready() was not called first!")
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
let req = task::ContextTaskRequest {
|
||||
req,
|
||||
tx,
|
||||
span: tracing::Span::current(),
|
||||
};
|
||||
|
||||
let res = self.channel.send_item(req);
|
||||
|
||||
async move {
|
||||
let InternalBlockChainContext {
|
||||
current_validity_token,
|
||||
current_reorg_token,
|
||||
difficulty_cache,
|
||||
weight_cache,
|
||||
rx_seed_cache,
|
||||
hardfork_state,
|
||||
chain_height,
|
||||
top_block_hash,
|
||||
already_generated_coins,
|
||||
} = internal_blockchain_context.deref_mut();
|
||||
|
||||
let res = match req {
|
||||
BlockChainContextRequest::GetContext => {
|
||||
let current_hf = hardfork_state.current_hardfork();
|
||||
|
||||
BlockChainContextResponse::Context(BlockChainContext {
|
||||
validity_token: current_validity_token.clone(),
|
||||
raw: RawBlockChainContext {
|
||||
context_to_verify_block: ContextToVerifyBlock {
|
||||
median_weight_for_block_reward: weight_cache
|
||||
.median_for_block_reward(¤t_hf),
|
||||
effective_median_weight: weight_cache
|
||||
.effective_median_block_weight(¤t_hf),
|
||||
top_hash: *top_block_hash,
|
||||
median_block_timestamp: difficulty_cache.median_timestamp(
|
||||
usize::try_from(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW).unwrap(),
|
||||
),
|
||||
chain_height: *chain_height,
|
||||
current_hf,
|
||||
next_difficulty: difficulty_cache.next_difficulty(¤t_hf),
|
||||
already_generated_coins: *already_generated_coins,
|
||||
},
|
||||
rx_vms: rx_seed_cache.get_vms(),
|
||||
cumulative_difficulty: difficulty_cache.cumulative_difficulty(),
|
||||
median_long_term_weight: weight_cache.median_long_term_weight(),
|
||||
top_block_timestamp: difficulty_cache.top_block_timestamp(),
|
||||
re_org_token: current_reorg_token.clone(),
|
||||
},
|
||||
})
|
||||
}
|
||||
BlockChainContextRequest::BatchGetDifficulties(blocks) => {
|
||||
let next_diffs = difficulty_cache
|
||||
.next_difficulties(blocks, &hardfork_state.current_hardfork());
|
||||
BlockChainContextResponse::BatchDifficulties(next_diffs)
|
||||
}
|
||||
BlockChainContextRequest::NewRXVM(vm) => {
|
||||
rx_seed_cache.add_vm(vm);
|
||||
BlockChainContextResponse::Ok
|
||||
}
|
||||
BlockChainContextRequest::Update(new) => {
|
||||
// Cancel the validity token and replace it with a new one.
|
||||
std::mem::replace(current_validity_token, ValidityToken::new())
|
||||
.set_data_invalid();
|
||||
|
||||
difficulty_cache.new_block(
|
||||
new.height,
|
||||
new.timestamp,
|
||||
new.cumulative_difficulty,
|
||||
);
|
||||
|
||||
weight_cache.new_block(new.height, new.weight, new.long_term_weight);
|
||||
|
||||
hardfork_state.new_block(new.vote, new.height);
|
||||
|
||||
rx_seed_cache
|
||||
.new_block(
|
||||
new.height,
|
||||
&new.new_top_hash,
|
||||
&hardfork_state.current_hardfork(),
|
||||
)
|
||||
.await;
|
||||
|
||||
*chain_height = new.height + 1;
|
||||
*top_block_hash = new.new_top_hash;
|
||||
*already_generated_coins =
|
||||
already_generated_coins.saturating_add(new.generated_coins);
|
||||
|
||||
BlockChainContextResponse::Ok
|
||||
}
|
||||
};
|
||||
|
||||
Ok(res)
|
||||
res.map_err(|_| "Context service closed.")?;
|
||||
rx.await.expect("Oneshot closed without response!")
|
||||
}
|
||||
.boxed()
|
||||
}
|
||||
|
|
|
@ -1,11 +1,20 @@
|
|||
//! Difficulty Module
|
||||
//!
|
||||
//! This module handles keeping track of the data required to calculate block difficulty.
|
||||
//! This data is currently the cumulative difficulty of each block and its timestamp.
|
||||
//!
|
||||
//! The timestamps are also used in other consensus rules so instead of duplicating the same
|
||||
//! data in a different cache, the timestamps needed are retrieved from here.
|
||||
//!
|
||||
use std::{collections::VecDeque, ops::Range};
|
||||
|
||||
use tower::ServiceExt;
|
||||
use tracing::instrument;
|
||||
|
||||
use cuprate_helper::num::median;
|
||||
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
|
||||
|
||||
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError, HardFork};
|
||||
use crate::{Database, ExtendedConsensusError, HardFork};
|
||||
|
||||
/// The amount of blocks we account for to calculate difficulty
|
||||
const DIFFICULTY_WINDOW: usize = 720;
|
||||
|
@ -27,6 +36,10 @@ pub struct DifficultyCacheConfig {
|
|||
}
|
||||
|
||||
impl DifficultyCacheConfig {
|
||||
/// Create a new difficulty cache config.
|
||||
///
|
||||
/// # Notes
|
||||
/// You probably do not need this, use [`DifficultyCacheConfig::main_net`] instead.
|
||||
pub const fn new(window: usize, cut: usize, lag: usize) -> DifficultyCacheConfig {
|
||||
DifficultyCacheConfig { window, cut, lag }
|
||||
}
|
||||
|
@ -41,7 +54,9 @@ impl DifficultyCacheConfig {
|
|||
self.window - 2 * self.cut
|
||||
}
|
||||
|
||||
pub fn main_net() -> DifficultyCacheConfig {
|
||||
/// Returns the config needed for [`Mainnet`](cuprate_helper::network::Network::Mainnet). This is also the
|
||||
/// config for all other current networks.
|
||||
pub const fn main_net() -> DifficultyCacheConfig {
|
||||
DifficultyCacheConfig {
|
||||
window: DIFFICULTY_WINDOW,
|
||||
cut: DIFFICULTY_CUT,
|
||||
|
@ -66,6 +81,7 @@ pub(crate) struct DifficultyCache {
|
|||
}
|
||||
|
||||
impl DifficultyCache {
|
||||
/// Initialize the difficulty cache from the specified chain height.
|
||||
#[instrument(name = "init_difficulty_cache", level = "info", skip(database, config))]
|
||||
pub async fn init_from_chain_height<D: Database + Clone>(
|
||||
chain_height: u64,
|
||||
|
@ -100,13 +116,19 @@ impl DifficultyCache {
|
|||
Ok(diff)
|
||||
}
|
||||
|
||||
/// Add a new block to the difficulty cache.
|
||||
pub fn new_block(&mut self, height: u64, timestamp: u64, cumulative_difficulty: u128) {
|
||||
assert_eq!(self.last_accounted_height + 1, height);
|
||||
self.last_accounted_height += 1;
|
||||
|
||||
tracing::debug!(
|
||||
"Accounting for new blocks timestamp ({timestamp}) and cumulative_difficulty ({cumulative_difficulty})",
|
||||
);
|
||||
|
||||
self.timestamps.push_back(timestamp);
|
||||
self.cumulative_difficulties
|
||||
.push_back(cumulative_difficulty);
|
||||
|
||||
if u64::try_from(self.timestamps.len()).unwrap() > self.config.total_block_count() {
|
||||
self.timestamps.pop_front();
|
||||
self.cumulative_difficulties.pop_front();
|
||||
|
@ -117,47 +139,28 @@ impl DifficultyCache {
|
|||
///
|
||||
/// See: https://cuprate.github.io/monero-book/consensus_rules/blocks/difficulty.html#calculating-difficulty
|
||||
pub fn next_difficulty(&self, hf: &HardFork) -> u128 {
|
||||
if self.timestamps.len() <= 1 {
|
||||
return 1;
|
||||
}
|
||||
|
||||
let mut timestamps = self.timestamps.clone();
|
||||
if timestamps.len() > self.config.window {
|
||||
// remove the lag.
|
||||
timestamps.drain(self.config.window..);
|
||||
};
|
||||
let timestamps_slice = timestamps.make_contiguous();
|
||||
|
||||
let (window_start, window_end) = get_window_start_and_end(
|
||||
timestamps_slice.len(),
|
||||
self.config.accounted_window_len(),
|
||||
self.config.window,
|
||||
);
|
||||
|
||||
// We don't sort the whole timestamp list
|
||||
let mut time_span = u128::from(
|
||||
*timestamps_slice.select_nth_unstable(window_end - 1).1
|
||||
- *timestamps_slice.select_nth_unstable(window_start).1,
|
||||
);
|
||||
|
||||
let windowed_work = self.cumulative_difficulties[window_end - 1]
|
||||
- self.cumulative_difficulties[window_start];
|
||||
|
||||
if time_span == 0 {
|
||||
time_span = 1;
|
||||
}
|
||||
|
||||
// TODO: do checked operations here and unwrap so we don't silently overflow?
|
||||
(windowed_work * hf.block_time().as_secs() as u128 + time_span - 1) / time_span
|
||||
next_difficulty(
|
||||
&self.config,
|
||||
&self.timestamps,
|
||||
&self.cumulative_difficulties,
|
||||
hf,
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns the difficulties for multiple next blocks, using the provided timestamps and hard-forks when needed.
|
||||
///
|
||||
/// The first difficulty will be the same as the difficulty from [`DifficultyCache::next_difficulty`] after that the
|
||||
/// first timestamp and hf will be applied to the cache and the difficulty from that will be added to the list.
|
||||
///
|
||||
/// After all timestamps and hfs have been dealt with the cache will be returned back to its original state and the
|
||||
/// difficulties will be returned.
|
||||
pub fn next_difficulties(
|
||||
&mut self,
|
||||
&self,
|
||||
blocks: Vec<(u64, HardFork)>,
|
||||
current_hf: &HardFork,
|
||||
) -> Vec<u128> {
|
||||
let new_timestamps_len = blocks.len();
|
||||
let initial_len = self.timestamps.len();
|
||||
let mut timestamps = self.timestamps.clone();
|
||||
let mut cumulative_difficulties = self.cumulative_difficulties.clone();
|
||||
|
||||
let mut difficulties = Vec::with_capacity(blocks.len() + 1);
|
||||
|
||||
|
@ -166,30 +169,24 @@ impl DifficultyCache {
|
|||
let mut diff_info_popped = Vec::new();
|
||||
|
||||
for (new_timestamp, hf) in blocks {
|
||||
self.timestamps.push_back(new_timestamp);
|
||||
self.cumulative_difficulties
|
||||
.push_back(self.cumulative_difficulty() + *difficulties.last().unwrap());
|
||||
if u64::try_from(self.timestamps.len()).unwrap() > self.config.total_block_count() {
|
||||
timestamps.push_back(new_timestamp);
|
||||
|
||||
let last_cum_diff = cumulative_difficulties.back().copied().unwrap_or(1);
|
||||
cumulative_difficulties.push_back(last_cum_diff + *difficulties.last().unwrap());
|
||||
|
||||
if u64::try_from(timestamps.len()).unwrap() > self.config.total_block_count() {
|
||||
diff_info_popped.push((
|
||||
self.timestamps.pop_front().unwrap(),
|
||||
self.cumulative_difficulties.pop_front().unwrap(),
|
||||
timestamps.pop_front().unwrap(),
|
||||
cumulative_difficulties.pop_front().unwrap(),
|
||||
));
|
||||
}
|
||||
|
||||
difficulties.push(self.next_difficulty(&hf));
|
||||
}
|
||||
|
||||
self.cumulative_difficulties.drain(
|
||||
self.cumulative_difficulties
|
||||
.len()
|
||||
.saturating_sub(new_timestamps_len)..,
|
||||
);
|
||||
self.timestamps
|
||||
.drain(self.timestamps.len().saturating_sub(new_timestamps_len)..);
|
||||
|
||||
for (timestamp, cum_dif) in diff_info_popped.into_iter().take(initial_len).rev() {
|
||||
self.timestamps.push_front(timestamp);
|
||||
self.cumulative_difficulties.push_front(cum_dif);
|
||||
difficulties.push(next_difficulty(
|
||||
&self.config,
|
||||
×tamps,
|
||||
&cumulative_difficulties,
|
||||
&hf,
|
||||
));
|
||||
}
|
||||
|
||||
difficulties
|
||||
|
@ -227,11 +224,55 @@ impl DifficultyCache {
|
|||
self.cumulative_difficulties.back().copied().unwrap_or(1)
|
||||
}
|
||||
|
||||
/// Returns the top block's timestamp, returns [`None`] if the top block is the genesis block.
|
||||
pub fn top_block_timestamp(&self) -> Option<u64> {
|
||||
self.timestamps.back().copied()
|
||||
}
|
||||
}
|
||||
|
||||
/// Calculates the next difficulty with the inputted config/timestamps/cumulative_difficulties.
|
||||
fn next_difficulty(
|
||||
config: &DifficultyCacheConfig,
|
||||
timestamps: &VecDeque<u64>,
|
||||
cumulative_difficulties: &VecDeque<u128>,
|
||||
hf: &HardFork,
|
||||
) -> u128 {
|
||||
if timestamps.len() <= 1 {
|
||||
return 1;
|
||||
}
|
||||
|
||||
let mut timestamps = timestamps.clone();
|
||||
|
||||
if timestamps.len() > config.window {
|
||||
// remove the lag.
|
||||
timestamps.drain(config.window..);
|
||||
};
|
||||
let timestamps_slice = timestamps.make_contiguous();
|
||||
|
||||
let (window_start, window_end) = get_window_start_and_end(
|
||||
timestamps_slice.len(),
|
||||
config.accounted_window_len(),
|
||||
config.window,
|
||||
);
|
||||
|
||||
// We don't sort the whole timestamp list
|
||||
let mut time_span = u128::from(
|
||||
*timestamps_slice.select_nth_unstable(window_end - 1).1
|
||||
- *timestamps_slice.select_nth_unstable(window_start).1,
|
||||
);
|
||||
|
||||
let windowed_work =
|
||||
cumulative_difficulties[window_end - 1] - cumulative_difficulties[window_start];
|
||||
|
||||
if time_span == 0 {
|
||||
time_span = 1;
|
||||
}
|
||||
|
||||
// TODO: do checked operations here and unwrap so we don't silently overflow?
|
||||
(windowed_work * hf.block_time().as_secs() as u128 + time_span - 1) / time_span
|
||||
}
|
||||
|
||||
/// Get the start and end of the window to calculate difficulty.
|
||||
fn get_window_start_and_end(
|
||||
window_len: usize,
|
||||
accounted_window: usize,
|
||||
|
@ -253,6 +294,7 @@ fn get_window_start_and_end(
|
|||
}
|
||||
}
|
||||
|
||||
/// Returns the timestamps and cumulative difficulty for the blocks with heights in the specified range.
|
||||
#[instrument(name = "get_blocks_timestamps", skip(database), level = "info")]
|
||||
async fn get_blocks_in_pow_info<D: Database + Clone>(
|
||||
database: D,
|
||||
|
@ -260,8 +302,8 @@ async fn get_blocks_in_pow_info<D: Database + Clone>(
|
|||
) -> Result<(VecDeque<u64>, VecDeque<u128>), ExtendedConsensusError> {
|
||||
tracing::info!("Getting blocks timestamps");
|
||||
|
||||
let DatabaseResponse::BlockExtendedHeaderInRange(ext_header) = database
|
||||
.oneshot(DatabaseRequest::BlockExtendedHeaderInRange(block_heights))
|
||||
let BCResponse::BlockExtendedHeaderInRange(ext_header) = database
|
||||
.oneshot(BCReadRequest::BlockExtendedHeaderInRange(block_heights))
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response");
|
||||
|
|
|
@ -3,11 +3,14 @@ use std::ops::Range;
|
|||
use tower::ServiceExt;
|
||||
use tracing::instrument;
|
||||
|
||||
use monero_consensus::{HFVotes, HFsInfo, HardFork};
|
||||
use cuprate_consensus_rules::{HFVotes, HFsInfo, HardFork};
|
||||
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
|
||||
|
||||
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError};
|
||||
use crate::{Database, ExtendedConsensusError};
|
||||
|
||||
// https://cuprate.github.io/monero-docs/consensus_rules/hardforks.html#accepting-a-fork
|
||||
/// The default amount of hard-fork votes to track to decide on activation of a hard-fork.
|
||||
///
|
||||
/// ref: <https://cuprate.github.io/monero-docs/consensus_rules/hardforks.html#accepting-a-fork>
|
||||
const DEFAULT_WINDOW_SIZE: u64 = 10080; // supermajority window check length - a week
|
||||
|
||||
/// Configuration for hard-forks.
|
||||
|
@ -21,6 +24,7 @@ pub struct HardForkConfig {
|
|||
}
|
||||
|
||||
impl HardForkConfig {
|
||||
/// Config for main-net.
|
||||
pub const fn main_net() -> HardForkConfig {
|
||||
Self {
|
||||
info: HFsInfo::main_net(),
|
||||
|
@ -28,6 +32,7 @@ impl HardForkConfig {
|
|||
}
|
||||
}
|
||||
|
||||
/// Config for stage-net.
|
||||
pub const fn stage_net() -> HardForkConfig {
|
||||
Self {
|
||||
info: HFsInfo::stage_net(),
|
||||
|
@ -35,6 +40,7 @@ impl HardForkConfig {
|
|||
}
|
||||
}
|
||||
|
||||
/// Config for test-net.
|
||||
pub const fn test_net() -> HardForkConfig {
|
||||
Self {
|
||||
info: HFsInfo::test_net(),
|
||||
|
@ -46,15 +52,20 @@ impl HardForkConfig {
|
|||
/// A struct that keeps track of the current hard-fork and current votes.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct HardForkState {
|
||||
/// The current active hard-fork.
|
||||
pub(crate) current_hardfork: HardFork,
|
||||
|
||||
/// The hard-fork config.
|
||||
pub(crate) config: HardForkConfig,
|
||||
/// The votes in the current window.
|
||||
pub(crate) votes: HFVotes,
|
||||
|
||||
/// The last block height accounted for.
|
||||
pub(crate) last_height: u64,
|
||||
}
|
||||
|
||||
impl HardForkState {
|
||||
/// Initialize the [`HardForkState`] from the specified chain height.
|
||||
#[instrument(name = "init_hardfork_state", skip(config, database), level = "info")]
|
||||
pub async fn init_from_chain_height<D: Database + Clone>(
|
||||
chain_height: u64,
|
||||
|
@ -76,16 +87,17 @@ impl HardForkState {
|
|||
debug_assert_eq!(votes.total_votes(), config.window)
|
||||
}
|
||||
|
||||
let DatabaseResponse::BlockExtendedHeader(ext_header) = database
|
||||
let BCResponse::BlockExtendedHeader(ext_header) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(DatabaseRequest::BlockExtendedHeader(chain_height - 1))
|
||||
.call(BCReadRequest::BlockExtendedHeader(chain_height - 1))
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
|
||||
let current_hardfork = ext_header.version;
|
||||
let current_hardfork =
|
||||
HardFork::from_version(ext_header.version).expect("Stored block has invalid hardfork");
|
||||
|
||||
let mut hfs = HardForkState {
|
||||
config,
|
||||
|
@ -105,7 +117,10 @@ impl HardForkState {
|
|||
Ok(hfs)
|
||||
}
|
||||
|
||||
/// Add a new block to the cache.
|
||||
pub fn new_block(&mut self, vote: HardFork, height: u64) {
|
||||
// We don't _need_ to take in `height` but it's for safety, so we don't silently loose track
|
||||
// of blocks.
|
||||
assert_eq!(self.last_height + 1, height);
|
||||
self.last_height += 1;
|
||||
|
||||
|
@ -115,6 +130,7 @@ impl HardForkState {
|
|||
vote
|
||||
);
|
||||
|
||||
// This function remove votes outside the window as well.
|
||||
self.votes.add_vote_for_hf(&vote);
|
||||
|
||||
if height > self.config.window {
|
||||
|
@ -136,11 +152,13 @@ impl HardForkState {
|
|||
);
|
||||
}
|
||||
|
||||
/// Returns the current hard-fork.
|
||||
pub fn current_hardfork(&self) -> HardFork {
|
||||
self.current_hardfork
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the block votes for blocks in the specified range.
|
||||
#[instrument(name = "get_votes", skip(database))]
|
||||
async fn get_votes_in_range<D: Database>(
|
||||
database: D,
|
||||
|
@ -149,15 +167,15 @@ async fn get_votes_in_range<D: Database>(
|
|||
) -> Result<HFVotes, ExtendedConsensusError> {
|
||||
let mut votes = HFVotes::new(window_size);
|
||||
|
||||
let DatabaseResponse::BlockExtendedHeaderInRange(vote_list) = database
|
||||
.oneshot(DatabaseRequest::BlockExtendedHeaderInRange(block_heights))
|
||||
let BCResponse::BlockExtendedHeaderInRange(vote_list) = database
|
||||
.oneshot(BCReadRequest::BlockExtendedHeaderInRange(block_heights))
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
|
||||
for hf_info in vote_list.into_iter() {
|
||||
votes.add_vote_for_hf(&hf_info.vote);
|
||||
votes.add_vote_for_hf(&HardFork::from_vote(hf_info.vote));
|
||||
}
|
||||
|
||||
Ok(votes)
|
||||
|
|
|
@ -1,3 +1,8 @@
|
|||
//! RandomX VM Cache
|
||||
//!
|
||||
//! This module keeps track of the RandomX VM to calculate the next blocks PoW, if the block needs a randomX VM and potentially
|
||||
//! more VMs around this height.
|
||||
//!
|
||||
use std::{
|
||||
collections::{HashMap, VecDeque},
|
||||
sync::Arc,
|
||||
|
@ -8,26 +13,35 @@ use randomx_rs::{RandomXCache, RandomXError, RandomXFlag, RandomXVM as VMInner};
|
|||
use rayon::prelude::*;
|
||||
use thread_local::ThreadLocal;
|
||||
use tower::ServiceExt;
|
||||
use tracing::instrument;
|
||||
|
||||
use cuprate_helper::asynch::rayon_spawn_async;
|
||||
use monero_consensus::{
|
||||
use cuprate_consensus_rules::{
|
||||
blocks::{is_randomx_seed_height, RandomX, RX_SEEDHASH_EPOCH_BLOCKS},
|
||||
HardFork,
|
||||
};
|
||||
use cuprate_helper::asynch::rayon_spawn_async;
|
||||
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
|
||||
|
||||
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError};
|
||||
use crate::{Database, ExtendedConsensusError};
|
||||
|
||||
/// The amount of randomX VMs to keep in the cache.
|
||||
const RX_SEEDS_CACHED: usize = 2;
|
||||
|
||||
/// A multithreaded randomX VM.
|
||||
#[derive(Debug)]
|
||||
pub struct RandomXVM {
|
||||
/// These RandomX VMs all share the same cache.
|
||||
vms: ThreadLocal<VMInner>,
|
||||
/// The RandomX cache.
|
||||
cache: RandomXCache,
|
||||
/// The flags used to start the RandomX VMs.
|
||||
flags: RandomXFlag,
|
||||
}
|
||||
|
||||
impl RandomXVM {
|
||||
/// Create a new multithreaded randomX VM with the provided seed.
|
||||
pub fn new(seed: &[u8; 32]) -> Result<Self, RandomXError> {
|
||||
// TODO: allow passing in flags.
|
||||
let flags = RandomXFlag::get_recommended_flags();
|
||||
|
||||
let cache = RandomXCache::new(flags, seed.as_slice())?;
|
||||
|
@ -51,15 +65,21 @@ impl RandomX for RandomXVM {
|
|||
}
|
||||
}
|
||||
|
||||
/// The randomX VMs cache, keeps the VM needed to calculate the current block's PoW hash (if a VM is needed) and a
|
||||
/// couple more around this VM.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct RandomXVMCache {
|
||||
/// The top [`RX_SEEDS_CACHED`] RX seeds.
|
||||
pub(crate) seeds: VecDeque<(u64, [u8; 32])>,
|
||||
/// The VMs for `seeds` (if after hf 12, otherwise this will be empty).
|
||||
pub(crate) vms: HashMap<u64, Arc<RandomXVM>>,
|
||||
|
||||
/// A single cached VM that was given to us from a part of Cuprate.
|
||||
pub(crate) cached_vm: Option<([u8; 32], Arc<RandomXVM>)>,
|
||||
}
|
||||
|
||||
impl RandomXVMCache {
|
||||
#[instrument(name = "init_rx_vm_cache", level = "info", skip(database))]
|
||||
pub async fn init_from_chain_height<D: Database + Clone>(
|
||||
chain_height: u64,
|
||||
hf: &HardFork,
|
||||
|
@ -68,9 +88,12 @@ impl RandomXVMCache {
|
|||
let seed_heights = get_last_rx_seed_heights(chain_height - 1, RX_SEEDS_CACHED);
|
||||
let seed_hashes = get_block_hashes(seed_heights.clone(), database).await?;
|
||||
|
||||
tracing::debug!("last {RX_SEEDS_CACHED} randomX seed heights: {seed_heights:?}",);
|
||||
|
||||
let seeds: VecDeque<(u64, [u8; 32])> = seed_heights.into_iter().zip(seed_hashes).collect();
|
||||
|
||||
let vms = if hf >= &HardFork::V12 {
|
||||
tracing::debug!("Creating RandomX VMs");
|
||||
let seeds_clone = seeds.clone();
|
||||
rayon_spawn_async(move || {
|
||||
seeds_clone
|
||||
|
@ -85,6 +108,7 @@ impl RandomXVMCache {
|
|||
})
|
||||
.await
|
||||
} else {
|
||||
tracing::debug!("We are before hard-fork 12 randomX VMs are not needed.");
|
||||
HashMap::new()
|
||||
};
|
||||
|
||||
|
@ -95,18 +119,25 @@ impl RandomXVMCache {
|
|||
})
|
||||
}
|
||||
|
||||
/// Add a randomX VM to the cache, with the seed it was created with.
|
||||
pub fn add_vm(&mut self, vm: ([u8; 32], Arc<RandomXVM>)) {
|
||||
self.cached_vm.replace(vm);
|
||||
}
|
||||
|
||||
/// Get the RandomX VMs.
|
||||
pub fn get_vms(&self) -> HashMap<u64, Arc<RandomXVM>> {
|
||||
self.vms.clone()
|
||||
}
|
||||
|
||||
/// Add a new block to the VM cache.
|
||||
///
|
||||
/// hash is the block hash not the blocks PoW hash.
|
||||
pub async fn new_block(&mut self, height: u64, hash: &[u8; 32], hf: &HardFork) {
|
||||
let should_make_vms = hf >= &HardFork::V12;
|
||||
if should_make_vms && self.vms.len() != self.seeds.len() {
|
||||
// this will only happen when syncing and rx activates.
|
||||
tracing::debug!("RandomX has activated, initialising VMs");
|
||||
|
||||
let seeds_clone = self.seeds.clone();
|
||||
self.vms = rayon_spawn_async(move || {
|
||||
seeds_clone
|
||||
|
@ -123,12 +154,21 @@ impl RandomXVMCache {
|
|||
}
|
||||
|
||||
if is_randomx_seed_height(height) {
|
||||
tracing::debug!("Block {height} is a randomX seed height, adding it to the cache.",);
|
||||
|
||||
self.seeds.push_front((height, *hash));
|
||||
|
||||
if should_make_vms {
|
||||
let new_vm = 'new_vm_block: {
|
||||
tracing::debug!(
|
||||
"Past hard-fork 12 initializing VM for seed: {}",
|
||||
hex::encode(hash)
|
||||
);
|
||||
|
||||
// Check if we have been given the RX VM from another part of Cuprate.
|
||||
if let Some((cached_hash, cached_vm)) = self.cached_vm.take() {
|
||||
if &cached_hash == hash {
|
||||
tracing::debug!("VM was already created.");
|
||||
break 'new_vm_block cached_vm;
|
||||
}
|
||||
};
|
||||
|
@ -153,6 +193,8 @@ impl RandomXVMCache {
|
|||
}
|
||||
}
|
||||
|
||||
/// Get the last `amount` of RX seeds, the top height returned here will not necessarily be the RX VM for the top block
|
||||
/// in the chain as VMs include some lag before a seed activates.
|
||||
pub(crate) fn get_last_rx_seed_heights(mut last_height: u64, mut amount: usize) -> Vec<u64> {
|
||||
let mut seeds = Vec::with_capacity(amount);
|
||||
if is_randomx_seed_height(last_height) {
|
||||
|
@ -174,6 +216,7 @@ pub(crate) fn get_last_rx_seed_heights(mut last_height: u64, mut amount: usize)
|
|||
seeds
|
||||
}
|
||||
|
||||
/// Gets the block hashes for the heights specified.
|
||||
async fn get_block_hashes<D: Database + Clone>(
|
||||
heights: Vec<u64>,
|
||||
database: D,
|
||||
|
@ -183,10 +226,8 @@ async fn get_block_hashes<D: Database + Clone>(
|
|||
for height in heights {
|
||||
let db = database.clone();
|
||||
fut.push_back(async move {
|
||||
let DatabaseResponse::BlockHash(hash) = db
|
||||
.clone()
|
||||
.oneshot(DatabaseRequest::BlockHash(height))
|
||||
.await?
|
||||
let BCResponse::BlockHash(hash) =
|
||||
db.clone().oneshot(BCReadRequest::BlockHash(height)).await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
|
|
233
consensus/src/context/task.rs
Normal file
233
consensus/src/context/task.rs
Normal file
|
@ -0,0 +1,233 @@
|
|||
//! Context Task
|
||||
//!
|
||||
//! This module contains the async task that handles keeping track of blockchain context.
|
||||
//! It holds all the context caches and handles [`tower::Service`] requests.
|
||||
//!
|
||||
use futures::channel::oneshot;
|
||||
use tokio::sync::mpsc;
|
||||
use tower::ServiceExt;
|
||||
use tracing::Instrument;
|
||||
|
||||
use cuprate_consensus_rules::blocks::ContextToVerifyBlock;
|
||||
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
|
||||
|
||||
use super::{
|
||||
difficulty, hardforks, rx_vms, weight, BlockChainContext, BlockChainContextRequest,
|
||||
BlockChainContextResponse, ContextConfig, RawBlockChainContext, ValidityToken,
|
||||
BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW,
|
||||
};
|
||||
use crate::{Database, ExtendedConsensusError};
|
||||
|
||||
/// A request from the context service to the context task.
|
||||
pub(super) struct ContextTaskRequest {
|
||||
/// The request.
|
||||
pub req: BlockChainContextRequest,
|
||||
/// The response channel.
|
||||
pub tx: oneshot::Sender<Result<BlockChainContextResponse, tower::BoxError>>,
|
||||
/// The tracing span of the requester.
|
||||
pub span: tracing::Span,
|
||||
}
|
||||
|
||||
/// The Context task that keeps the blockchain context and handles requests.
|
||||
pub struct ContextTask {
|
||||
/// A token used to invalidate previous contexts when a new
|
||||
/// block is added to the chain.
|
||||
current_validity_token: ValidityToken,
|
||||
|
||||
/// The difficulty cache.
|
||||
difficulty_cache: difficulty::DifficultyCache,
|
||||
/// The weight cache.
|
||||
weight_cache: weight::BlockWeightsCache,
|
||||
/// The RX VM cache.
|
||||
rx_vm_cache: rx_vms::RandomXVMCache,
|
||||
/// The hard-fork state cache.
|
||||
hardfork_state: hardforks::HardForkState,
|
||||
|
||||
/// The current chain height.
|
||||
chain_height: u64,
|
||||
/// The top block hash.
|
||||
top_block_hash: [u8; 32],
|
||||
/// The total amount of coins generated.
|
||||
already_generated_coins: u64,
|
||||
}
|
||||
|
||||
impl ContextTask {
|
||||
/// Initialize the [`ContextTask`], this will need to pull a lot of data from the database so may take a
|
||||
/// while to complete.
|
||||
pub async fn init_context<D>(
|
||||
cfg: ContextConfig,
|
||||
mut database: D,
|
||||
) -> Result<ContextTask, ExtendedConsensusError>
|
||||
where
|
||||
D: Database + Clone + Send + Sync + 'static,
|
||||
D::Future: Send + 'static,
|
||||
{
|
||||
let ContextConfig {
|
||||
difficulty_cfg,
|
||||
weights_config,
|
||||
hard_fork_cfg,
|
||||
} = cfg;
|
||||
|
||||
tracing::debug!("Initialising blockchain context");
|
||||
|
||||
let BCResponse::ChainHeight(chain_height, top_block_hash) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(BCReadRequest::ChainHeight)
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
|
||||
let BCResponse::GeneratedCoins(already_generated_coins) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(BCReadRequest::GeneratedCoins)
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
|
||||
let db = database.clone();
|
||||
let hardfork_state_handle = tokio::spawn(async move {
|
||||
hardforks::HardForkState::init_from_chain_height(chain_height, hard_fork_cfg, db).await
|
||||
});
|
||||
|
||||
let db = database.clone();
|
||||
let difficulty_cache_handle = tokio::spawn(async move {
|
||||
difficulty::DifficultyCache::init_from_chain_height(chain_height, difficulty_cfg, db)
|
||||
.await
|
||||
});
|
||||
|
||||
let db = database.clone();
|
||||
let weight_cache_handle = tokio::spawn(async move {
|
||||
weight::BlockWeightsCache::init_from_chain_height(chain_height, weights_config, db)
|
||||
.await
|
||||
});
|
||||
|
||||
// Wait for the hardfork state to finish first as we need it to start the randomX VM cache.
|
||||
let hardfork_state = hardfork_state_handle.await.unwrap()?;
|
||||
let current_hf = hardfork_state.current_hardfork();
|
||||
|
||||
let db = database.clone();
|
||||
let rx_seed_handle = tokio::spawn(async move {
|
||||
rx_vms::RandomXVMCache::init_from_chain_height(chain_height, ¤t_hf, db).await
|
||||
});
|
||||
|
||||
let context_svc = ContextTask {
|
||||
current_validity_token: ValidityToken::new(),
|
||||
difficulty_cache: difficulty_cache_handle.await.unwrap()?,
|
||||
weight_cache: weight_cache_handle.await.unwrap()?,
|
||||
rx_vm_cache: rx_seed_handle.await.unwrap()?,
|
||||
hardfork_state,
|
||||
chain_height,
|
||||
already_generated_coins,
|
||||
top_block_hash,
|
||||
};
|
||||
|
||||
Ok(context_svc)
|
||||
}
|
||||
|
||||
/// Handles a [`BlockChainContextRequest`] and returns a [`BlockChainContextResponse`].
|
||||
pub async fn handle_req(
|
||||
&mut self,
|
||||
req: BlockChainContextRequest,
|
||||
) -> Result<BlockChainContextResponse, tower::BoxError> {
|
||||
Ok(match req {
|
||||
BlockChainContextRequest::GetContext => {
|
||||
tracing::debug!("Getting blockchain context");
|
||||
|
||||
let current_hf = self.hardfork_state.current_hardfork();
|
||||
|
||||
BlockChainContextResponse::Context(BlockChainContext {
|
||||
validity_token: self.current_validity_token.clone(),
|
||||
raw: RawBlockChainContext {
|
||||
context_to_verify_block: ContextToVerifyBlock {
|
||||
median_weight_for_block_reward: self
|
||||
.weight_cache
|
||||
.median_for_block_reward(¤t_hf),
|
||||
effective_median_weight: self
|
||||
.weight_cache
|
||||
.effective_median_block_weight(¤t_hf),
|
||||
top_hash: self.top_block_hash,
|
||||
median_block_timestamp: self.difficulty_cache.median_timestamp(
|
||||
usize::try_from(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW).unwrap(),
|
||||
),
|
||||
chain_height: self.chain_height,
|
||||
current_hf,
|
||||
next_difficulty: self.difficulty_cache.next_difficulty(¤t_hf),
|
||||
already_generated_coins: self.already_generated_coins,
|
||||
},
|
||||
rx_vms: self.rx_vm_cache.get_vms(),
|
||||
cumulative_difficulty: self.difficulty_cache.cumulative_difficulty(),
|
||||
median_long_term_weight: self.weight_cache.median_long_term_weight(),
|
||||
top_block_timestamp: self.difficulty_cache.top_block_timestamp(),
|
||||
},
|
||||
})
|
||||
}
|
||||
BlockChainContextRequest::BatchGetDifficulties(blocks) => {
|
||||
tracing::debug!("Getting batch difficulties len: {}", blocks.len() + 1);
|
||||
|
||||
let next_diffs = self
|
||||
.difficulty_cache
|
||||
.next_difficulties(blocks, &self.hardfork_state.current_hardfork());
|
||||
BlockChainContextResponse::BatchDifficulties(next_diffs)
|
||||
}
|
||||
BlockChainContextRequest::NewRXVM(vm) => {
|
||||
tracing::debug!("Adding randomX VM to cache.");
|
||||
|
||||
self.rx_vm_cache.add_vm(vm);
|
||||
BlockChainContextResponse::Ok
|
||||
}
|
||||
BlockChainContextRequest::Update(new) => {
|
||||
tracing::debug!(
|
||||
"Updating blockchain cache with new block, height: {}",
|
||||
new.height
|
||||
);
|
||||
// Cancel the validity token and replace it with a new one.
|
||||
std::mem::replace(&mut self.current_validity_token, ValidityToken::new())
|
||||
.set_data_invalid();
|
||||
|
||||
self.difficulty_cache.new_block(
|
||||
new.height,
|
||||
new.timestamp,
|
||||
new.cumulative_difficulty,
|
||||
);
|
||||
|
||||
self.weight_cache
|
||||
.new_block(new.height, new.weight, new.long_term_weight);
|
||||
|
||||
self.hardfork_state.new_block(new.vote, new.height);
|
||||
|
||||
self.rx_vm_cache
|
||||
.new_block(
|
||||
new.height,
|
||||
&new.block_hash,
|
||||
// We use the current hf and not the hf of the top block as when syncing we need to generate VMs
|
||||
// on the switch to RX not after it.
|
||||
&self.hardfork_state.current_hardfork(),
|
||||
)
|
||||
.await;
|
||||
|
||||
self.chain_height = new.height + 1;
|
||||
self.top_block_hash = new.block_hash;
|
||||
self.already_generated_coins = self
|
||||
.already_generated_coins
|
||||
.saturating_add(new.generated_coins);
|
||||
|
||||
BlockChainContextResponse::Ok
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Run the [`ContextTask`], the task will listen for requests on the passed in channel. When the channel closes the
|
||||
/// task will finish.
|
||||
pub async fn run(mut self, mut rx: mpsc::Receiver<ContextTaskRequest>) {
|
||||
while let Some(req) = rx.recv().await {
|
||||
let res = self.handle_req(req.req).instrument(req.span).await;
|
||||
let _ = req.tx.send(res);
|
||||
}
|
||||
|
||||
tracing::info!("Shutting down blockchain context task.");
|
||||
}
|
||||
}
|
|
@ -1,3 +1,10 @@
|
|||
//! Tokens
|
||||
//!
|
||||
//! This module contains tokens which keep track of the validity of certain data.
|
||||
//! Currently, there is 1 token:
|
||||
//! - [`ValidityToken`]
|
||||
//!
|
||||
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
/// A token representing if a piece of data is valid.
|
||||
|
@ -7,39 +14,20 @@ pub struct ValidityToken {
|
|||
}
|
||||
|
||||
impl ValidityToken {
|
||||
/// Creates a new [`ValidityToken`]
|
||||
pub fn new() -> ValidityToken {
|
||||
ValidityToken {
|
||||
token: CancellationToken::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `true` if the data is still valid.
|
||||
pub fn is_data_valid(&self) -> bool {
|
||||
!self.token.is_cancelled()
|
||||
}
|
||||
|
||||
/// Sets the data to invalid.
|
||||
pub fn set_data_invalid(self) {
|
||||
self.token.cancel()
|
||||
}
|
||||
}
|
||||
|
||||
/// A token representing if a re-org has happened since it's creation.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct ReOrgToken {
|
||||
token: CancellationToken,
|
||||
}
|
||||
|
||||
impl ReOrgToken {
|
||||
pub fn new() -> ReOrgToken {
|
||||
ReOrgToken {
|
||||
token: CancellationToken::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn reorg_happened(&self) -> bool {
|
||||
self.token.is_cancelled()
|
||||
}
|
||||
|
||||
pub fn set_reorg_happened(self) {
|
||||
self.token.cancel()
|
||||
}
|
||||
}
|
||||
|
|
|
@ -16,12 +16,15 @@ use rayon::prelude::*;
|
|||
use tower::ServiceExt;
|
||||
use tracing::instrument;
|
||||
|
||||
use cuprate_consensus_rules::blocks::{penalty_free_zone, PENALTY_FREE_ZONE_5};
|
||||
use cuprate_helper::{asynch::rayon_spawn_async, num::median};
|
||||
use monero_consensus::blocks::{penalty_free_zone, PENALTY_FREE_ZONE_5};
|
||||
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
|
||||
|
||||
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError, HardFork};
|
||||
use crate::{Database, ExtendedConsensusError, HardFork};
|
||||
|
||||
/// The short term block weight window.
|
||||
const SHORT_TERM_WINDOW: u64 = 100;
|
||||
/// The long term block weight window.
|
||||
const LONG_TERM_WINDOW: u64 = 100000;
|
||||
|
||||
/// Configuration for the block weight cache.
|
||||
|
@ -33,6 +36,7 @@ pub struct BlockWeightsCacheConfig {
|
|||
}
|
||||
|
||||
impl BlockWeightsCacheConfig {
|
||||
/// Creates a new [`BlockWeightsCacheConfig`]
|
||||
pub const fn new(short_term_window: u64, long_term_window: u64) -> BlockWeightsCacheConfig {
|
||||
BlockWeightsCacheConfig {
|
||||
short_term_window,
|
||||
|
@ -40,6 +44,7 @@ impl BlockWeightsCacheConfig {
|
|||
}
|
||||
}
|
||||
|
||||
/// Returns the [`BlockWeightsCacheConfig`] for all networks (They are all the same as mainnet).
|
||||
pub fn main_net() -> BlockWeightsCacheConfig {
|
||||
BlockWeightsCacheConfig {
|
||||
short_term_window: SHORT_TERM_WINDOW,
|
||||
|
@ -55,7 +60,9 @@ impl BlockWeightsCacheConfig {
|
|||
/// this data it reduces the load on the database.
|
||||
#[derive(Clone)]
|
||||
pub struct BlockWeightsCache {
|
||||
/// The short term block weights.
|
||||
short_term_block_weights: VecDeque<usize>,
|
||||
/// The long term block weights.
|
||||
long_term_weights: VecDeque<usize>,
|
||||
|
||||
/// The short term block weights sorted so we don't have to sort them every time we need
|
||||
|
@ -68,6 +75,7 @@ pub struct BlockWeightsCache {
|
|||
/// The height of the top block.
|
||||
tip_height: u64,
|
||||
|
||||
/// The block weight config.
|
||||
config: BlockWeightsCacheConfig,
|
||||
}
|
||||
|
||||
|
@ -131,6 +139,7 @@ impl BlockWeightsCache {
|
|||
long_term_weight
|
||||
);
|
||||
|
||||
// add the new block to the `long_term_weights` list and the sorted `cached_sorted_long_term_weights` list.
|
||||
self.long_term_weights.push_back(long_term_weight);
|
||||
match self
|
||||
.cached_sorted_long_term_weights
|
||||
|
@ -141,6 +150,7 @@ impl BlockWeightsCache {
|
|||
.insert(idx, long_term_weight),
|
||||
}
|
||||
|
||||
// If the list now has too many entries remove the oldest.
|
||||
if u64::try_from(self.long_term_weights.len()).unwrap() > self.config.long_term_window {
|
||||
let val = self
|
||||
.long_term_weights
|
||||
|
@ -153,6 +163,7 @@ impl BlockWeightsCache {
|
|||
};
|
||||
}
|
||||
|
||||
// add the block to the short_term_block_weights and the sorted cached_sorted_short_term_weights list.
|
||||
self.short_term_block_weights.push_back(block_weight);
|
||||
match self
|
||||
.cached_sorted_short_term_weights
|
||||
|
@ -163,6 +174,7 @@ impl BlockWeightsCache {
|
|||
.insert(idx, block_weight),
|
||||
}
|
||||
|
||||
// If there are now too many entries remove the oldest.
|
||||
if u64::try_from(self.short_term_block_weights.len()).unwrap()
|
||||
> self.config.short_term_window
|
||||
{
|
||||
|
@ -192,6 +204,7 @@ impl BlockWeightsCache {
|
|||
median(&self.cached_sorted_long_term_weights)
|
||||
}
|
||||
|
||||
/// Returns the median weight over the last [`SHORT_TERM_WINDOW`] blocks, or custom amount of blocks in the config.
|
||||
pub fn median_short_term_weight(&self) -> usize {
|
||||
median(&self.cached_sorted_short_term_weights)
|
||||
}
|
||||
|
@ -221,6 +234,7 @@ impl BlockWeightsCache {
|
|||
}
|
||||
}
|
||||
|
||||
/// Calculates the effective median with the long term and short term median.
|
||||
fn calculate_effective_median_block_weight(
|
||||
hf: &HardFork,
|
||||
median_short_term_weight: usize,
|
||||
|
@ -247,6 +261,7 @@ fn calculate_effective_median_block_weight(
|
|||
effective_median.max(penalty_free_zone(hf))
|
||||
}
|
||||
|
||||
/// Calculates a blocks long term weight.
|
||||
pub fn calculate_block_long_term_weight(
|
||||
hf: &HardFork,
|
||||
block_weight: usize,
|
||||
|
@ -270,6 +285,7 @@ pub fn calculate_block_long_term_weight(
|
|||
min(short_term_constraint, adjusted_block_weight)
|
||||
}
|
||||
|
||||
/// Gets the block weights from the blocks with heights in the range provided.
|
||||
#[instrument(name = "get_block_weights", skip(database))]
|
||||
async fn get_blocks_weight_in_range<D: Database + Clone>(
|
||||
range: Range<u64>,
|
||||
|
@ -277,8 +293,8 @@ async fn get_blocks_weight_in_range<D: Database + Clone>(
|
|||
) -> Result<Vec<usize>, ExtendedConsensusError> {
|
||||
tracing::info!("getting block weights.");
|
||||
|
||||
let DatabaseResponse::BlockExtendedHeaderInRange(ext_headers) = database
|
||||
.oneshot(DatabaseRequest::BlockExtendedHeaderInRange(range))
|
||||
let BCResponse::BlockExtendedHeaderInRange(ext_headers) = database
|
||||
.oneshot(BCReadRequest::BlockExtendedHeaderInRange(range))
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!")
|
||||
|
@ -290,6 +306,7 @@ async fn get_blocks_weight_in_range<D: Database + Clone>(
|
|||
.collect())
|
||||
}
|
||||
|
||||
/// Gets the block long term weights from the blocks with heights in the range provided.
|
||||
#[instrument(name = "get_long_term_weights", skip(database), level = "info")]
|
||||
async fn get_long_term_weight_in_range<D: Database + Clone>(
|
||||
range: Range<u64>,
|
||||
|
@ -297,8 +314,8 @@ async fn get_long_term_weight_in_range<D: Database + Clone>(
|
|||
) -> Result<Vec<usize>, ExtendedConsensusError> {
|
||||
tracing::info!("getting block long term weights.");
|
||||
|
||||
let DatabaseResponse::BlockExtendedHeaderInRange(ext_headers) = database
|
||||
.oneshot(DatabaseRequest::BlockExtendedHeaderInRange(range))
|
||||
let BCResponse::BlockExtendedHeaderInRange(ext_headers) = database
|
||||
.oneshot(BCReadRequest::BlockExtendedHeaderInRange(range))
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!")
|
||||
|
|
|
@ -1,65 +1,59 @@
|
|||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
future::Future,
|
||||
};
|
||||
|
||||
use monero_consensus::{transactions::OutputOnChain, ConsensusError, HardFork};
|
||||
//! Cuprate Consensus
|
||||
//!
|
||||
//! This crate contains 3 [`tower::Service`]s that implement Monero's consensus rules:
|
||||
//!
|
||||
//! - [`BlockChainContextService`] Which handles keeping the current state of the blockchain.
|
||||
//! - [`BlockVerifierService`] Which handles block verification.
|
||||
//! - [`TxVerifierService`] Which handles transaction verification.
|
||||
//!
|
||||
//! This crate is generic over the database which is implemented as a [`tower::Service`]. To
|
||||
//! implement a database you need to have a service which accepts [`BCReadRequest`] and responds
|
||||
//! with [`BCResponse`].
|
||||
//!
|
||||
use cuprate_consensus_rules::{ConsensusError, HardFork};
|
||||
|
||||
mod batch_verifier;
|
||||
pub mod block;
|
||||
pub mod context;
|
||||
pub mod randomx;
|
||||
#[cfg(feature = "binaries")]
|
||||
pub mod rpc;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
pub mod transactions;
|
||||
|
||||
pub use block::{
|
||||
PrePreparedBlock, VerifiedBlockInformation, VerifyBlockRequest, VerifyBlockResponse,
|
||||
};
|
||||
pub use block::{BlockVerifierService, VerifyBlockRequest, VerifyBlockResponse};
|
||||
pub use context::{
|
||||
initialize_blockchain_context, BlockChainContext, BlockChainContextRequest,
|
||||
BlockChainContextResponse, ContextConfig,
|
||||
BlockChainContextResponse, BlockChainContextService, ContextConfig,
|
||||
};
|
||||
pub use transactions::{VerifyTxRequest, VerifyTxResponse};
|
||||
pub use transactions::{TxVerifierService, VerifyTxRequest, VerifyTxResponse};
|
||||
|
||||
// re-export.
|
||||
pub use cuprate_types::blockchain::{BCReadRequest, BCResponse};
|
||||
|
||||
/// An Error returned from one of the consensus services.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum ExtendedConsensusError {
|
||||
/// A consensus error.
|
||||
#[error("{0}")]
|
||||
ConErr(#[from] monero_consensus::ConsensusError),
|
||||
ConErr(#[from] ConsensusError),
|
||||
/// A database error.
|
||||
#[error("Database error: {0}")]
|
||||
DBErr(#[from] tower::BoxError),
|
||||
/// The transactions passed in with this block were not the ones needed.
|
||||
#[error("The transactions passed in with the block are incorrect.")]
|
||||
TxsIncludedWithBlockIncorrect,
|
||||
/// One or more statements in the batch verifier was invalid.
|
||||
#[error("One or more statements in the batch verifier was invalid.")]
|
||||
OneOrMoreBatchVerificationStatementsInvalid,
|
||||
}
|
||||
|
||||
// TODO: instead of (ab)using generic returns return the acc type
|
||||
/// Initialize the 2 verifier [`tower::Service`]s (block and transaction).
|
||||
pub async fn initialize_verifier<D, Ctx>(
|
||||
database: D,
|
||||
ctx_svc: Ctx,
|
||||
) -> Result<
|
||||
(
|
||||
impl tower::Service<
|
||||
VerifyBlockRequest,
|
||||
Response = VerifyBlockResponse,
|
||||
Error = ExtendedConsensusError,
|
||||
Future = impl Future<Output = Result<VerifyBlockResponse, ExtendedConsensusError>>
|
||||
+ Send
|
||||
+ 'static,
|
||||
> + Clone
|
||||
+ Send
|
||||
+ 'static,
|
||||
impl tower::Service<
|
||||
VerifyTxRequest,
|
||||
Response = VerifyTxResponse,
|
||||
Error = ExtendedConsensusError,
|
||||
Future = impl Future<Output = Result<VerifyTxResponse, ExtendedConsensusError>>
|
||||
+ Send
|
||||
+ 'static,
|
||||
> + Clone
|
||||
+ Send
|
||||
+ 'static,
|
||||
BlockVerifierService<Ctx, TxVerifierService<D>, D>,
|
||||
TxVerifierService<D>,
|
||||
),
|
||||
ConsensusError,
|
||||
>
|
||||
|
@ -76,73 +70,41 @@ where
|
|||
+ 'static,
|
||||
Ctx::Future: Send + 'static,
|
||||
{
|
||||
let tx_svc = transactions::TxVerifierService::new(database.clone());
|
||||
let block_svc = block::BlockVerifierService::new(ctx_svc, tx_svc.clone(), database);
|
||||
let tx_svc = TxVerifierService::new(database.clone());
|
||||
let block_svc = BlockVerifierService::new(ctx_svc, tx_svc.clone(), database);
|
||||
Ok((block_svc, tx_svc))
|
||||
}
|
||||
|
||||
pub trait Database:
|
||||
tower::Service<DatabaseRequest, Response = DatabaseResponse, Error = tower::BoxError>
|
||||
{
|
||||
}
|
||||
|
||||
impl<T: tower::Service<DatabaseRequest, Response = DatabaseResponse, Error = tower::BoxError>>
|
||||
Database for T
|
||||
{
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct ExtendedBlockHeader {
|
||||
pub version: HardFork,
|
||||
pub vote: HardFork,
|
||||
|
||||
pub timestamp: u64,
|
||||
pub cumulative_difficulty: u128,
|
||||
|
||||
pub block_weight: usize,
|
||||
pub long_term_weight: usize,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum DatabaseRequest {
|
||||
BlockExtendedHeader(u64),
|
||||
BlockHash(u64),
|
||||
|
||||
BlockExtendedHeaderInRange(std::ops::Range<u64>),
|
||||
|
||||
ChainHeight,
|
||||
GeneratedCoins,
|
||||
|
||||
Outputs(HashMap<u64, HashSet<u64>>),
|
||||
NumberOutputsWithAmount(Vec<u64>),
|
||||
|
||||
CheckKIsNotSpent(HashSet<[u8; 32]>),
|
||||
|
||||
#[cfg(feature = "binaries")]
|
||||
BlockBatchInRange(std::ops::Range<u64>),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum DatabaseResponse {
|
||||
BlockExtendedHeader(ExtendedBlockHeader),
|
||||
BlockHash([u8; 32]),
|
||||
|
||||
BlockExtendedHeaderInRange(Vec<ExtendedBlockHeader>),
|
||||
|
||||
ChainHeight(u64, [u8; 32]),
|
||||
GeneratedCoins(u64),
|
||||
|
||||
Outputs(HashMap<u64, HashMap<u64, OutputOnChain>>),
|
||||
NumberOutputsWithAmount(HashMap<u64, usize>),
|
||||
|
||||
/// returns true if key images are spent
|
||||
CheckKIsNotSpent(bool),
|
||||
|
||||
#[cfg(feature = "binaries")]
|
||||
BlockBatchInRange(
|
||||
Vec<(
|
||||
monero_serai::block::Block,
|
||||
Vec<monero_serai::transaction::Transaction>,
|
||||
)>,
|
||||
),
|
||||
use __private::Database;
|
||||
|
||||
pub mod __private {
|
||||
use std::future::Future;
|
||||
|
||||
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
|
||||
|
||||
/// A type alias trait used to represent a database, so we don't have to write [`tower::Service`] bounds
|
||||
/// everywhere.
|
||||
///
|
||||
/// Automatically implemented for:
|
||||
/// ```ignore
|
||||
/// tower::Service<BCReadRequest, Response = BCResponse, Error = tower::BoxError>
|
||||
/// ```
|
||||
pub trait Database:
|
||||
tower::Service<
|
||||
BCReadRequest,
|
||||
Response = BCResponse,
|
||||
Error = tower::BoxError,
|
||||
Future = Self::Future2,
|
||||
>
|
||||
{
|
||||
type Future2: Future<Output = Result<Self::Response, Self::Error>> + Send + 'static;
|
||||
}
|
||||
|
||||
impl<T: tower::Service<BCReadRequest, Response = BCResponse, Error = tower::BoxError>>
|
||||
crate::Database for T
|
||||
where
|
||||
T::Future: Future<Output = Result<Self::Response, Self::Error>> + Send + 'static,
|
||||
{
|
||||
type Future2 = T::Future;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,35 +0,0 @@
|
|||
use randomx_rs::{RandomXCache, RandomXError, RandomXFlag, RandomXVM as VMInner};
|
||||
use thread_local::ThreadLocal;
|
||||
|
||||
use monero_consensus::blocks::RandomX;
|
||||
|
||||
pub struct RandomXVM {
|
||||
vms: ThreadLocal<VMInner>,
|
||||
cache: RandomXCache,
|
||||
flags: RandomXFlag,
|
||||
}
|
||||
|
||||
impl RandomXVM {
|
||||
pub fn new(seed: [u8; 32]) -> Result<Self, RandomXError> {
|
||||
let flags = RandomXFlag::get_recommended_flags();
|
||||
|
||||
let cache = RandomXCache::new(flags, &seed)?;
|
||||
|
||||
Ok(RandomXVM {
|
||||
vms: ThreadLocal::new(),
|
||||
cache,
|
||||
flags,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl RandomX for RandomXVM {
|
||||
type Error = RandomXError;
|
||||
|
||||
fn calculate_hash(&self, buf: &[u8]) -> Result<[u8; 32], Self::Error> {
|
||||
self.vms
|
||||
.get_or_try(|| VMInner::new(self.flags, Some(self.cache.clone()), None))?
|
||||
.calculate_hash(buf)
|
||||
.map(|out| out.try_into().unwrap())
|
||||
}
|
||||
}
|
|
@ -1,288 +0,0 @@
|
|||
use std::{
|
||||
cmp::min,
|
||||
collections::{HashMap, HashSet},
|
||||
future::Future,
|
||||
ops::Range,
|
||||
pin::Pin,
|
||||
sync::Arc,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use futures::{
|
||||
stream::{FuturesOrdered, FuturesUnordered},
|
||||
FutureExt, StreamExt, TryFutureExt, TryStreamExt,
|
||||
};
|
||||
use tokio::sync::RwLock;
|
||||
use tower::{balance::p2c::Balance, ServiceExt};
|
||||
|
||||
use cuprate_helper::asynch::rayon_spawn_async;
|
||||
|
||||
use crate::{DatabaseRequest, DatabaseResponse};
|
||||
|
||||
pub mod cache;
|
||||
mod connection;
|
||||
mod discover;
|
||||
|
||||
use cache::ScanningCache;
|
||||
|
||||
const MAX_OUTS_PER_RPC: usize = 5000; // the cap for monerod is 5000
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct RpcConfig {
|
||||
pub max_blocks_per_node: u64,
|
||||
pub max_block_headers_per_node: u64,
|
||||
}
|
||||
|
||||
impl RpcConfig {
|
||||
pub fn block_batch_size(&self) -> u64 {
|
||||
self.max_blocks_per_node * 3
|
||||
}
|
||||
|
||||
pub fn new(max_blocks_per_node: u64, max_block_headers_per_node: u64) -> RpcConfig {
|
||||
RpcConfig {
|
||||
max_block_headers_per_node,
|
||||
max_blocks_per_node,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Attempts(u64);
|
||||
|
||||
impl<Req: Clone, Res, E> tower::retry::Policy<Req, Res, E> for Attempts {
|
||||
type Future = futures::future::Ready<Self>;
|
||||
fn retry(&self, _: &Req, result: Result<&Res, &E>) -> Option<Self::Future> {
|
||||
if result.is_err() {
|
||||
if self.0 == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(futures::future::ready(Attempts(self.0 - 1)))
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
fn clone_request(&self, req: &Req) -> Option<Req> {
|
||||
Some(req.clone())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn init_rpc_load_balancer(
|
||||
addresses: Vec<String>,
|
||||
cache: Arc<RwLock<ScanningCache>>,
|
||||
config: Arc<std::sync::RwLock<RpcConfig>>,
|
||||
) -> impl tower::Service<
|
||||
DatabaseRequest,
|
||||
Response = DatabaseResponse,
|
||||
Error = tower::BoxError,
|
||||
Future = Pin<
|
||||
Box<dyn Future<Output = Result<DatabaseResponse, tower::BoxError>> + Send + 'static>,
|
||||
>,
|
||||
> + Clone {
|
||||
let (rpc_discoverer_tx, rpc_discoverer_rx) = futures::channel::mpsc::channel(0);
|
||||
|
||||
let rpc_balance = Balance::new(Box::pin(
|
||||
rpc_discoverer_rx.map(Result::<_, tower::BoxError>::Ok),
|
||||
));
|
||||
let rpc_buffer = tower::buffer::Buffer::new(rpc_balance, 50);
|
||||
let rpcs = tower::retry::Retry::new(Attempts(10), rpc_buffer);
|
||||
|
||||
let discover = discover::RPCDiscover {
|
||||
initial_list: addresses,
|
||||
ok_channel: rpc_discoverer_tx,
|
||||
already_connected: Default::default(),
|
||||
cache: cache.clone(),
|
||||
};
|
||||
|
||||
tokio::spawn(discover.run());
|
||||
|
||||
RpcBalancer {
|
||||
rpcs,
|
||||
config,
|
||||
cache,
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct RpcBalancer<T: Clone> {
|
||||
rpcs: T,
|
||||
config: Arc<std::sync::RwLock<RpcConfig>>,
|
||||
cache: Arc<RwLock<ScanningCache>>,
|
||||
}
|
||||
|
||||
impl<T> tower::Service<DatabaseRequest> for RpcBalancer<T>
|
||||
where
|
||||
T: tower::Service<DatabaseRequest, Response = DatabaseResponse, Error = tower::BoxError>
|
||||
+ Clone
|
||||
+ Send
|
||||
+ Sync
|
||||
+ 'static,
|
||||
T::Future: Send + 'static,
|
||||
{
|
||||
type Response = DatabaseResponse;
|
||||
type Error = tower::BoxError;
|
||||
type Future =
|
||||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||
|
||||
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: DatabaseRequest) -> Self::Future {
|
||||
let this = self.rpcs.clone();
|
||||
let config_mutex = self.config.clone();
|
||||
let config = config_mutex.clone();
|
||||
|
||||
let cache = self.cache.clone();
|
||||
|
||||
match req {
|
||||
DatabaseRequest::CheckKIsNotSpent(kis) => async move {
|
||||
Ok(DatabaseResponse::CheckKIsNotSpent(
|
||||
cache.read().await.are_kis_spent(kis),
|
||||
))
|
||||
}
|
||||
.boxed(),
|
||||
DatabaseRequest::GeneratedCoins => async move {
|
||||
Ok(DatabaseResponse::GeneratedCoins(
|
||||
cache.read().await.already_generated_coins,
|
||||
))
|
||||
}
|
||||
.boxed(),
|
||||
DatabaseRequest::NumberOutputsWithAmount(amt) => async move {
|
||||
Ok(DatabaseResponse::NumberOutputsWithAmount(
|
||||
cache.read().await.numb_outs(&amt),
|
||||
))
|
||||
}
|
||||
.boxed(),
|
||||
DatabaseRequest::BlockBatchInRange(range) => {
|
||||
let resp_to_ret = |resp: DatabaseResponse| {
|
||||
let DatabaseResponse::BlockBatchInRange(pow_info) = resp else {
|
||||
panic!("Database sent incorrect response");
|
||||
};
|
||||
pow_info
|
||||
};
|
||||
split_range_request(
|
||||
this,
|
||||
range,
|
||||
DatabaseRequest::BlockBatchInRange,
|
||||
DatabaseResponse::BlockBatchInRange,
|
||||
resp_to_ret,
|
||||
config.read().unwrap().max_blocks_per_node,
|
||||
)
|
||||
.boxed()
|
||||
}
|
||||
DatabaseRequest::BlockExtendedHeaderInRange(range) => {
|
||||
let resp_to_ret = |resp: DatabaseResponse| {
|
||||
let DatabaseResponse::BlockExtendedHeaderInRange(pow_info) = resp else {
|
||||
panic!("Database sent incorrect response");
|
||||
};
|
||||
pow_info
|
||||
};
|
||||
split_range_request(
|
||||
this,
|
||||
range,
|
||||
DatabaseRequest::BlockExtendedHeaderInRange,
|
||||
DatabaseResponse::BlockExtendedHeaderInRange,
|
||||
resp_to_ret,
|
||||
config.read().unwrap().max_block_headers_per_node,
|
||||
)
|
||||
.boxed()
|
||||
}
|
||||
DatabaseRequest::Outputs(outs) => async move {
|
||||
let split_outs = rayon_spawn_async(|| {
|
||||
let mut split_outs: Vec<HashMap<u64, HashSet<u64>>> = Vec::new();
|
||||
let mut i: usize = 0;
|
||||
for (amount, ixs) in outs {
|
||||
if ixs.len() > MAX_OUTS_PER_RPC {
|
||||
for ii in (0..ixs.len()).step_by(MAX_OUTS_PER_RPC) {
|
||||
let mut amt_map = HashSet::with_capacity(MAX_OUTS_PER_RPC);
|
||||
amt_map.extend(ixs.iter().skip(ii).copied().take(MAX_OUTS_PER_RPC));
|
||||
|
||||
let mut map = HashMap::new();
|
||||
map.insert(amount, amt_map);
|
||||
split_outs.push(map);
|
||||
i += 1;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(map) = split_outs.get_mut(i.saturating_sub(1)) {
|
||||
if map.iter().map(|(_, amt_map)| amt_map.len()).sum::<usize>()
|
||||
+ ixs.len()
|
||||
< MAX_OUTS_PER_RPC
|
||||
{
|
||||
assert!(map.insert(amount, ixs).is_none());
|
||||
continue;
|
||||
}
|
||||
}
|
||||
let mut map = HashMap::new();
|
||||
map.insert(amount, ixs);
|
||||
split_outs.push(map);
|
||||
i += 1;
|
||||
}
|
||||
split_outs
|
||||
})
|
||||
.await;
|
||||
|
||||
let mut futs = FuturesUnordered::from_iter(
|
||||
split_outs
|
||||
.into_iter()
|
||||
.map(|map| this.clone().oneshot(DatabaseRequest::Outputs(map))),
|
||||
);
|
||||
|
||||
let mut outs = HashMap::new();
|
||||
|
||||
while let Some(out_response) = futs.next().await {
|
||||
let DatabaseResponse::Outputs(out_response) = out_response? else {
|
||||
panic!("RPC sent incorrect response!");
|
||||
};
|
||||
out_response.into_iter().for_each(|(amt, amt_map)| {
|
||||
outs.entry(amt).or_insert_with(HashMap::new).extend(amt_map)
|
||||
});
|
||||
}
|
||||
Ok(DatabaseResponse::Outputs(outs))
|
||||
}
|
||||
.boxed(),
|
||||
req => this.oneshot(req).boxed(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn split_range_request<T, Ret>(
|
||||
rpc: T,
|
||||
range: Range<u64>,
|
||||
req: impl Fn(Range<u64>) -> DatabaseRequest + Send + 'static,
|
||||
resp: impl FnOnce(Vec<Ret>) -> DatabaseResponse + Send + 'static,
|
||||
resp_to_ret: impl Fn(DatabaseResponse) -> Vec<Ret> + Copy + Send + 'static,
|
||||
max_request_per_rpc: u64,
|
||||
) -> impl Future<Output = Result<DatabaseResponse, tower::BoxError>> + Send + 'static
|
||||
where
|
||||
T: tower::Service<DatabaseRequest, Response = DatabaseResponse, Error = tower::BoxError>
|
||||
+ Clone
|
||||
+ Send
|
||||
+ Sync
|
||||
+ 'static,
|
||||
T::Future: Send + 'static,
|
||||
Ret: Send + 'static,
|
||||
{
|
||||
let iter = (0..range.clone().count() as u64)
|
||||
.step_by(max_request_per_rpc as usize)
|
||||
.map(|i| {
|
||||
let new_range =
|
||||
(range.start + i)..(min(range.start + i + max_request_per_rpc, range.end));
|
||||
rpc.clone().oneshot(req(new_range)).map_ok(resp_to_ret)
|
||||
});
|
||||
|
||||
let fut = FuturesOrdered::from_iter(iter);
|
||||
|
||||
let mut res = Vec::with_capacity(range.count());
|
||||
|
||||
async move {
|
||||
for mut rpc_res in fut.try_collect::<Vec<Vec<_>>>().await?.into_iter() {
|
||||
res.append(&mut rpc_res)
|
||||
}
|
||||
|
||||
Ok(resp(res))
|
||||
}
|
||||
}
|
|
@ -1,146 +0,0 @@
|
|||
#![cfg(feature = "binaries")]
|
||||
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
collections::HashSet,
|
||||
fmt::{Display, Formatter},
|
||||
io::{BufWriter, Write},
|
||||
path::Path,
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use borsh::{BorshDeserialize, BorshSerialize};
|
||||
use monero_serai::transaction::{Input, Timelock, Transaction};
|
||||
use tracing_subscriber::fmt::MakeWriter;
|
||||
|
||||
use crate::transactions::TransactionVerificationData;
|
||||
|
||||
/// A cache which can keep chain state while scanning.
|
||||
///
|
||||
/// Because we are using a RPC interface with a node we need to keep track
|
||||
/// of certain data that the node doesn't hold or give us like the number
|
||||
/// of outputs at a certain time.
|
||||
#[derive(Debug, Default, Clone, BorshSerialize, BorshDeserialize)]
|
||||
pub struct ScanningCache {
|
||||
// network: u8,
|
||||
numb_outs: HashMap<u64, usize>,
|
||||
time_locked_out: HashMap<[u8; 32], u64>,
|
||||
kis: HashSet<[u8; 32]>,
|
||||
pub already_generated_coins: u64,
|
||||
/// The height of the *next* block to scan.
|
||||
pub height: u64,
|
||||
}
|
||||
|
||||
impl ScanningCache {
|
||||
pub fn save(&self, file: &Path) -> Result<(), tower::BoxError> {
|
||||
let file = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.truncate(true)
|
||||
.create(true)
|
||||
.open(file)?;
|
||||
let mut writer = BufWriter::new(file.make_writer());
|
||||
borsh::to_writer(&mut writer, &self)?;
|
||||
writer.flush()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn load(file: &Path) -> Result<ScanningCache, tower::BoxError> {
|
||||
let mut file = std::fs::OpenOptions::new().read(true).open(file)?;
|
||||
|
||||
let data: ScanningCache = borsh::from_reader(&mut file)?;
|
||||
Ok(data)
|
||||
}
|
||||
|
||||
pub fn add_new_block_data(
|
||||
&mut self,
|
||||
generated_coins: u64,
|
||||
miner_tx: &Transaction,
|
||||
txs: &[Arc<TransactionVerificationData>],
|
||||
) {
|
||||
self.add_tx_time_lock(miner_tx.hash(), miner_tx.prefix.timelock);
|
||||
miner_tx.prefix.outputs.iter().for_each(|out| {
|
||||
self.add_outs(miner_tx.prefix.version == 2, out.amount.unwrap_or(0), 1)
|
||||
});
|
||||
|
||||
txs.iter().for_each(|tx| {
|
||||
self.add_tx_time_lock(tx.tx_hash, tx.tx.prefix.timelock);
|
||||
tx.tx.prefix.outputs.iter().for_each(|out| {
|
||||
self.add_outs(tx.tx.prefix.version == 2, out.amount.unwrap_or(0), 1)
|
||||
});
|
||||
|
||||
tx.tx.prefix.inputs.iter().for_each(|inp| match inp {
|
||||
Input::ToKey { key_image, .. } => {
|
||||
assert!(self.kis.insert(key_image.compress().to_bytes()))
|
||||
}
|
||||
_ => unreachable!(),
|
||||
})
|
||||
});
|
||||
|
||||
self.already_generated_coins = self.already_generated_coins.saturating_add(generated_coins);
|
||||
self.height += 1;
|
||||
}
|
||||
|
||||
/// Returns true if any kis are included in our spent set.
|
||||
pub fn are_kis_spent(&self, kis: HashSet<[u8; 32]>) -> bool {
|
||||
!self.kis.is_disjoint(&kis)
|
||||
}
|
||||
|
||||
pub fn outputs_time_lock(&self, tx: &[u8; 32]) -> Timelock {
|
||||
let time_lock = self.time_locked_out.get(tx).copied().unwrap_or(0);
|
||||
match time_lock {
|
||||
0 => Timelock::None,
|
||||
block if block < 500_000_000 => Timelock::Block(block as usize),
|
||||
time => Timelock::Time(time),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_tx_time_lock(&mut self, tx: [u8; 32], time_lock: Timelock) {
|
||||
match time_lock {
|
||||
Timelock::None => (),
|
||||
lock => {
|
||||
self.time_locked_out.insert(
|
||||
tx,
|
||||
match lock {
|
||||
Timelock::None => unreachable!(),
|
||||
Timelock::Block(x) => x as u64,
|
||||
Timelock::Time(x) => x,
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn total_outs(&self) -> usize {
|
||||
self.numb_outs.values().sum()
|
||||
}
|
||||
|
||||
pub fn numb_outs(&self, amounts: &[u64]) -> HashMap<u64, usize> {
|
||||
amounts
|
||||
.iter()
|
||||
.map(|amount| (*amount, *self.numb_outs.get(amount).unwrap_or(&0)))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn add_outs(&mut self, is_v2: bool, amount: u64, count: usize) {
|
||||
let amount = if is_v2 { 0 } else { amount };
|
||||
|
||||
if let Some(numb_outs) = self.numb_outs.get_mut(&amount) {
|
||||
*numb_outs += count;
|
||||
} else {
|
||||
self.numb_outs.insert(amount, count);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for ScanningCache {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
let rct_outs = *self.numb_outs(&[0]).get(&0).unwrap();
|
||||
let total_outs = self.total_outs();
|
||||
|
||||
f.debug_struct("Cache")
|
||||
.field("next_block", &self.height)
|
||||
.field("rct_outs", &rct_outs)
|
||||
.field("total_outs", &total_outs)
|
||||
.finish()
|
||||
}
|
||||
}
|
|
@ -1,476 +0,0 @@
|
|||
use std::ops::Deref;
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
ops::Range,
|
||||
sync::Arc,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use curve25519_dalek::edwards::CompressedEdwardsY;
|
||||
use futures::{
|
||||
channel::{mpsc, oneshot},
|
||||
StreamExt,
|
||||
};
|
||||
use monero_serai::{
|
||||
block::Block,
|
||||
rpc::{HttpRpc, Rpc},
|
||||
transaction::Transaction,
|
||||
};
|
||||
use monero_wire::common::TransactionBlobs;
|
||||
use rayon::prelude::*;
|
||||
use serde::Deserialize;
|
||||
use serde_json::json;
|
||||
use tokio::{
|
||||
sync::RwLock,
|
||||
task::JoinHandle,
|
||||
time::{timeout, Duration},
|
||||
};
|
||||
use tower::Service;
|
||||
use tracing::{instrument, Instrument};
|
||||
|
||||
use cuprate_helper::asynch::{rayon_spawn_async, InfallibleOneshotReceiver};
|
||||
|
||||
use super::ScanningCache;
|
||||
use crate::{DatabaseRequest, DatabaseResponse, ExtendedBlockHeader, HardFork, OutputOnChain};
|
||||
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(300);
|
||||
const OUTPUTS_TIMEOUT: Duration = Duration::from_secs(50);
|
||||
|
||||
pub struct RpcConnectionSvc {
|
||||
pub(crate) address: String,
|
||||
|
||||
pub(crate) rpc_task_handle: JoinHandle<()>,
|
||||
pub(crate) rpc_task_chan: mpsc::Sender<RpcReq>,
|
||||
}
|
||||
|
||||
impl Service<DatabaseRequest> for RpcConnectionSvc {
|
||||
type Response = DatabaseResponse;
|
||||
type Error = tower::BoxError;
|
||||
type Future = InfallibleOneshotReceiver<Result<Self::Response, Self::Error>>;
|
||||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
if self.rpc_task_handle.is_finished() {
|
||||
return Poll::Ready(Err("RPC task has exited!".into()));
|
||||
}
|
||||
self.rpc_task_chan.poll_ready(cx).map_err(Into::into)
|
||||
}
|
||||
|
||||
fn call(&mut self, req: DatabaseRequest) -> Self::Future {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
let req = RpcReq {
|
||||
req,
|
||||
res_chan: tx,
|
||||
span: tracing::info_span!(parent: &tracing::Span::current(), "rpc", addr = &self.address),
|
||||
};
|
||||
|
||||
self.rpc_task_chan
|
||||
.try_send(req)
|
||||
.expect("poll_ready should be called first!");
|
||||
|
||||
rx.into()
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct RpcReq {
|
||||
req: DatabaseRequest,
|
||||
res_chan: oneshot::Sender<Result<DatabaseResponse, tower::BoxError>>,
|
||||
span: tracing::Span,
|
||||
}
|
||||
|
||||
pub struct RpcConnection {
|
||||
pub(crate) address: String,
|
||||
|
||||
pub(crate) con: Rpc<HttpRpc>,
|
||||
pub(crate) cache: Arc<RwLock<ScanningCache>>,
|
||||
|
||||
pub(crate) req_chan: mpsc::Receiver<RpcReq>,
|
||||
}
|
||||
|
||||
impl RpcConnection {
|
||||
async fn get_block_hash(&self, height: u64) -> Result<[u8; 32], tower::BoxError> {
|
||||
self.con
|
||||
.get_block_hash(height.try_into().unwrap())
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
async fn get_extended_block_header(
|
||||
&self,
|
||||
height: u64,
|
||||
) -> Result<ExtendedBlockHeader, tower::BoxError> {
|
||||
tracing::info!("Retrieving block info with height: {}", height);
|
||||
|
||||
#[derive(Deserialize, Debug)]
|
||||
struct Response {
|
||||
block_header: BlockInfo,
|
||||
}
|
||||
|
||||
let info = {
|
||||
let res = self
|
||||
.con
|
||||
.json_rpc_call::<Response>(
|
||||
"get_block_header_by_height",
|
||||
Some(json!({"height": height})),
|
||||
)
|
||||
.await?;
|
||||
res.block_header
|
||||
};
|
||||
|
||||
Ok(ExtendedBlockHeader {
|
||||
version: HardFork::from_version(info.major_version)
|
||||
.expect("previously checked block has incorrect version"),
|
||||
vote: HardFork::from_vote(info.minor_version),
|
||||
timestamp: info.timestamp,
|
||||
cumulative_difficulty: u128_from_low_high(
|
||||
info.cumulative_difficulty,
|
||||
info.cumulative_difficulty_top64,
|
||||
),
|
||||
block_weight: info.block_weight,
|
||||
long_term_weight: info.long_term_weight,
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_extended_block_header_in_range(
|
||||
&self,
|
||||
range: Range<u64>,
|
||||
) -> Result<Vec<ExtendedBlockHeader>, tower::BoxError> {
|
||||
#[derive(Deserialize, Debug)]
|
||||
struct Response {
|
||||
headers: Vec<BlockInfo>,
|
||||
}
|
||||
|
||||
let res = self
|
||||
.con
|
||||
.json_rpc_call::<Response>(
|
||||
"get_block_headers_range",
|
||||
Some(json!({"start_height": range.start, "end_height": range.end - 1})),
|
||||
)
|
||||
.await?;
|
||||
|
||||
tracing::info!("Retrieved block headers in range: {:?}", range);
|
||||
|
||||
Ok(rayon_spawn_async(|| {
|
||||
res.headers
|
||||
.into_iter()
|
||||
.map(|info| ExtendedBlockHeader {
|
||||
version: HardFork::from_version(info.major_version)
|
||||
.expect("previously checked block has incorrect version"),
|
||||
vote: HardFork::from_vote(info.minor_version),
|
||||
timestamp: info.timestamp,
|
||||
cumulative_difficulty: u128_from_low_high(
|
||||
info.cumulative_difficulty,
|
||||
info.cumulative_difficulty_top64,
|
||||
),
|
||||
block_weight: info.block_weight,
|
||||
long_term_weight: info.long_term_weight,
|
||||
})
|
||||
.collect()
|
||||
})
|
||||
.await)
|
||||
}
|
||||
|
||||
async fn get_blocks_in_range(
|
||||
&self,
|
||||
range: Range<u64>,
|
||||
) -> Result<Vec<(Block, Vec<Transaction>)>, tower::BoxError> {
|
||||
tracing::info!("Getting blocks in range: {:?}", range);
|
||||
|
||||
mod items {
|
||||
use monero_wire::common::BlockCompleteEntry;
|
||||
|
||||
pub struct Request {
|
||||
pub heights: Vec<u64>,
|
||||
}
|
||||
|
||||
epee_encoding::epee_object!(
|
||||
Request,
|
||||
heights: Vec<u64>,
|
||||
);
|
||||
|
||||
pub struct Response {
|
||||
pub blocks: Vec<BlockCompleteEntry>,
|
||||
}
|
||||
|
||||
epee_encoding::epee_object!(
|
||||
Response,
|
||||
blocks: Vec<BlockCompleteEntry>,
|
||||
);
|
||||
}
|
||||
use items::*;
|
||||
|
||||
let res = self
|
||||
.con
|
||||
.bin_call(
|
||||
"get_blocks_by_height.bin",
|
||||
epee_encoding::to_bytes(Request {
|
||||
heights: range.collect(),
|
||||
})?
|
||||
.to_vec(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let address = self.address.clone();
|
||||
rayon_spawn_async(move || {
|
||||
let blocks: Response =
|
||||
epee_encoding::from_bytes(&mut epee_encoding::macros::bytes::Bytes::from(res))?;
|
||||
|
||||
blocks
|
||||
.blocks
|
||||
.into_par_iter()
|
||||
.map(|b| {
|
||||
let block = Block::read(&mut b.block.deref())?;
|
||||
|
||||
let txs = match b.txs {
|
||||
TransactionBlobs::Pruned(_) => return Err("node sent pruned txs!".into()),
|
||||
TransactionBlobs::Normal(txs) => txs
|
||||
.into_par_iter()
|
||||
.map(|tx| Transaction::read(&mut tx.deref()))
|
||||
.collect::<Result<_, _>>()?,
|
||||
TransactionBlobs::None => vec![],
|
||||
};
|
||||
|
||||
assert_eq!(
|
||||
block.txs.len(),
|
||||
txs.len(),
|
||||
"node: {}, height: {}, node is pruned, which is not supported!",
|
||||
address,
|
||||
block.number().unwrap(),
|
||||
);
|
||||
|
||||
Ok((block, txs))
|
||||
})
|
||||
.collect::<Result<_, tower::BoxError>>()
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_outputs(
|
||||
&self,
|
||||
out_ids: HashMap<u64, HashSet<u64>>,
|
||||
) -> Result<HashMap<u64, HashMap<u64, OutputOnChain>>, tower::BoxError> {
|
||||
tracing::info!(
|
||||
"Getting outputs len: {}",
|
||||
out_ids.values().map(|amt_map| amt_map.len()).sum::<usize>()
|
||||
);
|
||||
|
||||
mod items {
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
pub struct OutputID {
|
||||
pub amount: u64,
|
||||
pub index: u64,
|
||||
}
|
||||
|
||||
epee_encoding::epee_object!(
|
||||
OutputID,
|
||||
amount: u64,
|
||||
index: u64,
|
||||
);
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Request {
|
||||
pub outputs: Vec<OutputID>,
|
||||
}
|
||||
|
||||
epee_encoding::epee_object!(
|
||||
Request,
|
||||
outputs: Vec<OutputID>,
|
||||
);
|
||||
|
||||
pub struct OutputRes {
|
||||
pub height: u64,
|
||||
pub key: [u8; 32],
|
||||
pub mask: [u8; 32],
|
||||
pub txid: [u8; 32],
|
||||
}
|
||||
|
||||
epee_encoding::epee_object!(
|
||||
OutputRes,
|
||||
height: u64,
|
||||
key: [u8; 32],
|
||||
mask: [u8; 32],
|
||||
txid: [u8; 32],
|
||||
);
|
||||
|
||||
pub struct Response {
|
||||
pub outs: Vec<OutputRes>,
|
||||
}
|
||||
|
||||
epee_encoding::epee_object!(
|
||||
Response,
|
||||
outs: Vec<OutputRes>,
|
||||
);
|
||||
}
|
||||
|
||||
use items::*;
|
||||
|
||||
let outputs = rayon_spawn_async(|| {
|
||||
out_ids
|
||||
.into_iter()
|
||||
.flat_map(|(amt, amt_map)| {
|
||||
amt_map
|
||||
.into_iter()
|
||||
.map(|amt_idx| OutputID {
|
||||
amount: amt,
|
||||
index: amt_idx,
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
.await;
|
||||
|
||||
let res = self
|
||||
.con
|
||||
.bin_call(
|
||||
"get_outs.bin",
|
||||
epee_encoding::to_bytes(Request {
|
||||
outputs: outputs.clone(),
|
||||
})?
|
||||
.to_vec(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let cache = self.cache.clone().read_owned().await;
|
||||
|
||||
let span = tracing::Span::current();
|
||||
rayon_spawn_async(move || {
|
||||
let outs: Response =
|
||||
epee_encoding::from_bytes(&mut epee_encoding::macros::bytes::Bytes::from(res))?;
|
||||
|
||||
tracing::info!(parent: &span, "Got outputs len: {}", outs.outs.len());
|
||||
|
||||
let mut ret = HashMap::new();
|
||||
|
||||
for (out, idx) in outs.outs.into_iter().zip(outputs) {
|
||||
ret.entry(idx.amount).or_insert_with(HashMap::new).insert(
|
||||
idx.index,
|
||||
OutputOnChain {
|
||||
height: out.height,
|
||||
time_lock: cache.outputs_time_lock(&out.txid),
|
||||
// we unwrap these as we are checking already approved rings so if these points are bad
|
||||
// then a bad proof has been approved.
|
||||
key: CompressedEdwardsY::from_slice(&out.key)
|
||||
.unwrap()
|
||||
.decompress(),
|
||||
commitment: CompressedEdwardsY::from_slice(&out.mask)
|
||||
.unwrap()
|
||||
.decompress()
|
||||
.unwrap(),
|
||||
},
|
||||
);
|
||||
}
|
||||
Ok(ret)
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
async fn handle_request(
|
||||
&mut self,
|
||||
req: DatabaseRequest,
|
||||
) -> Result<DatabaseResponse, tower::BoxError> {
|
||||
match req {
|
||||
DatabaseRequest::BlockHash(height) => {
|
||||
timeout(DEFAULT_TIMEOUT, self.get_block_hash(height))
|
||||
.await?
|
||||
.map(DatabaseResponse::BlockHash)
|
||||
}
|
||||
DatabaseRequest::ChainHeight => {
|
||||
let height = self.cache.read().await.height;
|
||||
|
||||
let hash = timeout(DEFAULT_TIMEOUT, self.get_block_hash(height - 1)).await??;
|
||||
|
||||
Ok(DatabaseResponse::ChainHeight(height, hash))
|
||||
}
|
||||
DatabaseRequest::BlockExtendedHeader(id) => {
|
||||
timeout(DEFAULT_TIMEOUT, self.get_extended_block_header(id))
|
||||
.await?
|
||||
.map(DatabaseResponse::BlockExtendedHeader)
|
||||
}
|
||||
DatabaseRequest::BlockExtendedHeaderInRange(range) => timeout(
|
||||
DEFAULT_TIMEOUT,
|
||||
self.get_extended_block_header_in_range(range),
|
||||
)
|
||||
.await?
|
||||
.map(DatabaseResponse::BlockExtendedHeaderInRange),
|
||||
DatabaseRequest::BlockBatchInRange(range) => {
|
||||
timeout(DEFAULT_TIMEOUT, self.get_blocks_in_range(range))
|
||||
.await?
|
||||
.map(DatabaseResponse::BlockBatchInRange)
|
||||
}
|
||||
DatabaseRequest::Outputs(out_ids) => {
|
||||
timeout(OUTPUTS_TIMEOUT, self.get_outputs(out_ids))
|
||||
.await?
|
||||
.map(DatabaseResponse::Outputs)
|
||||
}
|
||||
DatabaseRequest::NumberOutputsWithAmount(_)
|
||||
| DatabaseRequest::GeneratedCoins
|
||||
| DatabaseRequest::CheckKIsNotSpent(_) => {
|
||||
panic!("Request does not need RPC connection!")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "info", skip(self), fields(addr = self.address))]
|
||||
pub async fn check_rpc_alive(&self) -> Result<(), tower::BoxError> {
|
||||
tracing::debug!("Checking RPC connection");
|
||||
|
||||
let res = timeout(Duration::from_secs(10), self.con.get_height()).await;
|
||||
let ok = matches!(res, Ok(Ok(_)));
|
||||
|
||||
if !ok {
|
||||
tracing::warn!("RPC connection test failed");
|
||||
return Err("RPC connection test failed".into());
|
||||
}
|
||||
tracing::info!("RPC connection Ok");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn run(mut self) {
|
||||
while let Some(req) = self.req_chan.next().await {
|
||||
let RpcReq {
|
||||
req,
|
||||
span,
|
||||
res_chan,
|
||||
} = req;
|
||||
|
||||
let res = self.handle_request(req).instrument(span.clone()).await;
|
||||
|
||||
let is_err = res.is_err();
|
||||
if is_err {
|
||||
tracing::warn!(parent: &span, "Error from RPC: {:?}", res)
|
||||
}
|
||||
|
||||
let _ = res_chan.send(res);
|
||||
|
||||
if is_err && self.check_rpc_alive().await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
tracing::warn!("Shutting down RPC connection: {}", self.address);
|
||||
|
||||
self.req_chan.close();
|
||||
while let Some(req) = self.req_chan.try_next().unwrap() {
|
||||
let _ = req.res_chan.send(Err("RPC connection closed!".into()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug)]
|
||||
struct BlockInfo {
|
||||
cumulative_difficulty: u64,
|
||||
cumulative_difficulty_top64: u64,
|
||||
timestamp: u64,
|
||||
block_weight: usize,
|
||||
long_term_weight: usize,
|
||||
|
||||
major_version: u8,
|
||||
minor_version: u8,
|
||||
}
|
||||
|
||||
fn u128_from_low_high(low: u64, high: u64) -> u128 {
|
||||
let res: u128 = high as u128;
|
||||
res << 64 | low as u128
|
||||
}
|
|
@ -1,87 +0,0 @@
|
|||
use std::{sync::Arc, time::Duration};
|
||||
|
||||
use futures::{
|
||||
channel::mpsc::{self, SendError},
|
||||
stream::FuturesUnordered,
|
||||
SinkExt, StreamExt,
|
||||
};
|
||||
use monero_serai::rpc::HttpRpc;
|
||||
use tokio::sync::RwLock;
|
||||
use tower::{discover::Change, load::PeakEwma};
|
||||
use tracing::instrument;
|
||||
|
||||
use super::{
|
||||
cache::ScanningCache,
|
||||
connection::{RpcConnection, RpcConnectionSvc},
|
||||
};
|
||||
|
||||
#[instrument(skip(cache))]
|
||||
async fn check_rpc(addr: String, cache: Arc<RwLock<ScanningCache>>) -> Option<RpcConnectionSvc> {
|
||||
tracing::debug!("Sending request to node.");
|
||||
|
||||
let con = HttpRpc::with_custom_timeout(addr.clone(), Duration::from_secs(u64::MAX))
|
||||
.await
|
||||
.ok()?;
|
||||
let (tx, rx) = mpsc::channel(0);
|
||||
let rpc = RpcConnection {
|
||||
address: addr.clone(),
|
||||
con,
|
||||
cache,
|
||||
req_chan: rx,
|
||||
};
|
||||
|
||||
rpc.check_rpc_alive().await.ok()?;
|
||||
let handle = tokio::spawn(rpc.run());
|
||||
|
||||
Some(RpcConnectionSvc {
|
||||
address: addr,
|
||||
rpc_task_chan: tx,
|
||||
rpc_task_handle: handle,
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) struct RPCDiscover {
|
||||
pub initial_list: Vec<String>,
|
||||
pub ok_channel: mpsc::Sender<Change<usize, PeakEwma<RpcConnectionSvc>>>,
|
||||
pub already_connected: usize,
|
||||
pub cache: Arc<RwLock<ScanningCache>>,
|
||||
}
|
||||
|
||||
impl RPCDiscover {
|
||||
async fn found_rpc(&mut self, rpc: RpcConnectionSvc) -> Result<(), SendError> {
|
||||
self.already_connected += 1;
|
||||
|
||||
self.ok_channel
|
||||
.send(Change::Insert(
|
||||
self.already_connected,
|
||||
PeakEwma::new(
|
||||
rpc,
|
||||
Duration::from_secs(5000),
|
||||
3000.0,
|
||||
tower::load::CompleteOnResponse::default(),
|
||||
),
|
||||
))
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn run(mut self) {
|
||||
if !self.initial_list.is_empty() {
|
||||
let mut fut = FuturesUnordered::from_iter(
|
||||
self.initial_list
|
||||
.drain(..)
|
||||
.map(|addr| check_rpc(addr, self.cache.clone())),
|
||||
);
|
||||
|
||||
while let Some(res) = fut.next().await {
|
||||
if let Some(rpc) = res {
|
||||
if self.found_rpc(rpc).await.is_err() {
|
||||
tracing::info!("Stopping RPC discover channel closed!");
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -5,7 +5,7 @@ use tower::ServiceExt;
|
|||
use crate::{
|
||||
context::{
|
||||
initialize_blockchain_context, BlockChainContextRequest, BlockChainContextResponse,
|
||||
ContextConfig, UpdateBlockchainCacheData,
|
||||
ContextConfig, NewBlockData,
|
||||
},
|
||||
tests::mock_db::*,
|
||||
HardFork,
|
||||
|
@ -52,18 +52,16 @@ async fn context_invalidated_on_new_block() -> Result<(), tower::BoxError> {
|
|||
assert!(context.is_still_valid());
|
||||
|
||||
ctx_svc
|
||||
.oneshot(BlockChainContextRequest::Update(
|
||||
UpdateBlockchainCacheData {
|
||||
new_top_hash: [0; 32],
|
||||
height: BLOCKCHAIN_HEIGHT,
|
||||
timestamp: 0,
|
||||
weight: 0,
|
||||
long_term_weight: 0,
|
||||
generated_coins: 0,
|
||||
vote: HardFork::V1,
|
||||
cumulative_difficulty: 0,
|
||||
},
|
||||
))
|
||||
.oneshot(BlockChainContextRequest::Update(NewBlockData {
|
||||
block_hash: [0; 32],
|
||||
height: BLOCKCHAIN_HEIGHT,
|
||||
timestamp: 0,
|
||||
weight: 0,
|
||||
long_term_weight: 0,
|
||||
generated_coins: 0,
|
||||
vote: HardFork::V1,
|
||||
cumulative_difficulty: 0,
|
||||
}))
|
||||
.await?;
|
||||
|
||||
assert!(!context.is_still_valid());
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
use monero_consensus::HardFork;
|
||||
use cuprate_consensus_rules::HardFork;
|
||||
|
||||
pub static HFS_2688888_2689608: [(HardFork, HardFork); 720] =
|
||||
include!("./data/hfs_2688888_2689608");
|
||||
|
|
|
@ -181,7 +181,7 @@ proptest! {
|
|||
|
||||
#[test]
|
||||
fn claculating_multiple_diffs_does_not_change_state(
|
||||
mut diff_cache in random_difficulty_cache(),
|
||||
diff_cache in random_difficulty_cache(),
|
||||
timestamps in any_with::<Vec<u64>>(size_range(0..1000).lift()),
|
||||
hf in any::<HardFork>(),
|
||||
) {
|
||||
|
@ -189,7 +189,7 @@ proptest! {
|
|||
|
||||
diff_cache.next_difficulties(timestamps.into_iter().zip([hf].into_iter().cycle()).collect(), &hf);
|
||||
|
||||
assert_eq!(diff_cache, cache);
|
||||
prop_assert_eq!(diff_cache, cache);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -203,7 +203,7 @@ proptest! {
|
|||
let diffs = diff_cache.next_difficulties(timestamps.clone(), &hf);
|
||||
|
||||
for (timestamp, diff) in timestamps.into_iter().zip(diffs.into_iter()) {
|
||||
assert_eq!(diff_cache.next_difficulty(×tamp.1), diff);
|
||||
prop_assert_eq!(diff_cache.next_difficulty(×tamp.1), diff);
|
||||
diff_cache.new_block(diff_cache.last_accounted_height +1, timestamp.0, diff + diff_cache.cumulative_difficulty());
|
||||
}
|
||||
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
use monero_consensus::hard_forks::{HFInfo, HardFork, NUMB_OF_HARD_FORKS};
|
||||
use monero_consensus::HFsInfo;
|
||||
use cuprate_consensus_rules::hard_forks::{HFInfo, HFsInfo, HardFork, NUMB_OF_HARD_FORKS};
|
||||
|
||||
use crate::{
|
||||
context::{hardforks::HardForkState, HardForkConfig},
|
||||
|
|
|
@ -3,7 +3,7 @@ use std::collections::VecDeque;
|
|||
use proptest::prelude::*;
|
||||
use tokio::runtime::Builder;
|
||||
|
||||
use monero_consensus::{
|
||||
use cuprate_consensus_rules::{
|
||||
blocks::{is_randomx_seed_height, randomx_seed_height},
|
||||
HardFork,
|
||||
};
|
||||
|
|
|
@ -15,7 +15,12 @@ use proptest::{
|
|||
use proptest_derive::Arbitrary;
|
||||
use tower::{BoxError, Service};
|
||||
|
||||
use crate::{DatabaseRequest, DatabaseResponse, ExtendedBlockHeader, HardFork};
|
||||
use cuprate_types::{
|
||||
blockchain::{BCReadRequest, BCResponse},
|
||||
ExtendedBlockHeader,
|
||||
};
|
||||
|
||||
use crate::HardFork;
|
||||
|
||||
prop_compose! {
|
||||
/// Generates an arbitrary full [`DummyDatabase`], it is not safe to do consensus checks on the returned database
|
||||
|
@ -56,8 +61,8 @@ pub struct DummyBlockExtendedHeader {
|
|||
impl From<DummyBlockExtendedHeader> for ExtendedBlockHeader {
|
||||
fn from(value: DummyBlockExtendedHeader) -> Self {
|
||||
ExtendedBlockHeader {
|
||||
version: value.version.unwrap_or(HardFork::V1),
|
||||
vote: value.vote.unwrap_or(HardFork::V1),
|
||||
version: value.version.unwrap_or(HardFork::V1) as u8,
|
||||
vote: value.vote.unwrap_or(HardFork::V1) as u8,
|
||||
timestamp: value.timestamp.unwrap_or_default(),
|
||||
cumulative_difficulty: value.cumulative_difficulty.unwrap_or_default(),
|
||||
block_weight: value.block_weight.unwrap_or_default(),
|
||||
|
@ -122,8 +127,8 @@ pub struct DummyDatabase {
|
|||
dummy_height: Option<usize>,
|
||||
}
|
||||
|
||||
impl Service<DatabaseRequest> for DummyDatabase {
|
||||
type Response = DatabaseResponse;
|
||||
impl Service<BCReadRequest> for DummyDatabase {
|
||||
type Response = BCResponse;
|
||||
type Error = BoxError;
|
||||
type Future =
|
||||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||
|
@ -132,13 +137,13 @@ impl Service<DatabaseRequest> for DummyDatabase {
|
|||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: DatabaseRequest) -> Self::Future {
|
||||
fn call(&mut self, req: BCReadRequest) -> Self::Future {
|
||||
let blocks = self.blocks.clone();
|
||||
let dummy_height = self.dummy_height;
|
||||
|
||||
async move {
|
||||
Ok(match req {
|
||||
DatabaseRequest::BlockExtendedHeader(id) => {
|
||||
BCReadRequest::BlockExtendedHeader(id) => {
|
||||
let mut id = usize::try_from(id).unwrap();
|
||||
if let Some(dummy_height) = dummy_height {
|
||||
let block_len = blocks.read().unwrap().len();
|
||||
|
@ -146,7 +151,7 @@ impl Service<DatabaseRequest> for DummyDatabase {
|
|||
id -= dummy_height - block_len;
|
||||
}
|
||||
|
||||
DatabaseResponse::BlockExtendedHeader(
|
||||
BCResponse::BlockExtendedHeader(
|
||||
blocks
|
||||
.read()
|
||||
.unwrap()
|
||||
|
@ -156,12 +161,12 @@ impl Service<DatabaseRequest> for DummyDatabase {
|
|||
.ok_or("block not in database!")?,
|
||||
)
|
||||
}
|
||||
DatabaseRequest::BlockHash(id) => {
|
||||
BCReadRequest::BlockHash(id) => {
|
||||
let mut hash = [0; 32];
|
||||
hash[0..8].copy_from_slice(&id.to_le_bytes());
|
||||
DatabaseResponse::BlockHash(hash)
|
||||
BCResponse::BlockHash(hash)
|
||||
}
|
||||
DatabaseRequest::BlockExtendedHeaderInRange(range) => {
|
||||
BCReadRequest::BlockExtendedHeaderInRange(range) => {
|
||||
let mut end = usize::try_from(range.end).unwrap();
|
||||
let mut start = usize::try_from(range.start).unwrap();
|
||||
|
||||
|
@ -172,7 +177,7 @@ impl Service<DatabaseRequest> for DummyDatabase {
|
|||
start -= dummy_height - block_len;
|
||||
}
|
||||
|
||||
DatabaseResponse::BlockExtendedHeaderInRange(
|
||||
BCResponse::BlockExtendedHeaderInRange(
|
||||
blocks
|
||||
.read()
|
||||
.unwrap()
|
||||
|
@ -184,7 +189,7 @@ impl Service<DatabaseRequest> for DummyDatabase {
|
|||
.collect(),
|
||||
)
|
||||
}
|
||||
DatabaseRequest::ChainHeight => {
|
||||
BCReadRequest::ChainHeight => {
|
||||
let height: u64 = dummy_height
|
||||
.unwrap_or(blocks.read().unwrap().len())
|
||||
.try_into()
|
||||
|
@ -193,9 +198,9 @@ impl Service<DatabaseRequest> for DummyDatabase {
|
|||
let mut top_hash = [0; 32];
|
||||
top_hash[0..8].copy_from_slice(&height.to_le_bytes());
|
||||
|
||||
DatabaseResponse::ChainHeight(height, top_hash)
|
||||
BCResponse::ChainHeight(height, top_hash)
|
||||
}
|
||||
DatabaseRequest::GeneratedCoins => DatabaseResponse::GeneratedCoins(0),
|
||||
BCReadRequest::GeneratedCoins => BCResponse::GeneratedCoins(0),
|
||||
_ => unimplemented!("the context svc should not need these requests!"),
|
||||
})
|
||||
}
|
||||
|
|
|
@ -1,94 +1,105 @@
|
|||
//! # Transaction Verifier Service.
|
||||
//!
|
||||
//! This module contains the [`TxVerifierService`] which handles consensus validation of transactions.
|
||||
//!
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
future::Future,
|
||||
ops::Deref,
|
||||
pin::Pin,
|
||||
sync::Arc,
|
||||
sync::{Arc, Mutex as StdMutex},
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use futures::FutureExt;
|
||||
use monero_serai::ringct::RctType;
|
||||
use monero_serai::transaction::Transaction;
|
||||
use monero_serai::{
|
||||
ringct::RctType,
|
||||
transaction::{Input, Timelock, Transaction},
|
||||
};
|
||||
use rayon::prelude::*;
|
||||
use tower::{Service, ServiceExt};
|
||||
use tracing::instrument;
|
||||
|
||||
use cuprate_helper::asynch::rayon_spawn_async;
|
||||
use monero_consensus::{
|
||||
use cuprate_consensus_rules::{
|
||||
transactions::{
|
||||
check_transaction_contextual, check_transaction_semantic, RingCTError, TransactionError,
|
||||
TxRingMembersInfo,
|
||||
check_decoy_info, check_transaction_contextual, check_transaction_semantic,
|
||||
output_unlocked, TransactionError,
|
||||
},
|
||||
ConsensusError, HardFork, TxVersion,
|
||||
};
|
||||
use cuprate_helper::asynch::rayon_spawn_async;
|
||||
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
|
||||
|
||||
use crate::{
|
||||
batch_verifier::MultiThreadedBatchVerifier, context::ReOrgToken, Database, DatabaseRequest,
|
||||
DatabaseResponse, ExtendedConsensusError,
|
||||
batch_verifier::MultiThreadedBatchVerifier,
|
||||
transactions::contextual_data::{batch_get_decoy_info, batch_get_ring_member_info},
|
||||
Database, ExtendedConsensusError,
|
||||
};
|
||||
|
||||
pub mod contextual_data;
|
||||
mod output_cache;
|
||||
|
||||
pub use output_cache::OutputCache;
|
||||
/// A struct representing the type of validation that needs to be completed for this transaction.
|
||||
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
|
||||
enum VerificationNeeded {
|
||||
/// Both semantic validation and contextual validation are needed.
|
||||
SemanticAndContextual,
|
||||
/// Only contextual validation is needed.
|
||||
Contextual,
|
||||
}
|
||||
|
||||
pub async fn batch_setup_txs(
|
||||
txs: Vec<(Vec<Transaction>, HardFork)>,
|
||||
) -> Result<Vec<Vec<Arc<TransactionVerificationData>>>, ExtendedConsensusError> {
|
||||
let batch_verifier = Arc::new(MultiThreadedBatchVerifier::new(rayon::current_num_threads()));
|
||||
/// Represents if a transaction has been fully validated and under what conditions
|
||||
/// the transaction is valid in the future.
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
|
||||
pub enum CachedVerificationState {
|
||||
/// The transaction has not been validated.
|
||||
NotVerified,
|
||||
/// The transaction is valid* if the block represented by this hash is in the blockchain and the [`HardFork`]
|
||||
/// is the same.
|
||||
///
|
||||
/// *V1 transactions require checks on their ring-length even if this hash is in the blockchain.
|
||||
ValidAtHashAndHF([u8; 32], HardFork),
|
||||
/// The transaction is valid* if the block represented by this hash is in the blockchain _and_ this
|
||||
/// given time lock is unlocked. The time lock here will represent the youngest used time based lock
|
||||
/// (If the transaction uses any time based time locks). This is because time locks are not monotonic
|
||||
/// so unlocked outputs could become re-locked.
|
||||
///
|
||||
/// *V1 transactions require checks on their ring-length even if this hash is in the blockchain.
|
||||
ValidAtHashAndHFWithTimeBasedLock([u8; 32], HardFork, Timelock),
|
||||
}
|
||||
|
||||
// Move out of the async runtime and use rayon to parallelize the serialisation and hashing of the txs.
|
||||
let txs = rayon_spawn_async(move || {
|
||||
let txs = txs
|
||||
.into_par_iter()
|
||||
.map(|(txs, hf)| {
|
||||
txs.into_par_iter()
|
||||
.map(|tx| {
|
||||
Ok(Arc::new(TransactionVerificationData::new(
|
||||
tx,
|
||||
&hf,
|
||||
batch_verifier.clone(),
|
||||
)?))
|
||||
})
|
||||
.collect::<Result<Vec<_>, ConsensusError>>()
|
||||
})
|
||||
.collect::<Result<Vec<_>, ConsensusError>>()?;
|
||||
|
||||
if !Arc::into_inner(batch_verifier).unwrap().verify() {
|
||||
Err(ConsensusError::Transaction(TransactionError::RingCTError(
|
||||
RingCTError::BulletproofsRangeInvalid,
|
||||
)))?
|
||||
impl CachedVerificationState {
|
||||
/// Returns the block hash this is valid for if in state [`CachedVerificationState::ValidAtHashAndHF`] or [`CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock`].
|
||||
fn verified_at_block_hash(&self) -> Option<[u8; 32]> {
|
||||
match self {
|
||||
CachedVerificationState::NotVerified => None,
|
||||
CachedVerificationState::ValidAtHashAndHF(hash, _)
|
||||
| CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock(hash, _, _) => Some(*hash),
|
||||
}
|
||||
|
||||
Ok::<_, ConsensusError>(txs)
|
||||
})
|
||||
.await?;
|
||||
|
||||
Ok(txs)
|
||||
}
|
||||
}
|
||||
|
||||
/// Data needed to verify a transaction.
|
||||
///
|
||||
#[derive(Debug)]
|
||||
pub struct TransactionVerificationData {
|
||||
/// The transaction we are verifying
|
||||
pub tx: Transaction,
|
||||
/// The [`TxVersion`] of this tx.
|
||||
pub version: TxVersion,
|
||||
/// The serialised transaction.
|
||||
pub tx_blob: Vec<u8>,
|
||||
/// The weight of the transaction.
|
||||
pub tx_weight: usize,
|
||||
/// The fee this transaction has paid.
|
||||
pub fee: u64,
|
||||
/// The hash of this transaction.
|
||||
pub tx_hash: [u8; 32],
|
||||
/// We put this behind a mutex as the information is not constant and is based of past outputs idxs
|
||||
/// which could change on re-orgs.
|
||||
rings_member_info: std::sync::Mutex<Option<(TxRingMembersInfo, ReOrgToken)>>,
|
||||
/// The verification state of this transaction.
|
||||
pub cached_verification_state: StdMutex<CachedVerificationState>,
|
||||
}
|
||||
|
||||
impl TransactionVerificationData {
|
||||
pub fn new(
|
||||
tx: Transaction,
|
||||
hf: &HardFork,
|
||||
verifier: Arc<MultiThreadedBatchVerifier>,
|
||||
) -> Result<TransactionVerificationData, ConsensusError> {
|
||||
/// Creates a new [`TransactionVerificationData`] from the given [`Transaction`].
|
||||
pub fn new(tx: Transaction) -> Result<TransactionVerificationData, ConsensusError> {
|
||||
let tx_hash = tx.hash();
|
||||
let tx_blob = tx.serialize();
|
||||
|
||||
|
@ -101,17 +112,12 @@ impl TransactionVerificationData {
|
|||
_ => tx_blob.len(),
|
||||
};
|
||||
|
||||
let fee = verifier.queue_statement(|verifier| {
|
||||
check_transaction_semantic(&tx, tx_blob.len(), tx_weight, &tx_hash, hf, verifier)
|
||||
.map_err(ConsensusError::Transaction)
|
||||
})?;
|
||||
|
||||
Ok(TransactionVerificationData {
|
||||
tx_hash,
|
||||
tx_blob,
|
||||
tx_weight,
|
||||
fee,
|
||||
rings_member_info: std::sync::Mutex::new(None),
|
||||
fee: tx.rct_signatures.base.fee,
|
||||
cached_verification_state: StdMutex::new(CachedVerificationState::NotVerified),
|
||||
version: TxVersion::from_raw(tx.prefix.version)
|
||||
.ok_or(TransactionError::TransactionVersionInvalid)?,
|
||||
tx,
|
||||
|
@ -119,24 +125,49 @@ impl TransactionVerificationData {
|
|||
}
|
||||
}
|
||||
|
||||
/// A request to verify a transaction.
|
||||
pub enum VerifyTxRequest {
|
||||
/// Verifies transactions in the context of a block.
|
||||
Block {
|
||||
/// Verifies a batch of prepared txs.
|
||||
Prepped {
|
||||
/// The transactions to verify.
|
||||
// TODO: Can we use references to remove the Vec? wont play nicely with Service though
|
||||
txs: Vec<Arc<TransactionVerificationData>>,
|
||||
/// The current chain height.
|
||||
current_chain_height: u64,
|
||||
/// The top block hash.
|
||||
top_hash: [u8; 32],
|
||||
/// The value for time to use to check time locked outputs.
|
||||
time_for_time_lock: u64,
|
||||
/// The current [`HardFork`]
|
||||
hf: HardFork,
|
||||
},
|
||||
/// Verifies a batch of new txs.
|
||||
/// Returning [`VerifyTxResponse::OkPrepped`]
|
||||
New {
|
||||
/// The transactions to verify.
|
||||
txs: Vec<Transaction>,
|
||||
/// The current chain height.
|
||||
current_chain_height: u64,
|
||||
/// The top block hash.
|
||||
top_hash: [u8; 32],
|
||||
/// The value for time to use to check time locked outputs.
|
||||
time_for_time_lock: u64,
|
||||
/// The current [`HardFork`]
|
||||
hf: HardFork,
|
||||
re_org_token: ReOrgToken,
|
||||
},
|
||||
}
|
||||
|
||||
/// A response from a verify transaction request.
|
||||
#[derive(Debug)]
|
||||
pub enum VerifyTxResponse {
|
||||
BatchSetupOk(Vec<Arc<TransactionVerificationData>>),
|
||||
OkPrepped(Vec<Arc<TransactionVerificationData>>),
|
||||
Ok,
|
||||
}
|
||||
|
||||
/// The transaction verifier service.
|
||||
#[derive(Clone)]
|
||||
pub struct TxVerifierService<D: Clone> {
|
||||
pub struct TxVerifierService<D> {
|
||||
/// The database.
|
||||
database: D,
|
||||
}
|
||||
|
||||
|
@ -145,6 +176,7 @@ where
|
|||
D: Database + Clone + Send + 'static,
|
||||
D::Future: Send + 'static,
|
||||
{
|
||||
/// Creates a new [`TxVerifierService`].
|
||||
pub fn new(database: D) -> TxVerifierService<D> {
|
||||
TxVerifierService { database }
|
||||
}
|
||||
|
@ -169,20 +201,38 @@ where
|
|||
|
||||
async move {
|
||||
match req {
|
||||
VerifyTxRequest::Block {
|
||||
VerifyTxRequest::New {
|
||||
txs,
|
||||
current_chain_height,
|
||||
top_hash,
|
||||
time_for_time_lock,
|
||||
hf,
|
||||
re_org_token,
|
||||
} => {
|
||||
verify_transactions_for_block(
|
||||
prep_and_verify_transactions(
|
||||
database,
|
||||
txs,
|
||||
current_chain_height,
|
||||
top_hash,
|
||||
time_for_time_lock,
|
||||
hf,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
VerifyTxRequest::Prepped {
|
||||
txs,
|
||||
current_chain_height,
|
||||
top_hash,
|
||||
time_for_time_lock,
|
||||
hf,
|
||||
} => {
|
||||
verify_prepped_transactions(
|
||||
database,
|
||||
&txs,
|
||||
current_chain_height,
|
||||
top_hash,
|
||||
time_for_time_lock,
|
||||
hf,
|
||||
re_org_token,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
@ -192,88 +242,315 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
#[instrument(name = "verify_txs", skip_all, level = "info")]
|
||||
async fn verify_transactions_for_block<D>(
|
||||
/// Prepares transactions for verification, then verifies them.
|
||||
async fn prep_and_verify_transactions<D>(
|
||||
database: D,
|
||||
txs: Vec<Arc<TransactionVerificationData>>,
|
||||
txs: Vec<Transaction>,
|
||||
current_chain_height: u64,
|
||||
top_hash: [u8; 32],
|
||||
time_for_time_lock: u64,
|
||||
hf: HardFork,
|
||||
re_org_token: ReOrgToken,
|
||||
) -> Result<VerifyTxResponse, ExtendedConsensusError>
|
||||
where
|
||||
D: Database + Clone + Sync + Send + 'static,
|
||||
{
|
||||
tracing::debug!("Verifying transactions for block, amount: {}", txs.len());
|
||||
let span = tracing::info_span!("prep_txs", amt = txs.len());
|
||||
|
||||
contextual_data::batch_refresh_ring_member_info(
|
||||
&txs,
|
||||
&hf,
|
||||
re_org_token,
|
||||
database.clone(),
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let spent_kis = Arc::new(std::sync::Mutex::new(HashSet::new()));
|
||||
|
||||
let cloned_spent_kis = spent_kis.clone();
|
||||
|
||||
rayon_spawn_async(move || {
|
||||
txs.par_iter().try_for_each(|tx| {
|
||||
verify_transaction_for_block(
|
||||
tx,
|
||||
current_chain_height,
|
||||
time_for_time_lock,
|
||||
hf,
|
||||
cloned_spent_kis.clone(),
|
||||
)
|
||||
})
|
||||
tracing::debug!(parent: &span, "prepping transactions for verification.");
|
||||
let txs = rayon_spawn_async(|| {
|
||||
txs.into_par_iter()
|
||||
.map(|tx| TransactionVerificationData::new(tx).map(Arc::new))
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
})
|
||||
.await?;
|
||||
|
||||
let DatabaseResponse::CheckKIsNotSpent(kis_spent) = database
|
||||
.oneshot(DatabaseRequest::CheckKIsNotSpent(
|
||||
Arc::into_inner(spent_kis).unwrap().into_inner().unwrap(),
|
||||
))
|
||||
verify_prepped_transactions(
|
||||
database,
|
||||
&txs,
|
||||
current_chain_height,
|
||||
top_hash,
|
||||
time_for_time_lock,
|
||||
hf,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(VerifyTxResponse::OkPrepped(txs))
|
||||
}
|
||||
|
||||
#[instrument(name = "verify_txs", skip_all, fields(amt = txs.len()) level = "info")]
|
||||
async fn verify_prepped_transactions<D>(
|
||||
mut database: D,
|
||||
txs: &[Arc<TransactionVerificationData>],
|
||||
current_chain_height: u64,
|
||||
top_hash: [u8; 32],
|
||||
time_for_time_lock: u64,
|
||||
hf: HardFork,
|
||||
) -> Result<VerifyTxResponse, ExtendedConsensusError>
|
||||
where
|
||||
D: Database + Clone + Sync + Send + 'static,
|
||||
{
|
||||
tracing::debug!("Verifying transactions");
|
||||
|
||||
tracing::trace!("Checking for duplicate key images");
|
||||
|
||||
let mut spent_kis = HashSet::with_capacity(txs.len());
|
||||
|
||||
txs.iter().try_for_each(|tx| {
|
||||
tx.tx.prefix.inputs.iter().try_for_each(|input| {
|
||||
if let Input::ToKey { key_image, .. } = input {
|
||||
if !spent_kis.insert(key_image.compress().0) {
|
||||
tracing::debug!("Duplicate key image found in batch.");
|
||||
return Err(ConsensusError::Transaction(TransactionError::KeyImageSpent));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
})
|
||||
})?;
|
||||
|
||||
let BCResponse::KeyImagesSpent(kis_spent) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(BCReadRequest::KeyImagesSpent(spent_kis))
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
|
||||
if kis_spent {
|
||||
tracing::debug!("One or more key images in batch already spent.");
|
||||
Err(ConsensusError::Transaction(TransactionError::KeyImageSpent))?;
|
||||
}
|
||||
|
||||
let mut verified_at_block_hashes = txs
|
||||
.iter()
|
||||
.filter_map(|txs| {
|
||||
txs.cached_verification_state
|
||||
.lock()
|
||||
.unwrap()
|
||||
.verified_at_block_hash()
|
||||
})
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
tracing::trace!(
|
||||
"Verified at hashes len: {}.",
|
||||
verified_at_block_hashes.len()
|
||||
);
|
||||
|
||||
if !verified_at_block_hashes.is_empty() {
|
||||
tracing::trace!("Filtering block hashes not in the main chain.");
|
||||
|
||||
let BCResponse::FilterUnknownHashes(known_hashes) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(BCReadRequest::FilterUnknownHashes(verified_at_block_hashes))
|
||||
.await?
|
||||
else {
|
||||
panic!("Database returned wrong response!");
|
||||
};
|
||||
verified_at_block_hashes = known_hashes;
|
||||
}
|
||||
|
||||
let (txs_needing_full_verification, txs_needing_partial_verification) =
|
||||
transactions_needing_verification(
|
||||
txs,
|
||||
verified_at_block_hashes,
|
||||
&hf,
|
||||
current_chain_height,
|
||||
time_for_time_lock,
|
||||
)?;
|
||||
|
||||
futures::try_join!(
|
||||
verify_transactions_decoy_info(txs_needing_partial_verification, hf, database.clone()),
|
||||
verify_transactions(
|
||||
txs_needing_full_verification,
|
||||
current_chain_height,
|
||||
top_hash,
|
||||
time_for_time_lock,
|
||||
hf,
|
||||
database
|
||||
)
|
||||
)?;
|
||||
|
||||
Ok(VerifyTxResponse::Ok)
|
||||
}
|
||||
|
||||
fn verify_transaction_for_block(
|
||||
tx_verification_data: &TransactionVerificationData,
|
||||
#[allow(clippy::type_complexity)] // I don't think the return is too complex
|
||||
fn transactions_needing_verification(
|
||||
txs: &[Arc<TransactionVerificationData>],
|
||||
hashes_in_main_chain: HashSet<[u8; 32]>,
|
||||
current_hf: &HardFork,
|
||||
current_chain_height: u64,
|
||||
time_for_time_lock: u64,
|
||||
) -> Result<
|
||||
(
|
||||
Vec<(Arc<TransactionVerificationData>, VerificationNeeded)>,
|
||||
Vec<Arc<TransactionVerificationData>>,
|
||||
),
|
||||
ConsensusError,
|
||||
> {
|
||||
// txs needing full validation: semantic and/or contextual
|
||||
let mut full_validation_transactions = Vec::new();
|
||||
// txs needing partial _contextual_ validation, not semantic.
|
||||
let mut partial_validation_transactions = Vec::new();
|
||||
|
||||
for tx in txs.iter() {
|
||||
let guard = tx.cached_verification_state.lock().unwrap();
|
||||
|
||||
match guard.deref() {
|
||||
CachedVerificationState::NotVerified => {
|
||||
drop(guard);
|
||||
full_validation_transactions
|
||||
.push((tx.clone(), VerificationNeeded::SemanticAndContextual));
|
||||
continue;
|
||||
}
|
||||
CachedVerificationState::ValidAtHashAndHF(hash, hf) => {
|
||||
if current_hf != hf {
|
||||
drop(guard);
|
||||
full_validation_transactions
|
||||
.push((tx.clone(), VerificationNeeded::SemanticAndContextual));
|
||||
continue;
|
||||
}
|
||||
|
||||
if !hashes_in_main_chain.contains(hash) {
|
||||
drop(guard);
|
||||
full_validation_transactions.push((tx.clone(), VerificationNeeded::Contextual));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock(hash, hf, lock) => {
|
||||
if current_hf != hf {
|
||||
drop(guard);
|
||||
full_validation_transactions
|
||||
.push((tx.clone(), VerificationNeeded::SemanticAndContextual));
|
||||
continue;
|
||||
}
|
||||
|
||||
if !hashes_in_main_chain.contains(hash) {
|
||||
drop(guard);
|
||||
full_validation_transactions.push((tx.clone(), VerificationNeeded::Contextual));
|
||||
continue;
|
||||
}
|
||||
|
||||
// If the time lock is still locked then the transaction is invalid.
|
||||
if !output_unlocked(lock, current_chain_height, time_for_time_lock, hf) {
|
||||
return Err(ConsensusError::Transaction(
|
||||
TransactionError::OneOrMoreRingMembersLocked,
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if tx.version == TxVersion::RingSignatures {
|
||||
drop(guard);
|
||||
partial_validation_transactions.push(tx.clone());
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
Ok((
|
||||
full_validation_transactions,
|
||||
partial_validation_transactions,
|
||||
))
|
||||
}
|
||||
|
||||
async fn verify_transactions_decoy_info<D>(
|
||||
txs: Vec<Arc<TransactionVerificationData>>,
|
||||
hf: HardFork,
|
||||
spent_kis: Arc<std::sync::Mutex<HashSet<[u8; 32]>>>,
|
||||
) -> Result<(), ConsensusError> {
|
||||
tracing::debug!(
|
||||
"Verifying transaction: {}",
|
||||
hex::encode(tx_verification_data.tx_hash)
|
||||
);
|
||||
|
||||
let rings_member_info_lock = tx_verification_data.rings_member_info.lock().unwrap();
|
||||
let rings_member_info = match rings_member_info_lock.deref() {
|
||||
Some(rings_member_info) => rings_member_info,
|
||||
None => panic!("rings_member_info needs to be set to be able to verify!"),
|
||||
};
|
||||
|
||||
check_transaction_contextual(
|
||||
&tx_verification_data.tx,
|
||||
&rings_member_info.0,
|
||||
current_chain_height,
|
||||
time_for_time_lock,
|
||||
&hf,
|
||||
spent_kis,
|
||||
)?;
|
||||
database: D,
|
||||
) -> Result<(), ExtendedConsensusError>
|
||||
where
|
||||
D: Database + Clone + Sync + Send + 'static,
|
||||
{
|
||||
batch_get_decoy_info(&txs, hf, database)
|
||||
.await?
|
||||
.try_for_each(|decoy_info| decoy_info.and_then(|di| Ok(check_decoy_info(&di, &hf)?)))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn verify_transactions<D>(
|
||||
txs: Vec<(Arc<TransactionVerificationData>, VerificationNeeded)>,
|
||||
current_chain_height: u64,
|
||||
top_hash: [u8; 32],
|
||||
current_time_lock_timestamp: u64,
|
||||
hf: HardFork,
|
||||
database: D,
|
||||
) -> Result<(), ExtendedConsensusError>
|
||||
where
|
||||
D: Database + Clone + Sync + Send + 'static,
|
||||
{
|
||||
let txs_ring_member_info =
|
||||
batch_get_ring_member_info(txs.iter().map(|(tx, _)| tx), &hf, database).await?;
|
||||
|
||||
rayon_spawn_async(move || {
|
||||
let batch_verifier = MultiThreadedBatchVerifier::new(rayon::current_num_threads());
|
||||
|
||||
txs.par_iter()
|
||||
.zip(txs_ring_member_info.par_iter())
|
||||
.try_for_each(|((tx, verification_needed), ring)| {
|
||||
// do semantic validation if needed.
|
||||
if *verification_needed == VerificationNeeded::SemanticAndContextual {
|
||||
let fee = check_transaction_semantic(
|
||||
&tx.tx,
|
||||
tx.tx_blob.len(),
|
||||
tx.tx_weight,
|
||||
&tx.tx_hash,
|
||||
&hf,
|
||||
&batch_verifier,
|
||||
)?;
|
||||
// make sure monero-serai calculated the same fee.
|
||||
assert_eq!(fee, tx.fee);
|
||||
}
|
||||
|
||||
// Both variants of `VerificationNeeded` require contextual validation.
|
||||
check_transaction_contextual(
|
||||
&tx.tx,
|
||||
ring,
|
||||
current_chain_height,
|
||||
current_time_lock_timestamp,
|
||||
&hf,
|
||||
)?;
|
||||
|
||||
Ok::<_, ConsensusError>(())
|
||||
})?;
|
||||
|
||||
if !batch_verifier.verify() {
|
||||
return Err(ExtendedConsensusError::OneOrMoreBatchVerificationStatementsInvalid);
|
||||
}
|
||||
|
||||
txs.iter()
|
||||
.zip(txs_ring_member_info)
|
||||
.for_each(|((tx, _), ring)| {
|
||||
if ring.time_locked_outs.is_empty() {
|
||||
*tx.cached_verification_state.lock().unwrap() =
|
||||
CachedVerificationState::ValidAtHashAndHF(top_hash, hf);
|
||||
} else {
|
||||
let youngest_timebased_lock = ring
|
||||
.time_locked_outs
|
||||
.iter()
|
||||
.filter_map(|lock| match lock {
|
||||
Timelock::Time(time) => Some(*time),
|
||||
_ => None,
|
||||
})
|
||||
.min();
|
||||
|
||||
*tx.cached_verification_state.lock().unwrap() =
|
||||
if let Some(time) = youngest_timebased_lock {
|
||||
CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock(
|
||||
top_hash,
|
||||
hf,
|
||||
Timelock::Time(time),
|
||||
)
|
||||
} else {
|
||||
CachedVerificationState::ValidAtHashAndHF(top_hash, hf)
|
||||
};
|
||||
}
|
||||
});
|
||||
|
||||
Ok(())
|
||||
})
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
//! # Contextual Data
|
||||
//!
|
||||
//! This module contains [`TxRingMembersInfo`] which is a struct made up from blockchain information about the
|
||||
//! This module fills [`TxRingMembersInfo`] which is a struct made up from blockchain information about the
|
||||
//! ring members of inputs. This module does minimal consensus checks, only when needed, and should not be relied
|
||||
//! upon to do any.
|
||||
//!
|
||||
|
@ -10,166 +10,142 @@
|
|||
//!
|
||||
//! Because this data is unique for *every* transaction and the context service is just for blockchain state data.
|
||||
//!
|
||||
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
ops::Deref,
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use monero_serai::transaction::Input;
|
||||
use monero_serai::transaction::{Input, Timelock};
|
||||
use tower::ServiceExt;
|
||||
use tracing::instrument;
|
||||
|
||||
use monero_consensus::{
|
||||
use cuprate_consensus_rules::{
|
||||
transactions::{
|
||||
get_ring_members_for_inputs, insert_ring_member_ids, DecoyInfo, TxRingMembersInfo,
|
||||
get_absolute_offsets, insert_ring_member_ids, DecoyInfo, Rings, TransactionError,
|
||||
TxRingMembersInfo,
|
||||
},
|
||||
ConsensusError, HardFork,
|
||||
ConsensusError, HardFork, TxVersion,
|
||||
};
|
||||
use cuprate_types::{
|
||||
blockchain::{BCReadRequest, BCResponse},
|
||||
OutputOnChain,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
context::ReOrgToken,
|
||||
transactions::{output_cache::OutputCache, TransactionVerificationData},
|
||||
Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError,
|
||||
};
|
||||
use crate::{transactions::TransactionVerificationData, Database, ExtendedConsensusError};
|
||||
|
||||
pub async fn batch_refresh_ring_member_info<'a, D: Database + Clone + Send + Sync + 'static>(
|
||||
txs_verification_data: &'a [Arc<TransactionVerificationData>],
|
||||
hf: &HardFork,
|
||||
re_org_token: ReOrgToken,
|
||||
mut database: D,
|
||||
out_cache: Option<&OutputCache<'a>>,
|
||||
) -> Result<(), ExtendedConsensusError> {
|
||||
let (txs_needing_full_refresh, txs_needing_partial_refresh) =
|
||||
ring_member_info_needing_refresh(txs_verification_data, hf);
|
||||
|
||||
if !txs_needing_full_refresh.is_empty() {
|
||||
batch_fill_ring_member_info(
|
||||
txs_needing_full_refresh.iter(),
|
||||
hf,
|
||||
re_org_token,
|
||||
database.clone(),
|
||||
out_cache,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
let unique_input_amounts = txs_needing_partial_refresh
|
||||
/// Get the ring members for the inputs from the outputs on the chain.
|
||||
///
|
||||
/// Will error if `outputs` does not contain the outputs needed.
|
||||
fn get_ring_members_for_inputs(
|
||||
get_outputs: impl Fn(u64, u64) -> Option<OutputOnChain>,
|
||||
inputs: &[Input],
|
||||
) -> Result<Vec<Vec<OutputOnChain>>, TransactionError> {
|
||||
inputs
|
||||
.iter()
|
||||
.flat_map(|tx_info| {
|
||||
tx_info
|
||||
.tx
|
||||
.prefix
|
||||
.inputs
|
||||
.iter()
|
||||
.map(|input| match input {
|
||||
Input::ToKey { amount, .. } => amount.unwrap_or(0),
|
||||
_ => 0,
|
||||
})
|
||||
.collect::<HashSet<_>>()
|
||||
})
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
let DatabaseResponse::NumberOutputsWithAmount(outputs_with_amount) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(DatabaseRequest::NumberOutputsWithAmount(
|
||||
unique_input_amounts.into_iter().collect(),
|
||||
))
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!")
|
||||
};
|
||||
|
||||
for tx_v_data in txs_needing_partial_refresh {
|
||||
let decoy_info = if hf != &HardFork::V1 {
|
||||
// this data is only needed after hard-fork 1.
|
||||
Some(
|
||||
DecoyInfo::new(&tx_v_data.tx.prefix.inputs, &outputs_with_amount, hf)
|
||||
.map_err(ConsensusError::Transaction)?,
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Temporarily acquirer the mutex lock to add the ring member info.
|
||||
tx_v_data
|
||||
.rings_member_info
|
||||
.lock()
|
||||
.unwrap()
|
||||
.as_mut()
|
||||
// this unwrap is safe as otherwise this would require a full refresh not a partial one.
|
||||
.unwrap()
|
||||
.0
|
||||
.decoy_info = decoy_info;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// This function returns the transaction verification data that need refreshing.
|
||||
///
|
||||
/// The first returned vec needs a full refresh.
|
||||
/// The second returned vec only needs a partial refresh.
|
||||
///
|
||||
/// A full refresh is a refresh of all the ring members and the decoy info.
|
||||
/// A partial refresh is just a refresh of the decoy info.
|
||||
fn ring_member_info_needing_refresh(
|
||||
txs_verification_data: &[Arc<TransactionVerificationData>],
|
||||
hf: &HardFork,
|
||||
) -> (
|
||||
Vec<Arc<TransactionVerificationData>>,
|
||||
Vec<Arc<TransactionVerificationData>>,
|
||||
) {
|
||||
let mut txs_needing_full_refresh = Vec::new();
|
||||
let mut txs_needing_partial_refresh = Vec::new();
|
||||
|
||||
for tx in txs_verification_data {
|
||||
let tx_ring_member_info = tx.rings_member_info.lock().unwrap();
|
||||
|
||||
// if we don't have ring members or if a re-org has happened do a full refresh.
|
||||
if let Some(tx_ring_member_info) = tx_ring_member_info.deref() {
|
||||
if tx_ring_member_info.1.reorg_happened() {
|
||||
txs_needing_full_refresh.push(tx.clone());
|
||||
continue;
|
||||
.map(|inp| match inp {
|
||||
Input::ToKey {
|
||||
amount,
|
||||
key_offsets,
|
||||
..
|
||||
} => {
|
||||
let offsets = get_absolute_offsets(key_offsets)?;
|
||||
Ok(offsets
|
||||
.iter()
|
||||
.map(|offset| {
|
||||
get_outputs(amount.unwrap_or(0), *offset)
|
||||
.ok_or(TransactionError::RingMemberNotFoundOrInvalid)
|
||||
})
|
||||
.collect::<Result<_, TransactionError>>()?)
|
||||
}
|
||||
} else {
|
||||
txs_needing_full_refresh.push(tx.clone());
|
||||
continue;
|
||||
}
|
||||
|
||||
// if any input does not have a 0 amount do a partial refresh, this is because some decoy info
|
||||
// data is based on the amount of non-ringCT outputs at a certain point.
|
||||
// Or if a hf has happened as this will change the default minimum decoys.
|
||||
if &tx_ring_member_info
|
||||
.as_ref()
|
||||
.expect("We just checked if this was None")
|
||||
.0
|
||||
.hf
|
||||
!= hf
|
||||
|| tx.tx.prefix.inputs.iter().any(|inp| match inp {
|
||||
Input::Gen(_) => false,
|
||||
Input::ToKey { amount, .. } => amount.is_some(),
|
||||
})
|
||||
{
|
||||
txs_needing_partial_refresh.push(tx.clone());
|
||||
}
|
||||
}
|
||||
|
||||
(txs_needing_full_refresh, txs_needing_partial_refresh)
|
||||
_ => Err(TransactionError::IncorrectInputType),
|
||||
})
|
||||
.collect::<Result<_, TransactionError>>()
|
||||
}
|
||||
|
||||
/// Fills the `rings_member_info` field on the inputted [`TransactionVerificationData`].
|
||||
/// Construct a [`TxRingMembersInfo`] struct.
|
||||
///
|
||||
/// The used outs must be all the ring members used in the transactions inputs.
|
||||
pub fn new_ring_member_info(
|
||||
used_outs: Vec<Vec<OutputOnChain>>,
|
||||
decoy_info: Option<DecoyInfo>,
|
||||
tx_version: TxVersion,
|
||||
) -> Result<TxRingMembersInfo, TransactionError> {
|
||||
Ok(TxRingMembersInfo {
|
||||
youngest_used_out_height: used_outs
|
||||
.iter()
|
||||
.map(|inp_outs| {
|
||||
inp_outs
|
||||
.iter()
|
||||
// the output with the highest height is the youngest
|
||||
.map(|out| out.height)
|
||||
.max()
|
||||
.expect("Input must have ring members")
|
||||
})
|
||||
.max()
|
||||
.expect("Tx must have inputs"),
|
||||
time_locked_outs: used_outs
|
||||
.iter()
|
||||
.flat_map(|inp_outs| {
|
||||
inp_outs
|
||||
.iter()
|
||||
.filter_map(|out| match out.time_lock {
|
||||
Timelock::None => None,
|
||||
lock => Some(lock),
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
.collect(),
|
||||
rings: new_rings(used_outs, tx_version)?,
|
||||
decoy_info,
|
||||
})
|
||||
}
|
||||
|
||||
/// Builds the [`Rings`] for the transaction inputs, from the given outputs.
|
||||
fn new_rings(
|
||||
outputs: Vec<Vec<OutputOnChain>>,
|
||||
tx_version: TxVersion,
|
||||
) -> Result<Rings, TransactionError> {
|
||||
Ok(match tx_version {
|
||||
TxVersion::RingSignatures => Rings::Legacy(
|
||||
outputs
|
||||
.into_iter()
|
||||
.map(|inp_outs| {
|
||||
inp_outs
|
||||
.into_iter()
|
||||
.map(|out| out.key.ok_or(TransactionError::RingMemberNotFoundOrInvalid))
|
||||
.collect::<Result<Vec<_>, TransactionError>>()
|
||||
})
|
||||
.collect::<Result<Vec<_>, TransactionError>>()?,
|
||||
),
|
||||
TxVersion::RingCT => Rings::RingCT(
|
||||
outputs
|
||||
.into_iter()
|
||||
.map(|inp_outs| {
|
||||
inp_outs
|
||||
.into_iter()
|
||||
.map(|out| {
|
||||
Ok([
|
||||
out.key
|
||||
.ok_or(TransactionError::RingMemberNotFoundOrInvalid)?,
|
||||
out.commitment,
|
||||
])
|
||||
})
|
||||
.collect::<Result<_, TransactionError>>()
|
||||
})
|
||||
.collect::<Result<_, _>>()?,
|
||||
),
|
||||
})
|
||||
}
|
||||
|
||||
/// Retrieves the [`TxRingMembersInfo`] for the inputted [`TransactionVerificationData`].
|
||||
///
|
||||
/// This function batch gets all the ring members for the inputted transactions and fills in data about
|
||||
/// them.
|
||||
pub async fn batch_fill_ring_member_info<'a, D: Database + Clone + Send + Sync + 'static>(
|
||||
pub async fn batch_get_ring_member_info<D: Database>(
|
||||
txs_verification_data: impl Iterator<Item = &Arc<TransactionVerificationData>> + Clone,
|
||||
hf: &HardFork,
|
||||
re_org_token: ReOrgToken,
|
||||
mut database: D,
|
||||
out_cache: Option<&OutputCache<'a>>,
|
||||
) -> Result<(), ExtendedConsensusError> {
|
||||
) -> Result<Vec<TxRingMembersInfo>, ExtendedConsensusError> {
|
||||
let mut output_ids = HashMap::new();
|
||||
|
||||
for tx_v_data in txs_verification_data.clone() {
|
||||
|
@ -177,19 +153,19 @@ pub async fn batch_fill_ring_member_info<'a, D: Database + Clone + Send + Sync +
|
|||
.map_err(ConsensusError::Transaction)?;
|
||||
}
|
||||
|
||||
let DatabaseResponse::Outputs(outputs) = database
|
||||
let BCResponse::Outputs(outputs) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(DatabaseRequest::Outputs(output_ids))
|
||||
.call(BCReadRequest::Outputs(output_ids))
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!")
|
||||
};
|
||||
|
||||
let DatabaseResponse::NumberOutputsWithAmount(outputs_with_amount) = database
|
||||
let BCResponse::NumberOutputsWithAmount(outputs_with_amount) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(DatabaseRequest::NumberOutputsWithAmount(
|
||||
.call(BCReadRequest::NumberOutputsWithAmount(
|
||||
outputs.keys().copied().collect(),
|
||||
))
|
||||
.await?
|
||||
|
@ -197,38 +173,84 @@ pub async fn batch_fill_ring_member_info<'a, D: Database + Clone + Send + Sync +
|
|||
panic!("Database sent incorrect response!")
|
||||
};
|
||||
|
||||
for tx_v_data in txs_verification_data {
|
||||
let ring_members_for_tx = get_ring_members_for_inputs(
|
||||
|amt, idx| {
|
||||
if let Some(cached_outs) = out_cache {
|
||||
if let Some(out) = cached_outs.get_out(amt, idx) {
|
||||
return Some(out);
|
||||
}
|
||||
}
|
||||
Ok(txs_verification_data
|
||||
.map(move |tx_v_data| {
|
||||
let numb_outputs = |amt| outputs_with_amount.get(&amt).copied().unwrap_or(0);
|
||||
|
||||
outputs.get(&amt)?.get(&idx)
|
||||
},
|
||||
&tx_v_data.tx.prefix.inputs,
|
||||
)
|
||||
.map_err(ConsensusError::Transaction)?;
|
||||
|
||||
let decoy_info = if hf != &HardFork::V1 {
|
||||
// this data is only needed after hard-fork 1.
|
||||
Some(
|
||||
DecoyInfo::new(&tx_v_data.tx.prefix.inputs, &outputs_with_amount, hf)
|
||||
.map_err(ConsensusError::Transaction)?,
|
||||
let ring_members_for_tx = get_ring_members_for_inputs(
|
||||
|amt, idx| outputs.get(&amt)?.get(&idx).copied(),
|
||||
&tx_v_data.tx.prefix.inputs,
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
.map_err(ConsensusError::Transaction)?;
|
||||
|
||||
// Temporarily acquirer the mutex lock to add the ring member info.
|
||||
let _ = tx_v_data.rings_member_info.lock().unwrap().insert((
|
||||
TxRingMembersInfo::new(ring_members_for_tx, decoy_info, tx_v_data.version, *hf)
|
||||
.map_err(ConsensusError::Transaction)?,
|
||||
re_org_token.clone(),
|
||||
));
|
||||
}
|
||||
let decoy_info = if hf != &HardFork::V1 {
|
||||
// this data is only needed after hard-fork 1.
|
||||
Some(
|
||||
DecoyInfo::new(&tx_v_data.tx.prefix.inputs, numb_outputs, hf)
|
||||
.map_err(ConsensusError::Transaction)?,
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(())
|
||||
new_ring_member_info(ring_members_for_tx, decoy_info, tx_v_data.version)
|
||||
.map_err(ConsensusError::Transaction)
|
||||
})
|
||||
.collect::<Result<_, _>>()?)
|
||||
}
|
||||
|
||||
/// Refreshes the transactions [`TxRingMembersInfo`], if needed.
|
||||
///
|
||||
/// # Panics
|
||||
/// This functions panics if `hf == HardFork::V1` as decoy info
|
||||
/// should not be needed for V1.
|
||||
#[instrument(level = "debug", skip_all)]
|
||||
pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>(
|
||||
txs_verification_data: &'a [Arc<TransactionVerificationData>],
|
||||
hf: HardFork,
|
||||
mut database: D,
|
||||
) -> Result<impl Iterator<Item = Result<DecoyInfo, ConsensusError>> + 'a, ExtendedConsensusError> {
|
||||
// decoy info is not needed for V1.
|
||||
assert_ne!(hf, HardFork::V1);
|
||||
|
||||
tracing::debug!(
|
||||
"Retrieving decoy info for {} txs.",
|
||||
txs_verification_data.len()
|
||||
);
|
||||
|
||||
// Get all the different input amounts.
|
||||
let unique_input_amounts = txs_verification_data
|
||||
.iter()
|
||||
.flat_map(|tx_info| {
|
||||
tx_info.tx.prefix.inputs.iter().map(|input| match input {
|
||||
Input::ToKey { amount, .. } => amount.unwrap_or(0),
|
||||
_ => 0,
|
||||
})
|
||||
})
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
tracing::debug!(
|
||||
"Getting the amount of outputs with certain amounts for {} amounts",
|
||||
unique_input_amounts.len()
|
||||
);
|
||||
|
||||
let BCResponse::NumberOutputsWithAmount(outputs_with_amount) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(BCReadRequest::NumberOutputsWithAmount(
|
||||
unique_input_amounts.into_iter().collect(),
|
||||
))
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!")
|
||||
};
|
||||
|
||||
Ok(txs_verification_data.iter().map(move |tx_v_data| {
|
||||
DecoyInfo::new(
|
||||
&tx_v_data.tx.prefix.inputs,
|
||||
|amt| outputs_with_amount.get(&amt).copied().unwrap_or(0),
|
||||
&hf,
|
||||
)
|
||||
.map_err(ConsensusError::Transaction)
|
||||
}))
|
||||
}
|
||||
|
|
|
@ -1,153 +0,0 @@
|
|||
use std::{
|
||||
collections::{BTreeMap, HashMap},
|
||||
iter::once,
|
||||
sync::{Arc, OnceLock},
|
||||
};
|
||||
|
||||
use curve25519_dalek::{
|
||||
constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, EdwardsPoint, Scalar,
|
||||
};
|
||||
use monero_consensus::{
|
||||
blocks::BlockError,
|
||||
miner_tx::MinerTxError,
|
||||
transactions::{OutputOnChain, TransactionError},
|
||||
ConsensusError,
|
||||
};
|
||||
use monero_serai::{
|
||||
block::Block,
|
||||
transaction::{Input, Timelock},
|
||||
H,
|
||||
};
|
||||
use tower::ServiceExt;
|
||||
|
||||
use crate::{
|
||||
transactions::TransactionVerificationData, Database, DatabaseRequest, DatabaseResponse,
|
||||
ExtendedConsensusError,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
enum CachedAmount<'a> {
|
||||
Clear(u64),
|
||||
Commitment(&'a EdwardsPoint),
|
||||
}
|
||||
|
||||
impl<'a> CachedAmount<'a> {
|
||||
fn get_commitment(&self) -> EdwardsPoint {
|
||||
match self {
|
||||
CachedAmount::Commitment(commitment) => **commitment,
|
||||
// TODO: Setup a table with common amounts.
|
||||
CachedAmount::Clear(amt) => ED25519_BASEPOINT_POINT + H() * Scalar::from(*amt),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct CachedOutput<'a> {
|
||||
height: u64,
|
||||
time_lock: &'a Timelock,
|
||||
key: &'a CompressedEdwardsY,
|
||||
amount: CachedAmount<'a>,
|
||||
|
||||
cached_created: OnceLock<OutputOnChain>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct OutputCache<'a>(HashMap<u64, BTreeMap<u64, CachedOutput<'a>>>);
|
||||
|
||||
impl<'a> OutputCache<'a> {
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new() -> Self {
|
||||
OutputCache(HashMap::new())
|
||||
}
|
||||
|
||||
pub fn get_out(&self, amt: u64, idx: u64) -> Option<&OutputOnChain> {
|
||||
let cached_out = self.0.get(&amt)?.get(&idx)?;
|
||||
|
||||
Some(cached_out.cached_created.get_or_init(|| OutputOnChain {
|
||||
height: cached_out.height,
|
||||
time_lock: *cached_out.time_lock,
|
||||
key: cached_out.key.decompress(),
|
||||
commitment: cached_out.amount.get_commitment(),
|
||||
}))
|
||||
}
|
||||
|
||||
pub async fn extend_from_block<'b: 'a, D: Database>(
|
||||
&mut self,
|
||||
blocks: impl Iterator<Item = (&'b Block, &'b [Arc<TransactionVerificationData>])> + 'b,
|
||||
database: &mut D,
|
||||
) -> Result<(), ExtendedConsensusError> {
|
||||
let mut idx_needed = HashMap::new();
|
||||
|
||||
for (block, txs) in blocks {
|
||||
for tx in once(&block.miner_tx).chain(txs.iter().map(|tx| &tx.tx)) {
|
||||
let is_rct = tx.prefix.version == 2;
|
||||
let is_miner = matches!(tx.prefix.inputs.as_slice(), &[Input::Gen(_)]);
|
||||
|
||||
for (i, out) in tx.prefix.outputs.iter().enumerate() {
|
||||
let amt = out.amount.unwrap_or(0);
|
||||
// The amt this output will be stored under.
|
||||
let amt_table_key = if is_rct { 0 } else { amt };
|
||||
|
||||
let amount_commitment = match (is_rct, is_miner) {
|
||||
(true, false) => CachedAmount::Commitment(
|
||||
tx.rct_signatures.base.commitments.get(i).ok_or(
|
||||
ConsensusError::Transaction(TransactionError::NonZeroOutputForV2),
|
||||
)?,
|
||||
),
|
||||
_ => CachedAmount::Clear(amt),
|
||||
};
|
||||
let output_to_cache = CachedOutput {
|
||||
height: block.number().ok_or(ConsensusError::Block(
|
||||
BlockError::MinerTxError(MinerTxError::InputNotOfTypeGen),
|
||||
))?,
|
||||
time_lock: &tx.prefix.timelock,
|
||||
key: &out.key,
|
||||
amount: amount_commitment,
|
||||
|
||||
cached_created: OnceLock::new(),
|
||||
};
|
||||
|
||||
let Some(amt_table) = self.0.get_mut(&amt_table_key) else {
|
||||
idx_needed
|
||||
.entry(amt_table_key)
|
||||
.or_insert_with(Vec::new)
|
||||
.push(output_to_cache);
|
||||
continue;
|
||||
};
|
||||
|
||||
let top_idx = *amt_table.last_key_value().unwrap().0;
|
||||
amt_table.insert(top_idx + 1, output_to_cache);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if idx_needed.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let DatabaseResponse::NumberOutputsWithAmount(numb_outs) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(DatabaseRequest::NumberOutputsWithAmount(
|
||||
idx_needed.keys().copied().collect(),
|
||||
))
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
|
||||
for (amt_table_key, out) in idx_needed {
|
||||
let numb_outs = *numb_outs
|
||||
.get(&amt_table_key)
|
||||
.expect("DB did not return all results!");
|
||||
|
||||
self.0.entry(amt_table_key).or_default().extend(
|
||||
out.into_iter()
|
||||
.enumerate()
|
||||
.map(|(i, out)| (u64::try_from(i + numb_outs).unwrap(), out)),
|
||||
)
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
144
consensus/tests/verify_correct_txs.rs
Normal file
144
consensus/tests/verify_correct_txs.rs
Normal file
|
@ -0,0 +1,144 @@
|
|||
use std::{
|
||||
collections::{BTreeMap, HashMap},
|
||||
future::ready,
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY};
|
||||
use monero_serai::transaction::{Timelock, Transaction};
|
||||
use tower::{service_fn, Service, ServiceExt};
|
||||
|
||||
use cuprate_consensus::{
|
||||
TxVerifierService, VerifyTxRequest, VerifyTxResponse, __private::Database,
|
||||
};
|
||||
use cuprate_types::{
|
||||
blockchain::{BCReadRequest, BCResponse},
|
||||
OutputOnChain,
|
||||
};
|
||||
|
||||
use cuprate_consensus_rules::HardFork;
|
||||
|
||||
use cuprate_test_utils::data::TX_E2D393;
|
||||
|
||||
fn dummy_database(outputs: BTreeMap<u64, OutputOnChain>) -> impl Database + Clone {
|
||||
let outputs = Arc::new(outputs);
|
||||
|
||||
service_fn(move |req: BCReadRequest| {
|
||||
ready(Ok(match req {
|
||||
BCReadRequest::NumberOutputsWithAmount(_) => {
|
||||
BCResponse::NumberOutputsWithAmount(HashMap::new())
|
||||
}
|
||||
BCReadRequest::Outputs(outs) => {
|
||||
let idxs = outs.get(&0).unwrap();
|
||||
|
||||
let mut ret = HashMap::new();
|
||||
|
||||
ret.insert(
|
||||
0_u64,
|
||||
idxs.iter()
|
||||
.map(|idx| (*idx, *outputs.get(idx).unwrap()))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
);
|
||||
|
||||
BCResponse::Outputs(ret)
|
||||
}
|
||||
BCReadRequest::KeyImagesSpent(_) => BCResponse::KeyImagesSpent(false),
|
||||
_ => panic!("Database request not needed for this test"),
|
||||
}))
|
||||
})
|
||||
}
|
||||
|
||||
macro_rules! test_verify_valid_v2_tx {
|
||||
(
|
||||
$test_name: ident,
|
||||
$tx: ident,
|
||||
Rings: $([
|
||||
$($idx: literal: ($ring_member: literal, $commitment: literal),)+
|
||||
],)+
|
||||
$hf: ident
|
||||
) => {
|
||||
|
||||
#[tokio::test]
|
||||
#[allow(const_item_mutation)]
|
||||
async fn $test_name() {
|
||||
let members = vec![
|
||||
$($(($idx,
|
||||
OutputOnChain {
|
||||
height: 0,
|
||||
time_lock: Timelock::None,
|
||||
commitment: CompressedEdwardsY::from_slice(&hex_literal::hex!($commitment))
|
||||
.unwrap()
|
||||
.decompress()
|
||||
.unwrap(),
|
||||
key: CompressedEdwardsY::from_slice(&hex_literal::hex!($ring_member))
|
||||
.unwrap()
|
||||
.decompress(),
|
||||
}),)+)+
|
||||
];
|
||||
|
||||
let map = BTreeMap::from_iter(members);
|
||||
let database = dummy_database(map);
|
||||
|
||||
let mut tx_verifier = TxVerifierService::new(database);
|
||||
|
||||
assert!(matches!(tx_verifier.ready().await.unwrap().call(
|
||||
VerifyTxRequest::New {
|
||||
txs: vec![Transaction::read(&mut $tx).unwrap()].into(),
|
||||
current_chain_height: 10,
|
||||
top_hash: [0; 32],
|
||||
hf: HardFork::$hf,
|
||||
time_for_time_lock: u64::MAX
|
||||
}
|
||||
).await.unwrap(), VerifyTxResponse::OkPrepped(_)));
|
||||
|
||||
// Check verification fails if we put random ring members
|
||||
|
||||
let members = vec![
|
||||
$($(($idx,
|
||||
OutputOnChain {
|
||||
height: 0,
|
||||
time_lock: Timelock::None,
|
||||
commitment: ED25519_BASEPOINT_POINT,
|
||||
key: CompressedEdwardsY::from_slice(&hex_literal::hex!($ring_member))
|
||||
.unwrap()
|
||||
.decompress(),
|
||||
}),)+)+
|
||||
];
|
||||
|
||||
let map = BTreeMap::from_iter(members);
|
||||
let database = dummy_database(map);
|
||||
|
||||
let mut tx_verifier = TxVerifierService::new(database);
|
||||
|
||||
assert!(tx_verifier.ready().await.unwrap().call(
|
||||
VerifyTxRequest::New {
|
||||
txs: vec![Transaction::read(&mut $tx).unwrap()].into(),
|
||||
current_chain_height: 10,
|
||||
top_hash: [0; 32],
|
||||
hf: HardFork::$hf,
|
||||
time_for_time_lock: u64::MAX
|
||||
}
|
||||
).await.is_err());
|
||||
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
test_verify_valid_v2_tx! {
|
||||
verify_tx_e2d393,
|
||||
TX_E2D393,
|
||||
Rings: [
|
||||
7567582: ("5fa4f8b160c0877476e78094d0ce4951b20f43088f6e3698fa4d3154069c7c1b", "9a41189729e8cf113cee0b126e22653f3f551227947f54fbbb16ae8d535d757d"),
|
||||
7958047: ("0febe3d139bf3db267c2efdc714ea9b42e437a5aa16e42848a835d009108fcdf", "ecca12345c02c6b0348cfa988a0d86d34e3a89cd8b53dd4ffdb860cee0eda487"),// miner amt: 3551239030364
|
||||
8224417: ("bdd1fb8a725ae15ce37bc8090925126396f87c2972d728814f2d622baa77ebf6", "24624e957c351727deadafda531f7bed433220e72dc85f8aa8d3d32cd7df42e1"),
|
||||
8225772: ("cddef0210ed3113f3362ecb7aa43003c6c3ed4bcac09dc4d9d8d015472c8a3d8", "f61b954879a0f3cc3540f0364ad108fe286162f993f4b435b42038c29d07b8c2"),
|
||||
8234785: ("4edf5a8448e133fcb7914ea161dbb8eb0057e44284d0315839d9fce4cdb063e8", "1cec1e2f88268d6f164f07f79c663bd1af09920a9254164f518faff45dd42138"),
|
||||
8247173: ("cbee0e5fa9c31689b174862a6eb0a164a2d807d2862ac0ad50c0030f0af6c5e7", "f229752b609d923cda89735ed2a42a9af6fc3e3219ac164f17d5eac4f85f391c"),
|
||||
8285361: ("f16dbd9542e7dd575c15e2c9217f5cecb6d134383e5e8416da4affab132f1ff8", "7e31ad658fff150b0ae3a9329e353522ed20dd3ac8df8cd965fa4369164857b4"),
|
||||
8308826: ("4ce2b333cc421237fc96f1a0719d4ac0892f0ff457f3a14f2e499fc045cd4714", "2f7f240e42cbd3a5f02b0b185465263b6a4c6df609dcf928314ea7ddbec3d3dc"),// miner amt: 3408911250482
|
||||
8312407: ("ead8dfb7423f5c3fa7f10663ce885d27d1b7eeb634ac05fd74d3b080440819bf", "236c3fde472978aff92aeb6e752eeb681dfdbb9a84d7e049238f7f544b85062a"),
|
||||
8314321: ("24d3dadeef6b0aff3ee7288cd391823b0020ba3fab42085f66765fc2a164f879", "bffce0393f1fc96e3d83a057208b506c9f7ad52e012e20b228918932c6c8287a"),
|
||||
8315222: ("a8b165589dffa4c31c27fb432cfdd4855b0d04102b79e439720bb80198d5b9c0", "c3febd29c1a3cc397639ff7fdb357d22a900821bef956af626651f2a916cf6f6"),
|
||||
],
|
||||
V9
|
||||
}
|
|
@ -120,6 +120,7 @@ allow = [
|
|||
|
||||
# Font licenses.
|
||||
"Unicode-DFS-2016", # https://spdx.org/licenses/Unicode-DFS-2016.html
|
||||
"Unicode-3.0", # https://spdx.org/licenses/Unicode-3.0.html
|
||||
# "LicenseRef-UFL-1.0", # https://tldrlegal.com/license/ubuntu-font-license,-1.0
|
||||
# "OFL-1.1", # https://spdx.org/licenses/OFL-1.1.html
|
||||
]
|
||||
|
|
|
@ -11,7 +11,7 @@ monero-pruning = { path = "../../pruning" }
|
|||
monero-wire = { path= "../../net/monero-wire" }
|
||||
monero-p2p = { path = "../monero-p2p" }
|
||||
|
||||
tower = { workspace = true, features = ["util", "buffer"] }
|
||||
tower = { workspace = true, features = ["util"] }
|
||||
tokio = { workspace = true, features = ["time", "fs", "rt"]}
|
||||
tokio-util = { workspace = true, features = ["time"] }
|
||||
|
||||
|
|
|
@ -409,6 +409,9 @@ impl<Z: NetworkZone> Service<AddressBookRequest<Z>> for AddressBook<Z> {
|
|||
AddressBookRequest::GetWhitePeers(len) => {
|
||||
Ok(AddressBookResponse::Peers(self.get_white_peers(len)))
|
||||
}
|
||||
AddressBookRequest::IsPeerBanned(addr) => Ok(AddressBookResponse::IsPeerBanned(
|
||||
self.is_peer_banned(&addr),
|
||||
)),
|
||||
};
|
||||
|
||||
ready(response)
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
//!
|
||||
//! This module holds the logic for persistent peer storage.
|
||||
//! Cuprates address book is modeled as a [`tower::Service`]
|
||||
//! The request is [`AddressBookRequest`] and the response is
|
||||
//! The request is [`AddressBookRequest`](monero_p2p::services::AddressBookRequest) and the response is
|
||||
//! [`AddressBookResponse`](monero_p2p::services::AddressBookResponse).
|
||||
//!
|
||||
//! Cuprate, like monerod, actually has multiple address books, one
|
||||
|
@ -13,9 +13,7 @@
|
|||
//!
|
||||
use std::{io::ErrorKind, path::PathBuf, time::Duration};
|
||||
|
||||
use tower::buffer::Buffer;
|
||||
|
||||
use monero_p2p::{services::AddressBookRequest, NetworkZone};
|
||||
use monero_p2p::NetworkZone;
|
||||
|
||||
mod book;
|
||||
mod peer_list;
|
||||
|
@ -65,7 +63,7 @@ pub enum AddressBookError {
|
|||
/// Initializes the P2P address book for a specific network zone.
|
||||
pub async fn init_address_book<Z: NetworkZone>(
|
||||
cfg: AddressBookConfig,
|
||||
) -> Result<Buffer<book::AddressBook<Z>, AddressBookRequest<Z>>, std::io::Error> {
|
||||
) -> Result<book::AddressBook<Z>, std::io::Error> {
|
||||
tracing::info!(
|
||||
"Loading peers from file: {} ",
|
||||
cfg.peer_store_file.display()
|
||||
|
@ -82,5 +80,5 @@ pub async fn init_address_book<Z: NetworkZone>(
|
|||
|
||||
let address_book = book::AddressBook::<Z>::new(cfg, white_list, gray_list, Vec::new());
|
||||
|
||||
Ok(Buffer::new(address_book, 150))
|
||||
Ok(address_book)
|
||||
}
|
||||
|
|
|
@ -48,7 +48,7 @@ pub fn new_buffer<T>(max_item_weight: usize) -> (BufferAppender<T>, BufferStream
|
|||
queue: tx,
|
||||
sink_waker: sink_waker.clone(),
|
||||
capacity: capacity_atomic.clone(),
|
||||
max_item_weight: capacity,
|
||||
max_item_weight,
|
||||
},
|
||||
BufferStream {
|
||||
queue: rx,
|
||||
|
|
|
@ -11,12 +11,13 @@ monero-wire = { path = "../../net/monero-wire" }
|
|||
monero-p2p = { path = "../monero-p2p", features = ["borsh"] }
|
||||
monero-address-book = { path = "../address-book" }
|
||||
monero-pruning = { path = "../../pruning" }
|
||||
cuprate-helper = { path = "../../helper", features = ["asynch"] }
|
||||
cuprate-helper = { path = "../../helper", features = ["asynch"], default-features = false }
|
||||
async-buffer = { path = "../async-buffer" }
|
||||
|
||||
monero-serai = { workspace = true, features = ["std"] }
|
||||
|
||||
tower = { workspace = true }
|
||||
tokio = { workspace = true, features = ["rt"] }
|
||||
tower = { workspace = true, features = ["buffer"] }
|
||||
tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
|
||||
rayon = { workspace = true }
|
||||
tokio-util = { workspace = true }
|
||||
tokio-stream = { workspace = true, features = ["sync", "time"] }
|
||||
|
@ -26,7 +27,6 @@ dashmap = { workspace = true }
|
|||
|
||||
thiserror = { workspace = true }
|
||||
bytes = { workspace = true, features = ["std"] }
|
||||
indexmap = { workspace = true, features = ["std"] }
|
||||
rand = { workspace = true, features = ["std", "std_rng"] }
|
||||
rand_distr = { workspace = true, features = ["std"] }
|
||||
hex = { workspace = true, features = ["std"] }
|
||||
|
@ -34,3 +34,5 @@ tracing = { workspace = true, features = ["std", "attributes"] }
|
|||
|
||||
[dev-dependencies]
|
||||
cuprate-test-utils = { path = "../../test-utils" }
|
||||
indexmap = { workspace = true }
|
||||
proptest = { workspace = true }
|
||||
|
|
1181
p2p/cuprate-p2p/src/block_downloader.rs
Normal file
1181
p2p/cuprate-p2p/src/block_downloader.rs
Normal file
File diff suppressed because it is too large
Load diff
190
p2p/cuprate-p2p/src/block_downloader/chain_tracker.rs
Normal file
190
p2p/cuprate-p2p/src/block_downloader/chain_tracker.rs
Normal file
|
@ -0,0 +1,190 @@
|
|||
use std::{cmp::min, collections::VecDeque};
|
||||
|
||||
use fixed_bytes::ByteArrayVec;
|
||||
|
||||
use monero_p2p::{client::InternalPeerID, handles::ConnectionHandle, NetworkZone};
|
||||
use monero_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT};
|
||||
|
||||
use crate::constants::MEDIUM_BAN;
|
||||
|
||||
/// A new chain entry to add to our chain tracker.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct ChainEntry<N: NetworkZone> {
|
||||
/// A list of block IDs.
|
||||
pub ids: Vec<[u8; 32]>,
|
||||
/// The peer who told us about this chain entry.
|
||||
pub peer: InternalPeerID<N::Addr>,
|
||||
/// The peer who told us about this chain entry's handle
|
||||
pub handle: ConnectionHandle,
|
||||
}
|
||||
|
||||
/// A batch of blocks to retrieve.
|
||||
#[derive(Clone)]
|
||||
pub struct BlocksToRetrieve<N: NetworkZone> {
|
||||
/// The block IDs to get.
|
||||
pub ids: ByteArrayVec<32>,
|
||||
/// The expected height of the first block in `ids`.
|
||||
pub start_height: u64,
|
||||
/// The peer who told us about this batch.
|
||||
pub peer_who_told_us: InternalPeerID<N::Addr>,
|
||||
/// The peer who told us about this batch's handle.
|
||||
pub peer_who_told_us_handle: ConnectionHandle,
|
||||
/// The number of requests sent for this batch.
|
||||
pub requests_sent: usize,
|
||||
/// The number of times this batch has been requested from a peer and failed.
|
||||
pub failures: usize,
|
||||
}
|
||||
|
||||
pub enum ChainTrackerError {
|
||||
NewEntryIsInvalid,
|
||||
NewEntryDoesNotFollowChain,
|
||||
}
|
||||
|
||||
/// # Chain Tracker
|
||||
///
|
||||
/// This struct allows following a single chain. It takes in [`ChainEntry`]s and
|
||||
/// allows getting [`BlocksToRetrieve`].
|
||||
pub struct ChainTracker<N: NetworkZone> {
|
||||
/// A list of [`ChainEntry`]s, in order.
|
||||
entries: VecDeque<ChainEntry<N>>,
|
||||
/// The height of the first block, in the first entry in entries.
|
||||
first_height: u64,
|
||||
/// The hash of the last block in the last entry.
|
||||
top_seen_hash: [u8; 32],
|
||||
/// The hash of the genesis block.
|
||||
our_genesis: [u8; 32],
|
||||
}
|
||||
|
||||
impl<N: NetworkZone> ChainTracker<N> {
|
||||
/// Creates a new chain tracker.
|
||||
pub fn new(new_entry: ChainEntry<N>, first_height: u64, our_genesis: [u8; 32]) -> Self {
|
||||
let top_seen_hash = *new_entry.ids.last().unwrap();
|
||||
let mut entries = VecDeque::with_capacity(1);
|
||||
entries.push_back(new_entry);
|
||||
|
||||
Self {
|
||||
top_seen_hash,
|
||||
entries,
|
||||
first_height,
|
||||
our_genesis,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `true` if the peer is expected to have the next block after our highest seen block
|
||||
/// according to their pruning seed.
|
||||
pub fn should_ask_for_next_chain_entry(&self, seed: &PruningSeed) -> bool {
|
||||
seed.has_full_block(self.top_height(), CRYPTONOTE_MAX_BLOCK_HEIGHT)
|
||||
}
|
||||
|
||||
/// Returns the simple history, the highest seen block and the genesis block.
|
||||
pub fn get_simple_history(&self) -> [[u8; 32]; 2] {
|
||||
[self.top_seen_hash, self.our_genesis]
|
||||
}
|
||||
|
||||
/// Returns the height of the highest block we are tracking.
|
||||
pub fn top_height(&self) -> u64 {
|
||||
let top_block_idx = self
|
||||
.entries
|
||||
.iter()
|
||||
.map(|entry| entry.ids.len())
|
||||
.sum::<usize>();
|
||||
|
||||
self.first_height + u64::try_from(top_block_idx).unwrap()
|
||||
}
|
||||
|
||||
/// Returns the total number of queued batches for a certain `batch_size`.
|
||||
pub fn block_requests_queued(&self, batch_size: usize) -> usize {
|
||||
self.entries
|
||||
.iter()
|
||||
.map(|entry| entry.ids.len().div_ceil(batch_size))
|
||||
.sum()
|
||||
}
|
||||
|
||||
/// Attempts to add an incoming [`ChainEntry`] to the chain tracker.
|
||||
pub fn add_entry(&mut self, mut chain_entry: ChainEntry<N>) -> Result<(), ChainTrackerError> {
|
||||
if chain_entry.ids.is_empty() {
|
||||
// The peer must send at lest one overlapping block.
|
||||
chain_entry.handle.ban_peer(MEDIUM_BAN);
|
||||
return Err(ChainTrackerError::NewEntryIsInvalid);
|
||||
}
|
||||
|
||||
if chain_entry.ids.len() == 1 {
|
||||
return Err(ChainTrackerError::NewEntryDoesNotFollowChain);
|
||||
}
|
||||
|
||||
if self
|
||||
.entries
|
||||
.back()
|
||||
.is_some_and(|last_entry| last_entry.ids.last().unwrap() != &chain_entry.ids[0])
|
||||
{
|
||||
return Err(ChainTrackerError::NewEntryDoesNotFollowChain);
|
||||
}
|
||||
|
||||
let new_entry = ChainEntry {
|
||||
// ignore the first block - we already know it.
|
||||
ids: chain_entry.ids.split_off(1),
|
||||
peer: chain_entry.peer,
|
||||
handle: chain_entry.handle,
|
||||
};
|
||||
|
||||
self.top_seen_hash = *new_entry.ids.last().unwrap();
|
||||
|
||||
self.entries.push_back(new_entry);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns a batch of blocks to request.
|
||||
///
|
||||
/// The returned batches length will be less than or equal to `max_blocks`
|
||||
pub fn blocks_to_get(
|
||||
&mut self,
|
||||
pruning_seed: &PruningSeed,
|
||||
max_blocks: usize,
|
||||
) -> Option<BlocksToRetrieve<N>> {
|
||||
if !pruning_seed.has_full_block(self.first_height, CRYPTONOTE_MAX_BLOCK_HEIGHT) {
|
||||
return None;
|
||||
}
|
||||
|
||||
let entry = self.entries.front_mut()?;
|
||||
|
||||
// Calculate the ending index for us to get in this batch, will be the smallest out of `max_blocks`, the length of the batch or
|
||||
// the index of the next pruned block for this seed.
|
||||
let end_idx = min(
|
||||
min(entry.ids.len(), max_blocks),
|
||||
usize::try_from(
|
||||
pruning_seed
|
||||
.get_next_pruned_block(self.first_height, CRYPTONOTE_MAX_BLOCK_HEIGHT)
|
||||
// We check the first height is less than CRYPTONOTE_MAX_BLOCK_HEIGHT in response task.
|
||||
.unwrap()
|
||||
// Use a big value as a fallback if the seed does no pruning.
|
||||
.unwrap_or(CRYPTONOTE_MAX_BLOCK_HEIGHT)
|
||||
- self.first_height,
|
||||
)
|
||||
.unwrap(),
|
||||
);
|
||||
|
||||
if end_idx == 0 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let ids_to_get = entry.ids.drain(0..end_idx).collect::<Vec<_>>();
|
||||
|
||||
let blocks = BlocksToRetrieve {
|
||||
ids: ids_to_get.into(),
|
||||
start_height: self.first_height,
|
||||
peer_who_told_us: entry.peer,
|
||||
peer_who_told_us_handle: entry.handle.clone(),
|
||||
requests_sent: 0,
|
||||
failures: 0,
|
||||
};
|
||||
|
||||
self.first_height += u64::try_from(end_idx).unwrap();
|
||||
|
||||
if entry.ids.is_empty() {
|
||||
self.entries.pop_front();
|
||||
}
|
||||
|
||||
Some(blocks)
|
||||
}
|
||||
}
|
323
p2p/cuprate-p2p/src/block_downloader/tests.rs
Normal file
323
p2p/cuprate-p2p/src/block_downloader/tests.rs
Normal file
|
@ -0,0 +1,323 @@
|
|||
use std::{
|
||||
fmt::{Debug, Formatter},
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
sync::Arc,
|
||||
task::{Context, Poll},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use futures::{FutureExt, StreamExt};
|
||||
use indexmap::IndexMap;
|
||||
use monero_serai::{
|
||||
block::{Block, BlockHeader},
|
||||
ringct::{RctBase, RctPrunable, RctSignatures},
|
||||
transaction::{Input, Timelock, Transaction, TransactionPrefix},
|
||||
};
|
||||
use proptest::{collection::vec, prelude::*};
|
||||
use tokio::{sync::Semaphore, time::timeout};
|
||||
use tower::{service_fn, Service};
|
||||
|
||||
use fixed_bytes::ByteArrayVec;
|
||||
use monero_p2p::{
|
||||
client::{mock_client, Client, InternalPeerID, PeerInformation},
|
||||
network_zones::ClearNet,
|
||||
services::{PeerSyncRequest, PeerSyncResponse},
|
||||
ConnectionDirection, NetworkZone, PeerRequest, PeerResponse,
|
||||
};
|
||||
use monero_pruning::PruningSeed;
|
||||
use monero_wire::{
|
||||
common::{BlockCompleteEntry, TransactionBlobs},
|
||||
protocol::{ChainResponse, GetObjectsResponse},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
block_downloader::{download_blocks, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse},
|
||||
client_pool::ClientPool,
|
||||
};
|
||||
|
||||
proptest! {
|
||||
#![proptest_config(ProptestConfig {
|
||||
cases: 4,
|
||||
max_shrink_iters: 10,
|
||||
timeout: 60 * 1000,
|
||||
.. ProptestConfig::default()
|
||||
})]
|
||||
|
||||
#[test]
|
||||
fn test_block_downloader(blockchain in dummy_blockchain_stragtegy(), peers in 1_usize..128) {
|
||||
let blockchain = Arc::new(blockchain);
|
||||
|
||||
let tokio_pool = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap();
|
||||
|
||||
tokio_pool.block_on(async move {
|
||||
timeout(Duration::from_secs(600), async move {
|
||||
let client_pool = ClientPool::new();
|
||||
|
||||
let mut peer_ids = Vec::with_capacity(peers);
|
||||
|
||||
for _ in 0..peers {
|
||||
let client = mock_block_downloader_client(blockchain.clone());
|
||||
|
||||
peer_ids.push(client.info.id);
|
||||
|
||||
client_pool.add_new_client(client);
|
||||
}
|
||||
|
||||
let stream = download_blocks(
|
||||
client_pool,
|
||||
SyncStateSvc(peer_ids) ,
|
||||
OurChainSvc {
|
||||
genesis: *blockchain.blocks.first().unwrap().0
|
||||
},
|
||||
BlockDownloaderConfig {
|
||||
buffer_size: 1_000,
|
||||
in_progress_queue_size: 10_000,
|
||||
check_client_pool_interval: Duration::from_secs(5),
|
||||
target_batch_size: 5_000,
|
||||
initial_batch_size: 1,
|
||||
});
|
||||
|
||||
let blocks = stream.map(|blocks| blocks.blocks).concat().await;
|
||||
|
||||
assert_eq!(blocks.len() + 1, blockchain.blocks.len());
|
||||
|
||||
for (i, block) in blocks.into_iter().enumerate() {
|
||||
assert_eq!(&block, blockchain.blocks.get_index(i + 1).unwrap().1);
|
||||
}
|
||||
}).await
|
||||
}).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a strategy to generate a [`Transaction`] that is valid for the block downloader.
|
||||
fn dummy_transaction_stragtegy(height: u64)
|
||||
(
|
||||
extra in vec(any::<u8>(), 0..1_000),
|
||||
timelock in 0_usize..50_000_000,
|
||||
)
|
||||
-> Transaction {
|
||||
Transaction {
|
||||
prefix: TransactionPrefix {
|
||||
version: 1,
|
||||
timelock: Timelock::Block(timelock),
|
||||
inputs: vec![Input::Gen(height)],
|
||||
outputs: vec![],
|
||||
extra,
|
||||
},
|
||||
signatures: vec![],
|
||||
rct_signatures: RctSignatures {
|
||||
base: RctBase {
|
||||
fee: 0,
|
||||
pseudo_outs: vec![],
|
||||
encrypted_amounts: vec![],
|
||||
commitments: vec![],
|
||||
},
|
||||
prunable: RctPrunable::Null
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a strategy to generate a [`Block`] that is valid for the block downloader.
|
||||
fn dummy_block_stragtegy(
|
||||
height: u64,
|
||||
previous: [u8; 32],
|
||||
)
|
||||
(
|
||||
miner_tx in dummy_transaction_stragtegy(height),
|
||||
txs in vec(dummy_transaction_stragtegy(height), 0..25)
|
||||
)
|
||||
-> (Block, Vec<Transaction>) {
|
||||
(
|
||||
Block {
|
||||
header: BlockHeader {
|
||||
major_version: 0,
|
||||
minor_version: 0,
|
||||
timestamp: 0,
|
||||
previous,
|
||||
nonce: 0,
|
||||
},
|
||||
miner_tx,
|
||||
txs: txs.iter().map(Transaction::hash).collect(),
|
||||
},
|
||||
txs
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// A mock blockchain.
|
||||
struct MockBlockchain {
|
||||
blocks: IndexMap<[u8; 32], (Block, Vec<Transaction>)>,
|
||||
}
|
||||
|
||||
impl Debug for MockBlockchain {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str("MockBlockchain")
|
||||
}
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a strategy to generate a [`MockBlockchain`].
|
||||
fn dummy_blockchain_stragtegy()(
|
||||
blocks in vec(dummy_block_stragtegy(0, [0; 32]), 1..50_000),
|
||||
) -> MockBlockchain {
|
||||
let mut blockchain = IndexMap::new();
|
||||
|
||||
for (height, mut block) in blocks.into_iter().enumerate() {
|
||||
if let Some(last) = blockchain.last() {
|
||||
block.0.header.previous = *last.0;
|
||||
block.0.miner_tx.prefix.inputs = vec![Input::Gen(height as u64)]
|
||||
}
|
||||
|
||||
blockchain.insert(block.0.hash(), block);
|
||||
}
|
||||
|
||||
MockBlockchain {
|
||||
blocks: blockchain
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn mock_block_downloader_client(blockchain: Arc<MockBlockchain>) -> Client<ClearNet> {
|
||||
let semaphore = Arc::new(Semaphore::new(1));
|
||||
|
||||
let (connection_guard, connection_handle) = monero_p2p::handles::HandleBuilder::new()
|
||||
.with_permit(semaphore.try_acquire_owned().unwrap())
|
||||
.build();
|
||||
|
||||
let request_handler = service_fn(move |req: PeerRequest| {
|
||||
let bc = blockchain.clone();
|
||||
|
||||
async move {
|
||||
match req {
|
||||
PeerRequest::GetChain(chain_req) => {
|
||||
let mut i = 0;
|
||||
while !bc.blocks.contains_key(&chain_req.block_ids[i]) {
|
||||
i += 1;
|
||||
|
||||
if i == chain_req.block_ids.len() {
|
||||
i -= 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let block_index = bc.blocks.get_index_of(&chain_req.block_ids[i]).unwrap();
|
||||
|
||||
let block_ids = bc
|
||||
.blocks
|
||||
.get_range(block_index..)
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|(id, _)| *id)
|
||||
.take(200)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(PeerResponse::GetChain(ChainResponse {
|
||||
start_height: 0,
|
||||
total_height: 0,
|
||||
cumulative_difficulty_low64: 1,
|
||||
cumulative_difficulty_top64: 0,
|
||||
m_block_ids: block_ids.into(),
|
||||
m_block_weights: vec![],
|
||||
first_block: Default::default(),
|
||||
}))
|
||||
}
|
||||
|
||||
PeerRequest::GetObjects(obj) => {
|
||||
let mut res = Vec::with_capacity(obj.blocks.len());
|
||||
|
||||
for i in 0..obj.blocks.len() {
|
||||
let block = bc.blocks.get(&obj.blocks[i]).unwrap();
|
||||
|
||||
let block_entry = BlockCompleteEntry {
|
||||
pruned: false,
|
||||
block: block.0.serialize().into(),
|
||||
txs: TransactionBlobs::Normal(
|
||||
block
|
||||
.1
|
||||
.iter()
|
||||
.map(Transaction::serialize)
|
||||
.map(Into::into)
|
||||
.collect(),
|
||||
),
|
||||
block_weight: 0,
|
||||
};
|
||||
|
||||
res.push(block_entry);
|
||||
}
|
||||
|
||||
Ok(PeerResponse::GetObjects(GetObjectsResponse {
|
||||
blocks: res,
|
||||
missed_ids: ByteArrayVec::from([]),
|
||||
current_blockchain_height: 0,
|
||||
}))
|
||||
}
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
.boxed()
|
||||
});
|
||||
|
||||
let info = PeerInformation {
|
||||
id: InternalPeerID::Unknown(rand::random()),
|
||||
handle: connection_handle,
|
||||
direction: ConnectionDirection::InBound,
|
||||
pruning_seed: PruningSeed::NotPruned,
|
||||
};
|
||||
|
||||
mock_client(info, connection_guard, request_handler)
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct SyncStateSvc<Z: NetworkZone>(Vec<InternalPeerID<Z::Addr>>);
|
||||
|
||||
impl Service<PeerSyncRequest<ClearNet>> for SyncStateSvc<ClearNet> {
|
||||
type Response = PeerSyncResponse<ClearNet>;
|
||||
type Error = tower::BoxError;
|
||||
type Future =
|
||||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, _: PeerSyncRequest<ClearNet>) -> Self::Future {
|
||||
let peers = self.0.clone();
|
||||
|
||||
async move { Ok(PeerSyncResponse::PeersToSyncFrom(peers)) }.boxed()
|
||||
}
|
||||
}
|
||||
|
||||
struct OurChainSvc {
|
||||
genesis: [u8; 32],
|
||||
}
|
||||
|
||||
impl Service<ChainSvcRequest> for OurChainSvc {
|
||||
type Response = ChainSvcResponse;
|
||||
type Error = tower::BoxError;
|
||||
type Future =
|
||||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: ChainSvcRequest) -> Self::Future {
|
||||
let genesis = self.genesis;
|
||||
|
||||
async move {
|
||||
Ok(match req {
|
||||
ChainSvcRequest::CompactHistory => ChainSvcResponse::CompactHistory {
|
||||
block_ids: vec![genesis],
|
||||
cumulative_difficulty: 1,
|
||||
},
|
||||
ChainSvcRequest::FindFirstUnknown(_) => ChainSvcResponse::FindFirstUnknown(1, 1),
|
||||
ChainSvcRequest::CumulativeDifficulty => ChainSvcResponse::CumulativeDifficulty(1),
|
||||
})
|
||||
}
|
||||
.boxed()
|
||||
}
|
||||
}
|
|
@ -151,6 +151,7 @@ pub enum BroadcastRequest<N: NetworkZone> {
|
|||
},
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct BroadcastSvc<N: NetworkZone> {
|
||||
new_block_watch: watch::Sender<NewBlockInfo>,
|
||||
tx_broadcast_channel_outbound: broadcast::Sender<BroadcastTxInfo<N>>,
|
||||
|
|
|
@ -12,13 +12,14 @@
|
|||
//!
|
||||
use std::sync::Arc;
|
||||
|
||||
use dashmap::{DashMap, DashSet};
|
||||
use dashmap::DashMap;
|
||||
use tokio::sync::mpsc;
|
||||
use tracing::{Instrument, Span};
|
||||
|
||||
use monero_p2p::{
|
||||
client::{Client, InternalPeerID},
|
||||
handles::ConnectionHandle,
|
||||
ConnectionDirection, NetworkZone,
|
||||
NetworkZone,
|
||||
};
|
||||
|
||||
pub(crate) mod disconnect_monitor;
|
||||
|
@ -32,12 +33,6 @@ pub use drop_guard_client::ClientPoolDropGuard;
|
|||
pub struct ClientPool<N: NetworkZone> {
|
||||
/// The connected [`Client`]s.
|
||||
clients: DashMap<InternalPeerID<N::Addr>, Client<N>>,
|
||||
/// A set of outbound clients, as these allow accesses/mutation from different threads,
|
||||
/// a peer ID in here does not mean the peer is necessarily in `clients` as it could have been removed
|
||||
/// by another thread. However, if the peer is in both here and `clients` it is definitely
|
||||
/// an outbound peer.
|
||||
outbound_clients: DashSet<InternalPeerID<N::Addr>>,
|
||||
|
||||
/// A channel to send new peer ids down to monitor for disconnect.
|
||||
new_connection_tx: mpsc::UnboundedSender<(ConnectionHandle, InternalPeerID<N::Addr>)>,
|
||||
}
|
||||
|
@ -49,11 +44,12 @@ impl<N: NetworkZone> ClientPool<N> {
|
|||
|
||||
let pool = Arc::new(ClientPool {
|
||||
clients: DashMap::new(),
|
||||
outbound_clients: DashSet::new(),
|
||||
new_connection_tx: tx,
|
||||
});
|
||||
|
||||
tokio::spawn(disconnect_monitor::disconnect_monitor(rx, pool.clone()));
|
||||
tokio::spawn(
|
||||
disconnect_monitor::disconnect_monitor(rx, pool.clone()).instrument(Span::current()),
|
||||
);
|
||||
|
||||
pool
|
||||
}
|
||||
|
@ -74,10 +70,6 @@ impl<N: NetworkZone> ClientPool<N> {
|
|||
return;
|
||||
}
|
||||
|
||||
if client.info.direction == ConnectionDirection::OutBound {
|
||||
self.outbound_clients.insert(id);
|
||||
}
|
||||
|
||||
let res = self.clients.insert(id, client);
|
||||
assert!(res.is_none());
|
||||
|
||||
|
@ -106,8 +98,6 @@ impl<N: NetworkZone> ClientPool<N> {
|
|||
///
|
||||
/// [`None`] is returned if the client did not exist in the pool.
|
||||
fn remove_client(&self, peer: &InternalPeerID<N::Addr>) -> Option<Client<N>> {
|
||||
self.outbound_clients.remove(peer);
|
||||
|
||||
self.clients.remove(peer).map(|(_, client)| client)
|
||||
}
|
||||
|
||||
|
@ -136,13 +126,16 @@ impl<N: NetworkZone> ClientPool<N> {
|
|||
pub fn borrow_clients<'a, 'b>(
|
||||
self: &'a Arc<Self>,
|
||||
peers: &'b [InternalPeerID<N::Addr>],
|
||||
) -> impl Iterator<Item = ClientPoolDropGuard<N>> + Captures<(&'a (), &'b ())> {
|
||||
) -> impl Iterator<Item = ClientPoolDropGuard<N>> + sealed::Captures<(&'a (), &'b ())> {
|
||||
peers.iter().filter_map(|peer| self.borrow_client(peer))
|
||||
}
|
||||
}
|
||||
|
||||
/// TODO: Remove me when 2024 Rust
|
||||
///
|
||||
/// https://rust-lang.github.io/rfcs/3498-lifetime-capture-rules-2024.html#the-captures-trick
|
||||
trait Captures<U> {}
|
||||
impl<T: ?Sized, U> Captures<U> for T {}
|
||||
mod sealed {
|
||||
/// TODO: Remove me when 2024 Rust
|
||||
///
|
||||
/// https://rust-lang.github.io/rfcs/3498-lifetime-capture-rules-2024.html#the-captures-trick
|
||||
pub trait Captures<U> {}
|
||||
|
||||
impl<T: ?Sized, U> Captures<U> for T {}
|
||||
}
|
||||
|
|
|
@ -24,6 +24,11 @@ pub async fn disconnect_monitor<N: NetworkZone>(
|
|||
mut new_connection_rx: mpsc::UnboundedReceiver<(ConnectionHandle, InternalPeerID<N::Addr>)>,
|
||||
client_pool: Arc<ClientPool<N>>,
|
||||
) {
|
||||
// We need to hold a weak reference otherwise the client pool and this would hold a reference to
|
||||
// each other causing the pool to be leaked.
|
||||
let weak_client_pool = Arc::downgrade(&client_pool);
|
||||
drop(client_pool);
|
||||
|
||||
tracing::info!("Starting peer disconnect monitor.");
|
||||
|
||||
let mut futs: FuturesUnordered<PeerDisconnectFut<N>> = FuturesUnordered::new();
|
||||
|
@ -39,7 +44,13 @@ pub async fn disconnect_monitor<N: NetworkZone>(
|
|||
}
|
||||
Some(peer_id) = futs.next() => {
|
||||
tracing::debug!("{peer_id} has disconnected, removing from client pool.");
|
||||
client_pool.remove_client(&peer_id);
|
||||
let Some(pool) = weak_client_pool.upgrade() else {
|
||||
tracing::info!("Peer disconnect monitor shutting down.");
|
||||
return;
|
||||
};
|
||||
|
||||
pool.remove_client(&peer_id);
|
||||
drop(pool);
|
||||
}
|
||||
else => {
|
||||
tracing::info!("Peer disconnect monitor shutting down.");
|
||||
|
|
|
@ -14,7 +14,7 @@ pub struct ClientPoolDropGuard<N: NetworkZone> {
|
|||
/// The [`Client`].
|
||||
///
|
||||
/// This is set to [`Some`] when this guard is created, then
|
||||
/// ### [`take`](Option::take)n and returned to the pool when dropped.
|
||||
/// [`take`](Option::take)n and returned to the pool when dropped.
|
||||
pub(super) client: Option<Client<N>>,
|
||||
}
|
||||
|
||||
|
@ -36,6 +36,10 @@ impl<N: NetworkZone> Drop for ClientPoolDropGuard<N> {
|
|||
fn drop(&mut self) {
|
||||
let client = self.client.take().unwrap();
|
||||
|
||||
if !client.info.handle.is_closed() {
|
||||
tracing::warn!("peer dropped");
|
||||
}
|
||||
|
||||
self.pool.add_client(client);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,12 +1,52 @@
|
|||
use cuprate_helper::network::Network;
|
||||
use monero_address_book::AddressBookConfig;
|
||||
use monero_p2p::NetworkZone;
|
||||
use monero_wire::{common::PeerSupportFlags, BasicNodeData};
|
||||
|
||||
/// P2P config.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct P2PConfig {
|
||||
pub struct P2PConfig<N: NetworkZone> {
|
||||
/// The [`Network`] we should connect to.
|
||||
pub network: Network,
|
||||
|
||||
/// The number of outbound connections to make and try keep.
|
||||
pub outbound_connections: usize,
|
||||
/// The amount of extra connections we can make if we are under load from the rest of Cuprate.
|
||||
pub extra_outbound_connections: usize,
|
||||
/// The maximum amount of inbound connections, only relevant if [`P2PConfig::server_config`] is set to [`Some`]
|
||||
pub max_inbound_connections: usize,
|
||||
/// The percent of outbound peers that should be gray aka never connected to before.
|
||||
///
|
||||
/// Only values 0..=1 are valid.
|
||||
pub gray_peers_percent: f64,
|
||||
/// The inbound server configuration,
|
||||
///
|
||||
/// If this is [`None`] no inbound connections will be accepted.
|
||||
pub server_config: Option<N::ServerCfg>,
|
||||
|
||||
/// The port to listen on for inbound connections, only relevant if [`P2PConfig::server_config`] is set to [`Some`].
|
||||
pub p2p_port: u16,
|
||||
/// The public RPC port to tell peers about so wallets can use our node. `0` if we do not have a public RPC port.
|
||||
pub rpc_port: u16,
|
||||
|
||||
/// The [`AddressBookConfig`].
|
||||
pub address_book_config: AddressBookConfig,
|
||||
}
|
||||
|
||||
impl<N: NetworkZone> P2PConfig<N> {
|
||||
/// Returns the [`BasicNodeData`] for this [`P2PConfig`].
|
||||
///
|
||||
/// [`BasicNodeData::peer_id`] is set to a random u64, so this function should only be called once
|
||||
/// per [`NetworkZone`] per run.
|
||||
pub(crate) fn basic_node_data(&self) -> BasicNodeData {
|
||||
BasicNodeData {
|
||||
my_port: u32::from(self.p2p_port),
|
||||
network_id: self.network.network_id(),
|
||||
peer_id: rand::random(),
|
||||
support_flags: PeerSupportFlags::FLUFFY_BLOCKS,
|
||||
rpc_port: self.rpc_port,
|
||||
// We do not (and probably will never) support paying for RPC with hashes.
|
||||
rpc_credits_per_hash: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -12,7 +12,7 @@ use tokio::{
|
|||
time::{sleep, timeout},
|
||||
};
|
||||
use tower::{Service, ServiceExt};
|
||||
use tracing::instrument;
|
||||
use tracing::{instrument, Instrument, Span};
|
||||
|
||||
use monero_p2p::{
|
||||
client::{Client, ConnectRequest, HandshakeError},
|
||||
|
@ -60,7 +60,7 @@ pub struct OutboundConnectionKeeper<N: NetworkZone, A, C> {
|
|||
/// we add a permit to the semaphore and keep track here, upto a value in config.
|
||||
pub extra_peers: usize,
|
||||
/// The p2p config.
|
||||
pub config: P2PConfig,
|
||||
pub config: P2PConfig<N>,
|
||||
/// The [`Bernoulli`] distribution, when sampled will return true if we should connect to a gray peer or
|
||||
/// false if we should connect to a white peer.
|
||||
///
|
||||
|
@ -76,7 +76,7 @@ where
|
|||
C::Future: Send + 'static,
|
||||
{
|
||||
pub fn new(
|
||||
config: P2PConfig,
|
||||
config: P2PConfig<N>,
|
||||
client_pool: Arc<ClientPool<N>>,
|
||||
make_connection_rx: mpsc::Receiver<MakeConnectionRequest>,
|
||||
address_book_svc: A,
|
||||
|
@ -149,7 +149,7 @@ where
|
|||
}
|
||||
|
||||
/// Connects to a given outbound peer.
|
||||
#[instrument(level = "info", skip(self, permit), fields(%addr))]
|
||||
#[instrument(level = "info", skip_all)]
|
||||
async fn connect_to_outbound_peer(&mut self, permit: OwnedSemaphorePermit, addr: N::Addr) {
|
||||
let client_pool = self.client_pool.clone();
|
||||
let connection_fut = self
|
||||
|
@ -159,11 +159,14 @@ where
|
|||
.expect("Connector had an error in `poll_ready`")
|
||||
.call(ConnectRequest { addr, permit });
|
||||
|
||||
tokio::spawn(async move {
|
||||
if let Ok(Ok(peer)) = timeout(HANDSHAKE_TIMEOUT, connection_fut).await {
|
||||
client_pool.add_new_client(peer);
|
||||
tokio::spawn(
|
||||
async move {
|
||||
if let Ok(Ok(peer)) = timeout(HANDSHAKE_TIMEOUT, connection_fut).await {
|
||||
client_pool.add_new_client(peer);
|
||||
}
|
||||
}
|
||||
});
|
||||
.instrument(Span::current()),
|
||||
);
|
||||
}
|
||||
|
||||
/// Handles a request from the peer set for more peers.
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
use std::time::Duration;
|
||||
|
||||
/// The timeout we set on handshakes.
|
||||
pub(crate) const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(30);
|
||||
pub(crate) const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(20);
|
||||
|
||||
/// The maximum amount of connections to make to seed nodes for when we need peers.
|
||||
pub(crate) const MAX_SEED_CONNECTIONS: usize = 3;
|
||||
|
@ -12,6 +12,12 @@ pub(crate) const OUTBOUND_CONNECTION_ATTEMPT_TIMEOUT: Duration = Duration::from_
|
|||
/// The durations of a short ban.
|
||||
pub(crate) const SHORT_BAN: Duration = Duration::from_secs(60 * 10);
|
||||
|
||||
/// The durations of a medium ban.
|
||||
pub(crate) const MEDIUM_BAN: Duration = Duration::from_secs(60 * 60 * 24);
|
||||
|
||||
/// The durations of a long ban.
|
||||
pub(crate) const LONG_BAN: Duration = Duration::from_secs(60 * 60 * 24 * 7);
|
||||
|
||||
/// The default amount of time between inbound diffusion flushes.
|
||||
pub(crate) const DIFFUSION_FLUSH_AVERAGE_SECONDS_INBOUND: Duration = Duration::from_secs(5);
|
||||
|
||||
|
@ -28,6 +34,41 @@ pub(crate) const SOFT_TX_MESSAGE_SIZE_SIZE_LIMIT: usize = 10 * 1024 * 1024;
|
|||
/// 50 more transactions after it are added to the queue.
|
||||
pub(crate) const MAX_TXS_IN_BROADCAST_CHANNEL: usize = 50;
|
||||
|
||||
/// The time to sleep after an inbound connection comes in.
|
||||
///
|
||||
/// This is a safety measure to prevent Cuprate from getting spammed with a load of inbound connections.
|
||||
/// TODO: it might be a good idea to make this configurable.
|
||||
pub(crate) const INBOUND_CONNECTION_COOL_DOWN: Duration = Duration::from_millis(500);
|
||||
|
||||
/// The initial amount of chain requests to send to find the best chain to sync from.
|
||||
pub(crate) const INITIAL_CHAIN_REQUESTS_TO_SEND: usize = 3;
|
||||
|
||||
/// The enforced maximum amount of blocks to request in a batch.
|
||||
///
|
||||
/// Requesting more than this will cause the peer to disconnect and potentially lead to bans.
|
||||
pub(crate) const MAX_BLOCK_BATCH_LEN: usize = 100;
|
||||
|
||||
/// The timeout that the block downloader will use for requests.
|
||||
pub(crate) const BLOCK_DOWNLOADER_REQUEST_TIMEOUT: Duration = Duration::from_secs(30);
|
||||
|
||||
/// The maximum size of a transaction, a sanity limit that all transactions across all hard-forks must
|
||||
/// be less than.
|
||||
///
|
||||
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions.html#transaction-size>
|
||||
pub(crate) const MAX_TRANSACTION_BLOB_SIZE: usize = 1_000_000;
|
||||
|
||||
/// The maximum amount of block IDS allowed in a chain entry response.
|
||||
///
|
||||
/// ref: <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/cryptonote_config.h#L97>
|
||||
// TODO: link to the protocol book when this section is added.
|
||||
pub(crate) const MAX_BLOCKS_IDS_IN_CHAIN_ENTRY: usize = 25_000;
|
||||
|
||||
/// The amount of failures downloading a specific batch before we stop attempting to download it.
|
||||
pub(crate) const MAX_DOWNLOAD_FAILURES: usize = 5;
|
||||
|
||||
/// The amount of empty chain entries to receive before we assume we have found the top of the chain.
|
||||
pub(crate) const EMPTY_CHAIN_ENTRIES_BEFORE_TOP_ASSUMED: usize = 5;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
|
113
p2p/cuprate-p2p/src/inbound_server.rs
Normal file
113
p2p/cuprate-p2p/src/inbound_server.rs
Normal file
|
@ -0,0 +1,113 @@
|
|||
//! # Inbound Server
|
||||
//!
|
||||
//! This module contains the inbound connection server, which listens for inbound connections, gives
|
||||
//! them to the handshaker service and then adds them to the client pool.
|
||||
use std::{pin::pin, sync::Arc};
|
||||
|
||||
use futures::StreamExt;
|
||||
use tokio::{
|
||||
sync::Semaphore,
|
||||
time::{sleep, timeout},
|
||||
};
|
||||
use tower::{Service, ServiceExt};
|
||||
use tracing::{instrument, Instrument, Span};
|
||||
|
||||
use monero_p2p::{
|
||||
client::{Client, DoHandshakeRequest, HandshakeError, InternalPeerID},
|
||||
services::{AddressBookRequest, AddressBookResponse},
|
||||
AddressBook, ConnectionDirection, NetworkZone,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
client_pool::ClientPool,
|
||||
constants::{HANDSHAKE_TIMEOUT, INBOUND_CONNECTION_COOL_DOWN},
|
||||
P2PConfig,
|
||||
};
|
||||
|
||||
/// Starts the inbound server.
///
/// Listens for inbound connections on this [`NetworkZone`], rejects banned
/// peers, performs the P2P handshake, and adds successful peers to the
/// [`ClientPool`].
///
/// Returns `Ok(())` immediately (without listening) if the config has no
/// `server_config`.
///
/// # Errors
/// Returns an error if the address book or handshaker services fail, or if
/// the listener itself cannot be started.
#[instrument(level = "warn", skip_all)]
pub async fn inbound_server<N, HS, A>(
    client_pool: Arc<ClientPool<N>>,
    mut handshaker: HS,
    mut address_book: A,
    config: P2PConfig<N>,
) -> Result<(), tower::BoxError>
where
    N: NetworkZone,
    HS: Service<DoHandshakeRequest<N>, Response = Client<N>, Error = HandshakeError>
        + Send
        + 'static,
    HS::Future: Send + 'static,
    A: AddressBook<N>,
{
    // No server config means inbound connections are disabled for this zone.
    let Some(server_config) = config.server_config else {
        tracing::warn!("No inbound server config provided, not listening for inbound connections.");
        return Ok(());
    };

    tracing::info!("Starting inbound connection server");

    let listener = N::incoming_connection_listener(server_config, config.p2p_port)
        .await
        .inspect_err(|e| tracing::warn!("Failed to start inbound server: {e}"))?;

    let mut listener = pin!(listener);

    // One permit per allowed inbound connection; a permit is passed into each
    // handshake so the slot is held for the connection's lifetime.
    let semaphore = Arc::new(Semaphore::new(config.max_inbound_connections));

    while let Some(connection) = listener.next().await {
        // Failed accepts are silently skipped.
        let Ok((addr, peer_stream, peer_sink)) = connection else {
            continue;
        };

        // If we know the peer's address, drop the connection when it is banned.
        if let Some(addr) = &addr {
            let AddressBookResponse::IsPeerBanned(banned) = address_book
                .ready()
                .await?
                .call(AddressBookRequest::IsPeerBanned(*addr))
                .await?
            else {
                panic!("Address book returned incorrect response!");
            };

            if banned {
                continue;
            }
        }

        // Address-less connections (e.g. over anonymity networks) get a random
        // internal ID instead.
        let addr = match addr {
            Some(addr) => InternalPeerID::KnownAddr(addr),
            None => InternalPeerID::Unknown(rand::random()),
        };

        if let Ok(permit) = semaphore.clone().try_acquire_owned() {
            tracing::debug!("Permit free for incoming connection, attempting handshake.");

            let fut = handshaker.ready().await?.call(DoHandshakeRequest {
                addr,
                peer_stream,
                peer_sink,
                direction: ConnectionDirection::InBound,
                permit,
            });

            let cloned_pool = client_pool.clone();

            // Run the handshake in the background so the accept loop keeps
            // going; drop the peer if it doesn't complete within the timeout.
            tokio::spawn(
                async move {
                    if let Ok(Ok(peer)) = timeout(HANDSHAKE_TIMEOUT, fut).await {
                        cloned_pool.add_new_client(peer);
                    }
                }
                .instrument(Span::current()),
            );
        } else {
            // At the inbound connection limit: the connection is dropped here.
            tracing::debug!("No permit free for incoming connection.");
            // TODO: listen for if the peer is just trying to ping us to see if we are reachable.
        }

        // Rate-limit how fast we accept new inbound connections.
        sleep(INBOUND_CONNECTION_COOL_DOWN).await;
    }

    Ok(())
}
|
|
@ -1,17 +1,213 @@
|
|||
//! Cuprate's P2P Crate.
|
||||
//!
|
||||
//! This crate contains a [`ClientPool`](client_pool::ClientPool) which holds connected peers on a single [`NetworkZone`](monero_p2p::NetworkZone).
|
||||
//!
|
||||
//! This crate also contains the different routing methods that control how messages should be sent, i.e. broadcast to all,
|
||||
//! or send to a single peer.
|
||||
//!
|
||||
#![allow(dead_code)]
|
||||
//! This crate contains a [`NetworkInterface`] which allows interacting with the Monero P2P network on
|
||||
//! a certain [`NetworkZone`]
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_buffer::BufferStream;
|
||||
use futures::FutureExt;
|
||||
use tokio::{
|
||||
sync::{mpsc, watch},
|
||||
task::JoinSet,
|
||||
};
|
||||
use tokio_stream::wrappers::WatchStream;
|
||||
use tower::{buffer::Buffer, util::BoxCloneService, Service, ServiceExt};
|
||||
use tracing::{instrument, Instrument, Span};
|
||||
|
||||
use monero_p2p::{
|
||||
client::Connector,
|
||||
client::InternalPeerID,
|
||||
services::{AddressBookRequest, AddressBookResponse},
|
||||
CoreSyncSvc, NetworkZone, PeerRequestHandler,
|
||||
};
|
||||
|
||||
pub mod block_downloader;
|
||||
mod broadcast;
|
||||
pub mod client_pool;
|
||||
mod client_pool;
|
||||
pub mod config;
|
||||
pub mod connection_maintainer;
|
||||
mod constants;
|
||||
mod inbound_server;
|
||||
mod sync_states;
|
||||
|
||||
use block_downloader::{BlockBatch, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse};
|
||||
pub use broadcast::{BroadcastRequest, BroadcastSvc};
|
||||
use client_pool::ClientPoolDropGuard;
|
||||
pub use config::P2PConfig;
|
||||
use connection_maintainer::MakeConnectionRequest;
|
||||
use monero_p2p::services::PeerSyncRequest;
|
||||
|
||||
/// Initializes the P2P [`NetworkInterface`] for a specific [`NetworkZone`].
///
/// This function starts all the tasks to maintain/accept/make connections.
///
/// # Usage
/// You must provide:
/// - A peer request handler, which is given to each connection
/// - A core sync service, which keeps track of the sync state of our node
///
/// # Errors
/// Returns an error if the address book fails to initialize.
#[instrument(level = "debug", name = "net", skip_all, fields(zone = N::NAME))]
pub async fn initialize_network<N, R, CS>(
    peer_req_handler: R,
    core_sync_svc: CS,
    config: P2PConfig<N>,
) -> Result<NetworkInterface<N>, tower::BoxError>
where
    N: NetworkZone,
    R: PeerRequestHandler + Clone,
    CS: CoreSyncSvc + Clone,
{
    let address_book =
        monero_address_book::init_address_book(config.address_book_config.clone()).await?;
    // Buffer sized for every possible connection (inbound + outbound) so a
    // connection never blocks waiting for service capacity.
    let address_book = Buffer::new(
        address_book,
        config.max_inbound_connections + config.outbound_connections,
    );

    let (sync_states_svc, top_block_watch) = sync_states::PeerSyncSvc::new();
    let sync_states_svc = Buffer::new(
        sync_states_svc,
        config.max_inbound_connections + config.outbound_connections,
    );

    // Use the default config. Changing the defaults affects tx fluff times, which could affect D++ so for now don't allow changing
    // this.
    let (broadcast_svc, outbound_mkr, inbound_mkr) =
        broadcast::init_broadcast_channels(broadcast::BroadcastConfig::default());

    let mut basic_node_data = config.basic_node_data();

    // Zones that don't check node IDs get a fixed dummy peer ID.
    if !N::CHECK_NODE_ID {
        basic_node_data.peer_id = 1;
    }

    let outbound_handshaker = monero_p2p::client::HandShaker::new(
        address_book.clone(),
        sync_states_svc.clone(),
        core_sync_svc.clone(),
        peer_req_handler.clone(),
        outbound_mkr,
        basic_node_data.clone(),
    );

    let inbound_handshaker = monero_p2p::client::HandShaker::new(
        address_book.clone(),
        sync_states_svc.clone(),
        core_sync_svc.clone(),
        peer_req_handler,
        inbound_mkr,
        basic_node_data,
    );

    let client_pool = client_pool::ClientPool::new();

    let (make_connection_tx, make_connection_rx) = mpsc::channel(3);

    let outbound_connector = Connector::new(outbound_handshaker);
    let outbound_connection_maintainer = connection_maintainer::OutboundConnectionKeeper::new(
        config.clone(),
        client_pool.clone(),
        make_connection_rx,
        address_book.clone(),
        outbound_connector,
    );

    // The background tasks live in a `JoinSet` held by the returned interface;
    // dropping the (last clone of the) interface aborts them.
    let mut background_tasks = JoinSet::new();

    background_tasks.spawn(
        outbound_connection_maintainer
            .run()
            .instrument(Span::current()),
    );
    background_tasks.spawn(
        inbound_server::inbound_server(
            client_pool.clone(),
            inbound_handshaker,
            address_book.clone(),
            config,
        )
        .map(|res| {
            if let Err(e) = res {
                tracing::error!("Error in inbound connection listener: {e}")
            }

            tracing::info!("Inbound connection listener shutdown")
        })
        .instrument(Span::current()),
    );

    Ok(NetworkInterface {
        pool: client_pool,
        broadcast_svc,
        top_block_watch,
        make_connection_tx,
        sync_states_svc,
        address_book: address_book.boxed_clone(),
        _background_tasks: Arc::new(background_tasks),
    })
}
|
||||
|
||||
/// The interface to Monero's P2P network on a certain [`NetworkZone`].
///
/// Clones share the same background tasks and client pool via [`Arc`].
#[derive(Clone)]
pub struct NetworkInterface<N: NetworkZone> {
    /// A pool of free connected peers.
    pool: Arc<client_pool::ClientPool<N>>,
    /// A [`Service`] that allows broadcasting to all connected peers.
    broadcast_svc: BroadcastSvc<N>,
    /// A [`watch`] channel that contains the highest seen cumulative difficulty and other info
    /// on that claimed chain.
    top_block_watch: watch::Receiver<sync_states::NewSyncInfo>,
    /// A channel to request extra connections.
    #[allow(dead_code)] // will be used eventually
    make_connection_tx: mpsc::Sender<MakeConnectionRequest>,
    /// The address book service.
    address_book: BoxCloneService<AddressBookRequest<N>, AddressBookResponse<N>, tower::BoxError>,
    /// The peers sync states service.
    sync_states_svc: Buffer<sync_states::PeerSyncSvc<N>, PeerSyncRequest<N>>,
    /// Background tasks that will be aborted when this interface is dropped.
    ///
    /// A [`JoinSet`] aborts its tasks on drop; wrapping it in an [`Arc`] means
    /// they are only aborted once the *last* clone of this interface drops.
    _background_tasks: Arc<JoinSet<()>>,
}
|
||||
|
||||
impl<N: NetworkZone> NetworkInterface<N> {
    /// Returns a service which allows broadcasting messages to all the connected peers in a specific [`NetworkZone`].
    pub fn broadcast_svc(&self) -> BroadcastSvc<N> {
        self.broadcast_svc.clone()
    }

    /// Starts the block downloader and returns a stream that will yield sequentially downloaded blocks.
    ///
    /// `our_chain_service` must answer [`ChainSvcRequest`]s about our own chain.
    pub fn block_downloader<C>(
        &self,
        our_chain_service: C,
        config: BlockDownloaderConfig,
    ) -> BufferStream<BlockBatch>
    where
        C: Service<ChainSvcRequest, Response = ChainSvcResponse, Error = tower::BoxError>
            + Send
            + 'static,
        C::Future: Send + 'static,
    {
        block_downloader::download_blocks(
            self.pool.clone(),
            self.sync_states_svc.clone(),
            our_chain_service,
            config,
        )
    }

    /// Returns a stream which yields the highest seen sync state from a connected peer.
    ///
    /// Note: built with [`WatchStream::from_changes`], so only *changes* after
    /// subscription are yielded, not the value current at call time.
    pub fn top_sync_stream(&self) -> WatchStream<sync_states::NewSyncInfo> {
        WatchStream::from_changes(self.top_block_watch.clone())
    }

    /// Returns the address book service.
    pub fn address_book(
        &self,
    ) -> BoxCloneService<AddressBookRequest<N>, AddressBookResponse<N>, tower::BoxError> {
        self.address_book.clone()
    }

    /// Pulls a client from the client pool, returning it in a guard that will return it there when it's
    /// dropped.
    ///
    /// Presumably returns [`None`] when the peer is not currently in the pool —
    /// NOTE(review): confirm against `ClientPool::borrow_client`.
    pub fn borrow_client(&self, peer: &InternalPeerID<N::Addr>) -> Option<ClientPoolDropGuard<N>> {
        self.pool.borrow_client(peer)
    }
}
|
||||
|
|
|
@ -25,14 +25,14 @@ use monero_wire::CoreSyncData;
|
|||
use crate::{client_pool::disconnect_monitor::PeerDisconnectFut, constants::SHORT_BAN};
|
||||
|
||||
/// The highest claimed sync info from our connected peers.
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
pub struct NewSyncInfo {
|
||||
/// The peers chain height.
|
||||
chain_height: u64,
|
||||
pub chain_height: u64,
|
||||
/// The peers top block's hash.
|
||||
top_hash: [u8; 32],
|
||||
pub top_hash: [u8; 32],
|
||||
/// The peers cumulative difficulty.
|
||||
cumulative_difficulty: u128,
|
||||
pub cumulative_difficulty: u128,
|
||||
}
|
||||
|
||||
/// A service that keeps track of our peers blockchains.
|
||||
|
|
|
@ -10,7 +10,7 @@ default = ["borsh"]
|
|||
borsh = ["dep:borsh", "monero-pruning/borsh"]
|
||||
|
||||
[dependencies]
|
||||
cuprate-helper = { path = "../../helper" }
|
||||
cuprate-helper = { path = "../../helper", features = ["asynch"], default-features = false }
|
||||
monero-wire = { path = "../../net/monero-wire", features = ["tracing"] }
|
||||
monero-pruning = { path = "../../pruning" }
|
||||
|
||||
|
|
|
@ -10,7 +10,8 @@ use tokio::{
|
|||
task::JoinHandle,
|
||||
};
|
||||
use tokio_util::sync::PollSemaphore;
|
||||
use tower::Service;
|
||||
use tower::{Service, ServiceExt};
|
||||
use tracing::Instrument;
|
||||
|
||||
use cuprate_helper::asynch::InfallibleOneshotReceiver;
|
||||
|
||||
|
@ -24,18 +25,19 @@ mod connector;
|
|||
pub mod handshaker;
|
||||
mod timeout_monitor;
|
||||
|
||||
use crate::handles::ConnectionGuard;
|
||||
pub use connector::{ConnectRequest, Connector};
|
||||
pub use handshaker::{DoHandshakeRequest, HandShaker, HandshakeError};
|
||||
use monero_pruning::PruningSeed;
|
||||
|
||||
/// An internal identifier for a given peer, will be their address if known
|
||||
/// or a random u64 if not.
|
||||
/// or a random u128 if not.
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
|
||||
pub enum InternalPeerID<A> {
|
||||
/// A known address.
|
||||
KnownAddr(A),
|
||||
/// An unknown address (probably an inbound anonymity network connection).
|
||||
Unknown(u64),
|
||||
Unknown(u128),
|
||||
}
|
||||
|
||||
impl<A: Display> Display for InternalPeerID<A> {
|
||||
|
@ -158,11 +160,68 @@ impl<Z: NetworkZone> Service<PeerRequest> for Client<Z> {
|
|||
permit: Some(permit),
|
||||
};
|
||||
|
||||
self.connection_tx
|
||||
.try_send(req)
|
||||
.map_err(|_| ())
|
||||
.expect("poll_ready should have been called");
|
||||
if let Err(e) = self.connection_tx.try_send(req) {
|
||||
use mpsc::error::TrySendError;
|
||||
|
||||
match e {
|
||||
TrySendError::Closed(req) | TrySendError::Full(req) => {
|
||||
self.set_err(PeerError::ClientChannelClosed);
|
||||
|
||||
let _ = req
|
||||
.response_channel
|
||||
.send(Err(PeerError::ClientChannelClosed.into()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rx.into()
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a mock [`Client`] for testing purposes.
///
/// `request_handler` will be used to handle requests sent to the [`Client`]
pub fn mock_client<Z: NetworkZone, S>(
    info: PeerInformation<Z::Addr>,
    connection_guard: ConnectionGuard,
    mut request_handler: S,
) -> Client<Z>
where
    S: crate::PeerRequestHandler,
{
    // Capacity of 1: the mock services one in-flight request at a time.
    let (tx, mut rx) = mpsc::channel(1);

    let task_span = tracing::error_span!("mock_connection", addr = %info.id);

    // The mock "connection" task: holds the connection guard and answers
    // every incoming request via `request_handler` until the channel closes.
    let task_handle = tokio::spawn(
        async move {
            let _guard = connection_guard;
            loop {
                let Some(req): Option<connection::ConnectionTaskRequest> = rx.recv().await else {
                    tracing::debug!("Channel closed, closing mock connection");
                    return;
                };

                tracing::debug!("Received new request: {:?}", req.request.id());
                // `unwrap`: handler failures are bugs in the test setup itself.
                let res = request_handler
                    .ready()
                    .await
                    .unwrap()
                    .call(req.request)
                    .await
                    .unwrap();

                tracing::debug!("Sending back response");

                // Ignore send errors: the requester may have dropped the receiver.
                let _ = req.response_channel.send(Ok(res));
            }
        }
        .instrument(task_span),
    );

    // A timeout task that never completes, so the mock is never torn down by it.
    let timeout_task = tokio::spawn(futures::future::pending());
    // One permit: matches the channel capacity of one in-flight request.
    let semaphore = Arc::new(Semaphore::new(1));
    let error_slot = SharedError::new();

    Client::new(info, tx, task_handle, timeout_task, semaphore, error_slot)
}
|
||||
|
|
|
@ -130,11 +130,11 @@ pub trait NetworkZone: Clone + Copy + Send + 'static {
|
|||
/// The sink (outgoing data) type for this network.
|
||||
type Sink: Sink<LevinMessage<Message>, Error = BucketError> + Unpin + Send + 'static;
|
||||
/// The inbound connection listener for this network.
|
||||
type Listener: Stream<
|
||||
Item = Result<(Option<Self::Addr>, Self::Stream, Self::Sink), std::io::Error>,
|
||||
>;
|
||||
type Listener: Stream<Item = Result<(Option<Self::Addr>, Self::Stream, Self::Sink), std::io::Error>>
|
||||
+ Send
|
||||
+ 'static;
|
||||
/// Config used to start a server which listens for incoming connections.
|
||||
type ServerCfg;
|
||||
type ServerCfg: Clone + Debug + Send + 'static;
|
||||
|
||||
async fn connect_to_peer(
|
||||
addr: Self::Addr,
|
||||
|
@ -142,6 +142,7 @@ pub trait NetworkZone: Clone + Copy + Send + 'static {
|
|||
|
||||
async fn incoming_connection_listener(
|
||||
config: Self::ServerCfg,
|
||||
port: u16,
|
||||
) -> Result<Self::Listener, std::io::Error>;
|
||||
}
|
||||
|
||||
|
|
|
@ -37,8 +37,9 @@ impl NetZoneAddress for SocketAddr {
|
|||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ClearNetServerCfg {
|
||||
pub addr: SocketAddr,
|
||||
pub ip: IpAddr,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
|
@ -80,8 +81,9 @@ impl NetworkZone for ClearNet {
|
|||
|
||||
async fn incoming_connection_listener(
|
||||
config: Self::ServerCfg,
|
||||
port: u16,
|
||||
) -> Result<Self::Listener, std::io::Error> {
|
||||
let listener = TcpListener::bind(config.addr).await?;
|
||||
let listener = TcpListener::bind(SocketAddr::new(config.ip, port)).await?;
|
||||
Ok(InBoundStream { listener })
|
||||
}
|
||||
}
|
||||
|
|
|
@ -119,10 +119,14 @@ pub enum AddressBookRequest<Z: NetworkZone> {
|
|||
TakeRandomPeer { height: Option<u64> },
|
||||
/// Gets the specified number of white peers, or less if we don't have enough.
|
||||
GetWhitePeers(usize),
|
||||
/// Checks if the given peer is banned.
|
||||
IsPeerBanned(Z::Addr),
|
||||
}
|
||||
|
||||
pub enum AddressBookResponse<Z: NetworkZone> {
|
||||
Ok,
|
||||
Peer(ZoneSpecificPeerListEntryBase<Z::Addr>),
|
||||
Peers(Vec<ZoneSpecificPeerListEntryBase<Z::Addr>>),
|
||||
/// Contains `true` if the peer is banned.
|
||||
IsPeerBanned(bool),
|
||||
}
|
||||
|
|
|
@ -71,8 +71,9 @@ impl NetworkZone for FragNet {
|
|||
|
||||
async fn incoming_connection_listener(
|
||||
config: Self::ServerCfg,
|
||||
port: u16,
|
||||
) -> Result<Self::Listener, std::io::Error> {
|
||||
let listener = TcpListener::bind(config.addr).await?;
|
||||
let listener = TcpListener::bind(SocketAddr::new(config.ip, port)).await?;
|
||||
Ok(InBoundStream { listener })
|
||||
}
|
||||
}
|
||||
|
@ -194,9 +195,9 @@ async fn fragmented_handshake_monerod_to_cuprate() {
|
|||
our_basic_node_data,
|
||||
);
|
||||
|
||||
let addr = "127.0.0.1:18081".parse().unwrap();
|
||||
let ip = "127.0.0.1".parse().unwrap();
|
||||
|
||||
let mut listener = FragNet::incoming_connection_listener(ClearNetServerCfg { addr })
|
||||
let mut listener = FragNet::incoming_connection_listener(ClearNetServerCfg { ip }, 18081)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
|
|
|
@ -174,9 +174,9 @@ async fn handshake_monerod_to_cuprate() {
|
|||
our_basic_node_data,
|
||||
);
|
||||
|
||||
let addr = "127.0.0.1:18081".parse().unwrap();
|
||||
let ip = "127.0.0.1".parse().unwrap();
|
||||
|
||||
let mut listener = ClearNet::incoming_connection_listener(ClearNetServerCfg { addr })
|
||||
let mut listener = ClearNet::incoming_connection_listener(ClearNetServerCfg { ip }, 18081)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
|
|
|
@ -1 +1,4 @@
|
|||
# TODO
|
||||
# RPC
|
||||
This directory contains Monero RPC types and Cuprate RPC's libraries.
|
||||
|
||||
<!-- TODO: link to architecture book section. -->
|
||||
|
|
15
rpc/cuprate-rpc-interface/Cargo.toml
Normal file
15
rpc/cuprate-rpc-interface/Cargo.toml
Normal file
|
@ -0,0 +1,15 @@
|
|||
[package]
|
||||
name = "cuprate-rpc-interface"
|
||||
version = "0.0.0"
|
||||
edition = "2021"
|
||||
description = "Cuprate's RPC interface library"
|
||||
license = "MIT"
|
||||
authors = ["hinto-janai"]
|
||||
repository = "https://github.com/Cuprate/cuprate/tree/main/rpc/cuprate-rpc-interface"
|
||||
keywords = ["cuprate", "rpc", "interface"]
|
||||
|
||||
[features]
|
||||
|
||||
[dependencies]
|
||||
|
||||
[dev-dependencies]
|
1
rpc/cuprate-rpc-interface/src/lib.rs
Normal file
1
rpc/cuprate-rpc-interface/src/lib.rs
Normal file
|
@ -0,0 +1 @@
|
|||
|
20
rpc/json-rpc/Cargo.toml
Normal file
20
rpc/json-rpc/Cargo.toml
Normal file
|
@ -0,0 +1,20 @@
|
|||
[package]
|
||||
name = "json-rpc"
|
||||
version = "0.0.0"
|
||||
edition = "2021"
|
||||
description = "JSON-RPC 2.0 implementation"
|
||||
license = "MIT"
|
||||
authors = ["hinto-janai"]
|
||||
repository = "https://github.com/Cuprate/cuprate/tree/main/rpc/json-rpc"
|
||||
keywords = ["json", "rpc"]
|
||||
categories = ["encoding"]
|
||||
|
||||
[features]
|
||||
|
||||
[dependencies]
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json = { workspace = true, features = ["std"] }
|
||||
thiserror = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
pretty_assertions = { workspace = true }
|
197
rpc/json-rpc/README.md
Normal file
197
rpc/json-rpc/README.md
Normal file
|
@ -0,0 +1,197 @@
|
|||
# `json-rpc`
|
||||
JSON-RPC 2.0 types and (de)serialization.
|
||||
|
||||
## What
|
||||
This crate implements the [JSON-RPC 2.0 specification](https://www.jsonrpc.org/specification)
|
||||
for usage in [Cuprate](https://github.com/Cuprate/cuprate).
|
||||
|
||||
It contains slight modifications catered towards Cuprate and isn't
|
||||
necessarily a general purpose implementation of the specification
|
||||
(see below).
|
||||
|
||||
This crate expects you to read the brief JSON-RPC 2.0 specification for context.
|
||||
|
||||
## Batching
|
||||
This crate does not have any types for [JSON-RPC 2.0 batching](https://www.jsonrpc.org/specification#batch).
|
||||
|
||||
This is because `monerod` does not support this,
|
||||
as such, neither does Cuprate.
|
||||
|
||||
TODO: citation needed on `monerod` not supporting batching.
|
||||
|
||||
## Request changes
|
||||
[JSON-RPC 2.0's `Request` object](https://www.jsonrpc.org/specification#request_object) usually contains these 2 fields:
|
||||
- `method`
|
||||
- `params`
|
||||
|
||||
This crate replaces those two with a `body` field that is `#[serde(flatten)]`ed,
|
||||
and assumes the type within that `body` field is tagged properly, for example:
|
||||
|
||||
```rust
|
||||
# use pretty_assertions::assert_eq;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use json_rpc::{Id, Request};
|
||||
|
||||
// Parameter type.
|
||||
#[derive(Deserialize, Serialize)]
|
||||
struct GetBlock {
|
||||
height: u64,
|
||||
}
|
||||
|
||||
// Method enum containing all enums.
|
||||
// All methods are tagged as `method`
|
||||
// and their inner parameter types are
|
||||
// tagged with `params` (in snake case).
|
||||
#[derive(Deserialize, Serialize)]
|
||||
#[serde(tag = "method", content = "params")] // INVARIANT: these tags are needed
|
||||
#[serde(rename_all = "snake_case")] // for proper (de)serialization.
|
||||
enum Methods {
|
||||
GetBlock(GetBlock),
|
||||
/* other methods */
|
||||
}
|
||||
|
||||
// Create the request object.
|
||||
let request = Request::new_with_id(
|
||||
Id::Str("hello".into()),
|
||||
Methods::GetBlock(GetBlock { height: 123 }),
|
||||
);
|
||||
|
||||
// Serializing properly shows the `method/params` fields
|
||||
// even though `Request` doesn't contain those fields.
|
||||
let json = serde_json::to_string_pretty(&request).unwrap();
|
||||
let expected_json =
|
||||
r#"{
|
||||
"jsonrpc": "2.0",
|
||||
"id": "hello",
|
||||
"method": "get_block",
|
||||
"params": {
|
||||
"height": 123
|
||||
}
|
||||
}"#;
|
||||
assert_eq!(json, expected_json);
|
||||
```
|
||||
|
||||
This is how the method/param types are done in Cuprate.
|
||||
|
||||
For reasoning, see: <https://github.com/Cuprate/cuprate/pull/146#issuecomment-2145734838>.
|
||||
|
||||
## Serialization changes
|
||||
This crate's serialized field order slightly differs compared to `monerod`.
|
||||
|
||||
`monerod`'s JSON objects are serialized in alphabetical order, whereas this crate serializes the fields in their defined order (due to [`serde`]).
|
||||
|
||||
With that said, parsing should not be affected at all since a key-value map is used:
|
||||
```rust
|
||||
# use pretty_assertions::assert_eq;
|
||||
use json_rpc::{Id, Response};
|
||||
|
||||
let response = Response::ok(Id::Num(123), "OK");
|
||||
let response_json = serde_json::to_string_pretty(&response).unwrap();
|
||||
|
||||
// This crate's `Response` result type will _always_
|
||||
// serialize fields in the following order:
|
||||
let expected_json =
|
||||
r#"{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 123,
|
||||
"result": "OK"
|
||||
}"#;
|
||||
assert_eq!(response_json, expected_json);
|
||||
|
||||
// Although, `monerod` will serialize like such:
|
||||
let monerod_json =
|
||||
r#"{
|
||||
"id": 123,
|
||||
"jsonrpc": "2.0",
|
||||
"result": "OK"
|
||||
}"#;
|
||||
|
||||
///---
|
||||
|
||||
let response = Response::<()>::invalid_request(Id::Num(123));
|
||||
let response_json = serde_json::to_string_pretty(&response).unwrap();
|
||||
|
||||
// This crate's `Response` error type will _always_
|
||||
// serialize fields in the following order:
|
||||
let expected_json =
|
||||
r#"{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 123,
|
||||
"error": {
|
||||
"code": -32600,
|
||||
"message": "Invalid Request"
|
||||
}
|
||||
}"#;
|
||||
assert_eq!(response_json, expected_json);
|
||||
|
||||
// Although, `monerod` will serialize like such:
|
||||
let monerod_json =
|
||||
r#"{
|
||||
"error": {
|
||||
"code": -32600,
|
||||
"message": "Invalid Request"
|
||||
},
|
||||
"id": 123
|
||||
"jsonrpc": "2.0",
|
||||
}"#;
|
||||
```
|
||||
|
||||
## Compared to other implementations
|
||||
A quick table showing some small differences between this crate and other JSON-RPC 2.0 implementations.
|
||||
|
||||
| Implementation | Allows any case for key fields excluding `method/params` | Allows unknown fields in main `{}`, and response/request objects | Allows overwriting previous values upon duplicate fields (except [`Response`]'s `result/error` field) |
|
||||
|---|---|---|---|
|
||||
| [`monerod`](https://github.com/monero-project/monero) | ✅ | ✅ | ✅
|
||||
| [`jsonrpsee`](https://docs.rs/jsonrpsee) | ❌ | ✅ | ❌
|
||||
| This crate | ❌ | ✅ | ✅
|
||||
|
||||
Allows any case for key fields excluding `method/params`:
|
||||
```rust
|
||||
# use json_rpc::Response;
|
||||
# use serde_json::from_str;
|
||||
# use pretty_assertions::assert_eq;
|
||||
let json = r#"{"jsonrpc":"2.0","id":123,"result":"OK"}"#;
|
||||
from_str::<Response<String>>(&json).unwrap();
|
||||
|
||||
// Only `lowercase` is allowed.
|
||||
let json = r#"{"jsonRPC":"2.0","id":123,"result":"OK"}"#;
|
||||
let err = from_str::<Response<String>>(&json).unwrap_err();
|
||||
assert_eq!(format!("{err}"), "missing field `jsonrpc` at line 1 column 40");
|
||||
```
|
||||
|
||||
Allows unknown fields in main `{}`, and response/request objects:
|
||||
```rust
|
||||
# use json_rpc::Response;
|
||||
# use serde_json::from_str;
|
||||
// unknown fields are allowed in main `{}`
|
||||
// v
|
||||
let json = r#"{"unknown_field":"asdf","jsonrpc":"2.0","id":123,"result":"OK"}"#;
|
||||
from_str::<Response<String>>(&json).unwrap();
|
||||
|
||||
// and within objects
|
||||
// v
|
||||
let json = r#"{"jsonrpc":"2.0","id":123,"error":{"code":-1,"message":"","unknown_field":"asdf"}}"#;
|
||||
from_str::<Response<String>>(&json).unwrap();
|
||||
```
|
||||
|
||||
Allows overwriting previous values upon duplicate fields (except [`Response`]'s `result/error` field)
|
||||
```rust
|
||||
# use json_rpc::{Id, Response};
|
||||
# use serde_json::from_str;
|
||||
# use pretty_assertions::assert_eq;
|
||||
// duplicate fields will get overwritten by the latest one
|
||||
// v v
|
||||
let json = r#"{"jsonrpc":"2.0","id":123,"id":321,"result":"OK"}"#;
|
||||
let response = from_str::<Response<String>>(&json).unwrap();
|
||||
assert_eq!(response.id, Id::Num(321));
|
||||
|
||||
// But 2 results are not allowed.
|
||||
let json = r#"{"jsonrpc":"2.0","id":123,"result":"OK","result":"OK"}"#;
|
||||
let err = from_str::<Response<String>>(&json).unwrap_err();
|
||||
assert_eq!(format!("{err}"), "duplicate field `result/error` at line 1 column 48");
|
||||
|
||||
// Same with errors.
|
||||
let json = r#"{"jsonrpc":"2.0","id":123,"error":{"code":-1,"message":""},"error":{"code":-1,"message":""}}"#;
|
||||
let err = from_str::<Response<String>>(&json).unwrap_err();
|
||||
assert_eq!(format!("{err}"), "duplicate field `result/error` at line 1 column 66");
|
||||
```
|
219
rpc/json-rpc/src/error/code.rs
Normal file
219
rpc/json-rpc/src/error/code.rs
Normal file
|
@ -0,0 +1,219 @@
|
|||
//! Error codes.
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Use
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
|
||||
use crate::error::constants::{
|
||||
INTERNAL_ERROR, INVALID_PARAMS, INVALID_REQUEST, METHOD_NOT_FOUND, PARSE_ERROR, SERVER_ERROR,
|
||||
};
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- ErrorCode
|
||||
/// [Error object code](https://www.jsonrpc.org/specification#error_object).
|
||||
///
|
||||
/// This `enum` encapsulates JSON-RPC 2.0's error codes
|
||||
/// found in [`ErrorObject`](crate::error::ErrorObject).
|
||||
///
|
||||
/// It associates the code integer ([`i32`]) with its defined message.
|
||||
///
|
||||
/// # Application defined errors
|
||||
/// The custom error codes past `-32099` (`-31000, -31001`, ...)
|
||||
/// defined in JSON-RPC 2.0 are not supported by this enum because:
|
||||
///
|
||||
/// 1. The `(i32, &'static str)` required makes the enum more than 3x larger
|
||||
/// 2. It is not used by Cuprate/Monero[^1]
|
||||
///
|
||||
/// [^1]: Defined errors used by Monero (also excludes the last defined error `-32000 to -32099 Server error`): <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/contrib/epee/include/net/http_server_handlers_map2.h#L150>
|
||||
///
|
||||
/// # Display
|
||||
/// ```rust
|
||||
/// use json_rpc::error::ErrorCode;
|
||||
/// use serde_json::{to_value, from_value, Value};
|
||||
///
|
||||
/// for e in [
|
||||
/// ErrorCode::ParseError,
|
||||
/// ErrorCode::InvalidRequest,
|
||||
/// ErrorCode::MethodNotFound,
|
||||
/// ErrorCode::InvalidParams,
|
||||
/// ErrorCode::InternalError,
|
||||
/// ErrorCode::ServerError(0),
|
||||
/// ] {
|
||||
/// // The formatting is `$CODE: $MSG`.
|
||||
/// let expected_fmt = format!("{}: {}", e.code(), e.msg());
|
||||
/// assert_eq!(expected_fmt, format!("{e}"));
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// # (De)serialization
|
||||
/// This type gets (de)serialized as the associated `i32`, for example:
|
||||
/// ```rust
|
||||
/// use json_rpc::error::ErrorCode;
|
||||
/// use serde_json::{to_value, from_value, Value};
|
||||
///
|
||||
/// for e in [
|
||||
/// ErrorCode::ParseError,
|
||||
/// ErrorCode::InvalidRequest,
|
||||
/// ErrorCode::MethodNotFound,
|
||||
/// ErrorCode::InvalidParams,
|
||||
/// ErrorCode::InternalError,
|
||||
/// ErrorCode::ServerError(0),
|
||||
/// ErrorCode::ServerError(1),
|
||||
/// ErrorCode::ServerError(2),
|
||||
/// ] {
|
||||
/// // Gets serialized into a JSON integer.
|
||||
/// let value = to_value(&e).unwrap();
|
||||
/// assert_eq!(value, Value::Number(e.code().into()));
|
||||
///
|
||||
/// // Expects a JSON integer when deserializing.
|
||||
/// assert_eq!(e, from_value(value).unwrap());
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// ```rust,should_panic
|
||||
/// # use json_rpc::error::ErrorCode;
|
||||
/// # use serde_json::from_value;
|
||||
/// // A JSON string that contains an integer won't work.
|
||||
/// from_value::<ErrorCode>("-32700".into()).unwrap();
|
||||
/// ```
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, thiserror::Error)]
pub enum ErrorCode {
    // `thiserror` derives `Display` from the `#[error]` attributes below,
    // formatting each variant as `$CODE: $MSG`.
    #[error("{}: {}", PARSE_ERROR.0, PARSE_ERROR.1)]
    /// Invalid JSON was received by the server.
    ///
    /// An error occurred on the server while parsing the JSON text.
    ParseError,

    #[error("{}: {}", INVALID_REQUEST.0, INVALID_REQUEST.1)]
    /// The JSON sent is not a valid Request object.
    InvalidRequest,

    #[error("{}: {}", METHOD_NOT_FOUND.0, METHOD_NOT_FOUND.1)]
    /// The method does not exist / is not available.
    MethodNotFound,

    #[error("{}: {}", INVALID_PARAMS.0, INVALID_PARAMS.1)]
    /// Invalid method parameters.
    InvalidParams,

    #[error("{}: {}", INTERNAL_ERROR.0, INTERNAL_ERROR.1)]
    /// Internal JSON-RPC error.
    InternalError,

    #[error("{0}: {SERVER_ERROR}")]
    /// Reserved for implementation-defined server-errors.
    ///
    /// The associated [`i32`] is the raw, non-standard error code.
    ServerError(i32),
}
|
||||
|
||||
impl ErrorCode {
|
||||
/// Creates [`Self`] from a [`i32`] code.
|
||||
///
|
||||
/// [`From<i32>`] is the same as this function.
|
||||
///
|
||||
/// ```rust
|
||||
/// use json_rpc::error::{
|
||||
/// ErrorCode,
|
||||
/// INTERNAL_ERROR, INVALID_PARAMS, INVALID_REQUEST, METHOD_NOT_FOUND, PARSE_ERROR,
|
||||
/// };
|
||||
///
|
||||
/// assert_eq!(ErrorCode::from_code(PARSE_ERROR.0), ErrorCode::ParseError);
|
||||
/// assert_eq!(ErrorCode::from_code(INVALID_REQUEST.0), ErrorCode::InvalidRequest);
|
||||
/// assert_eq!(ErrorCode::from_code(METHOD_NOT_FOUND.0), ErrorCode::MethodNotFound);
|
||||
/// assert_eq!(ErrorCode::from_code(INVALID_PARAMS.0), ErrorCode::InvalidParams);
|
||||
/// assert_eq!(ErrorCode::from_code(INTERNAL_ERROR.0), ErrorCode::InternalError);
|
||||
///
|
||||
/// // Non-defined code inputs will default to a custom `ServerError`.
|
||||
/// assert_eq!(ErrorCode::from_code(0), ErrorCode::ServerError(0));
|
||||
/// assert_eq!(ErrorCode::from_code(1), ErrorCode::ServerError(1));
|
||||
/// assert_eq!(ErrorCode::from_code(2), ErrorCode::ServerError(2));
|
||||
/// ```
|
||||
pub const fn from_code(code: i32) -> Self {
|
||||
// FIXME: you cannot `match` on tuple fields
|
||||
// so use `if` (seems to compile to the same
|
||||
// assembly as matching directly on `i32`s).
|
||||
if code == PARSE_ERROR.0 {
|
||||
Self::ParseError
|
||||
} else if code == INVALID_REQUEST.0 {
|
||||
Self::InvalidRequest
|
||||
} else if code == METHOD_NOT_FOUND.0 {
|
||||
Self::MethodNotFound
|
||||
} else if code == INVALID_PARAMS.0 {
|
||||
Self::InvalidParams
|
||||
} else if code == INTERNAL_ERROR.0 {
|
||||
Self::InternalError
|
||||
} else {
|
||||
Self::ServerError(code)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `self`'s [`i32`] code representation.
|
||||
///
|
||||
/// ```rust
|
||||
/// use json_rpc::error::{
|
||||
/// ErrorCode,
|
||||
/// INTERNAL_ERROR, INVALID_PARAMS, INVALID_REQUEST, METHOD_NOT_FOUND, PARSE_ERROR,
|
||||
/// };
|
||||
///
|
||||
/// assert_eq!(ErrorCode::ParseError.code(), PARSE_ERROR.0);
|
||||
/// assert_eq!(ErrorCode::InvalidRequest.code(), INVALID_REQUEST.0);
|
||||
/// assert_eq!(ErrorCode::MethodNotFound.code(), METHOD_NOT_FOUND.0);
|
||||
/// assert_eq!(ErrorCode::InvalidParams.code(), INVALID_PARAMS.0);
|
||||
/// assert_eq!(ErrorCode::InternalError.code(), INTERNAL_ERROR.0);
|
||||
/// assert_eq!(ErrorCode::ServerError(0).code(), 0);
|
||||
/// assert_eq!(ErrorCode::ServerError(1).code(), 1);
|
||||
/// ```
|
||||
pub const fn code(&self) -> i32 {
|
||||
match self {
|
||||
Self::ParseError => PARSE_ERROR.0,
|
||||
Self::InvalidRequest => INVALID_REQUEST.0,
|
||||
Self::MethodNotFound => METHOD_NOT_FOUND.0,
|
||||
Self::InvalidParams => INVALID_PARAMS.0,
|
||||
Self::InternalError => INTERNAL_ERROR.0,
|
||||
Self::ServerError(code) => *code,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `self`'s human readable [`str`] message.
|
||||
///
|
||||
/// ```rust
|
||||
/// use json_rpc::error::{
|
||||
/// ErrorCode,
|
||||
/// INTERNAL_ERROR, INVALID_PARAMS, INVALID_REQUEST, METHOD_NOT_FOUND, PARSE_ERROR, SERVER_ERROR,
|
||||
/// };
|
||||
///
|
||||
/// assert_eq!(ErrorCode::ParseError.msg(), PARSE_ERROR.1);
|
||||
/// assert_eq!(ErrorCode::InvalidRequest.msg(), INVALID_REQUEST.1);
|
||||
/// assert_eq!(ErrorCode::MethodNotFound.msg(), METHOD_NOT_FOUND.1);
|
||||
/// assert_eq!(ErrorCode::InvalidParams.msg(), INVALID_PARAMS.1);
|
||||
/// assert_eq!(ErrorCode::InternalError.msg(), INTERNAL_ERROR.1);
|
||||
/// assert_eq!(ErrorCode::ServerError(0).msg(), SERVER_ERROR);
|
||||
/// ```
|
||||
pub const fn msg(&self) -> &'static str {
|
||||
match self {
|
||||
Self::ParseError => PARSE_ERROR.1,
|
||||
Self::InvalidRequest => INVALID_REQUEST.1,
|
||||
Self::MethodNotFound => METHOD_NOT_FOUND.1,
|
||||
Self::InvalidParams => INVALID_PARAMS.1,
|
||||
Self::InternalError => INTERNAL_ERROR.1,
|
||||
Self::ServerError(_) => SERVER_ERROR,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Trait impl
|
||||
impl<N: Into<i32>> From<N> for ErrorCode {
|
||||
fn from(code: N) -> Self {
|
||||
Self::from_code(code.into())
|
||||
}
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Serde impl
|
||||
impl<'a> Deserialize<'a> for ErrorCode {
|
||||
fn deserialize<D: Deserializer<'a>>(deserializer: D) -> Result<Self, D::Error> {
|
||||
Ok(Self::from_code(Deserialize::deserialize(deserializer)?))
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for ErrorCode {
|
||||
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
|
||||
serializer.serialize_i32(self.code())
|
||||
}
|
||||
}
|
22
rpc/json-rpc/src/error/constants.rs
Normal file
22
rpc/json-rpc/src/error/constants.rs
Normal file
|
@ -0,0 +1,22 @@
|
|||
//! [`JSON-RPC 2.0`](https://www.jsonrpc.org/specification#error_object) defined errors as constants.

//---------------------------------------------------------------------------------------------------- JSON-RPC spec errors.
/// Code and message for [`ErrorCode::ParseError`](crate::error::ErrorCode::ParseError).
pub const PARSE_ERROR: (i32, &str) = (-32700, "Parse error");

/// Code and message for [`ErrorCode::InvalidRequest`](crate::error::ErrorCode::InvalidRequest).
pub const INVALID_REQUEST: (i32, &str) = (-32600, "Invalid Request");

/// Code and message for [`ErrorCode::MethodNotFound`](crate::error::ErrorCode::MethodNotFound).
pub const METHOD_NOT_FOUND: (i32, &str) = (-32601, "Method not found");

/// Code and message for [`ErrorCode::InvalidParams`](crate::error::ErrorCode::InvalidParams).
pub const INVALID_PARAMS: (i32, &str) = (-32602, "Invalid params");

/// Code and message for [`ErrorCode::InternalError`](crate::error::ErrorCode::InternalError).
pub const INTERNAL_ERROR: (i32, &str) = (-32603, "Internal error");

/// Message for [`ErrorCode::ServerError`](crate::error::ErrorCode::ServerError).
///
/// The [`i32`] error code is the caller's choice, this is only the message.
///
/// NOTE(review): the linked spec reserves `-32000` to `-32099` for
/// implementation-defined server errors; nothing here enforces that
/// range — confirm whether callers are expected to stay within it.
pub const SERVER_ERROR: &str = "Server error";
|
14
rpc/json-rpc/src/error/mod.rs
Normal file
14
rpc/json-rpc/src/error/mod.rs
Normal file
|
@ -0,0 +1,14 @@
|
|||
//! [Error codes and objects](https://www.jsonrpc.org/specification#error_object).
//!
//! This module contains JSON-RPC 2.0's error object and codes,
//! as well as some associated constants.

mod code; // `ErrorCode`: the spec-defined error code enum.
mod constants; // `(i32, &str)` code/message pairs from the spec.
mod object; // `ErrorObject`: the full error object sent in responses.

pub use code::ErrorCode;
pub use constants::{
    INTERNAL_ERROR, INVALID_PARAMS, INVALID_REQUEST, METHOD_NOT_FOUND, PARSE_ERROR, SERVER_ERROR,
};
pub use object::ErrorObject;
|
258
rpc/json-rpc/src/error/object.rs
Normal file
258
rpc/json-rpc/src/error/object.rs
Normal file
|
@ -0,0 +1,258 @@
|
|||
//! Error object.
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Use
|
||||
use std::{borrow::Cow, error::Error, fmt::Display};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::value::Value;
|
||||
|
||||
use crate::error::{
|
||||
constants::{
|
||||
INTERNAL_ERROR, INVALID_PARAMS, INVALID_REQUEST, METHOD_NOT_FOUND, PARSE_ERROR,
|
||||
SERVER_ERROR,
|
||||
},
|
||||
ErrorCode,
|
||||
};
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- ErrorObject
|
||||
/// [The error object](https://www.jsonrpc.org/specification).
///
/// This is the object sent back in a [`Response`](crate::Response)
/// if the method call errored.
///
/// # Display
/// ```rust
/// use json_rpc::error::ErrorObject;
///
/// // The format is `$CODE: $MESSAGE`.
/// // If a message was not passed during construction,
/// // the error code's message will be used.
/// assert_eq!(format!("{}", ErrorObject::parse_error()), "-32700: Parse error");
/// assert_eq!(format!("{}", ErrorObject::invalid_request()), "-32600: Invalid Request");
/// assert_eq!(format!("{}", ErrorObject::method_not_found()), "-32601: Method not found");
/// assert_eq!(format!("{}", ErrorObject::invalid_params()), "-32602: Invalid params");
/// assert_eq!(format!("{}", ErrorObject::internal_error()), "-32603: Internal error");
/// assert_eq!(format!("{}", ErrorObject::server_error(0)), "0: Server error");
///
/// // Set a custom message.
/// let mut e = ErrorObject::server_error(1);
/// e.message = "hello".into();
/// assert_eq!(format!("{e}"), "1: hello");
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct ErrorObject {
    /// The error code.
    ///
    /// (De)serialized as a bare JSON `number` via `ErrorCode`'s serde impls.
    pub code: ErrorCode,

    /// A custom message for this error, distinct from [`ErrorCode::msg`].
    ///
    /// A JSON `string` value.
    ///
    /// This is a `Cow<'static, str>` to support both 0-allocation for
    /// `const` string ID's commonly found in programs, as well as support
    /// for runtime [`String`]'s.
    pub message: Cow<'static, str>,

    /// Optional data associated with the error.
    ///
    /// # `None` vs `Some(Value::Null)`
    /// This field will be completely omitted during serialization if [`None`],
    /// however if it is `Some(Value::Null)`, it will be serialized as `"data": null`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data: Option<Value>,
}
|
||||
|
||||
impl ErrorObject {
|
||||
/// Creates a new error, deriving the message from the code.
|
||||
///
|
||||
/// Same as `ErrorObject::from(ErrorCode)`.
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::borrow::Cow;
|
||||
/// use json_rpc::error::{ErrorCode, ErrorObject};
|
||||
///
|
||||
/// for code in [
|
||||
/// ErrorCode::ParseError,
|
||||
/// ErrorCode::InvalidRequest,
|
||||
/// ErrorCode::MethodNotFound,
|
||||
/// ErrorCode::InvalidParams,
|
||||
/// ErrorCode::InternalError,
|
||||
/// ErrorCode::ServerError(0),
|
||||
/// ] {
|
||||
/// let object = ErrorObject::from_code(code);
|
||||
/// assert_eq!(object, ErrorObject {
|
||||
/// code,
|
||||
/// message: Cow::Borrowed(code.msg()),
|
||||
/// data: None,
|
||||
/// });
|
||||
///
|
||||
/// }
|
||||
/// ```
|
||||
pub const fn from_code(code: ErrorCode) -> Self {
|
||||
Self {
|
||||
code,
|
||||
message: Cow::Borrowed(code.msg()),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new error using [`PARSE_ERROR`].
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::borrow::Cow;
|
||||
/// use json_rpc::error::{ErrorCode, ErrorObject};
|
||||
///
|
||||
/// let code = ErrorCode::ParseError;
|
||||
/// let object = ErrorObject::parse_error();
|
||||
/// assert_eq!(object, ErrorObject {
|
||||
/// code,
|
||||
/// message: Cow::Borrowed(code.msg()),
|
||||
/// data: None,
|
||||
/// });
|
||||
/// ```
|
||||
pub const fn parse_error() -> Self {
|
||||
Self {
|
||||
code: ErrorCode::ParseError,
|
||||
message: Cow::Borrowed(PARSE_ERROR.1),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new error using [`INVALID_REQUEST`].
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::borrow::Cow;
|
||||
/// use json_rpc::error::{ErrorCode, ErrorObject};
|
||||
///
|
||||
/// let code = ErrorCode::InvalidRequest;
|
||||
/// let object = ErrorObject::invalid_request();
|
||||
/// assert_eq!(object, ErrorObject {
|
||||
/// code,
|
||||
/// message: Cow::Borrowed(code.msg()),
|
||||
/// data: None,
|
||||
/// });
|
||||
/// ```
|
||||
pub const fn invalid_request() -> Self {
|
||||
Self {
|
||||
code: ErrorCode::InvalidRequest,
|
||||
message: Cow::Borrowed(INVALID_REQUEST.1),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new error using [`METHOD_NOT_FOUND`].
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::borrow::Cow;
|
||||
/// use json_rpc::error::{ErrorCode, ErrorObject};
|
||||
///
|
||||
/// let code = ErrorCode::MethodNotFound;
|
||||
/// let object = ErrorObject::method_not_found();
|
||||
/// assert_eq!(object, ErrorObject {
|
||||
/// code,
|
||||
/// message: Cow::Borrowed(code.msg()),
|
||||
/// data: None,
|
||||
/// });
|
||||
/// ```
|
||||
pub const fn method_not_found() -> Self {
|
||||
Self {
|
||||
code: ErrorCode::MethodNotFound,
|
||||
message: Cow::Borrowed(METHOD_NOT_FOUND.1),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new error using [`INVALID_PARAMS`].
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::borrow::Cow;
|
||||
/// use json_rpc::error::{ErrorCode, ErrorObject};
|
||||
///
|
||||
/// let code = ErrorCode::InvalidParams;
|
||||
/// let object = ErrorObject::invalid_params();
|
||||
/// assert_eq!(object, ErrorObject {
|
||||
/// code,
|
||||
/// message: Cow::Borrowed(code.msg()),
|
||||
/// data: None,
|
||||
/// });
|
||||
/// ```
|
||||
pub const fn invalid_params() -> Self {
|
||||
Self {
|
||||
code: ErrorCode::InvalidParams,
|
||||
message: Cow::Borrowed(INVALID_PARAMS.1),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new error using [`INTERNAL_ERROR`].
|
||||
///
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::borrow::Cow;
|
||||
/// use json_rpc::error::{ErrorCode, ErrorObject};
|
||||
///
|
||||
/// let code = ErrorCode::InternalError;
|
||||
/// let object = ErrorObject::internal_error();
|
||||
/// assert_eq!(object, ErrorObject {
|
||||
/// code,
|
||||
/// message: Cow::Borrowed(code.msg()),
|
||||
/// data: None,
|
||||
/// });
|
||||
/// ```
|
||||
pub const fn internal_error() -> Self {
|
||||
Self {
|
||||
code: ErrorCode::InternalError,
|
||||
message: Cow::Borrowed(INTERNAL_ERROR.1),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new error using [`SERVER_ERROR`].
|
||||
///
|
||||
/// You must provide the custom [`i32`] error code.
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::borrow::Cow;
|
||||
/// use json_rpc::error::{ErrorCode, ErrorObject};
|
||||
///
|
||||
/// let code = ErrorCode::ServerError(0);
|
||||
/// let object = ErrorObject::server_error(0);
|
||||
/// assert_eq!(object, ErrorObject {
|
||||
/// code,
|
||||
/// message: Cow::Borrowed(code.msg()),
|
||||
/// data: None,
|
||||
/// });
|
||||
/// ```
|
||||
pub const fn server_error(error_code: i32) -> Self {
|
||||
Self {
|
||||
code: ErrorCode::ServerError(error_code),
|
||||
message: Cow::Borrowed(SERVER_ERROR),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Trait impl
|
||||
impl From<ErrorCode> for ErrorObject {
    // Delegates to the `const` constructor, so the message is always
    // derived from the code (`code.msg()`).
    fn from(code: ErrorCode) -> Self {
        Self::from_code(code)
    }
}
|
||||
|
||||
impl Display for ErrorObject {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Formatting `self.code` directly would also print the code's
        // canonical message (via its `Display`/`thiserror` impl) — emit the
        // numeric code plus our (possibly custom) message instead.
        let code = self.code.code();
        write!(f, "{code}: {}", self.message)
    }
}
|
||||
|
||||
impl Error for ErrorObject {
    /// The underlying [`ErrorCode`] is exposed as this error's source.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        Some(&self.code)
    }

    /// NOTE(review): `Error::description` is deprecated in favor of
    /// `Display`; this override makes `description()` return the custom
    /// message — confirm whether it is still intentionally kept.
    fn description(&self) -> &str {
        &self.message
    }
}
|
242
rpc/json-rpc/src/id.rs
Normal file
242
rpc/json-rpc/src/id.rs
Normal file
|
@ -0,0 +1,242 @@
|
|||
//! [`Id`]: request/response identification.
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Use
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::borrow::Cow;
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Id
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
// `untagged`: (de)serialized as the bare JSON value (`null`/number/string),
// not as an externally tagged enum.
#[serde(untagged)]
/// [Request](crate::Request)/[Response](crate::Response) identification.
///
/// This is the [JSON-RPC 2.0 `id` field](https://www.jsonrpc.org/specification)
/// type found in `Request/Response`s.
///
/// # From
/// This type implements [`From`] on:
/// - [`String`]
/// - [`str`]
/// - [`u8`], [`u16`], [`u32`], [`u64`]
///
/// and all of those wrapped in [`Option`].
///
/// If the `Option` is [`None`], [`Id::Null`] is returned.
///
/// Note that the `&str` implementations will allocate, use [`Id::from_static_str`]
/// (or just manually create the `Cow`) for a non-allocating `Id`.
///
/// ```rust
/// use json_rpc::Id;
///
/// assert_eq!(Id::from(String::new()), Id::Str("".into()));
/// assert_eq!(Id::from(Some(String::new())), Id::Str("".into()));
/// assert_eq!(Id::from(None::<String>), Id::Null);
/// assert_eq!(Id::from(123_u64), Id::Num(123_u64));
/// assert_eq!(Id::from(Some(123_u64)), Id::Num(123_u64));
/// assert_eq!(Id::from(None::<u64>), Id::Null);
/// ```
pub enum Id {
    /// A JSON `null` value.
    ///
    /// ```rust
    /// use json_rpc::Id;
    /// use serde_json::{from_value,to_value,json,Value};
    ///
    /// assert_eq!(from_value::<Id>(json!(null)).unwrap(), Id::Null);
    /// assert_eq!(to_value(Id::Null).unwrap(), Value::Null);
    ///
    /// // Not a real `null`, but a string.
    /// assert_eq!(from_value::<Id>(json!("null")).unwrap(), Id::Str("null".into()));
    /// ```
    Null,

    /// A JSON `number` value.
    Num(u64),

    /// A JSON `string` value.
    ///
    /// This is a `Cow<'static, str>` to support both 0-allocation for
    /// `const` string ID's commonly found in programs, as well as support
    /// for runtime [`String`]'s.
    ///
    /// ```rust
    /// use std::borrow::Cow;
    /// use json_rpc::Id;
    ///
    /// /// A program's static ID.
    /// const ID: &'static str = "my_id";
    ///
    /// // No allocation.
    /// let s = Id::Str(Cow::Borrowed(ID));
    ///
    /// // Runtime allocation.
    /// let s = Id::Str(Cow::Owned("runtime_id".to_string()));
    /// ```
    Str(Cow<'static, str>),
}
|
||||
|
||||
impl Id {
|
||||
/// This returns `Some(u64)` if [`Id`] is a number.
|
||||
///
|
||||
/// ```rust
|
||||
/// use json_rpc::Id;
|
||||
///
|
||||
/// assert_eq!(Id::Num(0).as_u64(), Some(0));
|
||||
/// assert_eq!(Id::Str("0".into()).as_u64(), None);
|
||||
/// assert_eq!(Id::Null.as_u64(), None);
|
||||
/// ```
|
||||
pub const fn as_u64(&self) -> Option<u64> {
|
||||
match self {
|
||||
Self::Num(n) => Some(*n),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// This returns `Some(&str)` if [`Id`] is a string.
|
||||
///
|
||||
/// ```rust
|
||||
/// use json_rpc::Id;
|
||||
///
|
||||
/// assert_eq!(Id::Str("0".into()).as_str(), Some("0"));
|
||||
/// assert_eq!(Id::Num(0).as_str(), None);
|
||||
/// assert_eq!(Id::Null.as_str(), None);
|
||||
/// ```
|
||||
pub fn as_str(&self) -> Option<&str> {
|
||||
match self {
|
||||
Self::Str(s) => Some(s.as_ref()),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `true` if `self` is [`Id::Null`].
|
||||
///
|
||||
/// ```rust
|
||||
/// use json_rpc::Id;
|
||||
///
|
||||
/// assert!(Id::Null.is_null());
|
||||
/// assert!(!Id::Num(0).is_null());
|
||||
/// assert!(!Id::Str("".into()).is_null());
|
||||
/// ```
|
||||
pub fn is_null(&self) -> bool {
|
||||
*self == Self::Null
|
||||
}
|
||||
|
||||
/// Create a new [`Id::Str`] from a static string.
|
||||
///
|
||||
/// ```rust
|
||||
/// use json_rpc::Id;
|
||||
///
|
||||
/// assert_eq!(Id::from_static_str("hi"), Id::Str("hi".into()));
|
||||
/// ```
|
||||
pub const fn from_static_str(s: &'static str) -> Self {
|
||||
Self::Str(Cow::Borrowed(s))
|
||||
}
|
||||
|
||||
/// Inner infallible implementation of [`FromStr::from_str`]
|
||||
const fn from_string(s: String) -> Self {
|
||||
Self::Str(Cow::Owned(s))
|
||||
}
|
||||
}
|
||||
|
||||
impl std::str::FromStr for Id {
|
||||
type Err = std::convert::Infallible;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, std::convert::Infallible> {
|
||||
Ok(Self::from_string(s.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for Id {
|
||||
fn from(s: String) -> Self {
|
||||
Self::from_string(s)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&str> for Id {
|
||||
fn from(s: &str) -> Self {
|
||||
Self::from_string(s.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Option<String>> for Id {
|
||||
fn from(s: Option<String>) -> Self {
|
||||
match s {
|
||||
Some(s) => Self::from_string(s),
|
||||
None => Self::Null,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Option<&str>> for Id {
|
||||
fn from(s: Option<&str>) -> Self {
|
||||
let s = s.map(ToString::to_string);
|
||||
s.into()
|
||||
}
|
||||
}
|
||||
|
||||
/// Implement `From<unsigned integer>` for `Id`.
///
/// Not a generic since that clashes with `From<String>`.
macro_rules! impl_u {
    ($($u:ty),*) => {
        $(
            // Owned integer.
            impl From<$u> for Id {
                fn from(u: $u) -> Self {
                    Self::Num(u64::from(u))
                }
            }

            // Borrowed integer (copied out; all these types are `Copy`).
            impl From<&$u> for Id {
                fn from(u: &$u) -> Self {
                    Self::Num(u64::from(*u))
                }
            }

            // `None` maps to `Id::Null`.
            impl From<Option<$u>> for Id {
                fn from(u: Option<$u>) -> Self {
                    match u {
                        Some(u) => Self::Num(u64::from(u)),
                        None => Self::Null,
                    }
                }
            }
        )*
    }
}

impl_u!(u8, u16, u32);
// NOTE(review): `u64::from(u64)` is valid on any target, so this
// pointer-width gate looks unnecessary — presumably it mirrors some
// `usize`-related concern elsewhere; confirm before removing.
#[cfg(target_pointer_width = "64")]
impl_u!(u64);
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- TESTS
|
||||
#[cfg(test)]
mod test {
    use super::*;

    /// [`Id::as_u64`] yields the number for `Num` and `None` otherwise.
    #[test]
    fn as_u64() {
        assert_eq!(Id::Num(u64::MIN).as_u64(), Some(u64::MIN));
        assert_eq!(Id::Num(u64::MAX).as_u64(), Some(u64::MAX));
        assert_eq!(Id::Null.as_u64(), None);
        assert_eq!(Id::Str("".into()).as_u64(), None);
    }

    /// [`Id::as_str`] yields the string for `Str` and `None` otherwise.
    #[test]
    fn as_str() {
        assert_eq!(Id::Str("str".into()).as_str(), Some("str"));
        assert_eq!(Id::Null.as_str(), None);
        assert_eq!(Id::Num(0).as_str(), None);
    }
}
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue