Mirror of https://github.com/hinto-janai/cuprate.git (synced 2024-12-22 03:29:30 +00:00)

Commit 9ba17f8c7f: 128 changed files with 4606 additions and 1559 deletions.
.github/workflows/audit.yml (vendored, 34 lines; file deleted)

@@ -1,34 +0,0 @@
# This runs `cargo audit` on all dependencies (only if Cargo deps changed)

name: Audit

on:
  push:
    paths:
      - '**/Cargo.toml'
      - '**/Cargo.lock'
  workflow_dispatch:

env:
  CARGO_TERM_COLOR: always

jobs:
  audit:
    runs-on: ubuntu-latest
    steps:
    - name: Cache
      uses: actions/cache@v4
      with:
        path: |
          ~/.cargo
          target
        key: audit
    - uses: actions/checkout@v4
      with:
        submodules: recursive
    - name: Install dependencies
      run: cargo install cargo-audit --locked
    - name: Audit
      run: cargo audit
.github/workflows/ci.yml (vendored, 7 lines changed)

@@ -133,7 +133,12 @@ jobs:
    - name: Test
      run: |
        cargo test --all-features --workspace
        cargo test --package cuprate-blockchain --no-default-features --features redb --features service
        cargo test --package cuprate-blockchain --no-default-features --features redb

    - name: Hack Check
      run: |
        cargo install cargo-hack --locked
        cargo hack --workspace check --feature-powerset --no-dev-deps

    # TODO: upload binaries with `actions/upload-artifact@v3`
    - name: Build
@@ -120,12 +120,15 @@ Before pushing your code, please run the following at the root of the repository

After that, ensure all other CI passes by running:

| Command | Does what |
|------------------------------------------------------------------------|-----------|
| `RUSTDOCFLAGS='-D warnings' cargo doc --workspace --all-features` | Checks documentation is OK
| `cargo clippy --workspace --all-features --all-targets -- -D warnings` | Checks clippy lints are satisfied
| `cargo test --all-features --workspace` | Runs all tests
| `cargo build --all-features --all-targets --workspace` | Builds all code

| Command | Does what |
|------------------------------------------------------------------------|-------------------------------------------------------------------------|
| `RUSTDOCFLAGS='-D warnings' cargo doc --workspace --all-features` | Checks documentation is OK |
| `cargo clippy --workspace --all-features --all-targets -- -D warnings` | Checks clippy lints are satisfied |
| `cargo test --all-features --workspace` | Runs all tests |
| `cargo build --all-features --all-targets --workspace` | Builds all code |
| `cargo hack --workspace check --feature-powerset --no-dev-deps` | Uses `cargo hack` to check our crates build with different features set |

`cargo hack` can be installed with `cargo` from: https://github.com/taiki-e/cargo-hack.

**Note: in order for some tests to work, you will need to place a [`monerod`](https://www.getmonero.org/downloads/) binary at the root of the repository.**
Cargo.lock (generated, 906 lines changed)

File diff suppressed because it is too large.
Cargo.toml (149 lines changed)

@@ -15,26 +15,35 @@ members = [
    "consensus/context",
    "consensus/fast-sync",
    "consensus/rules",

    # Net
    "net/epee-encoding",
    "net/fixed-bytes",
    "net/levin",
    "net/wire",

    # P2P
    "p2p/p2p",
    "p2p/p2p-core",
    "p2p/bucket",
    "p2p/dandelion-tower",
    "p2p/async-buffer",
    "p2p/address-book",

    # Storage
    "storage/blockchain",
    "storage/service",
    "storage/txpool",
    "storage/database",

    # RPC
    "rpc/json-rpc",
    "rpc/types",
    "rpc/interface",

    # ZMQ
    "zmq/types",

    # Misc
    "constants",
    "cryptonight",

@@ -64,83 +73,89 @@ opt-level = 3
[workspace.dependencies]
# Cuprate members
cuprate-fast-sync = { path = "consensus/fast-sync" ,default-features = false}
cuprate-consensus-rules = { path = "consensus/rules" ,default-features = false}
cuprate-constants = { path = "constants" ,default-features = false}
cuprate-consensus = { path = "consensus" ,default-features = false}
cuprate-consensus-context = { path = "consensus/context" ,default-features = false}
cuprate-cryptonight = { path = "cryptonight" ,default-features = false}
cuprate-helper = { path = "helper" ,default-features = false}
cuprate-epee-encoding = { path = "net/epee-encoding" ,default-features = false}
cuprate-fixed-bytes = { path = "net/fixed-bytes" ,default-features = false}
cuprate-levin = { path = "net/levin" ,default-features = false}
cuprate-wire = { path = "net/wire" ,default-features = false}
cuprate-p2p = { path = "p2p/p2p" ,default-features = false}
cuprate-p2p-core = { path = "p2p/p2p-core" ,default-features = false}
cuprate-dandelion-tower = { path = "p2p/dandelion-tower" ,default-features = false}
cuprate-async-buffer = { path = "p2p/async-buffer" ,default-features = false}
cuprate-address-book = { path = "p2p/address-book" ,default-features = false}
cuprate-blockchain = { path = "storage/blockchain" ,default-features = false}
cuprate-database = { path = "storage/database" ,default-features = false}
cuprate-database-service = { path = "storage/service" ,default-features = false}
cuprate-txpool = { path = "storage/txpool" ,default-features = false}
cuprate-pruning = { path = "pruning" ,default-features = false}
cuprate-test-utils = { path = "test-utils" ,default-features = false}
cuprate-types = { path = "types" ,default-features = false}
cuprate-json-rpc = { path = "rpc/json-rpc" ,default-features = false}
cuprate-rpc-types = { path = "rpc/types" ,default-features = false}
cuprate-rpc-interface = { path = "rpc/interface" ,default-features = false}
cuprate-benchmark-lib = { path = "benches/benchmark/lib", default-features = false }
cuprate-benchmark-example = { path = "benches/benchmark/example", default-features = false }
cuprate-fast-sync = { path = "consensus/fast-sync", default-features = false }
cuprate-consensus-rules = { path = "consensus/rules", default-features = false }
cuprate-constants = { path = "constants", default-features = false }
cuprate-consensus = { path = "consensus", default-features = false }
cuprate-consensus-context = { path = "consensus/context", default-features = false }
cuprate-cryptonight = { path = "cryptonight", default-features = false }
cuprate-helper = { path = "helper", default-features = false }
cuprate-epee-encoding = { path = "net/epee-encoding", default-features = false }
cuprate-fixed-bytes = { path = "net/fixed-bytes", default-features = false }
cuprate-levin = { path = "net/levin", default-features = false }
cuprate-wire = { path = "net/wire", default-features = false }
cuprate-p2p = { path = "p2p/p2p", default-features = false }
cuprate-p2p-core = { path = "p2p/p2p-core", default-features = false }
cuprate-p2p-bucket = { path = "p2p/p2p-bucket", default-features = false }
cuprate-dandelion-tower = { path = "p2p/dandelion-tower", default-features = false }
cuprate-async-buffer = { path = "p2p/async-buffer", default-features = false }
cuprate-address-book = { path = "p2p/address-book", default-features = false }
cuprate-blockchain = { path = "storage/blockchain", default-features = false }
cuprate-database = { path = "storage/database", default-features = false }
cuprate-database-service = { path = "storage/service", default-features = false }
cuprate-txpool = { path = "storage/txpool", default-features = false }
cuprate-pruning = { path = "pruning", default-features = false }
cuprate-test-utils = { path = "test-utils", default-features = false }
cuprate-types = { path = "types", default-features = false }
cuprate-json-rpc = { path = "rpc/json-rpc", default-features = false }
cuprate-rpc-types = { path = "rpc/types", default-features = false }
cuprate-rpc-interface = { path = "rpc/interface", default-features = false }
cuprate-zmq-types = { path = "zmq/types", default-features = false }

# External dependencies
anyhow = { version = "1.0.89", default-features = false }
async-trait = { version = "0.1.82", default-features = false }
bitflags = { version = "2.6.0", default-features = false }
borsh = { version = "1.5.1", default-features = false }
bytemuck = { version = "1.18.0", default-features = false }
bytes = { version = "1.7.2", default-features = false }
cfg-if = { version = "1.0.0", default-features = false }
clap = { version = "4.5.17", default-features = false }
chrono = { version = "0.4.38", default-features = false }
crypto-bigint = { version = "0.5.5", default-features = false }
crossbeam = { version = "0.8.4", default-features = false }
const_format = { version = "0.2.33", default-features = false }
curve25519-dalek = { version = "4.1.3", default-features = false }
dashmap = { version = "5.5.3", default-features = false }
dirs = { version = "5.0.1", default-features = false }
futures = { version = "0.3.30", default-features = false }
hex = { version = "0.4.3", default-features = false }
anyhow = { version = "1", default-features = false }
arrayvec = { version = "0.7", default-features = false }
async-trait = { version = "0.1", default-features = false }
bitflags = { version = "2", default-features = false }
blake3 = { version = "1", default-features = false }
borsh = { version = "1", default-features = false }
bytemuck = { version = "1", default-features = false }
bytes = { version = "1", default-features = false }
cfg-if = { version = "1", default-features = false }
clap = { version = "4", default-features = false }
chrono = { version = "0.4", default-features = false }
crypto-bigint = { version = "0.5", default-features = false }
crossbeam = { version = "0.8", default-features = false }
const_format = { version = "0.2", default-features = false }
curve25519-dalek = { version = "4", default-features = false }
dashmap = { version = "6", default-features = false }
dirs = { version = "5", default-features = false }
futures = { version = "0.3", default-features = false }
hex = { version = "0.4", default-features = false }
hex-literal = { version = "0.4", default-features = false }
indexmap = { version = "2.5.0", default-features = false }
indexmap = { version = "2", default-features = false }
monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce", default-features = false }
paste = { version = "1.0.15", default-features = false }
pin-project = { version = "1.1.5", default-features = false }
paste = { version = "1", default-features = false }
pin-project = { version = "1", default-features = false }
randomx-rs = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "0028464", default-features = false }
rand = { version = "0.8.5", default-features = false }
rand_distr = { version = "0.4.3", default-features = false }
rayon = { version = "1.10.0", default-features = false }
serde_bytes = { version = "0.11.15", default-features = false }
serde_json = { version = "1.0.128", default-features = false }
serde = { version = "1.0.210", default-features = false }
strum = { version = "0.26.3", default-features = false }
thiserror = { version = "1.0.63", default-features = false }
thread_local = { version = "1.1.8", default-features = false }
tokio-util = { version = "0.7.12", default-features = false }
tokio-stream = { version = "0.1.16", default-features = false }
tokio = { version = "1.40.0", default-features = false }
rand = { version = "0.8", default-features = false }
rand_distr = { version = "0.4", default-features = false }
rayon = { version = "1", default-features = false }
serde_bytes = { version = "0.11", default-features = false }
serde_json = { version = "1", default-features = false }
serde = { version = "1", default-features = false }
strum = { version = "0.26", default-features = false }
thiserror = { version = "1", default-features = false }
thread_local = { version = "1", default-features = false }
tokio-util = { version = "0.7", default-features = false }
tokio-stream = { version = "0.1", default-features = false }
tokio = { version = "1", default-features = false }
tower = { git = "https://github.com/Cuprate/tower.git", rev = "6c7faf0", default-features = false } # <https://github.com/tower-rs/tower/pull/796>
tracing-subscriber = { version = "0.3.18", default-features = false }
tracing = { version = "0.1.40", default-features = false }
tracing-subscriber = { version = "0.3", default-features = false }
tracing = { version = "0.1", default-features = false }

## workspace.dev-dependencies
criterion = { version = "0.5.1" }
function_name = { version = "0.3.0" }
tempfile = { version = "3.13.0" }
criterion = { version = "0.5" }
function_name = { version = "0.3" }
monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" }
monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" }
pretty_assertions = { version = "1.4.1" }
proptest = { version = "1.5.0" }
proptest-derive = { version = "0.4.0" }
tokio-test = { version = "0.4.4" }
tempfile = { version = "3" }
pretty_assertions = { version = "1" }
proptest = { version = "1" }
proptest-derive = { version = "0.5" }
tokio-test = { version = "0.4" }

## TODO:
## Potential dependencies.
@@ -28,8 +28,8 @@ example = [
]

[dependencies]
cuprate-benchmark-lib = { path = "../lib" }
cuprate-benchmark-example = { path = "../example", optional = true }
cuprate-benchmark-lib = { workspace = true }
cuprate-benchmark-example = { workspace = true, optional = true }

cfg-if = { workspace = true }
serde = { workspace = true, features = ["derive"] }

@@ -40,4 +40,4 @@ tracing-subscriber = { workspace = true, features = ["fmt", "std", "env-filter"]
[dev-dependencies]

[lints]
workspace = true
workspace = true
@@ -9,15 +9,15 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/benches/criterion/cu
keywords = ["cuprate", "json-rpc", "criterion", "benchmark"]

[dependencies]
cuprate-json-rpc = { workspace = true }

criterion = { workspace = true }
function_name = { workspace = true }
serde_json = { workspace = true, features = ["default"] }

cuprate-json-rpc = { path = "../../../rpc/json-rpc" }

[[bench]]
name = "main"
harness = false

[lints]
workspace = true
workspace = true
@@ -9,31 +9,32 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated"
[dependencies]
# TODO: after v1.0.0, remove unneeded dependencies.
cuprate-consensus = { workspace = true }
cuprate-fast-sync = { workspace = true }
cuprate-consensus = { workspace = true }
cuprate-fast-sync = { workspace = true }
cuprate-consensus-context = { workspace = true }
cuprate-consensus-rules = { workspace = true }
cuprate-cryptonight = { workspace = true }
cuprate-helper = { workspace = true }
cuprate-epee-encoding = { workspace = true }
cuprate-fixed-bytes = { workspace = true }
cuprate-levin = { workspace = true }
cuprate-wire = { workspace = true }
cuprate-p2p = { workspace = true }
cuprate-p2p-core = { workspace = true }
cuprate-dandelion-tower = { workspace = true }
cuprate-async-buffer = { workspace = true }
cuprate-address-book = { workspace = true }
cuprate-blockchain = { workspace = true, features = ["service"] }
cuprate-database-service = { workspace = true }
cuprate-txpool = { workspace = true }
cuprate-database = { workspace = true }
cuprate-pruning = { workspace = true }
cuprate-test-utils = { workspace = true }
cuprate-types = { workspace = true }
cuprate-json-rpc = { workspace = true }
cuprate-rpc-interface = { workspace = true }
cuprate-rpc-types = { workspace = true }
cuprate-consensus-rules = { workspace = true }
cuprate-constants = { workspace = true }
cuprate-cryptonight = { workspace = true }
cuprate-helper = { workspace = true }
cuprate-epee-encoding = { workspace = true }
cuprate-fixed-bytes = { workspace = true }
cuprate-levin = { workspace = true }
cuprate-wire = { workspace = true }
cuprate-p2p = { workspace = true }
cuprate-p2p-core = { workspace = true }
cuprate-dandelion-tower = { workspace = true, features = ["txpool"] }
cuprate-async-buffer = { workspace = true }
cuprate-address-book = { workspace = true }
cuprate-blockchain = { workspace = true }
cuprate-database-service = { workspace = true }
cuprate-txpool = { workspace = true }
cuprate-database = { workspace = true }
cuprate-pruning = { workspace = true }
cuprate-test-utils = { workspace = true }
cuprate-types = { workspace = true }
cuprate-json-rpc = { workspace = true }
cuprate-rpc-interface = { workspace = true }
cuprate-rpc-types = { workspace = true }

# TODO: after v1.0.0, remove unneeded dependencies.
anyhow = { workspace = true }
@@ -25,7 +25,7 @@ mod manager;
mod syncer;
mod types;

use types::{
pub use types::{
    ConcreteBlockVerifierService, ConcreteTxVerifierService, ConsensusBlockchainReadHandle,
};
@@ -8,17 +8,16 @@ use std::{
};

use monero_serai::{block::Block, transaction::Transaction};
use rayon::prelude::*;
use tokio::sync::{mpsc, oneshot};
use tower::{Service, ServiceExt};

use cuprate_blockchain::service::BlockchainReadHandle;
use cuprate_consensus::transactions::new_tx_verification_data;
use cuprate_helper::cast::usize_to_u64;
use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainResponse},
    Chain,
use cuprate_txpool::service::{
    interface::{TxpoolReadRequest, TxpoolReadResponse},
    TxpoolReadHandle,
};
use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse};

use crate::{
    blockchain::manager::{BlockchainManagerCommand, IncomingBlockOk},

@@ -38,7 +37,7 @@ pub enum IncomingBlockError {
    ///
    /// The inner values are the block hash and the indexes of the missing txs in the block.
    #[error("Unknown transactions in block.")]
    UnknownTransactions([u8; 32], Vec<u64>),
    UnknownTransactions([u8; 32], Vec<usize>),
    /// We are missing the block's parent.
    #[error("The block has an unknown parent.")]
    Orphan,
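The `UnknownTransactions` variant now carries the `Vec<usize>` indexes of the missing transactions rather than `u64` values, so a caller that wants to re-request those transactions has to map the indexes back onto the block's ordered transaction-hash list. A minimal sketch of that mapping (the helper name is hypothetical, not part of this diff):

    fn missing_tx_hashes(block_tx_hashes: &[[u8; 32]], missing: &[usize]) -> Vec<[u8; 32]> {
        // Each index points into the block's ordered list of transaction hashes.
        missing.iter().map(|&i| block_tx_hashes[i]).collect()
    }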
@@ -59,8 +58,9 @@ pub enum IncomingBlockError {
/// - the block's parent is unknown
pub async fn handle_incoming_block(
    block: Block,
    given_txs: Vec<Transaction>,
    mut given_txs: HashMap<[u8; 32], Transaction>,
    blockchain_read_handle: &mut BlockchainReadHandle,
    txpool_read_handle: &mut TxpoolReadHandle,
) -> Result<IncomingBlockOk, IncomingBlockError> {
    /// A [`HashSet`] of block hashes that the blockchain manager is currently handling.
    ///
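`handle_incoming_block` now receives the relayed transactions as a `HashMap` keyed by transaction hash instead of a plain `Vec`, so it can pull out exactly the transactions the tx-pool reports as missing. A sketch of how a caller might build that map, assuming monero-serai's `Transaction::hash` (the helper name is hypothetical):

    use std::collections::HashMap;

    use monero_serai::transaction::Transaction;

    fn key_txs_by_hash(txs: Vec<Transaction>) -> HashMap<[u8; 32], Transaction> {
        // Key each transaction that arrived alongside the block by its hash.
        txs.into_iter().map(|tx| (tx.hash(), tx)).collect()
    }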
@@ -72,7 +72,12 @@ pub async fn handle_incoming_block(
    /// which are also more expensive than `Mutex`s.
    static BLOCKS_BEING_HANDLED: LazyLock<Mutex<HashSet<[u8; 32]>>> =
        LazyLock::new(|| Mutex::new(HashSet::new()));
    // FIXME: we should look in the tx-pool for txs when that is ready.

    if given_txs.len() > block.transactions.len() {
        return Err(IncomingBlockError::InvalidBlock(anyhow::anyhow!(
            "Too many transactions given for block"
        )));
    }

    if !block_exists(block.header.previous, blockchain_read_handle)
        .await

@@ -90,23 +95,36 @@ pub async fn handle_incoming_block(
        return Ok(IncomingBlockOk::AlreadyHave);
    }

    // TODO: remove this when we have a working tx-pool.
    if given_txs.len() != block.transactions.len() {
        return Err(IncomingBlockError::UnknownTransactions(
            block_hash,
            (0..usize_to_u64(block.transactions.len())).collect(),
        ));
    }
    let TxpoolReadResponse::TxsForBlock { mut txs, missing } = txpool_read_handle
        .ready()
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
        .call(TxpoolReadRequest::TxsForBlock(block.transactions.clone()))
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
    else {
        unreachable!()
    };

    // TODO: check we actually got given the right txs.
    let prepped_txs = given_txs
        .into_par_iter()
        .map(|tx| {
            let tx = new_tx_verification_data(tx)?;
            Ok((tx.tx_hash, tx))
        })
        .collect::<Result<_, anyhow::Error>>()
        .map_err(IncomingBlockError::InvalidBlock)?;
    if !missing.is_empty() {
        let needed_hashes = missing.iter().map(|index| block.transactions[*index]);

        for needed_hash in needed_hashes {
            let Some(tx) = given_txs.remove(&needed_hash) else {
                // We return back the indexes of all txs missing from our pool, not taking into account the txs
                // that were given with the block, as these txs will be dropped. It is not worth it to try to add
                // these txs to the pool as this will only happen with a misbehaving peer or if the txpool reaches
                // the size limit.
                return Err(IncomingBlockError::UnknownTransactions(block_hash, missing));
            };

            txs.insert(
                needed_hash,
                new_tx_verification_data(tx)
                    .map_err(|e| IncomingBlockError::InvalidBlock(e.into()))?,
            );
        }
    }

    let Some(incoming_block_tx) = COMMAND_TX.get() else {
        // We could still be starting up the blockchain manager.
@@ -119,28 +137,37 @@ pub async fn handle_incoming_block(
        return Ok(IncomingBlockOk::AlreadyHave);
    }

    // From this point on we MUST not early return without removing the block hash from `BLOCKS_BEING_HANDLED`.
    // We must remove the block hash from `BLOCKS_BEING_HANDLED`.
    let _guard = {
        struct RemoveFromBlocksBeingHandled {
            block_hash: [u8; 32],
        }
        impl Drop for RemoveFromBlocksBeingHandled {
            fn drop(&mut self) {
                BLOCKS_BEING_HANDLED
                    .lock()
                    .unwrap()
                    .remove(&self.block_hash);
            }
        }
        RemoveFromBlocksBeingHandled { block_hash }
    };

    let (response_tx, response_rx) = oneshot::channel();

    incoming_block_tx
        .send(BlockchainManagerCommand::AddBlock {
            block,
            prepped_txs,
            prepped_txs: txs,
            response_tx,
        })
        .await
        .expect("TODO: don't actually panic here, an err means we are shutting down");

    let res = response_rx
    response_rx
        .await
        .expect("The blockchain manager will always respond")
        .map_err(IncomingBlockError::InvalidBlock);

    // Remove the block hash from the blocks being handled.
    BLOCKS_BEING_HANDLED.lock().unwrap().remove(&block_hash);

    res
        .map_err(IncomingBlockError::InvalidBlock)
}

/// Check if we have a block with the given hash.
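The hunk above replaces the manual "remove the hash before every return" bookkeeping with a drop guard: cleanup is tied to `Drop`, so every exit path, including the early `return`s and `?`s later in the function, removes the hash from the shared set. A self-contained sketch of the same idea with assumed names:

    use std::collections::HashSet;
    use std::sync::{LazyLock, Mutex};

    static IN_FLIGHT: LazyLock<Mutex<HashSet<[u8; 32]>>> =
        LazyLock::new(|| Mutex::new(HashSet::new()));

    struct InFlightGuard {
        hash: [u8; 32],
    }

    impl Drop for InFlightGuard {
        fn drop(&mut self) {
            // Runs however the owning scope exits: normal return, `?`, or panic.
            IN_FLIGHT.lock().unwrap().remove(&self.hash);
        }
    }

    fn handle(hash: [u8; 32]) -> Result<(), ()> {
        if !IN_FLIGHT.lock().unwrap().insert(hash) {
            return Err(()); // already being handled elsewhere
        }
        let _guard = InFlightGuard { hash };

        // ... work that may early-return; the hash is still removed on every path ...
        Ok(())
    }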
@@ -18,6 +18,7 @@ use cuprate_p2p::{
    BroadcastSvc, NetworkInterface,
};
use cuprate_p2p_core::ClearNet;
use cuprate_txpool::service::TxpoolWriteHandle;
use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainResponse},
    Chain, TransactionVerificationData,

@@ -46,6 +47,7 @@ pub async fn init_blockchain_manager(
    clearnet_interface: NetworkInterface<ClearNet>,
    blockchain_write_handle: BlockchainWriteHandle,
    blockchain_read_handle: BlockchainReadHandle,
    txpool_write_handle: TxpoolWriteHandle,
    mut blockchain_context_service: BlockChainContextService,
    block_verifier_service: ConcreteBlockVerifierService,
    block_downloader_config: BlockDownloaderConfig,

@@ -80,6 +82,7 @@ pub async fn init_blockchain_manager(
    let manager = BlockchainManager {
        blockchain_write_handle,
        blockchain_read_handle,
        txpool_write_handle,
        blockchain_context_service,
        cached_blockchain_context: blockchain_context.unchecked_blockchain_context().clone(),
        block_verifier_service,

@@ -102,6 +105,8 @@ pub struct BlockchainManager {
    blockchain_write_handle: BlockchainWriteHandle,
    /// A [`BlockchainReadHandle`].
    blockchain_read_handle: BlockchainReadHandle,
    /// A [`TxpoolWriteHandle`].
    txpool_write_handle: TxpoolWriteHandle,
    // TODO: Improve the API of the cache service.
    // TODO: rename the cache service -> `BlockchainContextService`.
    /// The blockchain context cache, this caches the current state of the blockchain to quickly calculate/retrieve
@@ -1,7 +1,10 @@
//! The blockchain manager handler functions.
use bytes::Bytes;
use futures::{TryFutureExt, TryStreamExt};
use monero_serai::{block::Block, transaction::Transaction};
use monero_serai::{
    block::Block,
    transaction::{Input, Transaction},
};
use rayon::prelude::*;
use std::ops::ControlFlow;
use std::{collections::HashMap, sync::Arc};

@@ -17,16 +20,14 @@ use cuprate_consensus::{
use cuprate_consensus_context::NewBlockData;
use cuprate_helper::cast::usize_to_u64;
use cuprate_p2p::{block_downloader::BlockBatch, constants::LONG_BAN, BroadcastRequest};
use cuprate_txpool::service::interface::TxpoolWriteRequest;
use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest},
    AltBlockInformation, HardFork, TransactionVerificationData, VerifiedBlockInformation,
};

use crate::blockchain::manager::commands::IncomingBlockOk;
use crate::{
    blockchain::{
        manager::commands::BlockchainManagerCommand, types::ConsensusBlockchainReadHandle,
    },
    blockchain::manager::commands::{BlockchainManagerCommand, IncomingBlockOk},
    constants::PANIC_CRITICAL_SERVICE_ERROR,
    signals::REORG_LOCK,
};

@@ -434,6 +435,18 @@ impl super::BlockchainManager {
        &mut self,
        verified_block: VerifiedBlockInformation,
    ) {
        // FIXME: this is pretty inefficient, we should probably return the KI map created in the consensus crate.
        let spent_key_images = verified_block
            .txs
            .iter()
            .flat_map(|tx| {
                tx.tx.prefix().inputs.iter().map(|input| match input {
                    Input::ToKey { key_image, .. } => key_image.compress().0,
                    Input::Gen(_) => unreachable!(),
                })
            })
            .collect::<Vec<[u8; 32]>>();

        self.blockchain_context_service
            .ready()
            .await
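The FIXME above notes this extraction is inefficient; independent of that, the shape of the loop reads cleanly on its own. A hedged sketch of the same key-image collection pulled out as a helper over plain monero-serai transactions (the helper name is assumed; the `Input` matching mirrors the diff):

    use monero_serai::transaction::{Input, Transaction};

    /// Collect every key image spent by the given (non-coinbase) transactions.
    fn spent_key_images(txs: &[Transaction]) -> Vec<[u8; 32]> {
        txs.iter()
            .flat_map(|tx| {
                tx.prefix().inputs.iter().map(|input| match input {
                    Input::ToKey { key_image, .. } => key_image.compress().0,
                    // A block's body never contains coinbase (`Gen`) inputs.
                    Input::Gen(_) => unreachable!(),
                })
            })
            .collect()
    }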
@@ -472,6 +485,14 @@ impl super::BlockchainManager {
        };

        self.cached_blockchain_context = blockchain_context.unchecked_blockchain_context().clone();

        self.txpool_write_handle
            .ready()
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR)
            .call(TxpoolWriteRequest::NewBlock { spent_key_images })
            .await
            .expect(PANIC_CRITICAL_SERVICE_ERROR);
    }
}
@@ -1,11 +1,10 @@
// FIXME: This whole module is not great and should be rewritten when the PeerSet is made.
use std::{pin::pin, sync::Arc, time::Duration};
use std::{sync::Arc, time::Duration};

use futures::StreamExt;
use tokio::time::interval;
use tokio::{
    sync::{mpsc, Notify},
    time::sleep,
    time::interval,
};
use tower::{Service, ServiceExt};
use tracing::instrument;

@@ -13,7 +12,7 @@ use tracing::instrument;
use cuprate_consensus::{BlockChainContext, BlockChainContextRequest, BlockChainContextResponse};
use cuprate_p2p::{
    block_downloader::{BlockBatch, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse},
    NetworkInterface,
    NetworkInterface, PeerSetRequest, PeerSetResponse,
};
use cuprate_p2p_core::ClearNet;

@@ -29,15 +28,11 @@ pub enum SyncerError {
}

/// The syncer tasks that makes sure we are fully synchronised with our connected peers.
#[expect(
    clippy::significant_drop_tightening,
    reason = "Client pool which will be removed"
)]
#[instrument(level = "debug", skip_all)]
pub async fn syncer<C, CN>(
    mut context_svc: C,
    our_chain: CN,
    clearnet_interface: NetworkInterface<ClearNet>,
    mut clearnet_interface: NetworkInterface<ClearNet>,
    incoming_block_batch_tx: mpsc::Sender<BlockBatch>,
    stop_current_block_downloader: Arc<Notify>,
    block_downloader_config: BlockDownloaderConfig,

@@ -68,8 +63,6 @@ where
        unreachable!();
    };

    let client_pool = clearnet_interface.client_pool();

    tracing::debug!("Waiting for new sync info in top sync channel");

    loop {

@@ -80,9 +73,20 @@ where
        check_update_blockchain_context(&mut context_svc, &mut blockchain_ctx).await?;
        let raw_blockchain_context = blockchain_ctx.unchecked_blockchain_context();

        if !client_pool.contains_client_with_more_cumulative_difficulty(
            raw_blockchain_context.cumulative_difficulty,
        ) {
        let PeerSetResponse::MostPoWSeen {
            cumulative_difficulty,
            ..
        } = clearnet_interface
            .peer_set()
            .ready()
            .await?
            .call(PeerSetRequest::MostPoWSeen)
            .await?
        else {
            unreachable!();
        };

        if cumulative_difficulty <= raw_blockchain_context.cumulative_difficulty {
            continue;
        }
@@ -1,13 +1,7 @@
use std::task::{Context, Poll};

use futures::future::BoxFuture;
use futures::{FutureExt, TryFutureExt};
use tower::{util::MapErr, Service};
use tower::util::MapErr;

use cuprate_blockchain::{cuprate_database::RuntimeError, service::BlockchainReadHandle};
use cuprate_consensus::{BlockChainContextService, BlockVerifierService, TxVerifierService};
use cuprate_p2p::block_downloader::{ChainSvcRequest, ChainSvcResponse};
use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse};

/// The [`BlockVerifierService`] with all generic types defined.
pub type ConcreteBlockVerifierService = BlockVerifierService<
@@ -9,6 +9,10 @@
    unused_variables,
    clippy::needless_pass_by_value,
    clippy::unused_async,
    clippy::diverging_sub_expression,
    unused_mut,
    clippy::let_unit_value,
    clippy::needless_pass_by_ref_mut,
    reason = "TODO: remove after v1.0.0"
)]
@@ -2,4 +2,7 @@
//!
//! Will handle initiating the P2P and contains a protocol request handler.

mod network_address;
pub mod request_handler;

pub use network_address::CrossNetworkInternalPeerId;
binaries/cuprated/src/p2p/network_address.rs (new file, 16 lines)

@@ -0,0 +1,16 @@
use std::net::SocketAddr;

use cuprate_p2p_core::{client::InternalPeerID, ClearNet, NetworkZone};

/// An identifier for a P2P peer on any network.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum CrossNetworkInternalPeerId {
    /// A clear-net peer.
    ClearNet(InternalPeerID<<ClearNet as NetworkZone>::Addr>),
}

impl From<InternalPeerID<<ClearNet as NetworkZone>::Addr>> for CrossNetworkInternalPeerId {
    fn from(addr: InternalPeerID<<ClearNet as NetworkZone>::Addr>) -> Self {
        Self::ClearNet(addr)
    }
}
@@ -3,6 +3,7 @@
//! Will contain the code to initiate the RPC and a request handler.

mod bin;
mod constants;
mod handler;
mod json;
mod other;
binaries/cuprated/src/rpc/constants.rs (new file, 5 lines)

@@ -0,0 +1,5 @@
//! Constants used within RPC.

/// The string message used in RPC response fields for when
/// `cuprated` does not support a field that `monerod` has.
pub(super) const FIELD_NOT_SUPPORTED: &str = "`cuprated` does not support this field.";
@@ -8,6 +8,8 @@ use monero_serai::block::Block;
use tower::Service;

use cuprate_blockchain::service::{BlockchainReadHandle, BlockchainWriteHandle};
use cuprate_consensus::BlockChainContextService;
use cuprate_pruning::PruningSeed;
use cuprate_rpc_interface::RpcHandler;
use cuprate_rpc_types::{
    bin::{BinRequest, BinResponse},

@@ -15,6 +17,7 @@ use cuprate_rpc_types::{
    other::{OtherRequest, OtherResponse},
};
use cuprate_txpool::service::{TxpoolReadHandle, TxpoolWriteHandle};
use cuprate_types::{AddAuxPow, AuxPow, HardFork};

use crate::rpc::{bin, json, other};

@@ -54,6 +57,32 @@ pub enum BlockchainManagerRequest {
    /// The height of the next block in the chain.
    TargetHeight,

    /// Generate new blocks.
    ///
    /// This request is only for regtest, see RPC's `generateblocks`.
    GenerateBlocks {
        /// Number of the blocks to be generated.
        amount_of_blocks: u64,
        /// The previous block's hash.
        prev_block: [u8; 32],
        /// The starting value for the nonce.
        starting_nonce: u32,
        /// The address that will receive the coinbase reward.
        wallet_address: String,
    },

    // // TODO: the below requests actually belong to the block downloader/syncer:
    // // <https://github.com/Cuprate/cuprate/pull/320#discussion_r1811089758>
    // /// Get [`Span`] data.
    // ///
    // /// This is data that describes an active downloading process,
    // /// if we are fully synced, this will return an empty [`Vec`].
    // Spans,
    //
    /// Get the next [`PruningSeed`] needed for a pruned sync.
    NextNeededPruningSeed,
}

/// TODO: use real type when public.

@@ -69,6 +98,9 @@ pub enum BlockchainManagerResponse {
    /// Response to [`BlockchainManagerRequest::PopBlocks`]
    PopBlocks { new_height: usize },

    /// Response to [`BlockchainManagerRequest::Prune`]
    Prune(PruningSeed),

    /// Response to [`BlockchainManagerRequest::Pruned`]
    Pruned(bool),

@@ -83,6 +115,19 @@ pub enum BlockchainManagerResponse {
    /// Response to [`BlockchainManagerRequest::TargetHeight`]
    TargetHeight { height: usize },

    /// Response to [`BlockchainManagerRequest::GenerateBlocks`]
    GenerateBlocks {
        /// Hashes of the blocks generated.
        blocks: Vec<[u8; 32]>,
        /// The new top height. (TODO: is this correct?)
        height: usize,
    },

    // /// Response to [`BlockchainManagerRequest::Spans`].
    // Spans(Vec<Span<Z::Addr>>),
    /// Response to [`BlockchainManagerRequest::NextNeededPruningSeed`].
    NextNeededPruningSeed(PruningSeed),
}

/// TODO: use real type when public.

@@ -102,6 +147,9 @@ pub struct CupratedRpcHandler {
    /// Read handle to the blockchain database.
    pub blockchain_read: BlockchainReadHandle,

    /// Handle to the blockchain context service.
    pub blockchain_context: BlockChainContextService,

    /// Handle to the blockchain manager.
    pub blockchain_manager: BlockchainManagerHandle,

@@ -117,6 +165,7 @@ impl CupratedRpcHandler {
    pub const fn new(
        restricted: bool,
        blockchain_read: BlockchainReadHandle,
        blockchain_context: BlockChainContextService,
        blockchain_manager: BlockchainManagerHandle,
        txpool_read: TxpoolReadHandle,
        txpool_manager: std::convert::Infallible,

@@ -124,6 +173,7 @@ impl CupratedRpcHandler {
        Self {
            restricted,
            blockchain_read,
            blockchain_context,
            blockchain_manager,
            txpool_read,
            txpool_manager,
@@ -2,26 +2,33 @@
use std::convert::Infallible;

use anyhow::Error;
use anyhow::{anyhow, Error};
use tower::ServiceExt;

use cuprate_helper::cast::usize_to_u64;
use cuprate_p2p_core::{
    services::{AddressBookRequest, AddressBookResponse},
    types::{BanState, ConnectionId},
    AddressBook, NetworkZone,
};
use cuprate_pruning::PruningSeed;
use cuprate_rpc_types::misc::{ConnectionInfo, Span};

use crate::rpc::constants::FIELD_NOT_SUPPORTED;

// FIXME: use `anyhow::Error` over `tower::BoxError` in address book.

/// [`AddressBookRequest::PeerlistSize`]
pub(super) async fn peerlist_size<Z: NetworkZone>(
pub(crate) async fn peerlist_size<Z: NetworkZone>(
    address_book: &mut impl AddressBook<Z>,
) -> Result<(u64, u64), Error> {
    let AddressBookResponse::PeerlistSize { white, grey } = address_book
        .ready()
        .await
        .expect("TODO")
        .map_err(|e| anyhow!(e))?
        .call(AddressBookRequest::PeerlistSize)
        .await
        .expect("TODO")
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

@@ -29,17 +36,74 @@ pub(super) async fn peerlist_size<Z: NetworkZone>(
    Ok((usize_to_u64(white), usize_to_u64(grey)))
}

/// [`AddressBookRequest::ConnectionInfo`]
pub(crate) async fn connection_info<Z: NetworkZone>(
    address_book: &mut impl AddressBook<Z>,
) -> Result<Vec<ConnectionInfo>, Error> {
    let AddressBookResponse::ConnectionInfo(vec) = address_book
        .ready()
        .await
        .map_err(|e| anyhow!(e))?
        .call(AddressBookRequest::ConnectionInfo)
        .await
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

    // FIXME: impl this map somewhere instead of inline.
    let vec = vec
        .into_iter()
        .map(|info| {
            let (ip, port) = match info.socket_addr {
                Some(socket) => (socket.ip().to_string(), socket.port().to_string()),
                None => (String::new(), String::new()),
            };

            ConnectionInfo {
                address: info.address.to_string(),
                address_type: info.address_type,
                avg_download: info.avg_download,
                avg_upload: info.avg_upload,
                connection_id: String::from(ConnectionId::DEFAULT_STR),
                current_download: info.current_download,
                current_upload: info.current_upload,
                height: info.height,
                host: info.host,
                incoming: info.incoming,
                ip,
                live_time: info.live_time,
                localhost: info.localhost,
                local_ip: info.local_ip,
                peer_id: hex::encode(info.peer_id.to_ne_bytes()),
                port,
                pruning_seed: info.pruning_seed.compress(),
                recv_count: info.recv_count,
                recv_idle_time: info.recv_idle_time,
                rpc_credits_per_hash: info.rpc_credits_per_hash,
                rpc_port: info.rpc_port,
                send_count: info.send_count,
                send_idle_time: info.send_idle_time,
                state: info.state,
                support_flags: info.support_flags,
            }
        })
        .collect();

    Ok(vec)
}
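The `// FIXME: impl this map somewhere instead of inline.` comment suggests moving this conversion out of the helper. A minimal sketch of that refactor using stand-in types (the real `ConnectionInfo` has many more fields; the names here are illustrative only, not cuprate's actual API):

    use std::net::SocketAddr;

    struct InternalInfo {
        socket_addr: Option<SocketAddr>,
        live_time: u64,
    }

    struct RpcConnectionInfo {
        ip: String,
        port: String,
        live_time: u64,
    }

    impl From<InternalInfo> for RpcConnectionInfo {
        fn from(info: InternalInfo) -> Self {
            // The `Option<SocketAddr>` split mirrors the inline map above.
            let (ip, port) = match info.socket_addr {
                Some(socket) => (socket.ip().to_string(), socket.port().to_string()),
                None => (String::new(), String::new()),
            };
            Self { ip, port, live_time: info.live_time }
        }
    }

With such a conversion in place, the helper body could reduce to `vec.into_iter().map(Into::into).collect()`.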
/// [`AddressBookRequest::ConnectionCount`]
pub(super) async fn connection_count<Z: NetworkZone>(
pub(crate) async fn connection_count<Z: NetworkZone>(
    address_book: &mut impl AddressBook<Z>,
) -> Result<(u64, u64), Error> {
    let AddressBookResponse::ConnectionCount { incoming, outgoing } = address_book
        .ready()
        .await
        .expect("TODO")
        .map_err(|e| anyhow!(e))?
        .call(AddressBookRequest::ConnectionCount)
        .await
        .expect("TODO")
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

@@ -48,17 +112,17 @@ pub(super) async fn connection_count<Z: NetworkZone>(
}

/// [`AddressBookRequest::SetBan`]
pub(super) async fn set_ban<Z: NetworkZone>(
pub(crate) async fn set_ban<Z: NetworkZone>(
    address_book: &mut impl AddressBook<Z>,
    peer: cuprate_p2p_core::ban::SetBan<Z::Addr>,
    set_ban: cuprate_p2p_core::types::SetBan<Z::Addr>,
) -> Result<(), Error> {
    let AddressBookResponse::Ok = address_book
        .ready()
        .await
        .expect("TODO")
        .call(AddressBookRequest::SetBan(peer))
        .map_err(|e| anyhow!(e))?
        .call(AddressBookRequest::SetBan(set_ban))
        .await
        .expect("TODO")
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

@@ -67,17 +131,17 @@ pub(super) async fn set_ban<Z: NetworkZone>(
}

/// [`AddressBookRequest::GetBan`]
pub(super) async fn get_ban<Z: NetworkZone>(
pub(crate) async fn get_ban<Z: NetworkZone>(
    address_book: &mut impl AddressBook<Z>,
    peer: Z::Addr,
) -> Result<Option<std::time::Instant>, Error> {
    let AddressBookResponse::GetBan { unban_instant } = address_book
        .ready()
        .await
        .expect("TODO")
        .map_err(|e| anyhow!(e))?
        .call(AddressBookRequest::GetBan(peer))
        .await
        .expect("TODO")
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

@@ -86,19 +150,19 @@ pub(super) async fn get_ban<Z: NetworkZone>(
}

/// [`AddressBookRequest::GetBans`]
pub(super) async fn get_bans<Z: NetworkZone>(
pub(crate) async fn get_bans<Z: NetworkZone>(
    address_book: &mut impl AddressBook<Z>,
) -> Result<(), Error> {
) -> Result<Vec<BanState<Z::Addr>>, Error> {
    let AddressBookResponse::GetBans(bans) = address_book
        .ready()
        .await
        .expect("TODO")
        .map_err(|e| anyhow!(e))?
        .call(AddressBookRequest::GetBans)
        .await
        .expect("TODO")
        .map_err(|e| anyhow!(e))?
    else {
        unreachable!();
    };

    Ok(todo!())
    Ok(bans)
}
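Every helper in this file follows the same tower request pattern: wait for the service with `ready()`, issue one request with `call()`, then destructure the single response variant that request can produce, marking all other variants `unreachable!()`. A generic, self-contained sketch of that pattern with made-up request and response enums (not cuprate's real types):

    use anyhow::anyhow;
    use tower::{Service, ServiceExt};

    enum Request {
        ConnectionCount,
        PeerlistSize,
    }

    enum Response {
        ConnectionCount { incoming: usize, outgoing: usize },
        PeerlistSize { white: usize, grey: usize },
    }

    async fn connection_count<S>(svc: &mut S) -> Result<(usize, usize), anyhow::Error>
    where
        S: Service<Request, Response = Response, Error = tower::BoxError>,
    {
        let Response::ConnectionCount { incoming, outgoing } = svc
            .ready()                          // wait until the service can accept a request
            .await
            .map_err(|e| anyhow!(e))?         // convert the boxed error, as the helpers above do
            .call(Request::ConnectionCount)
            .await
            .map_err(|e| anyhow!(e))?
        else {
            // This request only ever yields `Response::ConnectionCount`.
            unreachable!();
        };

        Ok((incoming, outgoing))
    }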
@@ -1,24 +1,61 @@
//! Functions for [`BlockchainReadRequest`].

use std::{
    collections::{HashMap, HashSet},
    collections::{BTreeMap, HashMap, HashSet},
    ops::Range,
};

use anyhow::Error;
use cuprate_blockchain::service::BlockchainReadHandle;
use monero_serai::block::Block;
use tower::{Service, ServiceExt};

use cuprate_blockchain::{service::BlockchainReadHandle, types::AltChainInfo};
use cuprate_helper::cast::{u64_to_usize, usize_to_u64};
use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainResponse},
    Chain, CoinbaseTxSum, ExtendedBlockHeader, MinerData, OutputHistogramEntry,
    OutputHistogramInput, OutputOnChain,
    Chain, ChainInfo, CoinbaseTxSum, ExtendedBlockHeader, HardFork, MinerData,
    OutputHistogramEntry, OutputHistogramInput, OutputOnChain,
};

/// [`BlockchainReadRequest::Block`].
pub(crate) async fn block(
    blockchain_read: &mut BlockchainReadHandle,
    height: u64,
) -> Result<Block, Error> {
    let BlockchainResponse::Block(block) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::Block {
            height: u64_to_usize(height),
        })
        .await?
    else {
        unreachable!();
    };

    Ok(block)
}

/// [`BlockchainReadRequest::BlockByHash`].
pub(crate) async fn block_by_hash(
    blockchain_read: &mut BlockchainReadHandle,
    hash: [u8; 32],
) -> Result<Block, Error> {
    let BlockchainResponse::Block(block) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::BlockByHash(hash))
        .await?
    else {
        unreachable!();
    };

    Ok(block)
}

/// [`BlockchainReadRequest::BlockExtendedHeader`].
pub(super) async fn block_extended_header(
    mut blockchain_read: BlockchainReadHandle,
pub(crate) async fn block_extended_header(
    blockchain_read: &mut BlockchainReadHandle,
    height: u64,
) -> Result<ExtendedBlockHeader, Error> {
    let BlockchainResponse::BlockExtendedHeader(header) = blockchain_read

@@ -36,8 +73,8 @@ pub(super) async fn block_extended_header(
}

/// [`BlockchainReadRequest::BlockHash`].
pub(super) async fn block_hash(
    mut blockchain_read: BlockchainReadHandle,
pub(crate) async fn block_hash(
    blockchain_read: &mut BlockchainReadHandle,
    height: u64,
    chain: Chain,
) -> Result<[u8; 32], Error> {

@@ -57,8 +94,8 @@ pub(super) async fn block_hash(
}

/// [`BlockchainReadRequest::FindBlock`].
pub(super) async fn find_block(
    mut blockchain_read: BlockchainReadHandle,
pub(crate) async fn find_block(
    blockchain_read: &mut BlockchainReadHandle,
    block_hash: [u8; 32],
) -> Result<Option<(Chain, usize)>, Error> {
    let BlockchainResponse::FindBlock(option) = blockchain_read

@@ -74,8 +111,8 @@ pub(super) async fn find_block(
}

/// [`BlockchainReadRequest::FilterUnknownHashes`].
pub(super) async fn filter_unknown_hashes(
    mut blockchain_read: BlockchainReadHandle,
pub(crate) async fn filter_unknown_hashes(
    blockchain_read: &mut BlockchainReadHandle,
    block_hashes: HashSet<[u8; 32]>,
) -> Result<HashSet<[u8; 32]>, Error> {
    let BlockchainResponse::FilterUnknownHashes(output) = blockchain_read

@@ -91,8 +128,8 @@ pub(super) async fn filter_unknown_hashes(
}

/// [`BlockchainReadRequest::BlockExtendedHeaderInRange`]
pub(super) async fn block_extended_header_in_range(
    mut blockchain_read: BlockchainReadHandle,
pub(crate) async fn block_extended_header_in_range(
    blockchain_read: &mut BlockchainReadHandle,
    range: Range<usize>,
    chain: Chain,
) -> Result<Vec<ExtendedBlockHeader>, Error> {

@@ -111,8 +148,8 @@ pub(super) async fn block_extended_header_in_range(
}

/// [`BlockchainReadRequest::ChainHeight`].
pub(super) async fn chain_height(
    mut blockchain_read: BlockchainReadHandle,
pub(crate) async fn chain_height(
    blockchain_read: &mut BlockchainReadHandle,
) -> Result<(u64, [u8; 32]), Error> {
    let BlockchainResponse::ChainHeight(height, hash) = blockchain_read
        .ready()

@@ -127,8 +164,8 @@ pub(super) async fn chain_height(
}

/// [`BlockchainReadRequest::GeneratedCoins`].
pub(super) async fn generated_coins(
    mut blockchain_read: BlockchainReadHandle,
pub(crate) async fn generated_coins(
    blockchain_read: &mut BlockchainReadHandle,
    block_height: u64,
) -> Result<u64, Error> {
    let BlockchainResponse::GeneratedCoins(generated_coins) = blockchain_read

@@ -146,8 +183,8 @@ pub(super) async fn generated_coins(
}

/// [`BlockchainReadRequest::Outputs`]
pub(super) async fn outputs(
    mut blockchain_read: BlockchainReadHandle,
pub(crate) async fn outputs(
    blockchain_read: &mut BlockchainReadHandle,
    outputs: HashMap<u64, HashSet<u64>>,
) -> Result<HashMap<u64, HashMap<u64, OutputOnChain>>, Error> {
    let BlockchainResponse::Outputs(outputs) = blockchain_read

@@ -163,8 +200,8 @@ pub(super) async fn outputs(
}

/// [`BlockchainReadRequest::NumberOutputsWithAmount`]
pub(super) async fn number_outputs_with_amount(
    mut blockchain_read: BlockchainReadHandle,
pub(crate) async fn number_outputs_with_amount(
    blockchain_read: &mut BlockchainReadHandle,
    output_amounts: Vec<u64>,
) -> Result<HashMap<u64, usize>, Error> {
    let BlockchainResponse::NumberOutputsWithAmount(map) = blockchain_read

@@ -182,8 +219,8 @@ pub(super) async fn number_outputs_with_amount(
}

/// [`BlockchainReadRequest::KeyImagesSpent`]
pub(super) async fn key_images_spent(
    mut blockchain_read: BlockchainReadHandle,
pub(crate) async fn key_images_spent(
    blockchain_read: &mut BlockchainReadHandle,
    key_images: HashSet<[u8; 32]>,
) -> Result<bool, Error> {
    let BlockchainResponse::KeyImagesSpent(is_spent) = blockchain_read

@@ -199,8 +236,8 @@ pub(super) async fn key_images_spent(
}

/// [`BlockchainReadRequest::CompactChainHistory`]
pub(super) async fn compact_chain_history(
    mut blockchain_read: BlockchainReadHandle,
pub(crate) async fn compact_chain_history(
    blockchain_read: &mut BlockchainReadHandle,
) -> Result<(Vec<[u8; 32]>, u128), Error> {
    let BlockchainResponse::CompactChainHistory {
        block_ids,

@@ -218,8 +255,8 @@ pub(super) async fn compact_chain_history(
}

/// [`BlockchainReadRequest::FindFirstUnknown`]
pub(super) async fn find_first_unknown(
    mut blockchain_read: BlockchainReadHandle,
pub(crate) async fn find_first_unknown(
    blockchain_read: &mut BlockchainReadHandle,
    hashes: Vec<[u8; 32]>,
) -> Result<Option<(usize, u64)>, Error> {
    let BlockchainResponse::FindFirstUnknown(resp) = blockchain_read

@@ -235,8 +272,8 @@ pub(super) async fn find_first_unknown(
}

/// [`BlockchainReadRequest::TotalTxCount`]
pub(super) async fn total_tx_count(
    mut blockchain_read: BlockchainReadHandle,
pub(crate) async fn total_tx_count(
    blockchain_read: &mut BlockchainReadHandle,
) -> Result<u64, Error> {
    let BlockchainResponse::TotalTxCount(tx_count) = blockchain_read
        .ready()

@@ -251,8 +288,8 @@ pub(super) async fn total_tx_count(
}

/// [`BlockchainReadRequest::DatabaseSize`]
pub(super) async fn database_size(
    mut blockchain_read: BlockchainReadHandle,
pub(crate) async fn database_size(
    blockchain_read: &mut BlockchainReadHandle,
) -> Result<(u64, u64), Error> {
    let BlockchainResponse::DatabaseSize {
        database_size,

@@ -270,8 +307,8 @@ pub(super) async fn database_size(
}

/// [`BlockchainReadRequest::OutputHistogram`]
pub(super) async fn output_histogram(
    mut blockchain_read: BlockchainReadHandle,
pub(crate) async fn output_histogram(
    blockchain_read: &mut BlockchainReadHandle,
    input: OutputHistogramInput,
) -> Result<Vec<OutputHistogramEntry>, Error> {
    let BlockchainResponse::OutputHistogram(histogram) = blockchain_read

@@ -287,8 +324,8 @@ pub(super) async fn output_histogram(
}

/// [`BlockchainReadRequest::CoinbaseTxSum`]
pub(super) async fn coinbase_tx_sum(
    mut blockchain_read: BlockchainReadHandle,
pub(crate) async fn coinbase_tx_sum(
    blockchain_read: &mut BlockchainReadHandle,
    height: u64,
    count: u64,
) -> Result<CoinbaseTxSum, Error> {

@@ -306,3 +343,35 @@ pub(super) async fn coinbase_tx_sum(
    Ok(sum)
}

/// [`BlockchainReadRequest::AltChains`]
pub(crate) async fn alt_chains(
    blockchain_read: &mut BlockchainReadHandle,
) -> Result<Vec<ChainInfo>, Error> {
    let BlockchainResponse::AltChains(vec) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::AltChains)
        .await?
    else {
        unreachable!();
    };

    Ok(vec)
}

/// [`BlockchainReadRequest::AltChainCount`]
pub(crate) async fn alt_chain_count(
    blockchain_read: &mut BlockchainReadHandle,
) -> Result<u64, Error> {
    let BlockchainResponse::AltChainCount(count) = blockchain_read
        .ready()
        .await?
        .call(BlockchainReadRequest::AltChainCount)
        .await?
    else {
        unreachable!();
    };

    Ok(usize_to_u64(count))
}
@@ -2,27 +2,30 @@

 use std::convert::Infallible;

-use anyhow::Error;
+use anyhow::{anyhow, Error};
+use monero_serai::block::Block;
 use tower::{Service, ServiceExt};

 use cuprate_consensus_context::{
     BlockChainContext, BlockChainContextRequest, BlockChainContextResponse,
     BlockChainContextService,
 };
+use cuprate_helper::cast::u64_to_usize;
 use cuprate_types::{FeeEstimate, HardFork, HardForkInfo};

 // FIXME: use `anyhow::Error` over `tower::BoxError` in blockchain context.

 /// [`BlockChainContextRequest::Context`].
-pub(super) async fn context(
-    service: &mut BlockChainContextService,
-    height: u64,
+pub(crate) async fn context(
+    blockchain_context: &mut BlockChainContextService,
 ) -> Result<BlockChainContext, Error> {
-    let BlockChainContextResponse::Context(context) = service
+    let BlockChainContextResponse::Context(context) = blockchain_context
         .ready()
         .await
-        .expect("TODO")
+        .map_err(|e| anyhow!(e))?
         .call(BlockChainContextRequest::Context)
         .await
-        .expect("TODO")
+        .map_err(|e| anyhow!(e))?
     else {
         unreachable!();
     };

@@ -31,17 +34,17 @@ pub(super) async fn context(
 }

 /// [`BlockChainContextRequest::HardForkInfo`].
-pub(super) async fn hard_fork_info(
-    service: &mut BlockChainContextService,
+pub(crate) async fn hard_fork_info(
+    blockchain_context: &mut BlockChainContextService,
     hard_fork: HardFork,
 ) -> Result<HardForkInfo, Error> {
-    let BlockChainContextResponse::HardForkInfo(hf_info) = service
+    let BlockChainContextResponse::HardForkInfo(hf_info) = blockchain_context
         .ready()
         .await
-        .expect("TODO")
+        .map_err(|e| anyhow!(e))?
         .call(BlockChainContextRequest::HardForkInfo(hard_fork))
         .await
-        .expect("TODO")
+        .map_err(|e| anyhow!(e))?
     else {
         unreachable!();
     };

@@ -50,20 +53,47 @@ pub(super) async fn hard_fork_info(
 }

 /// [`BlockChainContextRequest::FeeEstimate`].
-pub(super) async fn fee_estimate(
-    service: &mut BlockChainContextService,
+pub(crate) async fn fee_estimate(
+    blockchain_context: &mut BlockChainContextService,
     grace_blocks: u64,
 ) -> Result<FeeEstimate, Error> {
-    let BlockChainContextResponse::FeeEstimate(fee) = service
+    let BlockChainContextResponse::FeeEstimate(fee) = blockchain_context
         .ready()
         .await
-        .expect("TODO")
+        .map_err(|e| anyhow!(e))?
         .call(BlockChainContextRequest::FeeEstimate { grace_blocks })
         .await
-        .expect("TODO")
+        .map_err(|e| anyhow!(e))?
     else {
         unreachable!();
     };

     Ok(fee)
 }
+
+/// [`BlockChainContextRequest::CalculatePow`]
+pub(crate) async fn calculate_pow(
+    blockchain_context: &mut BlockChainContextService,
+    hardfork: HardFork,
+    height: u64,
+    block: Box<Block>,
+    seed_hash: [u8; 32],
+) -> Result<[u8; 32], Error> {
+    let BlockChainContextResponse::CalculatePow(hash) = blockchain_context
+        .ready()
+        .await
+        .map_err(|e| anyhow!(e))?
+        .call(BlockChainContextRequest::CalculatePow {
+            hardfork,
+            height: u64_to_usize(height),
+            block,
+            seed_hash,
+        })
+        .await
+        .map_err(|e| anyhow!(e))?
+    else {
+        unreachable!();
+    };
+
+    Ok(hash)
+}
|
||||
|
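A hedged sketch of how one of these helpers would be called from a higher-level RPC method (the wrapper shown here is illustrative, not part of the commit):

```rust
// Assumes the `fee_estimate` helper and imports defined above.
async fn get_fee_estimate_inner(
    blockchain_context: &mut BlockChainContextService,
    grace_blocks: u64,
) -> Result<FeeEstimate, Error> {
    // One `ready().await` + `call().await` round-trip, wrapped by the helper above.
    fee_estimate(blockchain_context, grace_blocks).await
}
```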
|
|
@ -5,13 +5,18 @@ use monero_serai::block::Block;
|
|||
use tower::{Service, ServiceExt};
|
||||
|
||||
use cuprate_helper::cast::{u64_to_usize, usize_to_u64};
|
||||
use cuprate_p2p_core::{types::ConnectionId, NetworkZone};
|
||||
use cuprate_pruning::PruningSeed;
|
||||
use cuprate_rpc_types::misc::Span;
|
||||
use cuprate_types::{AddAuxPow, AuxPow, HardFork};
|
||||
|
||||
use crate::rpc::handler::{
|
||||
BlockchainManagerHandle, BlockchainManagerRequest, BlockchainManagerResponse,
|
||||
use crate::rpc::{
|
||||
constants::FIELD_NOT_SUPPORTED,
|
||||
handler::{BlockchainManagerHandle, BlockchainManagerRequest, BlockchainManagerResponse},
|
||||
};
|
||||
|
||||
/// [`BlockchainManagerRequest::PopBlocks`]
|
||||
pub(super) async fn pop_blocks(
|
||||
pub(crate) async fn pop_blocks(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
amount: u64,
|
||||
) -> Result<u64, Error> {
|
||||
|
@ -30,8 +35,10 @@ pub(super) async fn pop_blocks(
|
|||
}
|
||||
|
||||
/// [`BlockchainManagerRequest::Prune`]
|
||||
pub(super) async fn prune(blockchain_manager: &mut BlockchainManagerHandle) -> Result<(), Error> {
|
||||
let BlockchainManagerResponse::Ok = blockchain_manager
|
||||
pub(crate) async fn prune(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
) -> Result<PruningSeed, Error> {
|
||||
let BlockchainManagerResponse::Prune(seed) = blockchain_manager
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainManagerRequest::Prune)
|
||||
|
@ -40,11 +47,11 @@ pub(super) async fn prune(blockchain_manager: &mut BlockchainManagerHandle) -> R
|
|||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(())
|
||||
Ok(seed)
|
||||
}
|
||||
|
||||
/// [`BlockchainManagerRequest::Pruned`]
|
||||
pub(super) async fn pruned(
|
||||
pub(crate) async fn pruned(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
) -> Result<bool, Error> {
|
||||
let BlockchainManagerResponse::Pruned(pruned) = blockchain_manager
|
||||
|
@ -60,7 +67,7 @@ pub(super) async fn pruned(
|
|||
}
|
||||
|
||||
/// [`BlockchainManagerRequest::RelayBlock`]
|
||||
pub(super) async fn relay_block(
|
||||
pub(crate) async fn relay_block(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
block: Block,
|
||||
) -> Result<(), Error> {
|
||||
|
@ -77,7 +84,7 @@ pub(super) async fn relay_block(
|
|||
}
|
||||
|
||||
/// [`BlockchainManagerRequest::Syncing`]
|
||||
pub(super) async fn syncing(
|
||||
pub(crate) async fn syncing(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
) -> Result<bool, Error> {
|
||||
let BlockchainManagerResponse::Syncing(syncing) = blockchain_manager
|
||||
|
@ -93,7 +100,7 @@ pub(super) async fn syncing(
|
|||
}
|
||||
|
||||
/// [`BlockchainManagerRequest::Synced`]
|
||||
pub(super) async fn synced(
|
||||
pub(crate) async fn synced(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
) -> Result<bool, Error> {
|
||||
let BlockchainManagerResponse::Synced(syncing) = blockchain_manager
|
||||
|
@ -109,7 +116,7 @@ pub(super) async fn synced(
|
|||
}
|
||||
|
||||
/// [`BlockchainManagerRequest::Target`]
|
||||
pub(super) async fn target(
|
||||
pub(crate) async fn target(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
) -> Result<std::time::Duration, Error> {
|
||||
let BlockchainManagerResponse::Target(target) = blockchain_manager
|
||||
|
@ -125,7 +132,7 @@ pub(super) async fn target(
|
|||
}
|
||||
|
||||
/// [`BlockchainManagerRequest::TargetHeight`]
|
||||
pub(super) async fn target_height(
|
||||
pub(crate) async fn target_height(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
) -> Result<u64, Error> {
|
||||
let BlockchainManagerResponse::TargetHeight { height } = blockchain_manager
|
||||
|
@ -139,3 +146,76 @@ pub(super) async fn target_height(
|
|||
|
||||
Ok(usize_to_u64(height))
|
||||
}
|
||||
|
||||
/// [`BlockchainManagerRequest::GenerateBlocks`]
|
||||
pub(crate) async fn generate_blocks(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
amount_of_blocks: u64,
|
||||
prev_block: [u8; 32],
|
||||
starting_nonce: u32,
|
||||
wallet_address: String,
|
||||
) -> Result<(Vec<[u8; 32]>, u64), Error> {
|
||||
let BlockchainManagerResponse::GenerateBlocks { blocks, height } = blockchain_manager
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainManagerRequest::GenerateBlocks {
|
||||
amount_of_blocks,
|
||||
prev_block,
|
||||
starting_nonce,
|
||||
wallet_address,
|
||||
})
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok((blocks, usize_to_u64(height)))
|
||||
}
|
||||
|
||||
// [`BlockchainManagerRequest::Spans`]
|
||||
pub(crate) async fn spans<Z: NetworkZone>(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
) -> Result<Vec<Span>, Error> {
|
||||
// let BlockchainManagerResponse::Spans(vec) = blockchain_manager
|
||||
// .ready()
|
||||
// .await?
|
||||
// .call(BlockchainManagerRequest::Spans)
|
||||
// .await?
|
||||
// else {
|
||||
// unreachable!();
|
||||
// };
|
||||
|
||||
let vec: Vec<cuprate_p2p_core::types::Span<Z::Addr>> = todo!();
|
||||
|
||||
// FIXME: impl this map somewhere instead of inline.
|
||||
let vec = vec
|
||||
.into_iter()
|
||||
.map(|span| Span {
|
||||
connection_id: String::from(ConnectionId::DEFAULT_STR),
|
||||
nblocks: span.nblocks,
|
||||
rate: span.rate,
|
||||
remote_address: span.remote_address.to_string(),
|
||||
size: span.size,
|
||||
speed: span.speed,
|
||||
start_block_height: span.start_block_height,
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok(vec)
|
||||
}
|
||||
|
||||
/// [`BlockchainManagerRequest::NextNeededPruningSeed`]
|
||||
pub(crate) async fn next_needed_pruning_seed(
|
||||
blockchain_manager: &mut BlockchainManagerHandle,
|
||||
) -> Result<PruningSeed, Error> {
|
||||
let BlockchainManagerResponse::NextNeededPruningSeed(seed) = blockchain_manager
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockchainManagerRequest::NextNeededPruningSeed)
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
Ok(seed)
|
||||
}
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
use std::convert::Infallible;
|
||||
|
||||
use anyhow::Error;
|
||||
use anyhow::{anyhow, Error};
|
||||
use tower::{Service, ServiceExt};
|
||||
|
||||
use cuprate_helper::cast::usize_to_u64;
|
||||
|
@ -14,15 +14,17 @@ use cuprate_txpool::{
|
|||
TxEntry,
|
||||
};
|
||||
|
||||
// FIXME: use `anyhow::Error` over `tower::BoxError` in txpool.
|
||||
|
||||
/// [`TxpoolReadRequest::Backlog`]
|
||||
pub(super) async fn backlog(txpool_read: &mut TxpoolReadHandle) -> Result<Vec<TxEntry>, Error> {
|
||||
pub(crate) async fn backlog(txpool_read: &mut TxpoolReadHandle) -> Result<Vec<TxEntry>, Error> {
|
||||
let TxpoolReadResponse::Backlog(tx_entries) = txpool_read
|
||||
.ready()
|
||||
.await
|
||||
.expect("TODO")
|
||||
.map_err(|e| anyhow!(e))?
|
||||
.call(TxpoolReadRequest::Backlog)
|
||||
.await
|
||||
.expect("TODO")
|
||||
.map_err(|e| anyhow!(e))?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
@ -31,14 +33,19 @@ pub(super) async fn backlog(txpool_read: &mut TxpoolReadHandle) -> Result<Vec<Tx
|
|||
}
|
||||
|
||||
/// [`TxpoolReadRequest::Size`]
|
||||
pub(super) async fn size(txpool_read: &mut TxpoolReadHandle) -> Result<u64, Error> {
|
||||
pub(crate) async fn size(
|
||||
txpool_read: &mut TxpoolReadHandle,
|
||||
include_sensitive_txs: bool,
|
||||
) -> Result<u64, Error> {
|
||||
let TxpoolReadResponse::Size(size) = txpool_read
|
||||
.ready()
|
||||
.await
|
||||
.expect("TODO")
|
||||
.call(TxpoolReadRequest::Size)
|
||||
.map_err(|e| anyhow!(e))?
|
||||
.call(TxpoolReadRequest::Size {
|
||||
include_sensitive_txs,
|
||||
})
|
||||
.await
|
||||
.expect("TODO")
|
||||
.map_err(|e| anyhow!(e))?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
@ -47,9 +54,17 @@ pub(super) async fn size(txpool_read: &mut TxpoolReadHandle) -> Result<u64, Erro
|
|||
}
|
||||
|
||||
/// TODO
|
||||
#[expect(clippy::needless_pass_by_ref_mut, reason = "TODO: remove after impl")]
|
||||
pub(super) async fn flush(
|
||||
txpool_read: &mut TxpoolReadHandle,
|
||||
pub(crate) async fn flush(
|
||||
txpool_manager: &mut Infallible,
|
||||
tx_hashes: Vec<[u8; 32]>,
|
||||
) -> Result<(), Error> {
|
||||
todo!();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// TODO
|
||||
pub(crate) async fn relay(
|
||||
txpool_manager: &mut Infallible,
|
||||
tx_hashes: Vec<[u8; 32]>,
|
||||
) -> Result<(), Error> {
|
||||
todo!();
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
//! Global `static`s used throughout `cuprated`.
|
||||
|
||||
use std::{
|
||||
sync::{atomic::AtomicU64, LazyLock},
|
||||
sync::LazyLock,
|
||||
time::{SystemTime, UNIX_EPOCH},
|
||||
};
|
||||
|
||||
|
|
|
@ -1,3 +1,15 @@
|
|||
//! Transaction Pool
|
||||
//!
|
||||
//! Will handle initiating the tx-pool, providing the preprocessor required for the dandelion pool.
|
||||
//! Handles initiating the tx-pool, providing the preprocessor required for the dandelion pool.
|
||||
use cuprate_consensus::BlockChainContextService;
|
||||
use cuprate_p2p::NetworkInterface;
|
||||
use cuprate_p2p_core::ClearNet;
|
||||
use cuprate_txpool::service::{TxpoolReadHandle, TxpoolWriteHandle};
|
||||
|
||||
use crate::blockchain::ConcreteTxVerifierService;
|
||||
|
||||
mod dandelion;
|
||||
mod incoming_tx;
|
||||
mod txs_being_handled;
|
||||
|
||||
pub use incoming_tx::IncomingTxHandler;
|
||||
|
|
65
binaries/cuprated/src/txpool/dandelion.rs
Normal file
65
binaries/cuprated/src/txpool/dandelion.rs
Normal file
|
@ -0,0 +1,65 @@
|
|||
use std::time::Duration;
|
||||
|
||||
use cuprate_dandelion_tower::{
|
||||
pool::DandelionPoolService, DandelionConfig, DandelionRouter, Graph,
|
||||
};
|
||||
use cuprate_p2p::NetworkInterface;
|
||||
use cuprate_p2p_core::ClearNet;
|
||||
use cuprate_txpool::service::{TxpoolReadHandle, TxpoolWriteHandle};
|
||||
|
||||
use crate::{
|
||||
p2p::CrossNetworkInternalPeerId,
|
||||
txpool::incoming_tx::{DandelionTx, TxId},
|
||||
};
|
||||
|
||||
mod diffuse_service;
|
||||
mod stem_service;
|
||||
mod tx_store;
|
||||
|
||||
/// The configuration used for [`cuprate_dandelion_tower`].
|
||||
///
|
||||
/// TODO: should we expose this to users of cuprated? probably not.
|
||||
const DANDELION_CONFIG: DandelionConfig = DandelionConfig {
|
||||
time_between_hop: Duration::from_millis(175),
|
||||
epoch_duration: Duration::from_secs(10 * 60),
|
||||
fluff_probability: 0.12,
|
||||
graph: Graph::FourRegular,
|
||||
};
|
||||
|
||||
/// A [`DandelionRouter`] with all generic types defined.
|
||||
type ConcreteDandelionRouter = DandelionRouter<
|
||||
stem_service::OutboundPeerStream,
|
||||
diffuse_service::DiffuseService,
|
||||
CrossNetworkInternalPeerId,
|
||||
stem_service::StemPeerService<ClearNet>,
|
||||
DandelionTx,
|
||||
>;
|
||||
|
||||
/// Starts the dandelion pool manager task and returns a handle to send txs to broadcast.
|
||||
pub fn start_dandelion_pool_manager(
|
||||
router: ConcreteDandelionRouter,
|
||||
txpool_read_handle: TxpoolReadHandle,
|
||||
txpool_write_handle: TxpoolWriteHandle,
|
||||
) -> DandelionPoolService<DandelionTx, TxId, CrossNetworkInternalPeerId> {
|
||||
cuprate_dandelion_tower::pool::start_dandelion_pool_manager(
|
||||
// TODO: make this constant configurable?
|
||||
32,
|
||||
router,
|
||||
tx_store::TxStoreService {
|
||||
txpool_read_handle,
|
||||
txpool_write_handle,
|
||||
},
|
||||
DANDELION_CONFIG,
|
||||
)
|
||||
}
|
||||
|
||||
/// Creates a [`DandelionRouter`] from a [`NetworkInterface`].
|
||||
pub fn dandelion_router(clear_net: NetworkInterface<ClearNet>) -> ConcreteDandelionRouter {
|
||||
DandelionRouter::new(
|
||||
diffuse_service::DiffuseService {
|
||||
clear_net_broadcast_service: clear_net.broadcast_svc(),
|
||||
},
|
||||
stem_service::OutboundPeerStream::new(clear_net),
|
||||
DANDELION_CONFIG,
|
||||
)
|
||||
}
|
44
binaries/cuprated/src/txpool/dandelion/diffuse_service.rs
Normal file
44
binaries/cuprated/src/txpool/dandelion/diffuse_service.rs
Normal file
|
@ -0,0 +1,44 @@
|
|||
use std::{
|
||||
future::{ready, Ready},
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use futures::FutureExt;
|
||||
use tower::Service;
|
||||
|
||||
use cuprate_dandelion_tower::traits::DiffuseRequest;
|
||||
use cuprate_p2p::{BroadcastRequest, BroadcastSvc};
|
||||
use cuprate_p2p_core::ClearNet;
|
||||
|
||||
use crate::txpool::dandelion::DandelionTx;
|
||||
|
||||
/// The dandelion diffusion service.
|
||||
pub struct DiffuseService {
|
||||
pub clear_net_broadcast_service: BroadcastSvc<ClearNet>,
|
||||
}
|
||||
|
||||
impl Service<DiffuseRequest<DandelionTx>> for DiffuseService {
|
||||
type Response = ();
|
||||
type Error = tower::BoxError;
|
||||
type Future = Ready<Result<Self::Response, Self::Error>>;
|
||||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.clear_net_broadcast_service
|
||||
.poll_ready(cx)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn call(&mut self, req: DiffuseRequest<DandelionTx>) -> Self::Future {
|
||||
// TODO: the dandelion crate should pass along where we got the tx from.
|
||||
let Ok(()) = self
|
||||
.clear_net_broadcast_service
|
||||
.call(BroadcastRequest::Transaction {
|
||||
tx_bytes: req.0 .0,
|
||||
direction: None,
|
||||
received_from: None,
|
||||
})
|
||||
.into_inner();
|
||||
|
||||
ready(Ok(()))
|
||||
}
|
||||
}
|
105
binaries/cuprated/src/txpool/dandelion/stem_service.rs
Normal file
105
binaries/cuprated/src/txpool/dandelion/stem_service.rs
Normal file
|
@ -0,0 +1,105 @@
|
|||
use std::{
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
task::{ready, Context, Poll},
|
||||
};
|
||||
|
||||
use bytes::Bytes;
|
||||
use futures::{future::BoxFuture, FutureExt, Stream};
|
||||
use tower::Service;
|
||||
|
||||
use cuprate_dandelion_tower::{traits::StemRequest, OutboundPeer};
|
||||
use cuprate_p2p::{ClientDropGuard, NetworkInterface, PeerSetRequest, PeerSetResponse};
|
||||
use cuprate_p2p_core::{
|
||||
client::{Client, InternalPeerID},
|
||||
ClearNet, NetworkZone, PeerRequest, ProtocolRequest,
|
||||
};
|
||||
use cuprate_wire::protocol::NewTransactions;
|
||||
|
||||
use crate::{p2p::CrossNetworkInternalPeerId, txpool::dandelion::DandelionTx};
|
||||
|
||||
/// The dandelion outbound peer stream.
|
||||
pub struct OutboundPeerStream {
|
||||
clear_net: NetworkInterface<ClearNet>,
|
||||
state: OutboundPeerStreamState,
|
||||
}
|
||||
|
||||
impl OutboundPeerStream {
|
||||
pub const fn new(clear_net: NetworkInterface<ClearNet>) -> Self {
|
||||
Self {
|
||||
clear_net,
|
||||
state: OutboundPeerStreamState::Standby,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for OutboundPeerStream {
|
||||
type Item = Result<
|
||||
OutboundPeer<CrossNetworkInternalPeerId, StemPeerService<ClearNet>>,
|
||||
tower::BoxError,
|
||||
>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
loop {
|
||||
match &mut self.state {
|
||||
OutboundPeerStreamState::Standby => {
|
||||
let peer_set = self.clear_net.peer_set();
|
||||
let res = ready!(peer_set.poll_ready(cx));
|
||||
|
||||
self.state = OutboundPeerStreamState::AwaitingPeer(
|
||||
peer_set.call(PeerSetRequest::StemPeer).boxed(),
|
||||
);
|
||||
}
|
||||
OutboundPeerStreamState::AwaitingPeer(fut) => {
|
||||
let res = ready!(fut.poll_unpin(cx));
|
||||
|
||||
return Poll::Ready(Some(res.map(|res| {
|
||||
let PeerSetResponse::StemPeer(stem_peer) = res else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
match stem_peer {
|
||||
Some(peer) => OutboundPeer::Peer(
|
||||
CrossNetworkInternalPeerId::ClearNet(peer.info.id),
|
||||
StemPeerService(peer),
|
||||
),
|
||||
None => OutboundPeer::Exhausted,
|
||||
}
|
||||
})));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The state of the [`OutboundPeerStream`].
|
||||
enum OutboundPeerStreamState {
|
||||
/// Standby state.
|
||||
Standby,
|
||||
/// Awaiting a response from the peer-set.
|
||||
AwaitingPeer(BoxFuture<'static, Result<PeerSetResponse<ClearNet>, tower::BoxError>>),
|
||||
}
|
||||
|
||||
/// The stem service, used to send stem txs.
|
||||
pub struct StemPeerService<N: NetworkZone>(ClientDropGuard<N>);
|
||||
|
||||
impl<N: NetworkZone> Service<StemRequest<DandelionTx>> for StemPeerService<N> {
|
||||
type Response = <Client<N> as Service<PeerRequest>>::Response;
|
||||
type Error = tower::BoxError;
|
||||
type Future = <Client<N> as Service<PeerRequest>>::Future;
|
||||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.0.poll_ready(cx)
|
||||
}
|
||||
|
||||
fn call(&mut self, req: StemRequest<DandelionTx>) -> Self::Future {
|
||||
self.0
|
||||
.call(PeerRequest::Protocol(ProtocolRequest::NewTransactions(
|
||||
NewTransactions {
|
||||
txs: vec![req.0 .0],
|
||||
dandelionpp_fluff: false,
|
||||
padding: Bytes::new(),
|
||||
},
|
||||
)))
|
||||
}
|
||||
}
|
74
binaries/cuprated/src/txpool/dandelion/tx_store.rs
Normal file
74
binaries/cuprated/src/txpool/dandelion/tx_store.rs
Normal file
|
@ -0,0 +1,74 @@
|
|||
use std::task::{Context, Poll};
|
||||
|
||||
use bytes::Bytes;
|
||||
use futures::{future::BoxFuture, FutureExt};
|
||||
use tower::{Service, ServiceExt};
|
||||
|
||||
use cuprate_dandelion_tower::{
|
||||
traits::{TxStoreRequest, TxStoreResponse},
|
||||
State,
|
||||
};
|
||||
use cuprate_database::RuntimeError;
|
||||
use cuprate_txpool::service::{
|
||||
interface::{TxpoolReadRequest, TxpoolReadResponse, TxpoolWriteRequest},
|
||||
TxpoolReadHandle, TxpoolWriteHandle,
|
||||
};
|
||||
|
||||
use super::{DandelionTx, TxId};
|
||||
|
||||
/// The dandelion tx-store service.
|
||||
///
|
||||
/// This is just mapping the interface [`cuprate_dandelion_tower`] wants to what [`cuprate_txpool`] provides.
|
||||
pub struct TxStoreService {
|
||||
pub txpool_read_handle: TxpoolReadHandle,
|
||||
pub txpool_write_handle: TxpoolWriteHandle,
|
||||
}
|
||||
|
||||
impl Service<TxStoreRequest<TxId>> for TxStoreService {
|
||||
type Response = TxStoreResponse<DandelionTx>;
|
||||
type Error = tower::BoxError;
|
||||
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: TxStoreRequest<TxId>) -> Self::Future {
|
||||
match req {
|
||||
TxStoreRequest::Get(tx_id) => self
|
||||
.txpool_read_handle
|
||||
.clone()
|
||||
.oneshot(TxpoolReadRequest::TxBlob(tx_id))
|
||||
.map(|res| match res {
|
||||
Ok(TxpoolReadResponse::TxBlob {
|
||||
tx_blob,
|
||||
state_stem,
|
||||
}) => {
|
||||
let state = if state_stem {
|
||||
State::Stem
|
||||
} else {
|
||||
State::Fluff
|
||||
};
|
||||
|
||||
Ok(TxStoreResponse::Transaction(Some((
|
||||
DandelionTx(Bytes::from(tx_blob)),
|
||||
state,
|
||||
))))
|
||||
}
|
||||
Err(RuntimeError::KeyNotFound) => Ok(TxStoreResponse::Transaction(None)),
|
||||
Err(e) => Err(e.into()),
|
||||
Ok(_) => unreachable!(),
|
||||
})
|
||||
.boxed(),
|
||||
TxStoreRequest::Promote(tx_id) => self
|
||||
.txpool_write_handle
|
||||
.clone()
|
||||
.oneshot(TxpoolWriteRequest::Promote(tx_id))
|
||||
.map(|res| match res {
|
||||
Ok(_) | Err(RuntimeError::KeyNotFound) => Ok(TxStoreResponse::Ok),
|
||||
Err(e) => Err(e.into()),
|
||||
})
|
||||
.boxed(),
|
||||
}
|
||||
}
|
||||
}
|
379
binaries/cuprated/src/txpool/incoming_tx.rs
Normal file
379
binaries/cuprated/src/txpool/incoming_tx.rs
Normal file
|
@ -0,0 +1,379 @@
|
|||
use std::{
|
||||
collections::HashSet,
|
||||
sync::Arc,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use bytes::Bytes;
|
||||
use futures::{future::BoxFuture, FutureExt};
|
||||
use monero_serai::transaction::Transaction;
|
||||
use tower::{Service, ServiceExt};
|
||||
|
||||
use cuprate_consensus::{
|
||||
transactions::new_tx_verification_data, BlockChainContextRequest, BlockChainContextResponse,
|
||||
BlockChainContextService, ExtendedConsensusError, VerifyTxRequest,
|
||||
};
|
||||
use cuprate_dandelion_tower::{
|
||||
pool::{DandelionPoolService, IncomingTxBuilder},
|
||||
State, TxState,
|
||||
};
|
||||
use cuprate_helper::asynch::rayon_spawn_async;
|
||||
use cuprate_p2p::NetworkInterface;
|
||||
use cuprate_p2p_core::ClearNet;
|
||||
use cuprate_txpool::{
|
||||
service::{
|
||||
interface::{
|
||||
TxpoolReadRequest, TxpoolReadResponse, TxpoolWriteRequest, TxpoolWriteResponse,
|
||||
},
|
||||
TxpoolReadHandle, TxpoolWriteHandle,
|
||||
},
|
||||
transaction_blob_hash,
|
||||
};
|
||||
use cuprate_types::TransactionVerificationData;
|
||||
|
||||
use crate::{
|
||||
blockchain::ConcreteTxVerifierService,
|
||||
constants::PANIC_CRITICAL_SERVICE_ERROR,
|
||||
p2p::CrossNetworkInternalPeerId,
|
||||
signals::REORG_LOCK,
|
||||
txpool::{
|
||||
dandelion,
|
||||
txs_being_handled::{TxsBeingHandled, TxsBeingHandledLocally},
|
||||
},
|
||||
};
|
||||
|
||||
/// An error that can happen handling an incoming tx.
|
||||
pub enum IncomingTxError {
|
||||
Parse(std::io::Error),
|
||||
Consensus(ExtendedConsensusError),
|
||||
DuplicateTransaction,
|
||||
}
|
||||
|
||||
/// Incoming transactions.
|
||||
pub struct IncomingTxs {
|
||||
/// The raw bytes of the transactions.
|
||||
pub txs: Vec<Bytes>,
|
||||
/// The routing state of the transactions.
|
||||
pub state: TxState<CrossNetworkInternalPeerId>,
|
||||
}
|
||||
|
||||
/// The transaction type used for dandelion++.
|
||||
#[derive(Clone)]
|
||||
pub struct DandelionTx(pub Bytes);
|
||||
|
||||
/// A transaction ID/hash.
|
||||
pub(super) type TxId = [u8; 32];
|
||||
|
||||
/// The service than handles incoming transaction pool transactions.
|
||||
///
|
||||
/// This service handles everything including verifying the tx, adding it to the pool and routing it to other nodes.
|
||||
pub struct IncomingTxHandler {
|
||||
/// A store of txs currently being handled in incoming tx requests.
|
||||
pub(super) txs_being_handled: TxsBeingHandled,
|
||||
/// The blockchain context cache.
|
||||
pub(super) blockchain_context_cache: BlockChainContextService,
|
||||
/// The dandelion txpool manager.
|
||||
pub(super) dandelion_pool_manager:
|
||||
DandelionPoolService<DandelionTx, TxId, CrossNetworkInternalPeerId>,
|
||||
/// The transaction verifier service.
|
||||
pub(super) tx_verifier_service: ConcreteTxVerifierService,
|
||||
/// The txpool write handle.
|
||||
pub(super) txpool_write_handle: TxpoolWriteHandle,
|
||||
/// The txpool read handle.
|
||||
pub(super) txpool_read_handle: TxpoolReadHandle,
|
||||
}
|
||||
|
||||
impl IncomingTxHandler {
|
||||
/// Initialize the [`IncomingTxHandler`].
|
||||
#[expect(clippy::significant_drop_tightening)]
|
||||
pub fn init(
|
||||
clear_net: NetworkInterface<ClearNet>,
|
||||
txpool_write_handle: TxpoolWriteHandle,
|
||||
txpool_read_handle: TxpoolReadHandle,
|
||||
blockchain_context_cache: BlockChainContextService,
|
||||
tx_verifier_service: ConcreteTxVerifierService,
|
||||
) -> Self {
|
||||
let dandelion_router = dandelion::dandelion_router(clear_net);
|
||||
|
||||
let dandelion_pool_manager = dandelion::start_dandelion_pool_manager(
|
||||
dandelion_router,
|
||||
txpool_read_handle.clone(),
|
||||
txpool_write_handle.clone(),
|
||||
);
|
||||
|
||||
Self {
|
||||
txs_being_handled: TxsBeingHandled::new(),
|
||||
blockchain_context_cache,
|
||||
dandelion_pool_manager,
|
||||
tx_verifier_service,
|
||||
txpool_write_handle,
|
||||
txpool_read_handle,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Service<IncomingTxs> for IncomingTxHandler {
|
||||
type Response = ();
|
||||
type Error = IncomingTxError;
|
||||
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
|
||||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: IncomingTxs) -> Self::Future {
|
||||
handle_incoming_txs(
|
||||
req,
|
||||
self.txs_being_handled.clone(),
|
||||
self.blockchain_context_cache.clone(),
|
||||
self.tx_verifier_service.clone(),
|
||||
self.txpool_write_handle.clone(),
|
||||
self.txpool_read_handle.clone(),
|
||||
self.dandelion_pool_manager.clone(),
|
||||
)
|
||||
.boxed()
|
||||
}
|
||||
}
|
||||
|
||||
/// Handles the incoming txs.
|
||||
async fn handle_incoming_txs(
|
||||
IncomingTxs { txs, state }: IncomingTxs,
|
||||
txs_being_handled: TxsBeingHandled,
|
||||
mut blockchain_context_cache: BlockChainContextService,
|
||||
mut tx_verifier_service: ConcreteTxVerifierService,
|
||||
mut txpool_write_handle: TxpoolWriteHandle,
|
||||
mut txpool_read_handle: TxpoolReadHandle,
|
||||
mut dandelion_pool_manager: DandelionPoolService<DandelionTx, TxId, CrossNetworkInternalPeerId>,
|
||||
) -> Result<(), IncomingTxError> {
|
||||
let _reorg_guard = REORG_LOCK.read().await;
|
||||
|
||||
let (txs, stem_pool_txs, txs_being_handled_guard) =
|
||||
prepare_incoming_txs(txs, txs_being_handled, &mut txpool_read_handle).await?;
|
||||
|
||||
let BlockChainContextResponse::Context(context) = blockchain_context_cache
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(BlockChainContextRequest::Context)
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
let context = context.unchecked_blockchain_context();
|
||||
|
||||
tx_verifier_service
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(VerifyTxRequest::Prepped {
|
||||
txs: txs.clone(),
|
||||
current_chain_height: context.chain_height,
|
||||
top_hash: context.top_hash,
|
||||
time_for_time_lock: context.current_adjusted_timestamp_for_time_lock(),
|
||||
hf: context.current_hf,
|
||||
})
|
||||
.await
|
||||
.map_err(IncomingTxError::Consensus)?;
|
||||
|
||||
for tx in txs {
|
||||
handle_valid_tx(
|
||||
tx,
|
||||
state.clone(),
|
||||
&mut txpool_write_handle,
|
||||
&mut dandelion_pool_manager,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
// Re-relay any txs we got in the block that were already in our stem pool.
|
||||
for stem_tx in stem_pool_txs {
|
||||
rerelay_stem_tx(
|
||||
&stem_tx,
|
||||
state.clone(),
|
||||
&mut txpool_read_handle,
|
||||
&mut dandelion_pool_manager,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Prepares the incoming transactions for verification.
|
||||
///
|
||||
/// This will filter out all transactions already in the pool or txs already being handled in another request.
|
||||
///
|
||||
/// Returns in order:
|
||||
/// - The [`TransactionVerificationData`] for all the txs we did not already have
|
||||
/// - The Ids of the transactions in the incoming message that are in our stem-pool
|
||||
/// - A [`TxsBeingHandledLocally`] guard that prevents verifying the same tx at the same time across 2 tasks.
|
||||
async fn prepare_incoming_txs(
|
||||
tx_blobs: Vec<Bytes>,
|
||||
txs_being_handled: TxsBeingHandled,
|
||||
txpool_read_handle: &mut TxpoolReadHandle,
|
||||
) -> Result<
|
||||
(
|
||||
Vec<Arc<TransactionVerificationData>>,
|
||||
Vec<TxId>,
|
||||
TxsBeingHandledLocally,
|
||||
),
|
||||
IncomingTxError,
|
||||
> {
|
||||
let mut tx_blob_hashes = HashSet::new();
|
||||
let mut txs_being_handled_locally = txs_being_handled.local_tracker();
|
||||
|
||||
// Compute the blob hash for each tx and filter out the txs currently being handled by another incoming tx batch.
|
||||
let txs = tx_blobs
|
||||
.into_iter()
|
||||
.filter_map(|tx_blob| {
|
||||
let tx_blob_hash = transaction_blob_hash(&tx_blob);
|
||||
|
||||
// If a duplicate is in here the incoming tx batch contained the same tx twice.
|
||||
if !tx_blob_hashes.insert(tx_blob_hash) {
|
||||
return Some(Err(IncomingTxError::DuplicateTransaction));
|
||||
}
|
||||
|
||||
// If a duplicate is here it is being handled in another batch.
|
||||
if !txs_being_handled_locally.try_add_tx(tx_blob_hash) {
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(Ok((tx_blob_hash, tx_blob)))
|
||||
})
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
// Filter the txs already in the txpool out.
|
||||
// This will leave the txs already in the pool in [`TxBeingHandledLocally`] but that shouldn't be an issue.
|
||||
let TxpoolReadResponse::FilterKnownTxBlobHashes {
|
||||
unknown_blob_hashes,
|
||||
stem_pool_hashes,
|
||||
} = txpool_read_handle
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(TxpoolReadRequest::FilterKnownTxBlobHashes(tx_blob_hashes))
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
// Now prepare the txs for verification.
|
||||
rayon_spawn_async(move || {
|
||||
let txs = txs
|
||||
.into_iter()
|
||||
.filter_map(|(tx_blob_hash, tx_blob)| {
|
||||
if unknown_blob_hashes.contains(&tx_blob_hash) {
|
||||
Some(tx_blob)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.map(|bytes| {
|
||||
let tx = Transaction::read(&mut bytes.as_ref()).map_err(IncomingTxError::Parse)?;
|
||||
|
||||
let tx = new_tx_verification_data(tx)
|
||||
.map_err(|e| IncomingTxError::Consensus(e.into()))?;
|
||||
|
||||
Ok(Arc::new(tx))
|
||||
})
|
||||
.collect::<Result<Vec<_>, IncomingTxError>>()?;
|
||||
|
||||
Ok((txs, stem_pool_hashes, txs_being_handled_locally))
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
/// Handle a verified tx.
|
||||
///
|
||||
/// This will add the tx to the txpool and route it to the network.
|
||||
async fn handle_valid_tx(
|
||||
tx: Arc<TransactionVerificationData>,
|
||||
state: TxState<CrossNetworkInternalPeerId>,
|
||||
txpool_write_handle: &mut TxpoolWriteHandle,
|
||||
dandelion_pool_manager: &mut DandelionPoolService<
|
||||
DandelionTx,
|
||||
TxId,
|
||||
CrossNetworkInternalPeerId,
|
||||
>,
|
||||
) {
|
||||
let incoming_tx =
|
||||
IncomingTxBuilder::new(DandelionTx(Bytes::copy_from_slice(&tx.tx_blob)), tx.tx_hash);
|
||||
|
||||
let TxpoolWriteResponse::AddTransaction(double_spend) = txpool_write_handle
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(TxpoolWriteRequest::AddTransaction {
|
||||
tx,
|
||||
state_stem: state.is_stem_stage(),
|
||||
})
|
||||
.await
|
||||
.expect("TODO")
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
// TODO: track double spends to quickly ignore them from their blob hash.
|
||||
if let Some(tx_hash) = double_spend {
|
||||
return;
|
||||
};
|
||||
|
||||
// TODO: There is a race condition possible if a tx and block come in at the same time: <https://github.com/Cuprate/cuprate/issues/314>.
|
||||
|
||||
let incoming_tx = incoming_tx
|
||||
.with_routing_state(state)
|
||||
.with_state_in_db(None)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
dandelion_pool_manager
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(incoming_tx)
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR);
|
||||
}
|
||||
|
||||
/// Re-relay a tx that was already in our stem pool.
|
||||
async fn rerelay_stem_tx(
|
||||
tx_hash: &TxId,
|
||||
state: TxState<CrossNetworkInternalPeerId>,
|
||||
txpool_read_handle: &mut TxpoolReadHandle,
|
||||
dandelion_pool_manager: &mut DandelionPoolService<
|
||||
DandelionTx,
|
||||
TxId,
|
||||
CrossNetworkInternalPeerId,
|
||||
>,
|
||||
) {
|
||||
let Ok(TxpoolReadResponse::TxBlob { tx_blob, .. }) = txpool_read_handle
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(TxpoolReadRequest::TxBlob(*tx_hash))
|
||||
.await
|
||||
else {
|
||||
// The tx could have been dropped from the pool.
|
||||
return;
|
||||
};
|
||||
|
||||
let incoming_tx =
|
||||
IncomingTxBuilder::new(DandelionTx(Bytes::copy_from_slice(&tx_blob)), *tx_hash);
|
||||
|
||||
let incoming_tx = incoming_tx
|
||||
.with_routing_state(state)
|
||||
.with_state_in_db(Some(State::Stem))
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
dandelion_pool_manager
|
||||
.ready()
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR)
|
||||
.call(incoming_tx)
|
||||
.await
|
||||
.expect(PANIC_CRITICAL_SERVICE_ERROR);
|
||||
}
|
53
binaries/cuprated/src/txpool/txs_being_handled.rs
Normal file
53
binaries/cuprated/src/txpool/txs_being_handled.rs
Normal file
|
@ -0,0 +1,53 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use dashmap::DashSet;
|
||||
|
||||
/// A set of txs currently being handled, shared between instances of the incoming tx handler.
|
||||
#[derive(Clone)]
|
||||
pub struct TxsBeingHandled(Arc<DashSet<[u8; 32]>>);
|
||||
|
||||
impl TxsBeingHandled {
|
||||
/// Create a new [`TxsBeingHandled`]
|
||||
pub fn new() -> Self {
|
||||
Self(Arc::new(DashSet::new()))
|
||||
}
|
||||
|
||||
/// Create a new [`TxsBeingHandledLocally`] that will keep track of txs being handled in a request.
|
||||
pub fn local_tracker(&self) -> TxsBeingHandledLocally {
|
||||
TxsBeingHandledLocally {
|
||||
txs_being_handled: self.clone(),
|
||||
txs: vec![],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A tracker of txs being handled in a single request. This will add the txs to the global [`TxsBeingHandled`]
|
||||
/// tracker as well.
|
||||
///
|
||||
/// When this is dropped the txs will be removed from [`TxsBeingHandled`].
|
||||
pub struct TxsBeingHandledLocally {
|
||||
txs_being_handled: TxsBeingHandled,
|
||||
txs: Vec<[u8; 32]>,
|
||||
}
|
||||
|
||||
impl TxsBeingHandledLocally {
|
||||
/// Try add a tx to the map from its [`transaction_blob_hash`](cuprate_txpool::transaction_blob_hash).
|
||||
///
|
||||
/// Returns `true` if the tx was added and `false` if another task is already handling this tx.
|
||||
pub fn try_add_tx(&mut self, tx_blob_hash: [u8; 32]) -> bool {
|
||||
if !self.txs_being_handled.0.insert(tx_blob_hash) {
|
||||
return false;
|
||||
}
|
||||
|
||||
self.txs.push(tx_blob_hash);
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for TxsBeingHandledLocally {
|
||||
fn drop(&mut self) {
|
||||
for hash in &self.txs {
|
||||
self.txs_being_handled.0.remove(hash);
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -164,6 +164,11 @@

 ---

+- [🟢 Monero oddities](oddities/intro.md)
+    - [🟡 Little-endian IPv4 addresses](oddities/le-ipv4.md)
+
+---
+
 - [⚪️ Appendix](appendix/intro.md)
     - [🟢 Crates](appendix/crates.md)
     - [🔴 Contributing](appendix/contributing.md)
|
||||
|
|
|
@@ -35,6 +35,7 @@ cargo doc --open --package cuprate-blockchain
 | [`cuprate-async-buffer`](https://doc.cuprate.org/cuprate_async_buffer) | [`p2p/async-buffer/`](https://github.com/Cuprate/cuprate/tree/main/p2p/async-buffer) | A bounded SPSC, FIFO, asynchronous buffer that supports arbitrary weights for values
 | [`cuprate-dandelion-tower`](https://doc.cuprate.org/cuprate_dandelion_tower) | [`p2p/dandelion-tower/`](https://github.com/Cuprate/cuprate/tree/main/p2p/dandelion-tower) | TODO
 | [`cuprate-p2p`](https://doc.cuprate.org/cuprate_p2p) | [`p2p/p2p/`](https://github.com/Cuprate/cuprate/tree/main/p2p/p2p) | TODO
+| [`cuprate-p2p-bucket`](https://doc.cuprate.org/cuprate_p2p_bucket) | [`p2p/bucket/`](https://github.com/Cuprate/cuprate/tree/main/p2p/bucket) | A collection data structure discriminating its items into "buckets" of limited size.
 | [`cuprate-p2p-core`](https://doc.cuprate.org/cuprate_p2p_core) | [`p2p/p2p-core/`](https://github.com/Cuprate/cuprate/tree/main/p2p/p2p-core) | TODO

 ## Storage

@@ -53,6 +54,11 @@ cargo doc --open --package cuprate-blockchain
 | [`cuprate-rpc-interface`](https://doc.cuprate.org/cuprate_rpc_interface) | [`rpc/interface/`](https://github.com/Cuprate/cuprate/tree/main/rpc/interface) | RPC interface & routing
 | [`cuprate-rpc-handler`](https://doc.cuprate.org/cuprate_rpc_handler) | [`rpc/handler/`](https://github.com/Cuprate/cuprate/tree/main/rpc/handler) | RPC inner handlers

+## ZMQ
+| Crate | In-tree path | Purpose |
+|-------|--------------|---------|
+| [`cuprate-zmq-types`](https://doc.cuprate.org/cuprate_zmq_types) | [`zmq/types/`](https://github.com/Cuprate/cuprate/tree/main/zmq/types) | Message types for ZMQ Pub/Sub interface
+
 ## 1-off crates
 | Crate | In-tree path | Purpose |
 |-------|--------------|---------|
|
||||
|
|
books/architecture/src/oddities/intro.md (new file, 37 lines)
@@ -0,0 +1,37 @@
# Monero oddities
This section is a list of any peculiar, interesting,
or non-standard behavior that Monero has that is not
planned on being changed or deprecated.

This section exists to hold all the small yet noteworthy knowledge in one place,
instead of in any single contributor's mind.

These are usually behaviors stemming from implementation rather than protocol/cryptography.

## Formatting
This is the markdown formatting for each entry in this section.

If applicable, consider using this formatting when adding to this section.

```md
# <concise_title_of_the_behavior>

## What
A detailed description of the behavior.

## Expected
The norm or standard behavior that is usually expected.

## Why
The reasoning behind why this behavior exists and/or
any links to more detailed discussion on the behavior.

## Affects
A (potentially non-exhaustive) list of places that this behavior can/does affect.

## Example
An example link or section of code where the behavior occurs.

## Source
A link to original `monerod` code that defines the behavior.
```
|
books/architecture/src/oddities/le-ipv4.md (new file, 24 lines)
@@ -0,0 +1,24 @@
# Little-endian IPv4 addresses

## What
Monero encodes IPv4 addresses in [little-endian](https://en.wikipedia.org/wiki/Endianness) byte order.

## Expected
In general, [networking-related protocols/code use _networking order_ (big-endian)](https://en.wikipedia.org/wiki/Endianness#Networking).

## Why
TODO

- <https://github.com/monero-project/monero/issues/3826>
- <https://github.com/monero-project/monero/pull/5544>

## Affects
Any representation and (de)serialization of IPv4 addresses must keep little
endian in mind, e.g. the P2P wire format or `int` encoded IPv4 addresses in RPC.

For example, [the `ip` field in `set_bans`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#set_bans).

For Cuprate, this means Rust's [`Ipv4Addr::from_bits/from`](https://doc.rust-lang.org/1.82.0/src/core/net/ip_addr.rs.html#1182) cannot be used in these cases as [it assumes big-endian encoding](https://doc.rust-lang.org/1.82.0/src/core/net/ip_addr.rs.html#540).

## Source
- <https://github.com/monero-project/monero/blob/893916ad091a92e765ce3241b94e706ad012b62a/contrib/epee/include/net/net_utils_base.h#L97>
|
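As a quick illustration of the pitfall described above (a minimal sketch, not code from this commit; only `std::net::Ipv4Addr` is used), round-tripping through the little-endian byte representation is the same approach the `epee_builder.rs` change later in this diff takes:

```rust
use std::net::Ipv4Addr;

fn main() {
    // `127.0.0.1` encoded as a little-endian `u32`, as it appears on the wire / in RPC.
    let le_encoded: u32 = 0x0100_007F;

    // Wrong for Monero: `Ipv4Addr::from(u32)` interprets the integer as big-endian.
    assert_eq!(Ipv4Addr::from(le_encoded), Ipv4Addr::new(1, 0, 0, 127));

    // Right: go through the little-endian bytes first.
    assert_eq!(
        Ipv4Addr::from(le_encoded.to_le_bytes()),
        Ipv4Addr::new(127, 0, 0, 1)
    );
}
```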
|
@@ -18,6 +18,7 @@ use std::{
 };

 use futures::{channel::oneshot, FutureExt};
+use monero_serai::block::Block;
 use tokio::sync::mpsc;
 use tokio_util::sync::PollSender;
 use tower::Service;

@@ -267,6 +268,21 @@ pub enum BlockChainContextRequest {
         grace_blocks: u64,
     },

+    /// Calculate proof-of-work for this block.
+    CalculatePow {
+        /// The hardfork of the protocol at this block height.
+        hardfork: HardFork,
+        /// The height of the block.
+        height: usize,
+        /// The block data.
+        ///
+        /// This is boxed because [`Block`] causes this enum to be 1200 bytes,
+        /// where the 2nd variant is only 96 bytes.
+        block: Box<Block>,
+        /// The seed hash for the proof-of-work.
+        seed_hash: [u8; 32],
+    },
+
     /// Clear the alt chain context caches.
     ClearAltCache,

@@ -364,6 +380,9 @@ pub enum BlockChainContextResponse {
     /// Response to [`BlockChainContextRequest::FeeEstimate`]
     FeeEstimate(FeeEstimate),

+    /// Response to [`BlockChainContextRequest::CalculatePow`]
+    CalculatePow([u8; 32]),
+
     /// Response to [`BlockChainContextRequest::AltChains`]
     ///
     /// If the inner [`Vec::is_empty`], there were no alternate chains.
|
||||
|
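The size comment on `block: Box<Block>` is the standard "box the large variant" trick; a self-contained sketch with hypothetical payload sizes (not the real `Block` type):

```rust
use std::mem::size_of;

#[allow(dead_code)]
enum Request {
    Hash([u8; 32]),          // small inline payload
    Block(Box<[u8; 1200]>),  // large payload kept behind a pointer
}

fn main() {
    // The enum is sized for its largest variant; boxing the big one keeps
    // every `Request` value far smaller than the boxed payload itself.
    assert!(size_of::<Request>() < size_of::<[u8; 1200]>());
    println!("size_of::<Request>() = {}", size_of::<Request>());
}
```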
|
|
@@ -324,7 +324,8 @@ impl<D: Database + Clone + Send + 'static> ContextTask<D> {
             }
             BlockChainContextRequest::HardForkInfo(_)
             | BlockChainContextRequest::FeeEstimate { .. }
-            | BlockChainContextRequest::AltChains => {
+            | BlockChainContextRequest::AltChains
+            | BlockChainContextRequest::CalculatePow { .. } => {
                 todo!("finish https://github.com/Cuprate/cuprate/pull/297")
             }
         })
|
||||
|
|
|
@ -1,5 +1,3 @@
|
|||
#![expect(non_local_definitions, reason = "proptest macro")]
|
||||
|
||||
use std::{
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
|
|
|
@@ -49,7 +49,7 @@ pub(crate) fn subarray_copy<T: AsRef<[U]> + ?Sized, U: Copy, const LEN: usize>(
 /// A mutable reference to a fixed-size subarray of type `[U; LEN]`.
 ///
 /// # Panics
-/// Panics if `start + LEN > array.as_ref().len()`.
+/// Panics if `start + LEN > array.as_mut().len()`.
 #[inline]
 pub(crate) fn subarray_mut<T: AsMut<[U]> + ?Sized, U, const LEN: usize>(
     array: &mut T,
|
||||
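An illustrative in-crate usage sketch of the documented panic condition; the `start` parameter and the `&mut [U; LEN]` return type are assumed from the surrounding helpers and are not shown in this hunk:

```rust
#[test]
fn subarray_mut_window() {
    let mut buf = [0_u8; 8];

    // A 4-byte window starting at index 2: `start + LEN == 6 <= buf.as_mut().len()`.
    let window: &mut [u8; 4] = subarray_mut(&mut buf, 2);
    window.copy_from_slice(&[1, 2, 3, 4]);
    assert_eq!(buf[2..6], [1, 2, 3, 4]);

    // `subarray_mut::<_, _, 4>(&mut buf, 5)` would panic instead:
    // `start + LEN == 9 > buf.as_mut().len() == 8`.
}
```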
|
|
|
@ -81,6 +81,9 @@ ignore = [
|
|||
#{ id = "RUSTSEC-0000-0000", reason = "you can specify a reason the advisory is ignored" },
|
||||
#"a-crate-that-is-yanked@0.1.1", # you can also ignore yanked crate versions if you wish
|
||||
#{ crate = "a-crate-that-is-yanked@0.1.1", reason = "you can specify why you are ignoring the yanked crate" },
|
||||
|
||||
# TODO: check this is sorted before a beta release.
|
||||
{ id = "RUSTSEC-2024-0370", reason = "unmaintained crate, not necessarily vulnerable yet." }
|
||||
]
|
||||
# If this is true, then cargo deny will use the git executable to fetch advisory database.
|
||||
# If this is false, then it uses a built-in git library.
|
||||
|
@ -110,6 +113,7 @@ allow = [
|
|||
"Apache-2.0", # https://tldrlegal.com/license/apache-license-2.0-(apache-2.0)
|
||||
"MPL-2.0", # https://www.mozilla.org/en-US/MPL/2.0/FAQ/
|
||||
"BSL-1.0", # https://tldrlegal.com/license/boost-software-license-1.0-explained
|
||||
"Zlib", # https://spdx.org/licenses/Zlib.html
|
||||
|
||||
# OpenSSL 3.0+ uses Apache-2.0
|
||||
# OpenSSL 1.x.x uses https://www.openssl.org/source/license-openssl-ssleay.txt
|
||||
|
|
|
@@ -17,7 +17,7 @@ asynch = ["dep:futures", "dep:rayon"]
 cast = []
 constants = []
 crypto = ["dep:curve25519-dalek", "dep:monero-serai", "std"]
-fs = ["dep:dirs"]
+fs = ["dep:dirs", "std"]
 num = []
 map = ["cast", "dep:monero-serai", "dep:cuprate-constants"]
 time = ["dep:chrono", "std"]
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
// //
|
||||
//============================ SAFETY: DO NOT REMOVE ===========================//
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Free functions
|
||||
/// Cast [`u64`] to [`usize`].
|
||||
#[inline(always)]
|
||||
pub const fn u64_to_usize(u: u64) -> usize {
|
||||
|
|
|
@@ -11,7 +11,7 @@ pub mod atomic;
 #[cfg(feature = "cast")]
 pub mod cast;

-#[cfg(feature = "fs")]
+#[cfg(all(feature = "fs", feature = "std"))]
 pub mod fs;

 pub mod network;
|
||||
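The effect of the tightened gate, as a sketch (the function name and body are illustrative, not part of `cuprate-helper`):

```rust
// Only exists when the crate is built with both `fs` and `std`
// (and `fs` now implies `std` via the Cargo.toml change above).
#[cfg(all(feature = "fs", feature = "std"))]
pub fn config_dir_exists(path: &std::path::Path) -> bool {
    // Filesystem access needs `std`, which is why `fs` pulls it in.
    path.is_dir()
}
```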
|
|
|
@ -1,3 +1,5 @@
|
|||
use alloc::{string::ToString, vec, vec::Vec};
|
||||
|
||||
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||
use ref_cast::RefCast;
|
||||
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
use alloc::string::{String, ToString};
|
||||
use core::{
|
||||
fmt::{Debug, Formatter},
|
||||
num::TryFromIntError,
|
||||
|
|
|
@ -64,6 +64,7 @@ use hex as _;
|
|||
|
||||
extern crate alloc;
|
||||
|
||||
use alloc::string::ToString;
|
||||
use core::str::from_utf8 as str_from_utf8;
|
||||
|
||||
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
//! This module contains a [`EpeeValue`] trait and
|
||||
//! impls for some possible base epee values.
|
||||
|
||||
use alloc::{string::String, vec::Vec};
|
||||
use alloc::{string::String, vec, vec::Vec};
|
||||
use core::fmt::Debug;
|
||||
|
||||
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||
|
|
|
@ -17,10 +17,12 @@
|
|||
//! Monero network. Core Monero has 4 main addresses: IPv4, IPv6, Tor,
|
||||
//! I2p. Currently this module only has IPv(4/6).
|
||||
//!
|
||||
use bytes::BufMut;
|
||||
use cuprate_epee_encoding::EpeeObject;
|
||||
use std::{hash::Hash, net, net::SocketAddr};
|
||||
|
||||
use bytes::BufMut;
|
||||
|
||||
use cuprate_epee_encoding::EpeeObject;
|
||||
|
||||
mod epee_builder;
|
||||
use epee_builder::*;
|
||||
|
||||
|
|
|
@ -1,9 +1,10 @@
|
|||
use bytes::Buf;
|
||||
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
|
||||
|
||||
use cuprate_epee_encoding::{epee_object, EpeeObjectBuilder};
|
||||
use bytes::Buf;
|
||||
use thiserror::Error;
|
||||
|
||||
use cuprate_epee_encoding::{epee_object, EpeeObjectBuilder};
|
||||
|
||||
use crate::NetworkAddress;
|
||||
|
||||
#[derive(Default)]
|
||||
|
@ -77,7 +78,7 @@ impl From<NetworkAddress> for TaggedNetworkAddress {
|
|||
SocketAddr::V4(addr) => Self {
|
||||
ty: Some(1),
|
||||
addr: Some(AllFieldsNetworkAddress {
|
||||
m_ip: Some(u32::from_be_bytes(addr.ip().octets())),
|
||||
m_ip: Some(u32::from_le_bytes(addr.ip().octets())),
|
||||
m_port: Some(addr.port()),
|
||||
addr: None,
|
||||
}),
|
||||
|
@ -112,7 +113,10 @@ epee_object!(
|
|||
impl AllFieldsNetworkAddress {
|
||||
fn try_into_network_address(self, ty: u8) -> Option<NetworkAddress> {
|
||||
Some(match ty {
|
||||
1 => NetworkAddress::from(SocketAddrV4::new(Ipv4Addr::from(self.m_ip?), self.m_port?)),
|
||||
1 => NetworkAddress::from(SocketAddrV4::new(
|
||||
Ipv4Addr::from(self.m_ip?.to_le_bytes()),
|
||||
self.m_port?,
|
||||
)),
|
||||
2 => NetworkAddress::from(SocketAddrV6::new(
|
||||
Ipv6Addr::from(self.addr?),
|
||||
self.m_port?,
|
||||
|
|
|
@ -9,7 +9,7 @@ authors = ["Boog900"]
|
|||
[dependencies]
|
||||
cuprate-constants = { workspace = true }
|
||||
cuprate-pruning = { workspace = true }
|
||||
cuprate-p2p-core = { workspace = true }
|
||||
cuprate-p2p-core = { workspace = true, features = ["borsh"] }
|
||||
|
||||
tower = { workspace = true, features = ["util"] }
|
||||
tokio = { workspace = true, features = ["time", "fs", "rt"]}
|
||||
|
|
|
@ -423,7 +423,8 @@ impl<Z: BorshNetworkZone> Service<AddressBookRequest<Z>> for AddressBook<Z> {
|
|||
AddressBookRequest::PeerlistSize
|
||||
| AddressBookRequest::ConnectionCount
|
||||
| AddressBookRequest::SetBan(_)
|
||||
| AddressBookRequest::GetBans => {
|
||||
| AddressBookRequest::GetBans
|
||||
| AddressBookRequest::ConnectionInfo => {
|
||||
todo!("finish https://github.com/Cuprate/cuprate/pull/297")
|
||||
}
|
||||
};
|
||||
|
|
13
p2p/bucket/Cargo.toml
Normal file
13
p2p/bucket/Cargo.toml
Normal file
|
@ -0,0 +1,13 @@
|
|||
[package]
|
||||
name = "cuprate-p2p-bucket"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
license = "MIT"
|
||||
authors = ["SyntheticBird"]
|
||||
|
||||
[dependencies]
|
||||
arrayvec = { workspace = true }
|
||||
rand = { workspace = true, features = ["std", "std_rng"]}
|
||||
|
||||
[lints]
|
||||
workspace = true
|
172
p2p/bucket/src/lib.rs
Normal file
172
p2p/bucket/src/lib.rs
Normal file
|
@ -0,0 +1,172 @@
|
|||
//! Bucket data structure
|
||||
//!
|
||||
//! A collection data structure that discriminates its unique items and place them into "buckets".
|
||||
//!
|
||||
//! The item must implement the [`Bucketable`] trait that defines how to create the discriminant
|
||||
//! from the item type. The data structure will internally contain any item into "buckets" or vectors
|
||||
//! of sized capacity `N` that regroup all the stored items with this specific discriminant.
|
||||
//!
|
||||
//! A practical example of this data structure is for storing `N` amount of IP discriminated by their subnets.
|
||||
//! You can store in each "buckets" corresponding to a `/16` subnet up to `N` IPs of that subnet.
|
||||
//!
|
||||
//! # Example
|
||||
//!
|
||||
//! ```
|
||||
//! use cuprate_p2p_bucket::Bucket;
|
||||
//! use std::net::Ipv4Addr;
|
||||
//!
|
||||
//! // Create a new bucket that can store at most 2 IPs in a particular `/16` subnet.
|
||||
//! let mut bucket = Bucket::<2,Ipv4Addr>::new();
|
||||
//!
|
||||
//! // Fulfill the `96.96.0.0/16` bucket.
|
||||
//! bucket.push("96.96.0.1".parse().unwrap());
|
||||
//! bucket.push("96.96.0.2".parse().unwrap());
|
||||
//! assert_eq!(2, bucket.len());
|
||||
//! assert_eq!(2, bucket.len_bucket(&[96_u8,96_u8]).unwrap());
|
||||
//!
|
||||
//! // Push a new IP from another subnet
|
||||
//! bucket.push("127.0.0.1".parse().unwrap());
|
||||
//! assert_eq!(3, bucket.len());
|
||||
//! assert_eq!(2, bucket.len_bucket(&[96_u8,96_u8]).unwrap());
|
||||
//! assert_eq!(1, bucket.len_bucket(&[127_u8,0_u8]).unwrap());
|
||||
//!
|
||||
//! // Attempting to push a new IP within `96.96.0.0/16` bucket will return the IP back
|
||||
//! // as this subnet is already full.
|
||||
//! let pushed = bucket.push("96.96.0.3".parse().unwrap());
|
||||
//! assert!(pushed.is_some());
|
||||
//! assert_eq!(2, bucket.len_bucket(&[96_u8,96_u8]).unwrap());
|
||||
//!
|
||||
//! ```
|
||||
|
||||
use arrayvec::{ArrayVec, CapacityError};
|
||||
use rand::random;
|
||||
|
||||
use std::{collections::BTreeMap, net::Ipv4Addr};
|
||||
|
||||
/// A discriminant that can be computed from the type.
|
||||
pub trait Bucketable: Sized + Eq + Clone {
|
||||
/// The type of the discriminant being used in the Binary tree.
|
||||
type Discriminant: Ord + AsRef<[u8]>;
|
||||
|
||||
/// Method that can compute the discriminant from the item.
|
||||
fn discriminant(&self) -> Self::Discriminant;
|
||||
}
|
||||
|
||||
/// A collection data structure discriminating its unique items
|
||||
/// with a specified method. Limiting the amount of items stored
|
||||
/// with that discriminant to the const `N`.
|
||||
pub struct Bucket<const N: usize, I: Bucketable> {
|
||||
/// The storage of the bucket
|
||||
storage: BTreeMap<I::Discriminant, ArrayVec<I, N>>,
|
||||
}
|
||||
|
||||
impl<const N: usize, I: Bucketable> Bucket<N, I> {
|
||||
/// Create a new Bucket
|
||||
pub const fn new() -> Self {
|
||||
Self {
|
||||
storage: BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Push a new element into the Bucket
|
||||
///
|
||||
/// Will internally create a new vector for each new discriminant being
|
||||
/// generated from an item.
|
||||
///
|
||||
/// This function WILL NOT push the element if it already exists.
|
||||
///
|
||||
/// Return `None` if the item has been pushed or ignored. `Some(I)` if
|
||||
/// the vector is full.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use cuprate_p2p_bucket::Bucket;
|
||||
/// use std::net::Ipv4Addr;
|
||||
///
|
||||
/// let mut bucket = Bucket::<8,Ipv4Addr>::new();
|
||||
///
|
||||
/// // Push a first IP address.
|
||||
/// bucket.push("127.0.0.1".parse().unwrap());
|
||||
/// assert_eq!(1, bucket.len());
|
||||
///
|
||||
/// // Push the same IP address a second time.
|
||||
/// bucket.push("127.0.0.1".parse().unwrap());
|
||||
/// assert_eq!(1, bucket.len());
|
||||
/// ```
|
||||
pub fn push(&mut self, item: I) -> Option<I> {
|
||||
let discriminant = item.discriminant();
|
||||
|
||||
if let Some(vec) = self.storage.get_mut(&discriminant) {
|
||||
// Push the item if it doesn't exist.
|
||||
if !vec.contains(&item) {
|
||||
return vec.try_push(item).err().map(CapacityError::element);
|
||||
}
|
||||
} else {
|
||||
// Initialize the vector if not found.
|
||||
let mut vec = ArrayVec::<I, N>::new();
|
||||
vec.push(item);
|
||||
self.storage.insert(discriminant, vec);
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Will attempt to remove an item from the bucket.
|
||||
pub fn remove(&mut self, item: &I) -> Option<I> {
|
||||
self.storage.get_mut(&item.discriminant()).and_then(|vec| {
|
||||
vec.iter()
|
||||
.enumerate()
|
||||
.find_map(|(i, v)| (item == v).then_some(i))
|
||||
.map(|index| vec.swap_remove(index))
|
||||
})
|
||||
}
|
||||
|
||||
/// Return the number of item stored within the storage
|
||||
pub fn len(&self) -> usize {
|
||||
self.storage.values().map(ArrayVec::len).sum()
|
||||
}
|
||||
|
||||
/// Return the number of item stored with a specific discriminant.
|
||||
///
|
||||
/// This method returns None if the bucket with this discriminant
|
||||
/// doesn't exist.
|
||||
pub fn len_bucket(&self, discriminant: &I::Discriminant) -> Option<usize> {
|
||||
self.storage.get(discriminant).map(ArrayVec::len)
|
||||
}
|
||||
|
||||
/// Return `true` if the storage contains no items
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len() == 0
|
||||
}
|
||||
|
||||
/// Return a reference to an item chosen at random.
|
||||
///
|
||||
/// Repeated use of this function will provide a normal distribution of
|
||||
/// items based on their discriminants.
|
||||
pub fn get_random(&mut self) -> Option<&I> {
|
||||
// Get the total amount of discriminants to explore.
|
||||
let len = self.storage.len();
|
||||
|
||||
// Get a random bucket.
|
||||
let (_, vec) = self.storage.iter().nth(random::<usize>() / len).unwrap();
|
||||
|
||||
// Return a reference chose at random.
|
||||
vec.get(random::<usize>() / vec.len())
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize, I: Bucketable> Default for Bucket<N, I> {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl Bucketable for Ipv4Addr {
|
||||
/// We are discriminating by `/16` subnets.
|
||||
type Discriminant = [u8; 2];
|
||||
|
||||
fn discriminant(&self) -> Self::Discriminant {
|
||||
[self.octets()[0], self.octets()[1]]
|
||||
}
|
||||
}
|
|
@@ -73,6 +73,15 @@ pub enum TxState<Id> {
     Local,
 }

+impl<Id> TxState<Id> {
+    /// Returns `true` if the tx is in the stem stage.
+    ///
+    /// [`TxState::Local`] & [`TxState::Stem`] are the 2 stem stage states.
+    pub const fn is_stem_stage(&self) -> bool {
+        matches!(self, Self::Local | Self::Stem { .. })
+    }
+}
+
 /// A request to route a transaction.
 pub struct DandelionRouteReq<Tx, Id> {
     /// The transaction.
|
||||
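A hedged sketch of how this helper is meant to be used, mirroring `state_stem: state.is_stem_stage()` in the new `incoming_tx.rs`; the wrapper function here is illustrative:

```rust
use cuprate_dandelion_tower::TxState;

/// Decide whether a tx entering the pool should be stored as a private
/// "stem" tx or a public "fluff" tx (illustrative helper, not from the commit).
fn state_stem_flag<Id>(state: &TxState<Id>) -> bool {
    // `Local` and `Stem { .. }` are the two stem-stage states.
    state.is_stem_stage()
}
```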
|
|
|
@ -10,9 +10,10 @@ default = ["borsh"]
|
|||
borsh = ["dep:borsh", "cuprate-pruning/borsh"]
|
||||
|
||||
[dependencies]
|
||||
cuprate-helper = { workspace = true, features = ["asynch"], default-features = false }
|
||||
cuprate-wire = { workspace = true, features = ["tracing"] }
|
||||
cuprate-helper = { workspace = true, features = ["asynch"], default-features = false }
|
||||
cuprate-wire = { workspace = true, features = ["tracing"] }
|
||||
cuprate-pruning = { workspace = true }
|
||||
cuprate-types = { workspace = true }
|
||||
|
||||
tokio = { workspace = true, features = ["net", "sync", "macros", "time", "rt", "rt-multi-thread"]}
|
||||
tokio-util = { workspace = true, features = ["codec"] }
|
||||
|
|
|
@@ -1,23 +0,0 @@
-//! Data structures related to bans.
-
-use std::time::{Duration, Instant};
-
-use crate::NetZoneAddress;
-
-/// Data within [`crate::services::AddressBookRequest::SetBan`].
-pub struct SetBan<A: NetZoneAddress> {
-    /// Address of the peer.
-    pub address: A,
-    /// - If [`Some`], how long this peer should be banned for
-    /// - If [`None`], the peer will be unbanned
-    pub ban: Option<Duration>,
-}
-
-/// Data within [`crate::services::AddressBookResponse::GetBans`].
-pub struct BanState<A: NetZoneAddress> {
-    /// Address of the peer.
-    pub address: A,
-    /// - If [`Some`], the peer is banned until this [`Instant`]
-    /// - If [`None`], the peer is not currently banned
-    pub unban_instant: Option<Instant>,
-}
|
@@ -27,9 +27,11 @@ mod connector;
pub mod handshaker;
mod request_handler;
mod timeout_monitor;
mod weak;

pub use connector::{ConnectRequest, Connector};
pub use handshaker::{DoHandshakeRequest, HandshakeError, HandshakerBuilder};
pub use weak::WeakClient;

/// An internal identifier for a given peer, will be their address if known
/// or a random u128 if not.

@@ -128,6 +130,17 @@ impl<Z: NetworkZone> Client<Z> {
        }
        .into()
    }

    /// Create a [`WeakClient`] for this [`Client`].
    pub fn downgrade(&self) -> WeakClient<Z> {
        WeakClient {
            info: self.info.clone(),
            connection_tx: self.connection_tx.downgrade(),
            semaphore: self.semaphore.clone(),
            permit: None,
            error: self.error.clone(),
        }
    }
}

impl<Z: NetworkZone> Service<PeerRequest> for Client<Z> {
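A quick illustration of the new `downgrade` method: the returned `WeakClient` only holds a weak sender to the connection task, so it can be handed out freely. This is a sketch only, assuming `client` is an already-connected `Client<ClearNet>` (the variable and function names are illustrative):

```rust
fn demo(client: &Client<ClearNet>) {
    let weak: WeakClient<ClearNet> = client.downgrade();

    // Dropping the `WeakClient` has no effect on the peer; only dropping the
    // strong `Client` (and its connection task) tears the connection down.
    drop(weak);
}
```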
@@ -111,7 +111,8 @@ impl<N: NetworkZone> Service<AddressBookRequest<N>> for DummyAddressBook {
            AddressBookRequest::PeerlistSize
            | AddressBookRequest::ConnectionCount
            | AddressBookRequest::SetBan(_)
            | AddressBookRequest::GetBans => {
            | AddressBookRequest::GetBans
            | AddressBookRequest::ConnectionInfo => {
                todo!("finish https://github.com/Cuprate/cuprate/pull/297")
            }
        }))
114
p2p/p2p-core/src/client/weak.rs
Normal file

@@ -0,0 +1,114 @@
use std::task::{ready, Context, Poll};

use futures::channel::oneshot;
use tokio::sync::{mpsc, OwnedSemaphorePermit};
use tokio_util::sync::PollSemaphore;
use tower::Service;

use cuprate_helper::asynch::InfallibleOneshotReceiver;

use crate::{
    client::{connection, PeerInformation},
    NetworkZone, PeerError, PeerRequest, PeerResponse, SharedError,
};

/// A weak handle to a [`Client`](super::Client).
///
/// When this is dropped the peer will not be disconnected.
pub struct WeakClient<N: NetworkZone> {
    /// Information on the connected peer.
    pub info: PeerInformation<N::Addr>,

    /// The channel to the [`Connection`](connection::Connection) task.
    pub(super) connection_tx: mpsc::WeakSender<connection::ConnectionTaskRequest>,

    /// The semaphore that limits the requests sent to the peer.
    pub(super) semaphore: PollSemaphore,
    /// A permit for the semaphore, will be [`Some`] after `poll_ready` returns ready.
    pub(super) permit: Option<OwnedSemaphorePermit>,

    /// The error slot shared between the [`Client`] and [`Connection`](connection::Connection).
    pub(super) error: SharedError<PeerError>,
}

impl<N: NetworkZone> WeakClient<N> {
    /// Internal function to set an error on the [`SharedError`].
    fn set_err(&self, err: PeerError) -> tower::BoxError {
        let err_str = err.to_string();
        match self.error.try_insert_err(err) {
            Ok(()) => err_str,
            Err(e) => e.to_string(),
        }
        .into()
    }
}

impl<Z: NetworkZone> Service<PeerRequest> for WeakClient<Z> {
    type Response = PeerResponse;
    type Error = tower::BoxError;
    type Future = InfallibleOneshotReceiver<Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        if let Some(err) = self.error.try_get_err() {
            return Poll::Ready(Err(err.to_string().into()));
        }

        if self.connection_tx.strong_count() == 0 {
            let err = self.set_err(PeerError::ClientChannelClosed);
            return Poll::Ready(Err(err));
        }

        if self.permit.is_some() {
            return Poll::Ready(Ok(()));
        }

        let permit = ready!(self.semaphore.poll_acquire(cx))
            .expect("Client semaphore should not be closed!");

        self.permit = Some(permit);

        Poll::Ready(Ok(()))
    }

    #[expect(clippy::significant_drop_tightening)]
    fn call(&mut self, request: PeerRequest) -> Self::Future {
        let permit = self
            .permit
            .take()
            .expect("poll_ready did not return ready before call to call");

        let (tx, rx) = oneshot::channel();
        let req = connection::ConnectionTaskRequest {
            response_channel: tx,
            request,
            permit: Some(permit),
        };

        match self.connection_tx.upgrade() {
            None => {
                self.set_err(PeerError::ClientChannelClosed);

                let resp = Err(PeerError::ClientChannelClosed.into());
                drop(req.response_channel.send(resp));
            }
            Some(sender) => {
                if let Err(e) = sender.try_send(req) {
                    // The connection task could have closed between a call to `poll_ready` and the call to
                    // `call`, which means if we don't handle the error here the receiver would panic.
                    use mpsc::error::TrySendError;

                    match e {
                        TrySendError::Closed(req) | TrySendError::Full(req) => {
                            self.set_err(PeerError::ClientChannelClosed);

                            let resp = Err(PeerError::ClientChannelClosed.into());
                            drop(req.response_channel.send(resp));
                        }
                    }
                }
            }
        }

        rx.into()
    }
}
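Since `WeakClient` implements `tower::Service<PeerRequest>`, it is driven the same way as the strong `Client`. A minimal sketch, assuming a `WeakClient` obtained via `Client::downgrade` and a `PeerRequest` such as the `GetObjects` request used elsewhere in this PR; the helper name is illustrative:

```rust
use tower::{Service, ServiceExt};

async fn send_via_weak<N: NetworkZone>(
    weak: &mut WeakClient<N>,
    request: PeerRequest,
) -> Result<PeerResponse, tower::BoxError> {
    // `ready` acquires the per-peer semaphore permit (or surfaces the shared
    // error if the connection task has already gone away), then `call` forwards
    // the request to the connection task.
    weak.ready().await?.call(request).await
}
```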
@@ -75,7 +75,6 @@ use cuprate_wire::{
    NetworkAddress,
};

pub mod ban;
pub mod client;
mod constants;
pub mod error;

@@ -83,6 +82,7 @@ pub mod handles;
mod network_zones;
pub mod protocol;
pub mod services;
pub mod types;

pub use error::*;
pub use network_zones::{ClearNet, ClearNetServerCfg};

@@ -4,9 +4,9 @@ use cuprate_pruning::{PruningError, PruningSeed};
use cuprate_wire::{CoreSyncData, PeerListEntryBase};

use crate::{
    ban::{BanState, SetBan},
    client::InternalPeerID,
    handles::ConnectionHandle,
    types::{BanState, ConnectionInfo, SetBan},
    NetZoneAddress, NetworkAddressIncorrectZone, NetworkZone,
};

@@ -118,6 +118,9 @@ pub enum AddressBookRequest<Z: NetworkZone> {
    /// Get the amount of white & grey peers.
    PeerlistSize,

    /// Get information on all connections.
    ConnectionInfo,

    /// Get the amount of incoming & outgoing connections.
    ConnectionCount,

@@ -152,6 +155,9 @@ pub enum AddressBookResponse<Z: NetworkZone> {
    /// Response to [`AddressBookRequest::PeerlistSize`].
    PeerlistSize { white: usize, grey: usize },

    /// Response to [`AddressBookRequest::ConnectionInfo`].
    ConnectionInfo(Vec<ConnectionInfo<Z::Addr>>),

    /// Response to [`AddressBookRequest::ConnectionCount`].
    ConnectionCount { incoming: usize, outgoing: usize },
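A sketch of how a caller might use the new `ConnectionInfo` request/response pair, assuming `address_book` is any service speaking these enums (for example the buffered address book handle used elsewhere in this PR); the function name and error string are illustrative:

```rust
use tower::{Service, ServiceExt};

async fn all_connection_info<Z: NetworkZone>(
    address_book: &mut impl Service<
        AddressBookRequest<Z>,
        Response = AddressBookResponse<Z>,
        Error = tower::BoxError,
    >,
) -> Result<Vec<ConnectionInfo<Z::Addr>>, tower::BoxError> {
    match address_book
        .ready()
        .await?
        .call(AddressBookRequest::ConnectionInfo)
        .await?
    {
        AddressBookResponse::ConnectionInfo(info) => Ok(info),
        // No other variant is expected in reply to `ConnectionInfo`.
        _ => Err("unexpected address book response".into()),
    }
}
```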
96
p2p/p2p-core/src/types.rs
Normal file

@@ -0,0 +1,96 @@
//! General data structures.

use std::time::{Duration, Instant};

use cuprate_pruning::PruningSeed;
use cuprate_types::{AddressType, ConnectionState};

use crate::NetZoneAddress;

/// Data within [`crate::services::AddressBookRequest::SetBan`].
pub struct SetBan<A: NetZoneAddress> {
    /// Address of the peer.
    pub address: A,
    /// - If [`Some`], how long this peer should be banned for
    /// - If [`None`], the peer will be unbanned
    pub ban: Option<Duration>,
}

/// Data within [`crate::services::AddressBookResponse::GetBans`].
pub struct BanState<A: NetZoneAddress> {
    /// Address of the peer.
    pub address: A,
    /// - If [`Some`], the peer is banned until this [`Instant`]
    /// - If [`None`], the peer is not currently banned
    pub unban_instant: Option<Instant>,
}

/// Data within [`crate::services::AddressBookResponse::ConnectionInfo`].
pub struct ConnectionInfo<A: NetZoneAddress> {
    // The following fields are mostly the same as `monerod`.
    pub address: A,
    pub address_type: AddressType,
    pub avg_download: u64,
    pub avg_upload: u64,
    pub current_download: u64,
    pub current_upload: u64,
    pub height: u64,
    /// Either a domain or an IP without the port.
    pub host: String,
    pub incoming: bool,
    pub live_time: u64,
    pub localhost: bool,
    pub local_ip: bool,
    pub peer_id: u64,
    pub pruning_seed: PruningSeed,
    pub recv_count: u64,
    pub recv_idle_time: u64,
    pub rpc_credits_per_hash: u32,
    pub rpc_port: u16,
    pub send_count: u64,
    pub send_idle_time: u64,
    pub state: ConnectionState,
    pub support_flags: u32,

    // The following fields are slightly different than `monerod`.
    //
    /// [`None`] if Tor/i2p or unknown.
    pub socket_addr: Option<std::net::SocketAddr>,

    /// This field does not exist for `cuprated`'s RPC, this is just a marker type:
    /// - <https://github.com/Cuprate/cuprate/pull/320#discussion_r1811335020>
    /// - <https://github.com/Cuprate/cuprate/pull/320#discussion_r1819826080>
    ///
    /// [`ConnectionId::DEFAULT_STR`] is used when mapping to the RPC type.
    pub connection_id: ConnectionId,
}

/// Marker type for `monerod`'s connection ID.
///
/// `connection_id` is a 128-bit `uuid` in `monerod`.
/// `cuprated` does not support this field so it returns
/// the default value in the RPC interface, an all 0-bit UUID.
///
/// This default value in string form is [`ConnectionId::DEFAULT_STR`].
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ConnectionId;

impl ConnectionId {
    /// [`str`] representation of a default connection ID.
    pub const DEFAULT_STR: &str = "00000000000000000000000000000000";
}

/// Used in RPC's `sync_info`.
///
// TODO: fix docs after <https://github.com/Cuprate/cuprate/pull/320#discussion_r1811089758>
// Data within [`crate::services::AddressBookResponse::Spans`].
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Span<A: NetZoneAddress> {
    pub nblocks: u64,
    pub rate: u32,
    pub remote_address: A,
    pub size: u64,
    pub speed: u32,
    pub start_block_height: u64,
}
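A small sketch of how the `ConnectionId` marker type might be rendered when building an RPC response; the helper and test names are illustrative, not part of this PR:

```rust
/// `cuprated` never tracks a real per-connection UUID, so the all-zero default
/// string is always reported in RPC output.
fn connection_id_string(_id: ConnectionId) -> String {
    ConnectionId::DEFAULT_STR.to_string()
}

#[test]
fn default_connection_id_is_all_zero() {
    assert_eq!(
        connection_id_string(ConnectionId),
        "00000000000000000000000000000000"
    );
}
```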
@ -20,12 +20,12 @@ monero-serai = { workspace = true, features = ["std"] }
|
|||
|
||||
tower = { workspace = true, features = ["buffer"] }
|
||||
tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
|
||||
rayon = { workspace = true }
|
||||
tokio-util = { workspace = true }
|
||||
rayon = { workspace = true }
|
||||
tokio-stream = { workspace = true, features = ["sync", "time"] }
|
||||
futures = { workspace = true, features = ["std"] }
|
||||
pin-project = { workspace = true }
|
||||
dashmap = { workspace = true }
|
||||
indexmap = { workspace = true, features = ["std"] }
|
||||
|
||||
thiserror = { workspace = true }
|
||||
bytes = { workspace = true, features = ["std"] }
|
||||
|
|
|
@ -8,7 +8,6 @@
|
|||
use std::{
|
||||
cmp::{max, min, Reverse},
|
||||
collections::{BTreeMap, BinaryHeap},
|
||||
sync::Arc,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
|
@ -18,7 +17,7 @@ use tokio::{
|
|||
task::JoinSet,
|
||||
time::{interval, timeout, MissedTickBehavior},
|
||||
};
|
||||
use tower::{Service, ServiceExt};
|
||||
use tower::{util::BoxCloneService, Service, ServiceExt};
|
||||
use tracing::{instrument, Instrument, Span};
|
||||
|
||||
use cuprate_async_buffer::{BufferAppender, BufferStream};
|
||||
|
@ -27,11 +26,11 @@ use cuprate_p2p_core::{handles::ConnectionHandle, NetworkZone};
|
|||
use cuprate_pruning::PruningSeed;
|
||||
|
||||
use crate::{
|
||||
client_pool::{ClientPool, ClientPoolDropGuard},
|
||||
constants::{
|
||||
BLOCK_DOWNLOADER_REQUEST_TIMEOUT, EMPTY_CHAIN_ENTRIES_BEFORE_TOP_ASSUMED, LONG_BAN,
|
||||
MAX_BLOCK_BATCH_LEN, MAX_DOWNLOAD_FAILURES,
|
||||
},
|
||||
peer_set::ClientDropGuard,
|
||||
};
|
||||
|
||||
mod block_queue;
|
||||
|
@ -41,6 +40,7 @@ mod request_chain;
|
|||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
use crate::peer_set::{PeerSetRequest, PeerSetResponse};
|
||||
use block_queue::{BlockQueue, ReadyQueueBatch};
|
||||
use chain_tracker::{BlocksToRetrieve, ChainEntry, ChainTracker};
|
||||
use download_batch::download_batch_task;
|
||||
|
@ -135,7 +135,7 @@ pub enum ChainSvcResponse {
|
|||
/// call this function again, so it can start the search again.
|
||||
#[instrument(level = "error", skip_all, name = "block_downloader")]
|
||||
pub fn download_blocks<N: NetworkZone, C>(
|
||||
client_pool: Arc<ClientPool<N>>,
|
||||
peer_set: BoxCloneService<PeerSetRequest, PeerSetResponse<N>, tower::BoxError>,
|
||||
our_chain_svc: C,
|
||||
config: BlockDownloaderConfig,
|
||||
) -> BufferStream<BlockBatch>
|
||||
|
@ -147,8 +147,7 @@ where
|
|||
{
|
||||
let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_size);
|
||||
|
||||
let block_downloader =
|
||||
BlockDownloader::new(client_pool, our_chain_svc, buffer_appender, config);
|
||||
let block_downloader = BlockDownloader::new(peer_set, our_chain_svc, buffer_appender, config);
|
||||
|
||||
tokio::spawn(
|
||||
block_downloader
|
||||
|
@ -186,8 +185,8 @@ where
|
|||
/// - download an already requested batch of blocks (this might happen due to an error in the previous request
|
||||
/// or because the queue of ready blocks is too large, so we need the oldest block to clear it).
|
||||
struct BlockDownloader<N: NetworkZone, C> {
|
||||
/// The client pool.
|
||||
client_pool: Arc<ClientPool<N>>,
|
||||
/// The peer set.
|
||||
peer_set: BoxCloneService<PeerSetRequest, PeerSetResponse<N>, tower::BoxError>,
|
||||
|
||||
/// The service that holds our current chain state.
|
||||
our_chain_svc: C,
|
||||
|
@ -208,7 +207,7 @@ struct BlockDownloader<N: NetworkZone, C> {
|
|||
///
|
||||
/// Returns a result of the chain entry or an error.
|
||||
#[expect(clippy::type_complexity)]
|
||||
chain_entry_task: JoinSet<Result<(ClientPoolDropGuard<N>, ChainEntry<N>), BlockDownloadError>>,
|
||||
chain_entry_task: JoinSet<Result<(ClientDropGuard<N>, ChainEntry<N>), BlockDownloadError>>,
|
||||
|
||||
/// The current inflight requests.
|
||||
///
|
||||
|
@ -235,13 +234,13 @@ where
|
|||
{
|
||||
/// Creates a new [`BlockDownloader`]
|
||||
fn new(
|
||||
client_pool: Arc<ClientPool<N>>,
|
||||
peer_set: BoxCloneService<PeerSetRequest, PeerSetResponse<N>, tower::BoxError>,
|
||||
our_chain_svc: C,
|
||||
buffer_appender: BufferAppender<BlockBatch>,
|
||||
config: BlockDownloaderConfig,
|
||||
) -> Self {
|
||||
Self {
|
||||
client_pool,
|
||||
peer_set,
|
||||
our_chain_svc,
|
||||
amount_of_blocks_to_request: config.initial_batch_size,
|
||||
amount_of_blocks_to_request_updated_at: 0,
|
||||
|
@ -259,7 +258,7 @@ where
|
|||
fn check_pending_peers(
|
||||
&mut self,
|
||||
chain_tracker: &mut ChainTracker<N>,
|
||||
pending_peers: &mut BTreeMap<PruningSeed, Vec<ClientPoolDropGuard<N>>>,
|
||||
pending_peers: &mut BTreeMap<PruningSeed, Vec<ClientDropGuard<N>>>,
|
||||
) {
|
||||
tracing::debug!("Checking if we can give any work to pending peers.");
|
||||
|
||||
|
@ -286,11 +285,11 @@ where
|
|||
/// This function will find the batch(es) that we are waiting on to clear our ready queue and sends another request
|
||||
/// for them.
|
||||
///
|
||||
/// Returns the [`ClientPoolDropGuard`] back if it doesn't have the batch according to its pruning seed.
|
||||
/// Returns the [`ClientDropGuard`] back if it doesn't have the batch according to its pruning seed.
|
||||
fn request_inflight_batch_again(
|
||||
&mut self,
|
||||
client: ClientPoolDropGuard<N>,
|
||||
) -> Option<ClientPoolDropGuard<N>> {
|
||||
client: ClientDropGuard<N>,
|
||||
) -> Option<ClientDropGuard<N>> {
|
||||
tracing::debug!(
|
||||
"Requesting an inflight batch, current ready queue size: {}",
|
||||
self.block_queue.size()
|
||||
|
@ -336,13 +335,13 @@ where
|
|||
///
|
||||
/// The batch requested will depend on our current state, failed batches will be prioritised.
|
||||
///
|
||||
/// Returns the [`ClientPoolDropGuard`] back if it doesn't have the data we currently need according
|
||||
/// Returns the [`ClientDropGuard`] back if it doesn't have the data we currently need according
|
||||
/// to its pruning seed.
|
||||
fn request_block_batch(
|
||||
&mut self,
|
||||
chain_tracker: &mut ChainTracker<N>,
|
||||
client: ClientPoolDropGuard<N>,
|
||||
) -> Option<ClientPoolDropGuard<N>> {
|
||||
client: ClientDropGuard<N>,
|
||||
) -> Option<ClientDropGuard<N>> {
|
||||
tracing::trace!("Using peer to request a batch of blocks.");
|
||||
// First look to see if we have any failed requests.
|
||||
while let Some(failed_request) = self.failed_batches.peek() {
|
||||
|
@ -416,13 +415,13 @@ where
|
|||
/// This function will use our current state to decide if we should send a request for a chain entry
|
||||
/// or if we should request a batch of blocks.
|
||||
///
|
||||
/// Returns the [`ClientPoolDropGuard`] back if it doesn't have the data we currently need according
|
||||
/// Returns the [`ClientDropGuard`] back if it doesn't have the data we currently need according
|
||||
/// to its pruning seed.
|
||||
fn try_handle_free_client(
|
||||
&mut self,
|
||||
chain_tracker: &mut ChainTracker<N>,
|
||||
client: ClientPoolDropGuard<N>,
|
||||
) -> Option<ClientPoolDropGuard<N>> {
|
||||
client: ClientDropGuard<N>,
|
||||
) -> Option<ClientDropGuard<N>> {
|
||||
// We send 2 requests, so if one of them is slow or doesn't have the next chain, we still have a backup.
|
||||
if self.chain_entry_task.len() < 2
|
||||
// If we have had too many failures then assume the tip has been found so no more chain entries.
|
||||
|
@ -463,7 +462,7 @@ where
|
|||
async fn check_for_free_clients(
|
||||
&mut self,
|
||||
chain_tracker: &mut ChainTracker<N>,
|
||||
pending_peers: &mut BTreeMap<PruningSeed, Vec<ClientPoolDropGuard<N>>>,
|
||||
pending_peers: &mut BTreeMap<PruningSeed, Vec<ClientDropGuard<N>>>,
|
||||
) -> Result<(), BlockDownloadError> {
|
||||
tracing::debug!("Checking for free peers");
|
||||
|
||||
|
@ -478,10 +477,19 @@ where
|
|||
panic!("Chain service returned wrong response.");
|
||||
};
|
||||
|
||||
for client in self
|
||||
.client_pool
|
||||
.clients_with_more_cumulative_difficulty(current_cumulative_difficulty)
|
||||
{
|
||||
let PeerSetResponse::PeersWithMorePoW(clients) = self
|
||||
.peer_set
|
||||
.ready()
|
||||
.await?
|
||||
.call(PeerSetRequest::PeersWithMorePoW(
|
||||
current_cumulative_difficulty,
|
||||
))
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
for client in clients {
|
||||
pending_peers
|
||||
.entry(client.info.pruning_seed)
|
||||
.or_default()
|
||||
|
@ -497,9 +505,9 @@ where
|
|||
async fn handle_download_batch_res(
|
||||
&mut self,
|
||||
start_height: usize,
|
||||
res: Result<(ClientPoolDropGuard<N>, BlockBatch), BlockDownloadError>,
|
||||
res: Result<(ClientDropGuard<N>, BlockBatch), BlockDownloadError>,
|
||||
chain_tracker: &mut ChainTracker<N>,
|
||||
pending_peers: &mut BTreeMap<PruningSeed, Vec<ClientPoolDropGuard<N>>>,
|
||||
pending_peers: &mut BTreeMap<PruningSeed, Vec<ClientDropGuard<N>>>,
|
||||
) -> Result<(), BlockDownloadError> {
|
||||
tracing::debug!("Handling block download response");
|
||||
|
||||
|
@ -593,7 +601,7 @@ where
|
|||
/// Starts the main loop of the block downloader.
|
||||
async fn run(mut self) -> Result<(), BlockDownloadError> {
|
||||
let mut chain_tracker =
|
||||
initial_chain_search(&self.client_pool, &mut self.our_chain_svc).await?;
|
||||
initial_chain_search(&mut self.peer_set, &mut self.our_chain_svc).await?;
|
||||
|
||||
let mut pending_peers = BTreeMap::new();
|
||||
|
||||
|
@ -662,7 +670,7 @@ struct BlockDownloadTaskResponse<N: NetworkZone> {
|
|||
/// The start height of the batch.
|
||||
start_height: usize,
|
||||
/// A result containing the batch or an error.
|
||||
result: Result<(ClientPoolDropGuard<N>, BlockBatch), BlockDownloadError>,
|
||||
result: Result<(ClientDropGuard<N>, BlockBatch), BlockDownloadError>,
|
||||
}
|
||||
|
||||
/// Returns if a peer has all the blocks in a range, according to its [`PruningSeed`].
|
||||
|
|
|
@ -16,8 +16,8 @@ use cuprate_wire::protocol::{GetObjectsRequest, GetObjectsResponse};
|
|||
|
||||
use crate::{
|
||||
block_downloader::{BlockBatch, BlockDownloadError, BlockDownloadTaskResponse},
|
||||
client_pool::ClientPoolDropGuard,
|
||||
constants::{BLOCK_DOWNLOADER_REQUEST_TIMEOUT, MAX_TRANSACTION_BLOB_SIZE, MEDIUM_BAN},
|
||||
peer_set::ClientDropGuard,
|
||||
};
|
||||
|
||||
/// Attempts to request a batch of blocks from a peer, returning [`BlockDownloadTaskResponse`].
|
||||
|
@ -32,7 +32,7 @@ use crate::{
|
|||
)]
|
||||
#[expect(clippy::used_underscore_binding)]
|
||||
pub async fn download_batch_task<N: NetworkZone>(
|
||||
client: ClientPoolDropGuard<N>,
|
||||
client: ClientDropGuard<N>,
|
||||
ids: ByteArrayVec<32>,
|
||||
previous_id: [u8; 32],
|
||||
expected_start_height: usize,
|
||||
|
@ -49,11 +49,11 @@ pub async fn download_batch_task<N: NetworkZone>(
|
|||
/// This function will validate the blocks that were downloaded were the ones asked for and that they match
|
||||
/// the expected height.
|
||||
async fn request_batch_from_peer<N: NetworkZone>(
|
||||
mut client: ClientPoolDropGuard<N>,
|
||||
mut client: ClientDropGuard<N>,
|
||||
ids: ByteArrayVec<32>,
|
||||
previous_id: [u8; 32],
|
||||
expected_start_height: usize,
|
||||
) -> Result<(ClientPoolDropGuard<N>, BlockBatch), BlockDownloadError> {
|
||||
) -> Result<(ClientDropGuard<N>, BlockBatch), BlockDownloadError> {
|
||||
let request = PeerRequest::Protocol(ProtocolRequest::GetObjects(GetObjectsRequest {
|
||||
blocks: ids.clone(),
|
||||
pruned: false,
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
use std::{mem, sync::Arc};
|
||||
use std::mem;
|
||||
|
||||
use tokio::{task::JoinSet, time::timeout};
|
||||
use tower::{Service, ServiceExt};
|
||||
use tower::{util::BoxCloneService, Service, ServiceExt};
|
||||
use tracing::{instrument, Instrument, Span};
|
||||
|
||||
use cuprate_p2p_core::{
|
||||
|
@ -15,11 +15,11 @@ use crate::{
|
|||
chain_tracker::{ChainEntry, ChainTracker},
|
||||
BlockDownloadError, ChainSvcRequest, ChainSvcResponse,
|
||||
},
|
||||
client_pool::{ClientPool, ClientPoolDropGuard},
|
||||
constants::{
|
||||
BLOCK_DOWNLOADER_REQUEST_TIMEOUT, INITIAL_CHAIN_REQUESTS_TO_SEND,
|
||||
MAX_BLOCKS_IDS_IN_CHAIN_ENTRY, MEDIUM_BAN,
|
||||
},
|
||||
peer_set::{ClientDropGuard, PeerSetRequest, PeerSetResponse},
|
||||
};
|
||||
|
||||
/// Request a chain entry from a peer.
|
||||
|
@ -27,9 +27,9 @@ use crate::{
|
|||
/// Because the block downloader only follows and downloads one chain we only have to send the block hash of
|
||||
/// top block we have found and the genesis block, this is then called `short_history`.
|
||||
pub(crate) async fn request_chain_entry_from_peer<N: NetworkZone>(
|
||||
mut client: ClientPoolDropGuard<N>,
|
||||
mut client: ClientDropGuard<N>,
|
||||
short_history: [[u8; 32]; 2],
|
||||
) -> Result<(ClientPoolDropGuard<N>, ChainEntry<N>), BlockDownloadError> {
|
||||
) -> Result<(ClientDropGuard<N>, ChainEntry<N>), BlockDownloadError> {
|
||||
let PeerResponse::Protocol(ProtocolResponse::GetChain(chain_res)) = client
|
||||
.ready()
|
||||
.await?
|
||||
|
@ -80,7 +80,7 @@ pub(crate) async fn request_chain_entry_from_peer<N: NetworkZone>(
|
|||
/// We then wait for their response and choose the peer who claims the highest cumulative difficulty.
|
||||
#[instrument(level = "error", skip_all)]
|
||||
pub async fn initial_chain_search<N: NetworkZone, C>(
|
||||
client_pool: &Arc<ClientPool<N>>,
|
||||
peer_set: &mut BoxCloneService<PeerSetRequest, PeerSetResponse<N>, tower::BoxError>,
|
||||
mut our_chain_svc: C,
|
||||
) -> Result<ChainTracker<N>, BlockDownloadError>
|
||||
where
|
||||
|
@ -102,9 +102,15 @@ where
|
|||
|
||||
let our_genesis = *block_ids.last().expect("Blockchain had no genesis block.");
|
||||
|
||||
let mut peers = client_pool
|
||||
.clients_with_more_cumulative_difficulty(cumulative_difficulty)
|
||||
.into_iter();
|
||||
let PeerSetResponse::PeersWithMorePoW(clients) = peer_set
|
||||
.ready()
|
||||
.await?
|
||||
.call(PeerSetRequest::PeersWithMorePoW(cumulative_difficulty))
|
||||
.await?
|
||||
else {
|
||||
unreachable!();
|
||||
};
|
||||
let mut peers = clients.into_iter();
|
||||
|
||||
let mut futs = JoinSet::new();
|
||||
|
||||
|
|
|
@ -14,8 +14,8 @@ use monero_serai::{
|
|||
transaction::{Input, Timelock, Transaction, TransactionPrefix},
|
||||
};
|
||||
use proptest::{collection::vec, prelude::*};
|
||||
use tokio::time::timeout;
|
||||
use tower::{service_fn, Service};
|
||||
use tokio::{sync::mpsc, time::timeout};
|
||||
use tower::{buffer::Buffer, service_fn, Service, ServiceExt};
|
||||
|
||||
use cuprate_fixed_bytes::ByteArrayVec;
|
||||
use cuprate_p2p_core::{
|
||||
|
@ -31,7 +31,7 @@ use cuprate_wire::{
|
|||
|
||||
use crate::{
|
||||
block_downloader::{download_blocks, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse},
|
||||
client_pool::ClientPool,
|
||||
peer_set::PeerSet,
|
||||
};
|
||||
|
||||
proptest! {
|
||||
|
@ -48,19 +48,20 @@ proptest! {
|
|||
|
||||
let tokio_pool = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap();
|
||||
|
||||
#[expect(clippy::significant_drop_tightening)]
|
||||
tokio_pool.block_on(async move {
|
||||
timeout(Duration::from_secs(600), async move {
|
||||
let client_pool = ClientPool::new();
|
||||
let (new_connection_tx, new_connection_rx) = mpsc::channel(peers);
|
||||
|
||||
let peer_set = PeerSet::new(new_connection_rx);
|
||||
|
||||
for _ in 0..peers {
|
||||
let client = mock_block_downloader_client(Arc::clone(&blockchain));
|
||||
|
||||
client_pool.add_new_client(client);
|
||||
new_connection_tx.try_send(client).unwrap();
|
||||
}
|
||||
|
||||
let stream = download_blocks(
|
||||
client_pool,
|
||||
Buffer::new(peer_set, 10).boxed_clone(),
|
||||
OurChainSvc {
|
||||
genesis: *blockchain.blocks.first().unwrap().0
|
||||
},
|
||||
|
|
|
@ -1,177 +0,0 @@
|
|||
//! # Client Pool.
|
||||
//!
|
||||
//! The [`ClientPool`], is a pool of currently connected peers that can be pulled from.
|
||||
//! It does _not_ necessarily contain every connected peer as another place could have
|
||||
//! taken a peer from the pool.
|
||||
//!
|
||||
//! When taking peers from the pool they are wrapped in [`ClientPoolDropGuard`], which
|
||||
//! returns the peer to the pool when it is dropped.
|
||||
//!
|
||||
//! Internally the pool is a [`DashMap`] which means care should be taken in `async` code
|
||||
//! as internally this uses blocking `RwLock`s.
|
||||
use std::sync::Arc;
|
||||
|
||||
use dashmap::DashMap;
|
||||
use tokio::sync::mpsc;
|
||||
use tracing::{Instrument, Span};
|
||||
|
||||
use cuprate_p2p_core::{
|
||||
client::{Client, InternalPeerID},
|
||||
handles::ConnectionHandle,
|
||||
NetworkZone,
|
||||
};
|
||||
|
||||
pub(crate) mod disconnect_monitor;
|
||||
mod drop_guard_client;
|
||||
|
||||
pub(crate) use drop_guard_client::ClientPoolDropGuard;
|
||||
|
||||
/// The client pool, which holds currently connected free peers.
|
||||
///
|
||||
/// See the [module docs](self) for more.
|
||||
pub struct ClientPool<N: NetworkZone> {
|
||||
/// The connected [`Client`]s.
|
||||
clients: DashMap<InternalPeerID<N::Addr>, Client<N>>,
|
||||
/// A channel to send new peer ids down to monitor for disconnect.
|
||||
new_connection_tx: mpsc::UnboundedSender<(ConnectionHandle, InternalPeerID<N::Addr>)>,
|
||||
}
|
||||
|
||||
impl<N: NetworkZone> ClientPool<N> {
|
||||
/// Returns a new [`ClientPool`] wrapped in an [`Arc`].
|
||||
pub fn new() -> Arc<Self> {
|
||||
let (tx, rx) = mpsc::unbounded_channel();
|
||||
|
||||
let pool = Arc::new(Self {
|
||||
clients: DashMap::new(),
|
||||
new_connection_tx: tx,
|
||||
});
|
||||
|
||||
tokio::spawn(
|
||||
disconnect_monitor::disconnect_monitor(rx, Arc::clone(&pool))
|
||||
.instrument(Span::current()),
|
||||
);
|
||||
|
||||
pool
|
||||
}
|
||||
|
||||
/// Adds a [`Client`] to the pool, the client must have previously been taken from the
|
||||
/// pool.
|
||||
///
|
||||
/// See [`ClientPool::add_new_client`] to add a [`Client`] which was not taken from the pool before.
|
||||
///
|
||||
/// # Panics
|
||||
/// This function panics if `client` already exists in the pool.
|
||||
fn add_client(&self, client: Client<N>) {
|
||||
let handle = client.info.handle.clone();
|
||||
let id = client.info.id;
|
||||
|
||||
// Fast path: if the client is disconnected don't add it to the peer set.
|
||||
if handle.is_closed() {
|
||||
return;
|
||||
}
|
||||
|
||||
assert!(self.clients.insert(id, client).is_none());
|
||||
|
||||
// We have to check this again otherwise we could have a race condition where a
|
||||
// peer is disconnected after the first check, the disconnect monitor tries to remove it,
|
||||
// and then it is added to the pool.
|
||||
if handle.is_closed() {
|
||||
self.remove_client(&id);
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds a _new_ [`Client`] to the pool, this client should be a new connection, and not already
|
||||
/// from the pool.
|
||||
///
|
||||
/// # Panics
|
||||
/// This function panics if `client` already exists in the pool.
|
||||
pub fn add_new_client(&self, client: Client<N>) {
|
||||
self.new_connection_tx
|
||||
.send((client.info.handle.clone(), client.info.id))
|
||||
.unwrap();
|
||||
|
||||
self.add_client(client);
|
||||
}
|
||||
|
||||
/// Remove a [`Client`] from the pool.
|
||||
///
|
||||
/// [`None`] is returned if the client did not exist in the pool.
|
||||
fn remove_client(&self, peer: &InternalPeerID<N::Addr>) -> Option<Client<N>> {
|
||||
self.clients.remove(peer).map(|(_, client)| client)
|
||||
}
|
||||
|
||||
/// Borrows a [`Client`] from the pool.
|
||||
///
|
||||
/// The [`Client`] is wrapped in [`ClientPoolDropGuard`] which
|
||||
/// will return the client to the pool when it's dropped.
|
||||
///
|
||||
/// See [`Self::borrow_clients`] for borrowing multiple clients.
|
||||
pub fn borrow_client(
|
||||
self: &Arc<Self>,
|
||||
peer: &InternalPeerID<N::Addr>,
|
||||
) -> Option<ClientPoolDropGuard<N>> {
|
||||
self.remove_client(peer).map(|client| ClientPoolDropGuard {
|
||||
pool: Arc::clone(self),
|
||||
client: Some(client),
|
||||
})
|
||||
}
|
||||
|
||||
/// Borrows multiple [`Client`]s from the pool.
|
||||
///
|
||||
/// Note that the returned iterator is not guaranteed to contain every peer asked for.
|
||||
///
|
||||
/// See [`Self::borrow_client`] for borrowing a single client.
|
||||
pub fn borrow_clients<'a, 'b>(
|
||||
self: &'a Arc<Self>,
|
||||
peers: &'b [InternalPeerID<N::Addr>],
|
||||
) -> impl Iterator<Item = ClientPoolDropGuard<N>> + sealed::Captures<(&'a (), &'b ())> {
|
||||
peers.iter().filter_map(|peer| self.borrow_client(peer))
|
||||
}
|
||||
|
||||
/// Borrows all [`Client`]s from the pool that have claimed a higher cumulative difficulty than
|
||||
/// the amount passed in.
|
||||
///
|
||||
/// The [`Client`]s are wrapped in [`ClientPoolDropGuard`] which
|
||||
/// will return the clients to the pool when they are dropped.
|
||||
pub fn clients_with_more_cumulative_difficulty(
|
||||
self: &Arc<Self>,
|
||||
cumulative_difficulty: u128,
|
||||
) -> Vec<ClientPoolDropGuard<N>> {
|
||||
let peers = self
|
||||
.clients
|
||||
.iter()
|
||||
.filter_map(|element| {
|
||||
let peer_sync_info = element.value().info.core_sync_data.lock().unwrap();
|
||||
|
||||
if peer_sync_info.cumulative_difficulty() > cumulative_difficulty {
|
||||
Some(*element.key())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
self.borrow_clients(&peers).collect()
|
||||
}
|
||||
|
||||
/// Checks all clients in the pool checking if any claim a higher cumulative difficulty than the
|
||||
/// amount specified.
|
||||
pub fn contains_client_with_more_cumulative_difficulty(
|
||||
&self,
|
||||
cumulative_difficulty: u128,
|
||||
) -> bool {
|
||||
self.clients.iter().any(|element| {
|
||||
let sync_data = element.value().info.core_sync_data.lock().unwrap();
|
||||
sync_data.cumulative_difficulty() > cumulative_difficulty
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
mod sealed {
|
||||
/// TODO: Remove me when 2024 Rust
|
||||
///
|
||||
/// <https://rust-lang.github.io/rfcs/3498-lifetime-capture-rules-2024.html#the-captures-trick>
|
||||
pub trait Captures<U> {}
|
||||
|
||||
impl<T: ?Sized, U> Captures<U> for T {}
|
||||
}
|
|
@ -1,83 +0,0 @@
|
|||
//! # Disconnect Monitor
|
||||
//!
|
||||
//! This module contains the [`disconnect_monitor`] task, which monitors connected peers for disconnection
|
||||
//! and then removes them from the [`ClientPool`] if they do.
|
||||
use std::{
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
sync::Arc,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use futures::{stream::FuturesUnordered, StreamExt};
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_util::sync::WaitForCancellationFutureOwned;
|
||||
use tracing::instrument;
|
||||
|
||||
use cuprate_p2p_core::{client::InternalPeerID, handles::ConnectionHandle, NetworkZone};
|
||||
|
||||
use super::ClientPool;
|
||||
|
||||
/// The disconnect monitor task.
|
||||
#[instrument(level = "info", skip_all)]
|
||||
pub async fn disconnect_monitor<N: NetworkZone>(
|
||||
mut new_connection_rx: mpsc::UnboundedReceiver<(ConnectionHandle, InternalPeerID<N::Addr>)>,
|
||||
client_pool: Arc<ClientPool<N>>,
|
||||
) {
|
||||
// We need to hold a weak reference otherwise the client pool and this would hold a reference to
|
||||
// each other causing the pool to be leaked.
|
||||
let weak_client_pool = Arc::downgrade(&client_pool);
|
||||
drop(client_pool);
|
||||
|
||||
tracing::info!("Starting peer disconnect monitor.");
|
||||
|
||||
let mut futs: FuturesUnordered<PeerDisconnectFut<N>> = FuturesUnordered::new();
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
Some((con_handle, peer_id)) = new_connection_rx.recv() => {
|
||||
tracing::debug!("Monitoring {peer_id} for disconnect");
|
||||
futs.push(PeerDisconnectFut {
|
||||
closed_fut: con_handle.closed(),
|
||||
peer_id: Some(peer_id),
|
||||
});
|
||||
}
|
||||
Some(peer_id) = futs.next() => {
|
||||
tracing::debug!("{peer_id} has disconnected, removing from client pool.");
|
||||
let Some(pool) = weak_client_pool.upgrade() else {
|
||||
tracing::info!("Peer disconnect monitor shutting down.");
|
||||
return;
|
||||
};
|
||||
|
||||
pool.remove_client(&peer_id);
|
||||
drop(pool);
|
||||
}
|
||||
else => {
|
||||
tracing::info!("Peer disconnect monitor shutting down.");
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A [`Future`] that resolves when a peer disconnects.
|
||||
#[pin_project::pin_project]
|
||||
pub(crate) struct PeerDisconnectFut<N: NetworkZone> {
|
||||
/// The inner [`Future`] that resolves when a peer disconnects.
|
||||
#[pin]
|
||||
pub(crate) closed_fut: WaitForCancellationFutureOwned,
|
||||
/// The peers ID.
|
||||
pub(crate) peer_id: Option<InternalPeerID<N::Addr>>,
|
||||
}
|
||||
|
||||
impl<N: NetworkZone> Future for PeerDisconnectFut<N> {
|
||||
type Output = InternalPeerID<N::Addr>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.project();
|
||||
|
||||
this.closed_fut
|
||||
.poll(cx)
|
||||
.map(|()| this.peer_id.take().unwrap())
|
||||
}
|
||||
}
|
|
@ -1,41 +0,0 @@
|
|||
use std::{
|
||||
ops::{Deref, DerefMut},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use cuprate_p2p_core::{client::Client, NetworkZone};
|
||||
|
||||
use crate::client_pool::ClientPool;
|
||||
|
||||
/// A wrapper around [`Client`] which returns the client to the [`ClientPool`] when dropped.
|
||||
pub struct ClientPoolDropGuard<N: NetworkZone> {
|
||||
/// The [`ClientPool`] to return the peer to.
|
||||
pub(super) pool: Arc<ClientPool<N>>,
|
||||
/// The [`Client`].
|
||||
///
|
||||
/// This is set to [`Some`] when this guard is created, then
|
||||
/// [`take`](Option::take)n and returned to the pool when dropped.
|
||||
pub(super) client: Option<Client<N>>,
|
||||
}
|
||||
|
||||
impl<N: NetworkZone> Deref for ClientPoolDropGuard<N> {
|
||||
type Target = Client<N>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
self.client.as_ref().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
impl<N: NetworkZone> DerefMut for ClientPoolDropGuard<N> {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
self.client.as_mut().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
impl<N: NetworkZone> Drop for ClientPoolDropGuard<N> {
|
||||
fn drop(&mut self) {
|
||||
let client = self.client.take().unwrap();
|
||||
|
||||
self.pool.add_client(client);
|
||||
}
|
||||
}
|
|
@ -21,7 +21,6 @@ use cuprate_p2p_core::{
|
|||
};
|
||||
|
||||
use crate::{
|
||||
client_pool::ClientPool,
|
||||
config::P2PConfig,
|
||||
constants::{HANDSHAKE_TIMEOUT, MAX_SEED_CONNECTIONS, OUTBOUND_CONNECTION_ATTEMPT_TIMEOUT},
|
||||
};
|
||||
|
@ -46,7 +45,7 @@ pub struct MakeConnectionRequest {
|
|||
/// This handles maintaining a minimum number of connections and making extra connections when needed, upto a maximum.
|
||||
pub struct OutboundConnectionKeeper<N: NetworkZone, A, C> {
|
||||
/// The pool of currently connected peers.
|
||||
pub client_pool: Arc<ClientPool<N>>,
|
||||
pub new_peers_tx: mpsc::Sender<Client<N>>,
|
||||
/// The channel that tells us to make new _extra_ outbound connections.
|
||||
pub make_connection_rx: mpsc::Receiver<MakeConnectionRequest>,
|
||||
/// The address book service
|
||||
|
@ -77,7 +76,7 @@ where
|
|||
{
|
||||
pub fn new(
|
||||
config: P2PConfig<N>,
|
||||
client_pool: Arc<ClientPool<N>>,
|
||||
new_peers_tx: mpsc::Sender<Client<N>>,
|
||||
make_connection_rx: mpsc::Receiver<MakeConnectionRequest>,
|
||||
address_book_svc: A,
|
||||
connector_svc: C,
|
||||
|
@ -86,7 +85,7 @@ where
|
|||
.expect("Gray peer percent is incorrect should be 0..=1");
|
||||
|
||||
Self {
|
||||
client_pool,
|
||||
new_peers_tx,
|
||||
make_connection_rx,
|
||||
address_book_svc,
|
||||
connector_svc,
|
||||
|
@ -149,7 +148,7 @@ where
|
|||
/// Connects to a given outbound peer.
|
||||
#[instrument(level = "info", skip_all)]
|
||||
async fn connect_to_outbound_peer(&mut self, permit: OwnedSemaphorePermit, addr: N::Addr) {
|
||||
let client_pool = Arc::clone(&self.client_pool);
|
||||
let new_peers_tx = self.new_peers_tx.clone();
|
||||
let connection_fut = self
|
||||
.connector_svc
|
||||
.ready()
|
||||
|
@ -164,7 +163,7 @@ where
|
|||
async move {
|
||||
#[expect(clippy::significant_drop_in_scrutinee)]
|
||||
if let Ok(Ok(peer)) = timeout(HANDSHAKE_TIMEOUT, connection_fut).await {
|
||||
client_pool.add_new_client(peer);
|
||||
drop(new_peers_tx.send(peer).await);
|
||||
}
|
||||
}
|
||||
.instrument(Span::current()),
|
||||
|
|
|
@ -6,7 +6,7 @@ use std::{pin::pin, sync::Arc};
|
|||
|
||||
use futures::{SinkExt, StreamExt};
|
||||
use tokio::{
|
||||
sync::Semaphore,
|
||||
sync::{mpsc, Semaphore},
|
||||
task::JoinSet,
|
||||
time::{sleep, timeout},
|
||||
};
|
||||
|
@ -24,7 +24,6 @@ use cuprate_wire::{
|
|||
};
|
||||
|
||||
use crate::{
|
||||
client_pool::ClientPool,
|
||||
constants::{
|
||||
HANDSHAKE_TIMEOUT, INBOUND_CONNECTION_COOL_DOWN, PING_REQUEST_CONCURRENCY,
|
||||
PING_REQUEST_TIMEOUT,
|
||||
|
@ -36,7 +35,7 @@ use crate::{
|
|||
/// and initiate handshake if needed, after verifying the address isn't banned.
|
||||
#[instrument(level = "warn", skip_all)]
|
||||
pub async fn inbound_server<N, HS, A>(
|
||||
client_pool: Arc<ClientPool<N>>,
|
||||
new_connection_tx: mpsc::Sender<Client<N>>,
|
||||
mut handshaker: HS,
|
||||
mut address_book: A,
|
||||
config: P2PConfig<N>,
|
||||
|
@ -111,13 +110,13 @@ where
|
|||
permit: Some(permit),
|
||||
});
|
||||
|
||||
let cloned_pool = Arc::clone(&client_pool);
|
||||
let new_connection_tx = new_connection_tx.clone();
|
||||
|
||||
tokio::spawn(
|
||||
async move {
|
||||
let client = timeout(HANDSHAKE_TIMEOUT, fut).await;
|
||||
if let Ok(Ok(peer)) = client {
|
||||
cloned_pool.add_new_client(peer);
|
||||
drop(new_connection_tx.send(peer).await);
|
||||
}
|
||||
}
|
||||
.instrument(Span::current()),
|
||||
|
|
|
@ -18,16 +18,18 @@ use cuprate_p2p_core::{
|
|||
|
||||
pub mod block_downloader;
|
||||
mod broadcast;
|
||||
mod client_pool;
|
||||
pub mod config;
|
||||
pub mod connection_maintainer;
|
||||
pub mod constants;
|
||||
mod inbound_server;
|
||||
mod peer_set;
|
||||
|
||||
use block_downloader::{BlockBatch, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse};
|
||||
pub use broadcast::{BroadcastRequest, BroadcastSvc};
|
||||
pub use config::{AddressBookConfig, P2PConfig};
|
||||
use connection_maintainer::MakeConnectionRequest;
|
||||
use peer_set::PeerSet;
|
||||
pub use peer_set::{ClientDropGuard, PeerSetRequest, PeerSetResponse};
|
||||
|
||||
/// Initializes the P2P [`NetworkInterface`] for a specific [`NetworkZone`].
|
||||
///
|
||||
|
@ -53,7 +55,10 @@ where
|
|||
cuprate_address_book::init_address_book(config.address_book_config.clone()).await?;
|
||||
let address_book = Buffer::new(
|
||||
address_book,
|
||||
config.max_inbound_connections + config.outbound_connections,
|
||||
config
|
||||
.max_inbound_connections
|
||||
.checked_add(config.outbound_connections)
|
||||
.unwrap(),
|
||||
);
|
||||
|
||||
// Use the default config. Changing the defaults affects tx fluff times, which could affect D++ so for now don't allow changing
|
||||
|
@ -82,19 +87,25 @@ where
|
|||
|
||||
let outbound_handshaker = outbound_handshaker_builder.build();
|
||||
|
||||
let client_pool = client_pool::ClientPool::new();
|
||||
|
||||
let (new_connection_tx, new_connection_rx) = mpsc::channel(
|
||||
config
|
||||
.outbound_connections
|
||||
.checked_add(config.max_inbound_connections)
|
||||
.unwrap(),
|
||||
);
|
||||
let (make_connection_tx, make_connection_rx) = mpsc::channel(3);
|
||||
|
||||
let outbound_connector = Connector::new(outbound_handshaker);
|
||||
let outbound_connection_maintainer = connection_maintainer::OutboundConnectionKeeper::new(
|
||||
config.clone(),
|
||||
Arc::clone(&client_pool),
|
||||
new_connection_tx.clone(),
|
||||
make_connection_rx,
|
||||
address_book.clone(),
|
||||
outbound_connector,
|
||||
);
|
||||
|
||||
let peer_set = PeerSet::new(new_connection_rx);
|
||||
|
||||
let mut background_tasks = JoinSet::new();
|
||||
|
||||
background_tasks.spawn(
|
||||
|
@ -104,7 +115,7 @@ where
|
|||
);
|
||||
background_tasks.spawn(
|
||||
inbound_server::inbound_server(
|
||||
Arc::clone(&client_pool),
|
||||
new_connection_tx,
|
||||
inbound_handshaker,
|
||||
address_book.clone(),
|
||||
config,
|
||||
|
@ -120,7 +131,7 @@ where
|
|||
);
|
||||
|
||||
Ok(NetworkInterface {
|
||||
pool: client_pool,
|
||||
peer_set: Buffer::new(peer_set, 10).boxed_clone(),
|
||||
broadcast_svc,
|
||||
make_connection_tx,
|
||||
address_book: address_book.boxed_clone(),
|
||||
|
@ -132,7 +143,7 @@ where
|
|||
#[derive(Clone)]
|
||||
pub struct NetworkInterface<N: NetworkZone> {
|
||||
/// A pool of free connected peers.
|
||||
pool: Arc<client_pool::ClientPool<N>>,
|
||||
peer_set: BoxCloneService<PeerSetRequest, PeerSetResponse<N>, tower::BoxError>,
|
||||
/// A [`Service`] that allows broadcasting to all connected peers.
|
||||
broadcast_svc: BroadcastSvc<N>,
|
||||
/// A channel to request extra connections.
|
||||
|
@ -162,7 +173,7 @@ impl<N: NetworkZone> NetworkInterface<N> {
|
|||
+ 'static,
|
||||
C::Future: Send + 'static,
|
||||
{
|
||||
block_downloader::download_blocks(Arc::clone(&self.pool), our_chain_service, config)
|
||||
block_downloader::download_blocks(self.peer_set.clone(), our_chain_service, config)
|
||||
}
|
||||
|
||||
/// Returns the address book service.
|
||||
|
@ -172,8 +183,10 @@ impl<N: NetworkZone> NetworkInterface<N> {
|
|||
self.address_book.clone()
|
||||
}
|
||||
|
||||
/// Borrows the `ClientPool`, for access to connected peers.
|
||||
pub const fn client_pool(&self) -> &Arc<client_pool::ClientPool<N>> {
|
||||
&self.pool
|
||||
/// Borrows the `PeerSet`, for access to connected peers.
|
||||
pub fn peer_set(
|
||||
&mut self,
|
||||
) -> &mut BoxCloneService<PeerSetRequest, PeerSetResponse<N>, tower::BoxError> {
|
||||
&mut self.peer_set
|
||||
}
|
||||
}
|
||||
|
|
217
p2p/p2p/src/peer_set.rs
Normal file
217
p2p/p2p/src/peer_set.rs
Normal file
|
@ -0,0 +1,217 @@
|
|||
use std::{
|
||||
future::{ready, Future, Ready},
|
||||
pin::{pin, Pin},
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use futures::{stream::FuturesUnordered, StreamExt};
|
||||
use indexmap::{IndexMap, IndexSet};
|
||||
use rand::{seq::index::sample, thread_rng};
|
||||
use tokio::sync::mpsc::Receiver;
|
||||
use tokio_util::sync::WaitForCancellationFutureOwned;
|
||||
use tower::Service;
|
||||
|
||||
use cuprate_helper::cast::u64_to_usize;
|
||||
use cuprate_p2p_core::{
|
||||
client::{Client, InternalPeerID},
|
||||
ConnectionDirection, NetworkZone,
|
||||
};
|
||||
|
||||
mod client_wrappers;
|
||||
|
||||
pub use client_wrappers::ClientDropGuard;
|
||||
use client_wrappers::StoredClient;
|
||||
|
||||
/// A request to the peer-set.
|
||||
pub enum PeerSetRequest {
|
||||
/// The most claimed proof-of-work from a peer in the peer-set.
|
||||
MostPoWSeen,
|
||||
/// Peers with more cumulative difficulty than the given cumulative difficulty.
|
||||
///
|
||||
/// Returned peers will be remembered and won't be returned from subsequent calls until the guard is dropped.
|
||||
PeersWithMorePoW(u128),
|
||||
/// A random outbound peer.
|
||||
///
|
||||
/// The returned peer will be remembered and won't be returned from subsequent calls until the guard is dropped.
|
||||
StemPeer,
|
||||
}
|
||||
|
||||
/// A response from the peer-set.
|
||||
pub enum PeerSetResponse<N: NetworkZone> {
|
||||
/// [`PeerSetRequest::MostPoWSeen`]
|
||||
MostPoWSeen {
|
||||
/// The cumulative difficulty claimed.
|
||||
cumulative_difficulty: u128,
|
||||
/// The height claimed.
|
||||
height: usize,
|
||||
/// The claimed hash of the top block.
|
||||
top_hash: [u8; 32],
|
||||
},
|
||||
/// [`PeerSetRequest::PeersWithMorePoW`]
|
||||
///
|
||||
/// Returned peers will be remembered and won't be returned from subsequent calls until the guard is dropped.
|
||||
PeersWithMorePoW(Vec<ClientDropGuard<N>>),
|
||||
/// [`PeerSetRequest::StemPeer`]
|
||||
///
|
||||
/// The returned peer will be remembered and won't be returned from subsequent calls until the guard is dropped.
|
||||
StemPeer(Option<ClientDropGuard<N>>),
|
||||
}
|
||||
|
||||
/// A [`Future`] that completes when a peer disconnects.
|
||||
#[pin_project::pin_project]
|
||||
struct ClosedConnectionFuture<N: NetworkZone> {
|
||||
#[pin]
|
||||
fut: WaitForCancellationFutureOwned,
|
||||
id: Option<InternalPeerID<N::Addr>>,
|
||||
}
|
||||
|
||||
impl<N: NetworkZone> Future for ClosedConnectionFuture<N> {
|
||||
type Output = InternalPeerID<N::Addr>;
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.project();
|
||||
|
||||
this.fut.poll(cx).map(|()| this.id.take().unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
/// A collection of all connected peers on a [`NetworkZone`].
|
||||
pub(crate) struct PeerSet<N: NetworkZone> {
|
||||
/// The connected peers.
|
||||
peers: IndexMap<InternalPeerID<N::Addr>, StoredClient<N>>,
|
||||
/// A [`FuturesUnordered`] that resolves when a peer disconnects.
|
||||
closed_connections: FuturesUnordered<ClosedConnectionFuture<N>>,
|
||||
/// The [`InternalPeerID`]s of all outbound peers.
|
||||
outbound_peers: IndexSet<InternalPeerID<N::Addr>>,
|
||||
/// A channel of new peers from the inbound server or outbound connector.
|
||||
new_peers: Receiver<Client<N>>,
|
||||
}
|
||||
|
||||
impl<N: NetworkZone> PeerSet<N> {
|
||||
pub(crate) fn new(new_peers: Receiver<Client<N>>) -> Self {
|
||||
Self {
|
||||
peers: IndexMap::new(),
|
||||
closed_connections: FuturesUnordered::new(),
|
||||
outbound_peers: IndexSet::new(),
|
||||
new_peers,
|
||||
}
|
||||
}
|
||||
|
||||
/// Polls the new peers channel for newly connected peers.
|
||||
fn poll_new_peers(&mut self, cx: &mut Context<'_>) {
|
||||
while let Poll::Ready(Some(new_peer)) = self.new_peers.poll_recv(cx) {
|
||||
if new_peer.info.direction == ConnectionDirection::Outbound {
|
||||
self.outbound_peers.insert(new_peer.info.id);
|
||||
}
|
||||
|
||||
self.closed_connections.push(ClosedConnectionFuture {
|
||||
fut: new_peer.info.handle.closed(),
|
||||
id: Some(new_peer.info.id),
|
||||
});
|
||||
|
||||
self.peers
|
||||
.insert(new_peer.info.id, StoredClient::new(new_peer));
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove disconnected peers from the peer set.
|
||||
fn remove_dead_peers(&mut self, cx: &mut Context<'_>) {
|
||||
while let Poll::Ready(Some(dead_peer)) = self.closed_connections.poll_next_unpin(cx) {
|
||||
let Some(peer) = self.peers.swap_remove(&dead_peer) else {
|
||||
continue;
|
||||
};
|
||||
|
||||
if peer.client.info.direction == ConnectionDirection::Outbound {
|
||||
self.outbound_peers.swap_remove(&peer.client.info.id);
|
||||
}
|
||||
|
||||
self.peers.swap_remove(&dead_peer);
|
||||
}
|
||||
}
|
||||
|
||||
/// [`PeerSetRequest::MostPoWSeen`]
|
||||
fn most_pow_seen(&self) -> PeerSetResponse<N> {
|
||||
let most_pow_chain = self
|
||||
.peers
|
||||
.values()
|
||||
.map(|peer| {
|
||||
let core_sync_data = peer.client.info.core_sync_data.lock().unwrap();
|
||||
|
||||
(
|
||||
core_sync_data.cumulative_difficulty(),
|
||||
u64_to_usize(core_sync_data.current_height),
|
||||
core_sync_data.top_id,
|
||||
)
|
||||
})
|
||||
.max_by_key(|(cumulative_difficulty, ..)| *cumulative_difficulty)
|
||||
.unwrap_or_default();
|
||||
|
||||
PeerSetResponse::MostPoWSeen {
|
||||
cumulative_difficulty: most_pow_chain.0,
|
||||
height: most_pow_chain.1,
|
||||
top_hash: most_pow_chain.2,
|
||||
}
|
||||
}
|
||||
|
||||
/// [`PeerSetRequest::PeersWithMorePoW`]
|
||||
fn peers_with_more_pow(&self, cumulative_difficulty: u128) -> PeerSetResponse<N> {
|
||||
PeerSetResponse::PeersWithMorePoW(
|
||||
self.peers
|
||||
.values()
|
||||
.filter(|&client| {
|
||||
!client.is_downloading_blocks()
|
||||
&& client
|
||||
.client
|
||||
.info
|
||||
.core_sync_data
|
||||
.lock()
|
||||
.unwrap()
|
||||
.cumulative_difficulty()
|
||||
> cumulative_difficulty
|
||||
})
|
||||
.map(StoredClient::downloading_blocks_guard)
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
|
||||
/// [`PeerSetRequest::StemPeer`]
|
||||
fn random_peer_for_stem(&self) -> PeerSetResponse<N> {
|
||||
PeerSetResponse::StemPeer(
|
||||
sample(
|
||||
&mut thread_rng(),
|
||||
self.outbound_peers.len(),
|
||||
self.outbound_peers.len(),
|
||||
)
|
||||
.into_iter()
|
||||
.find_map(|i| {
|
||||
let peer = self.outbound_peers.get_index(i).unwrap();
|
||||
let client = self.peers.get(peer).unwrap();
|
||||
(!client.is_a_stem_peer()).then(|| client.stem_peer_guard())
|
||||
}),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<N: NetworkZone> Service<PeerSetRequest> for PeerSet<N> {
|
||||
type Response = PeerSetResponse<N>;
|
||||
type Error = tower::BoxError;
|
||||
type Future = Ready<Result<Self::Response, Self::Error>>;
|
||||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.poll_new_peers(cx);
|
||||
self.remove_dead_peers(cx);
|
||||
|
||||
// TODO: should we return `Pending` if we don't have any peers?
|
||||
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: PeerSetRequest) -> Self::Future {
|
||||
ready(match req {
|
||||
PeerSetRequest::MostPoWSeen => Ok(self.most_pow_seen()),
|
||||
PeerSetRequest::PeersWithMorePoW(cumulative_difficulty) => {
|
||||
Ok(self.peers_with_more_pow(cumulative_difficulty))
|
||||
}
|
||||
PeerSetRequest::StemPeer => Ok(self.random_peer_for_stem()),
|
||||
})
|
||||
}
|
||||
}
|
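For reference, a consumer drives the peer-set like any other `tower` service; this mirrors the block downloader's use of `PeersWithMorePoW` elsewhere in this PR. A sketch only, assuming `peer_set` is the boxed handle stored in `NetworkInterface` (`Buffer::new(peer_set, 10).boxed_clone()`); the function name is illustrative:

```rust
use tower::{util::BoxCloneService, Service, ServiceExt};

async fn peers_ahead_of_us<N: NetworkZone>(
    peer_set: &mut BoxCloneService<PeerSetRequest, PeerSetResponse<N>, tower::BoxError>,
    our_cumulative_difficulty: u128,
) -> Result<Vec<ClientDropGuard<N>>, tower::BoxError> {
    match peer_set
        .ready()
        .await?
        .call(PeerSetRequest::PeersWithMorePoW(our_cumulative_difficulty))
        .await?
    {
        // The peer-set only replies with the variant matching the request.
        PeerSetResponse::PeersWithMorePoW(clients) => Ok(clients),
        _ => unreachable!(),
    }
}
```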
86
p2p/p2p/src/peer_set/client_wrappers.rs
Normal file
86
p2p/p2p/src/peer_set/client_wrappers.rs
Normal file
|
@ -0,0 +1,86 @@
|
|||
use std::{
|
||||
ops::{Deref, DerefMut},
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
},
|
||||
};
|
||||
|
||||
use cuprate_p2p_core::{
|
||||
client::{Client, WeakClient},
|
||||
NetworkZone,
|
||||
};
|
||||
|
||||
/// A client stored in the peer-set.
|
||||
pub(super) struct StoredClient<N: NetworkZone> {
|
||||
pub client: Client<N>,
|
||||
/// An [`AtomicBool`] for if the peer is currently downloading blocks.
|
||||
downloading_blocks: Arc<AtomicBool>,
|
||||
/// An [`AtomicBool`] for if the peer is currently being used to stem txs.
|
||||
stem_peer: Arc<AtomicBool>,
|
||||
}
|
||||
|
||||
impl<N: NetworkZone> StoredClient<N> {
|
||||
pub(super) fn new(client: Client<N>) -> Self {
|
||||
Self {
|
||||
client,
|
||||
downloading_blocks: Arc::new(AtomicBool::new(false)),
|
||||
stem_peer: Arc::new(AtomicBool::new(false)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns [`true`] if the [`StoredClient`] is currently downloading blocks.
|
||||
pub(super) fn is_downloading_blocks(&self) -> bool {
|
||||
self.downloading_blocks.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// Returns [`true`] if the [`StoredClient`] is currently being used to stem txs.
|
||||
pub(super) fn is_a_stem_peer(&self) -> bool {
|
||||
self.stem_peer.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// Returns a [`ClientDropGuard`] that while it is alive keeps the [`StoredClient`] in the downloading blocks state.
|
||||
pub(super) fn downloading_blocks_guard(&self) -> ClientDropGuard<N> {
|
||||
self.downloading_blocks.store(true, Ordering::Relaxed);
|
||||
|
||||
ClientDropGuard {
|
||||
client: self.client.downgrade(),
|
||||
bool: Arc::clone(&self.downloading_blocks),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a [`ClientDropGuard`] that while it is alive keeps the [`StoredClient`] in the stemming peers state.
|
||||
pub(super) fn stem_peer_guard(&self) -> ClientDropGuard<N> {
|
||||
self.stem_peer.store(true, Ordering::Relaxed);
|
||||
|
||||
ClientDropGuard {
|
||||
client: self.client.downgrade(),
|
||||
bool: Arc::clone(&self.stem_peer),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A [`Drop`] guard for a client returned from the peer-set.
|
||||
pub struct ClientDropGuard<N: NetworkZone> {
|
||||
client: WeakClient<N>,
|
||||
bool: Arc<AtomicBool>,
|
||||
}
|
||||
|
||||
impl<N: NetworkZone> Deref for ClientDropGuard<N> {
|
||||
type Target = WeakClient<N>;
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.client
|
||||
}
|
||||
}
|
||||
|
||||
impl<N: NetworkZone> DerefMut for ClientDropGuard<N> {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.client
|
||||
}
|
||||
}
|
||||
|
||||
impl<N: NetworkZone> Drop for ClientDropGuard<N> {
|
||||
fn drop(&mut self) {
|
||||
self.bool.store(false, Ordering::Relaxed);
|
||||
}
|
||||
}
|
|
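The `StoredClient`/`ClientDropGuard` pair above relies on a shared `AtomicBool` that is set while a guard is alive and cleared on drop. A stripped-down, self-contained illustration of that pattern (not the real `ClientDropGuard`, which also carries a `WeakClient`):

```rust
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};

struct FlagGuard(Arc<AtomicBool>);

impl FlagGuard {
    /// Mark the flag as "in use" and keep it set for the guard's lifetime.
    fn new(flag: &Arc<AtomicBool>) -> Self {
        flag.store(true, Ordering::Relaxed);
        Self(Arc::clone(flag))
    }
}

impl Drop for FlagGuard {
    fn drop(&mut self) {
        // Clear the flag again once the guard goes away.
        self.0.store(false, Ordering::Relaxed);
    }
}

fn main() {
    let in_use = Arc::new(AtomicBool::new(false));

    let guard = FlagGuard::new(&in_use);
    assert!(in_use.load(Ordering::Relaxed)); // busy while the guard lives

    drop(guard);
    assert!(!in_use.load(Ordering::Relaxed)); // free once dropped
}
```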
@ -10,7 +10,7 @@ default = []
|
|||
borsh = ["dep:borsh"]
|
||||
|
||||
[dependencies]
|
||||
cuprate-constants = { workspace = true }
|
||||
cuprate-constants = { workspace = true, features = ["block"] }
|
||||
|
||||
thiserror = { workspace = true }
|
||||
|
||||
|
|
|
@ -10,20 +10,20 @@ keywords = ["cuprate", "rpc", "interface"]
|
|||
|
||||
[features]
|
||||
default = ["dummy", "serde"]
|
||||
dummy = []
|
||||
dummy = ["dep:cuprate-helper", "dep:futures"]
|
||||
|
||||
[dependencies]
|
||||
cuprate-epee-encoding = { workspace = true, default-features = false }
|
||||
cuprate-json-rpc = { workspace = true, default-features = false }
|
||||
cuprate-rpc-types = { workspace = true, features = ["serde", "epee"], default-features = false }
|
||||
cuprate-helper = { workspace = true, features = ["asynch"], default-features = false }
|
||||
cuprate-helper = { workspace = true, features = ["asynch"], default-features = false, optional = true }
|
||||
|
||||
anyhow = { workspace = true }
|
||||
axum = { version = "0.7.5", features = ["json"], default-features = false }
|
||||
serde = { workspace = true, optional = true }
|
||||
tower = { workspace = true }
|
||||
tower = { workspace = true, features = ["util"] }
|
||||
paste = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
futures = { workspace = true, optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
cuprate-test-utils = { workspace = true }
|
||||
|
|
|
@ -10,16 +10,16 @@ keywords = ["cuprate", "rpc", "types", "monero"]
|
|||
|
||||
[features]
|
||||
default = ["serde", "epee"]
|
||||
serde = ["dep:serde", "cuprate-fixed-bytes/serde"]
|
||||
epee = ["dep:cuprate-epee-encoding"]
|
||||
serde = ["dep:serde", "cuprate-fixed-bytes/serde", "cuprate-types/serde"]
|
||||
epee = ["dep:cuprate-epee-encoding", "cuprate-types/epee"]
|
||||
|
||||
[dependencies]
|
||||
cuprate-epee-encoding = { workspace = true, optional = true }
|
||||
cuprate-fixed-bytes = { workspace = true }
|
||||
cuprate-types = { workspace = true, default-features = false, features = ["epee", "serde"] }
|
||||
cuprate-types = { workspace = true, default-features = false }
|
||||
|
||||
paste = { workspace = true }
|
||||
serde = { workspace = true, optional = true }
|
||||
paste = { workspace = true }
|
||||
serde = { workspace = true, optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
cuprate-test-utils = { workspace = true }
|
||||
|
|
|
@ -58,61 +58,37 @@ pub struct ResponseBase {
|
|||
}
|
||||
|
||||
impl ResponseBase {
|
||||
/// `const` version of [`Default::default`].
|
||||
///
|
||||
/// ```rust
|
||||
/// use cuprate_rpc_types::{misc::*, base::*};
|
||||
///
|
||||
/// let new = ResponseBase::new();
|
||||
/// assert_eq!(new, ResponseBase {
|
||||
/// status: Status::Ok,
|
||||
/// untrusted: false,
|
||||
/// });
|
||||
/// ```
|
||||
pub const fn new() -> Self {
|
||||
Self {
|
||||
status: Status::Ok,
|
||||
untrusted: false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns OK and trusted [`Self`].
|
||||
/// [`Status::Ok`] and trusted [`Self`].
|
||||
///
|
||||
/// This is the most common version of [`Self`].
|
||||
///
|
||||
/// ```rust
|
||||
/// use cuprate_rpc_types::{misc::*, base::*};
|
||||
///
|
||||
/// let ok = ResponseBase::ok();
|
||||
/// assert_eq!(ok, ResponseBase {
|
||||
/// assert_eq!(ResponseBase::OK, ResponseBase {
|
||||
/// status: Status::Ok,
|
||||
/// untrusted: false,
|
||||
/// });
|
||||
/// ```
|
||||
pub const fn ok() -> Self {
|
||||
Self {
|
||||
status: Status::Ok,
|
||||
untrusted: false,
|
||||
}
|
||||
}
|
||||
pub const OK: Self = Self {
|
||||
status: Status::Ok,
|
||||
untrusted: false,
|
||||
};
|
||||
|
||||
/// Same as [`Self::ok`] but with [`Self::untrusted`] set to `true`.
|
||||
/// Same as [`Self::OK`] but with [`Self::untrusted`] set to `true`.
|
||||
///
|
||||
/// ```rust
|
||||
/// use cuprate_rpc_types::{misc::*, base::*};
|
||||
///
|
||||
/// let ok_untrusted = ResponseBase::ok_untrusted();
|
||||
/// assert_eq!(ok_untrusted, ResponseBase {
|
||||
/// assert_eq!(ResponseBase::OK_UNTRUSTED, ResponseBase {
|
||||
/// status: Status::Ok,
|
||||
/// untrusted: true,
|
||||
/// });
|
||||
/// ```
|
||||
pub const fn ok_untrusted() -> Self {
|
||||
Self {
|
||||
status: Status::Ok,
|
||||
untrusted: true,
|
||||
}
|
||||
}
|
||||
pub const OK_UNTRUSTED: Self = Self {
|
||||
status: Status::Ok,
|
||||
untrusted: true,
|
||||
};
|
||||
}
|
||||
|
||||
#[cfg(feature = "epee")]
|
||||
|
@ -148,9 +124,9 @@ impl AccessResponseBase {
|
|||
/// ```rust
|
||||
/// use cuprate_rpc_types::{misc::*, base::*};
|
||||
///
|
||||
/// let new = AccessResponseBase::new(ResponseBase::ok());
|
||||
/// let new = AccessResponseBase::new(ResponseBase::OK);
|
||||
/// assert_eq!(new, AccessResponseBase {
|
||||
/// response_base: ResponseBase::ok(),
|
||||
/// response_base: ResponseBase::OK,
|
||||
/// credits: 0,
|
||||
/// top_hash: "".into(),
|
||||
/// });
|
||||
|
@ -163,47 +139,41 @@ impl AccessResponseBase {
|
|||
}
|
||||
}
|
||||
|
||||
/// Returns OK and trusted [`Self`].
|
||||
/// [`Status::Ok`] and trusted [`Self`].
|
||||
///
|
||||
/// This is the most common version of [`Self`].
|
||||
///
|
||||
/// ```rust
|
||||
/// use cuprate_rpc_types::{misc::*, base::*};
|
||||
///
|
||||
/// let ok = AccessResponseBase::ok();
|
||||
/// assert_eq!(ok, AccessResponseBase {
|
||||
/// response_base: ResponseBase::ok(),
|
||||
/// assert_eq!(AccessResponseBase::OK, AccessResponseBase {
|
||||
/// response_base: ResponseBase::OK,
|
||||
/// credits: 0,
|
||||
/// top_hash: "".into(),
|
||||
/// });
|
||||
/// ```
|
||||
pub const fn ok() -> Self {
|
||||
Self {
|
||||
response_base: ResponseBase::ok(),
|
||||
credits: 0,
|
||||
top_hash: String::new(),
|
||||
}
|
||||
}
|
||||
pub const OK: Self = Self {
|
||||
response_base: ResponseBase::OK,
|
||||
credits: 0,
|
||||
top_hash: String::new(),
|
||||
};
|
||||
|
||||
/// Same as [`Self::ok`] but with `untrusted` set to `true`.
|
||||
/// Same as [`Self::OK`] but with `untrusted` set to `true`.
|
||||
///
|
||||
/// ```rust
|
||||
/// use cuprate_rpc_types::{misc::*, base::*};
|
||||
///
|
||||
/// let ok_untrusted = AccessResponseBase::ok_untrusted();
|
||||
/// assert_eq!(ok_untrusted, AccessResponseBase {
|
||||
/// response_base: ResponseBase::ok_untrusted(),
|
||||
/// assert_eq!(AccessResponseBase::OK_UNTRUSTED, AccessResponseBase {
|
||||
/// response_base: ResponseBase::OK_UNTRUSTED,
|
||||
/// credits: 0,
|
||||
/// top_hash: "".into(),
|
||||
/// });
|
||||
/// ```
|
||||
pub const fn ok_untrusted() -> Self {
|
||||
Self {
|
||||
response_base: ResponseBase::ok_untrusted(),
|
||||
credits: 0,
|
||||
top_hash: String::new(),
|
||||
}
|
||||
}
|
||||
pub const OK_UNTRUSTED: Self = Self {
|
||||
response_base: ResponseBase::OK_UNTRUSTED,
|
||||
credits: 0,
|
||||
top_hash: String::new(),
|
||||
};
|
||||
}
|
||||
|
||||
#[cfg(feature = "epee")]
|
||||
|
|
|
@ -9,23 +9,20 @@ use cuprate_fixed_bytes::ByteArrayVec;
|
|||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[cfg(feature = "epee")]
|
||||
use cuprate_epee_encoding::{
|
||||
container_as_blob::ContainerAsBlob,
|
||||
epee_object, error,
|
||||
macros::bytes::{Buf, BufMut},
|
||||
read_epee_value, write_field, EpeeObject, EpeeObjectBuilder,
|
||||
};
|
||||
use cuprate_epee_encoding::container_as_blob::ContainerAsBlob;
|
||||
|
||||
use cuprate_types::BlockCompleteEntry;
|
||||
|
||||
use crate::{
|
||||
base::AccessResponseBase,
|
||||
defaults::{default_false, default_zero},
|
||||
macros::{define_request, define_request_and_response, define_request_and_response_doc},
|
||||
misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolInfoExtent, PoolTxInfo, Status},
|
||||
macros::define_request_and_response,
|
||||
misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolInfo},
|
||||
rpc_call::RpcCallValue,
|
||||
};
|
||||
|
||||
#[cfg(any(feature = "epee", feature = "serde"))]
|
||||
use crate::defaults::{default_false, default_zero};
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Definitions
|
||||
define_request_and_response! {
|
||||
get_blocks_by_heightbin,
|
||||
|
@ -111,15 +108,14 @@ define_request_and_response! {
|
|||
}
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- GetBlocks
|
||||
define_request! {
|
||||
#[doc = define_request_and_response_doc!(
|
||||
"response" => GetBlocksResponse,
|
||||
get_blocksbin,
|
||||
cc73fe71162d564ffda8e549b79a350bca53c454,
|
||||
core_rpc_server_commands_defs, h, 162, 262,
|
||||
)]
|
||||
GetBlocksRequest {
|
||||
define_request_and_response! {
|
||||
get_blocksbin,
|
||||
cc73fe71162d564ffda8e549b79a350bca53c454 =>
|
||||
core_rpc_server_commands_defs.h => 162..=262,
|
||||
|
||||
GetBlocks,
|
||||
|
||||
Request {
|
||||
requested_info: u8 = default_zero::<u8>(), "default_zero",
|
||||
// FIXME: This is a `std::list` in `monerod` because...?
|
||||
block_ids: ByteArrayVec<32>,
|
||||
|
@ -127,259 +123,17 @@ define_request! {
|
|||
prune: bool,
|
||||
no_miner_tx: bool = default_false(), "default_false",
|
||||
pool_info_since: u64 = default_zero::<u64>(), "default_zero",
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
#[doc = define_request_and_response_doc!(
|
||||
"request" => GetBlocksRequest,
|
||||
get_blocksbin,
|
||||
cc73fe71162d564ffda8e549b79a350bca53c454,
|
||||
core_rpc_server_commands_defs, h, 162, 262,
|
||||
)]
|
||||
///
|
||||
/// This response's variant depends upon [`PoolInfoExtent`].
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
pub enum GetBlocksResponse {
|
||||
/// Will always serialize a [`PoolInfoExtent::None`] field.
|
||||
PoolInfoNone(GetBlocksResponsePoolInfoNone),
|
||||
/// Will always serialize a [`PoolInfoExtent::Incremental`] field.
|
||||
PoolInfoIncremental(GetBlocksResponsePoolInfoIncremental),
|
||||
/// Will always serialize a [`PoolInfoExtent::Full`] field.
|
||||
PoolInfoFull(GetBlocksResponsePoolInfoFull),
|
||||
}
|
||||
|
||||
impl Default for GetBlocksResponse {
|
||||
fn default() -> Self {
|
||||
Self::PoolInfoNone(GetBlocksResponsePoolInfoNone::default())
|
||||
}
|
||||
}
|
||||
|
||||
/// Data within [`GetBlocksResponse::PoolInfoNone`].
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
pub struct GetBlocksResponsePoolInfoNone {
|
||||
pub status: Status,
|
||||
pub untrusted: bool,
|
||||
pub blocks: Vec<BlockCompleteEntry>,
|
||||
pub start_height: u64,
|
||||
pub current_height: u64,
|
||||
pub output_indices: Vec<BlockOutputIndices>,
|
||||
pub daemon_time: u64,
|
||||
}
|
||||
|
||||
#[cfg(feature = "epee")]
|
||||
epee_object! {
|
||||
GetBlocksResponsePoolInfoNone,
|
||||
status: Status,
|
||||
untrusted: bool,
|
||||
blocks: Vec<BlockCompleteEntry>,
|
||||
start_height: u64,
|
||||
current_height: u64,
|
||||
output_indices: Vec<BlockOutputIndices>,
|
||||
daemon_time: u64,
|
||||
}
|
||||
|
||||
/// Data within [`GetBlocksResponse::PoolInfoIncremental`].
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
pub struct GetBlocksResponsePoolInfoIncremental {
|
||||
pub status: Status,
|
||||
pub untrusted: bool,
|
||||
pub blocks: Vec<BlockCompleteEntry>,
|
||||
pub start_height: u64,
|
||||
pub current_height: u64,
|
||||
pub output_indices: Vec<BlockOutputIndices>,
|
||||
pub daemon_time: u64,
|
||||
pub added_pool_txs: Vec<PoolTxInfo>,
|
||||
pub remaining_added_pool_txids: ByteArrayVec<32>,
|
||||
pub removed_pool_txids: ByteArrayVec<32>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "epee")]
|
||||
epee_object! {
|
||||
GetBlocksResponsePoolInfoIncremental,
|
||||
status: Status,
|
||||
untrusted: bool,
|
||||
blocks: Vec<BlockCompleteEntry>,
|
||||
start_height: u64,
|
||||
current_height: u64,
|
||||
output_indices: Vec<BlockOutputIndices>,
|
||||
daemon_time: u64,
|
||||
added_pool_txs: Vec<PoolTxInfo>,
|
||||
remaining_added_pool_txids: ByteArrayVec<32>,
|
||||
removed_pool_txids: ByteArrayVec<32>,
|
||||
}
|
||||
|
||||
/// Data within [`GetBlocksResponse::PoolInfoFull`].
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
pub struct GetBlocksResponsePoolInfoFull {
|
||||
pub status: Status,
|
||||
pub untrusted: bool,
|
||||
pub blocks: Vec<BlockCompleteEntry>,
|
||||
pub start_height: u64,
|
||||
pub current_height: u64,
|
||||
pub output_indices: Vec<BlockOutputIndices>,
|
||||
pub daemon_time: u64,
|
||||
pub added_pool_txs: Vec<PoolTxInfo>,
|
||||
pub remaining_added_pool_txids: ByteArrayVec<32>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "epee")]
|
||||
epee_object! {
|
||||
GetBlocksResponsePoolInfoFull,
|
||||
status: Status,
|
||||
untrusted: bool,
|
||||
blocks: Vec<BlockCompleteEntry>,
|
||||
start_height: u64,
|
||||
current_height: u64,
|
||||
output_indices: Vec<BlockOutputIndices>,
|
||||
daemon_time: u64,
|
||||
added_pool_txs: Vec<PoolTxInfo>,
|
||||
remaining_added_pool_txids: ByteArrayVec<32>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "epee")]
|
||||
/// [`EpeeObjectBuilder`] for [`GetBlocksResponse`].
|
||||
///
|
||||
/// Not for public usage.
|
||||
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub struct __GetBlocksResponseEpeeBuilder {
|
||||
pub status: Option<Status>,
|
||||
pub untrusted: Option<bool>,
|
||||
pub blocks: Option<Vec<BlockCompleteEntry>>,
|
||||
pub start_height: Option<u64>,
|
||||
pub current_height: Option<u64>,
|
||||
pub output_indices: Option<Vec<BlockOutputIndices>>,
|
||||
pub daemon_time: Option<u64>,
|
||||
pub pool_info_extent: Option<PoolInfoExtent>,
|
||||
pub added_pool_txs: Option<Vec<PoolTxInfo>>,
|
||||
pub remaining_added_pool_txids: Option<ByteArrayVec<32>>,
|
||||
pub removed_pool_txids: Option<ByteArrayVec<32>>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "epee")]
|
||||
impl EpeeObjectBuilder<GetBlocksResponse> for __GetBlocksResponseEpeeBuilder {
|
||||
fn add_field<B: Buf>(&mut self, name: &str, r: &mut B) -> error::Result<bool> {
|
||||
macro_rules! read_epee_field {
|
||||
($($field:ident),*) => {
|
||||
match name {
|
||||
$(
|
||||
stringify!($field) => { self.$field = Some(read_epee_value(r)?); },
|
||||
)*
|
||||
_ => return Ok(false),
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
read_epee_field! {
|
||||
status,
|
||||
untrusted,
|
||||
blocks,
|
||||
start_height,
|
||||
current_height,
|
||||
output_indices,
|
||||
daemon_time,
|
||||
pool_info_extent,
|
||||
added_pool_txs,
|
||||
remaining_added_pool_txids,
|
||||
removed_pool_txids
|
||||
}
|
||||
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn finish(self) -> error::Result<GetBlocksResponse> {
|
||||
const ELSE: error::Error = error::Error::Format("Required field was not found!");
|
||||
|
||||
let status = self.status.ok_or(ELSE)?;
|
||||
let untrusted = self.untrusted.ok_or(ELSE)?;
|
||||
let blocks = self.blocks.ok_or(ELSE)?;
|
||||
let start_height = self.start_height.ok_or(ELSE)?;
|
||||
let current_height = self.current_height.ok_or(ELSE)?;
|
||||
let output_indices = self.output_indices.ok_or(ELSE)?;
|
||||
let daemon_time = self.daemon_time.ok_or(ELSE)?;
|
||||
let pool_info_extent = self.pool_info_extent.ok_or(ELSE)?;
|
||||
|
||||
let this = match pool_info_extent {
|
||||
PoolInfoExtent::None => {
|
||||
GetBlocksResponse::PoolInfoNone(GetBlocksResponsePoolInfoNone {
|
||||
status,
|
||||
untrusted,
|
||||
blocks,
|
||||
start_height,
|
||||
current_height,
|
||||
output_indices,
|
||||
daemon_time,
|
||||
})
|
||||
}
|
||||
PoolInfoExtent::Incremental => {
|
||||
GetBlocksResponse::PoolInfoIncremental(GetBlocksResponsePoolInfoIncremental {
|
||||
status,
|
||||
untrusted,
|
||||
blocks,
|
||||
start_height,
|
||||
current_height,
|
||||
output_indices,
|
||||
daemon_time,
|
||||
added_pool_txs: self.added_pool_txs.ok_or(ELSE)?,
|
||||
remaining_added_pool_txids: self.remaining_added_pool_txids.ok_or(ELSE)?,
|
||||
removed_pool_txids: self.removed_pool_txids.ok_or(ELSE)?,
|
||||
})
|
||||
}
|
||||
PoolInfoExtent::Full => {
|
||||
GetBlocksResponse::PoolInfoFull(GetBlocksResponsePoolInfoFull {
|
||||
status,
|
||||
untrusted,
|
||||
blocks,
|
||||
start_height,
|
||||
current_height,
|
||||
output_indices,
|
||||
daemon_time,
|
||||
added_pool_txs: self.added_pool_txs.ok_or(ELSE)?,
|
||||
remaining_added_pool_txids: self.remaining_added_pool_txids.ok_or(ELSE)?,
|
||||
})
|
||||
}
|
||||
};
|
||||
|
||||
Ok(this)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "epee")]
|
||||
impl EpeeObject for GetBlocksResponse {
|
||||
type Builder = __GetBlocksResponseEpeeBuilder;
|
||||
|
||||
fn number_of_fields(&self) -> u64 {
|
||||
// [`PoolInfoExtent`] + inner struct fields.
|
||||
let inner_fields = match self {
|
||||
Self::PoolInfoNone(s) => s.number_of_fields(),
|
||||
Self::PoolInfoIncremental(s) => s.number_of_fields(),
|
||||
Self::PoolInfoFull(s) => s.number_of_fields(),
|
||||
};
|
||||
|
||||
1 + inner_fields
|
||||
}
|
||||
|
||||
fn write_fields<B: BufMut>(self, w: &mut B) -> error::Result<()> {
|
||||
match self {
|
||||
Self::PoolInfoNone(s) => {
|
||||
s.write_fields(w)?;
|
||||
write_field(PoolInfoExtent::None.to_u8(), "pool_info_extent", w)?;
|
||||
}
|
||||
Self::PoolInfoIncremental(s) => {
|
||||
s.write_fields(w)?;
|
||||
write_field(PoolInfoExtent::Incremental.to_u8(), "pool_info_extent", w)?;
|
||||
}
|
||||
Self::PoolInfoFull(s) => {
|
||||
s.write_fields(w)?;
|
||||
write_field(PoolInfoExtent::Full.to_u8(), "pool_info_extent", w)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
// TODO: add `top_block_hash` field
|
||||
// <https://github.com/monero-project/monero/blame/893916ad091a92e765ce3241b94e706ad012b62a/src/rpc/core_rpc_server_commands_defs.h#L263>
|
||||
AccessResponseBase {
|
||||
blocks: Vec<BlockCompleteEntry>,
|
||||
start_height: u64,
|
||||
current_height: u64,
|
||||
output_indices: Vec<BlockOutputIndices>,
|
||||
daemon_time: u64,
|
||||
pool_info: PoolInfo,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -8,10 +8,6 @@ use serde::{Deserialize, Serialize};
|
|||
|
||||
use crate::{
|
||||
base::{AccessResponseBase, ResponseBase},
|
||||
defaults::{
|
||||
default_false, default_height, default_one, default_string, default_true, default_vec,
|
||||
default_zero,
|
||||
},
|
||||
macros::define_request_and_response,
|
||||
misc::{
|
||||
AuxPow, BlockHeader, ChainInfo, ConnectionInfo, Distribution, GetBan,
|
||||
|
@ -21,6 +17,12 @@ use crate::{
|
|||
rpc_call::RpcCallValue,
|
||||
};
|
||||
|
||||
#[cfg(any(feature = "epee", feature = "serde"))]
|
||||
use crate::defaults::{
|
||||
default_false, default_height, default_one, default_string, default_true, default_vec,
|
||||
default_zero,
|
||||
};
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Macro
|
||||
/// Adds a (de)serialization doc-test to a type in `json.rs`.
|
||||
///
|
||||
|
@ -184,7 +186,7 @@ define_request_and_response! {
|
|||
// <https://serde.rs/field-attrs.html#flatten>.
|
||||
#[doc = serde_doc_test!(
|
||||
GET_BLOCK_TEMPLATE_RESPONSE => GetBlockTemplateResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
blockhashing_blob: "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a00000000e0c20372be23d356347091025c5b5e8f2abf83ab618378565cce2b703491523401".into(),
|
||||
blocktemplate_blob: "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a0000000002c681c30101ff8a81c3010180e0a596bb11033b7eedf47baf878f3490cb20b696079c34bd017fe59b0d070e74d73ffabc4bb0e05f011decb630f3148d0163b3bd39690dde4078e4cfb69fecf020d6278a27bad10c58023c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(),
|
||||
difficulty_top64: 0,
|
||||
|
@ -240,7 +242,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_BLOCK_COUNT_RESPONSE => GetBlockCountResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
count: 3195019,
|
||||
}
|
||||
)]
|
||||
|
@ -332,7 +334,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GENERATE_BLOCKS_RESPONSE => GenerateBlocksResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
blocks: vec!["49b712db7760e3728586f8434ee8bc8d7b3d410dac6bb6e98bf5845c83b917e4".into()],
|
||||
height: 9783,
|
||||
}
|
||||
|
@ -357,7 +359,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_LAST_BLOCK_HEADER_RESPONSE => GetLastBlockHeaderResponse {
|
||||
base: AccessResponseBase::ok(),
|
||||
base: AccessResponseBase::OK,
|
||||
block_header: BlockHeader {
|
||||
block_size: 200419,
|
||||
block_weight: 200419,
|
||||
|
@ -409,7 +411,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_BLOCK_HEADER_BY_HASH_RESPONSE => GetBlockHeaderByHashResponse {
|
||||
base: AccessResponseBase::ok(),
|
||||
base: AccessResponseBase::OK,
|
||||
block_headers: vec![],
|
||||
block_header: BlockHeader {
|
||||
block_size: 210,
|
||||
|
@ -464,7 +466,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_BLOCK_HEADER_BY_HEIGHT_RESPONSE => GetBlockHeaderByHeightResponse {
|
||||
base: AccessResponseBase::ok(),
|
||||
base: AccessResponseBase::OK,
|
||||
block_header: BlockHeader {
|
||||
block_size: 210,
|
||||
block_weight: 210,
|
||||
|
@ -519,7 +521,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_BLOCK_HEADERS_RANGE_RESPONSE => GetBlockHeadersRangeResponse {
|
||||
base: AccessResponseBase::ok(),
|
||||
base: AccessResponseBase::OK,
|
||||
headers: vec![
|
||||
BlockHeader {
|
||||
block_size: 301413,
|
||||
|
@ -601,7 +603,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_BLOCK_RESPONSE => GetBlockResponse {
|
||||
base: AccessResponseBase::ok(),
|
||||
base: AccessResponseBase::OK,
|
||||
blob: "1010c58bab9b06b27bdecfc6cd0a46172d136c08831cf67660377ba992332363228b1b722781e7807e07f502cef8a70101ff92f8a7010180e0a596bb1103d7cbf826b665d7a532c316982dc8dbc24f285cbc18bbcc27c7164cd9b3277a85d034019f629d8b36bd16a2bfce3ea80c31dc4d8762c67165aec21845494e32b7582fe00211000000297a787a000000000000000000000000".into(),
|
||||
block_header: BlockHeader {
|
||||
block_size: 106,
|
||||
|
@ -654,11 +656,11 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_CONNECTIONS_RESPONSE => GetConnectionsResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
connections: vec![
|
||||
ConnectionInfo {
|
||||
address: "3evk3kezfjg44ma6tvesy7rbxwwpgpympj45xar5fo4qajrsmkoaqdqd.onion:18083".into(),
|
||||
address_type: 4,
|
||||
address_type: cuprate_types::AddressType::Tor,
|
||||
avg_download: 0,
|
||||
avg_upload: 0,
|
||||
connection_id: "22ef856d0f1d44cc95e84fecfd065fe2".into(),
|
||||
|
@ -680,12 +682,12 @@ define_request_and_response! {
|
|||
rpc_port: 0,
|
||||
send_count: 3406572,
|
||||
send_idle_time: 30,
|
||||
state: "normal".into(),
|
||||
state: cuprate_types::ConnectionState::Normal,
|
||||
support_flags: 0
|
||||
},
|
||||
ConnectionInfo {
|
||||
address: "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion:18083".into(),
|
||||
address_type: 4,
|
||||
address_type: cuprate_types::AddressType::Tor,
|
||||
avg_download: 0,
|
||||
avg_upload: 0,
|
||||
connection_id: "c7734e15936f485a86d2b0534f87e499".into(),
|
||||
|
@ -707,7 +709,7 @@ define_request_and_response! {
|
|||
rpc_port: 0,
|
||||
send_count: 3370566,
|
||||
send_idle_time: 120,
|
||||
state: "normal".into(),
|
||||
state: cuprate_types::ConnectionState::Normal,
|
||||
support_flags: 0
|
||||
}
|
||||
],
|
||||
|
@ -728,7 +730,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_INFO_RESPONSE => GetInfoResponse {
|
||||
base: AccessResponseBase::ok(),
|
||||
base: AccessResponseBase::OK,
|
||||
adjusted_time: 1721245289,
|
||||
alt_blocks_count: 16,
|
||||
block_size_limit: 600000,
|
||||
|
@ -831,7 +833,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
HARD_FORK_INFO_RESPONSE => HardForkInfoResponse {
|
||||
base: AccessResponseBase::ok(),
|
||||
base: AccessResponseBase::OK,
|
||||
earliest_height: 2689608,
|
||||
enabled: true,
|
||||
state: 0,
|
||||
|
@ -877,7 +879,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
SET_BANS_RESPONSE => SetBansResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
}
|
||||
)]
|
||||
ResponseBase {}
|
||||
|
@ -892,7 +894,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_BANS_RESPONSE => GetBansResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
bans: vec![
|
||||
GetBan {
|
||||
host: "104.248.206.131".into(),
|
||||
|
@ -994,7 +996,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_OUTPUT_HISTOGRAM_RESPONSE => GetOutputHistogramResponse {
|
||||
base: AccessResponseBase::ok(),
|
||||
base: AccessResponseBase::OK,
|
||||
histogram: vec![HistogramEntry {
|
||||
amount: 20000000000,
|
||||
recent_instances: 0,
|
||||
|
@ -1028,7 +1030,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_COINBASE_TX_SUM_RESPONSE => GetCoinbaseTxSumResponse {
|
||||
base: AccessResponseBase::ok(),
|
||||
base: AccessResponseBase::OK,
|
||||
emission_amount: 9387854817320,
|
||||
emission_amount_top64: 0,
|
||||
fee_amount: 83981380000,
|
||||
|
@ -1057,7 +1059,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_VERSION_RESPONSE => GetVersionResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
current_height: 3195051,
|
||||
hard_forks: vec![
|
||||
HardforkEntry {
|
||||
|
@ -1143,12 +1145,16 @@ define_request_and_response! {
|
|||
get_fee_estimate,
|
||||
cc73fe71162d564ffda8e549b79a350bca53c454 =>
|
||||
core_rpc_server_commands_defs.h => 2250..=2277,
|
||||
GetFeeEstimate (empty),
|
||||
Request {},
|
||||
|
||||
GetFeeEstimate,
|
||||
|
||||
Request {
|
||||
grace_blocks: u64 = default_zero::<u64>(), "default_zero",
|
||||
},
|
||||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_FEE_ESTIMATE_RESPONSE => GetFeeEstimateResponse {
|
||||
base: AccessResponseBase::ok(),
|
||||
base: AccessResponseBase::OK,
|
||||
fee: 20000,
|
||||
fees: vec![20000,80000,320000,4000000],
|
||||
quantization_mask: 10000,
|
||||
|
@ -1170,7 +1176,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_ALTERNATE_CHAINS_RESPONSE => GetAlternateChainsResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
chains: vec![
|
||||
ChainInfo {
|
||||
block_hash: "4826c7d45d7cf4f02985b5c405b0e5d7f92c8d25e015492ce19aa3b209295dce".into(),
|
||||
|
@ -1238,7 +1244,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
SYNC_INFO_RESPONSE => SyncInfoResponse {
|
||||
base: AccessResponseBase::ok(),
|
||||
base: AccessResponseBase::OK,
|
||||
height: 3195157,
|
||||
next_needed_pruning_seed: 0,
|
||||
overview: "[]".into(),
|
||||
|
@ -1247,7 +1253,7 @@ define_request_and_response! {
|
|||
SyncInfoPeer {
|
||||
info: ConnectionInfo {
|
||||
address: "142.93.128.65:44986".into(),
|
||||
address_type: 1,
|
||||
address_type: cuprate_types::AddressType::Ipv4,
|
||||
avg_download: 1,
|
||||
avg_upload: 1,
|
||||
connection_id: "a5803c4c2dac49e7b201dccdef54c862".into(),
|
||||
|
@ -1269,14 +1275,14 @@ define_request_and_response! {
|
|||
rpc_port: 18089,
|
||||
send_count: 32235,
|
||||
send_idle_time: 6,
|
||||
state: "normal".into(),
|
||||
state: cuprate_types::ConnectionState::Normal,
|
||||
support_flags: 1
|
||||
}
|
||||
},
|
||||
SyncInfoPeer {
|
||||
info: ConnectionInfo {
|
||||
address: "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion:18083".into(),
|
||||
address_type: 4,
|
||||
address_type: cuprate_types::AddressType::Tor,
|
||||
avg_download: 0,
|
||||
avg_upload: 0,
|
||||
connection_id: "277f7c821bc546878c8bd29977e780f5".into(),
|
||||
|
@ -1298,7 +1304,7 @@ define_request_and_response! {
|
|||
rpc_port: 0,
|
||||
send_count: 99120,
|
||||
send_idle_time: 15,
|
||||
state: "normal".into(),
|
||||
state: cuprate_types::ConnectionState::Normal,
|
||||
support_flags: 0
|
||||
}
|
||||
}
|
||||
|
@ -1328,7 +1334,7 @@ define_request_and_response! {
|
|||
// TODO: enable test after binary string impl.
|
||||
// #[doc = serde_doc_test!(
|
||||
// GET_TRANSACTION_POOL_BACKLOG_RESPONSE => GetTransactionPoolBacklogResponse {
|
||||
// base: ResponseBase::ok(),
|
||||
// base: ResponseBase::OK,
|
||||
// backlog: "...Binary...".into(),
|
||||
// }
|
||||
// )]
|
||||
|
@ -1370,7 +1376,7 @@ define_request_and_response! {
|
|||
// TODO: enable test after binary string impl.
|
||||
// #[doc = serde_doc_test!(
|
||||
// GET_OUTPUT_DISTRIBUTION_RESPONSE => GetOutputDistributionResponse {
|
||||
// base: AccessResponseBase::ok(),
|
||||
// base: AccessResponseBase::OK,
|
||||
// distributions: vec![Distribution::Uncompressed(DistributionUncompressed {
|
||||
// start_height: 1462078,
|
||||
// base: 0,
|
||||
|
@ -1394,7 +1400,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_MINER_DATA_RESPONSE => GetMinerDataResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
already_generated_coins: 18186022843595960691,
|
||||
difficulty: "0x48afae42de".into(),
|
||||
height: 2731375,
|
||||
|
@ -1447,7 +1453,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
PRUNE_BLOCKCHAIN_RESPONSE => PruneBlockchainResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
pruned: true,
|
||||
pruning_seed: 387,
|
||||
}
|
||||
|
@ -1513,7 +1519,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
FLUSH_CACHE_RESPONSE => FlushCacheResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
}
|
||||
)]
|
||||
ResponseBase {}
|
||||
|
@ -1542,7 +1548,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
ADD_AUX_POW_RESPONSE => AddAuxPowResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
aux_pow: vec![AuxPow {
|
||||
hash: "7b35762de164b20885e15dbe656b1138db06bb402fa1796f5765a23933d8859a".into(),
|
||||
id: "3200b4ea97c3b2081cd4190b58e49572b2319fed00d030ad51809dff06b5d8c8".into(),
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
)]
|
||||
|
||||
mod constants;
|
||||
#[cfg(any(feature = "serde", feature = "epee"))]
|
||||
mod defaults;
|
||||
mod free;
|
||||
mod macros;
|
||||
|
|
|
@ -20,8 +20,8 @@ use cuprate_epee_encoding::{
|
|||
"rpc/core_rpc_server_commands_defs.h",
|
||||
45..=55
|
||||
)]
|
||||
#[cfg(feature = "epee")]
|
||||
fn compress_integer_array(_: &[u64]) -> error::Result<Vec<u8>> {
|
||||
#[cfg(any(feature = "epee", feature = "serde"))]
|
||||
fn compress_integer_array(_: &[u64]) -> Vec<u8> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
|
@ -33,6 +33,7 @@ fn compress_integer_array(_: &[u64]) -> error::Result<Vec<u8>> {
|
|||
"rpc/core_rpc_server_commands_defs.h",
|
||||
57..=72
|
||||
)]
|
||||
#[cfg(any(feature = "epee", feature = "serde"))]
|
||||
fn decompress_integer_array(_: &[u8]) -> Vec<u64> {
|
||||
todo!()
|
||||
}
|
||||
|
@ -135,12 +136,7 @@ fn serialize_distribution_as_compressed_data<S>(v: &Vec<u64>, s: S) -> Result<S:
|
|||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
match compress_integer_array(v) {
|
||||
Ok(compressed_data) => compressed_data.serialize(s),
|
||||
Err(_) => Err(serde::ser::Error::custom(
|
||||
"error compressing distribution array",
|
||||
)),
|
||||
}
|
||||
compress_integer_array(v).serialize(s)
|
||||
}
|
||||
|
||||
/// Deserializer function for [`DistributionCompressedBinary::distribution`].
|
||||
|
@ -256,7 +252,7 @@ impl EpeeObject for Distribution {
|
|||
distribution,
|
||||
amount,
|
||||
}) => {
|
||||
let compressed_data = compress_integer_array(&distribution)?;
|
||||
let compressed_data = compress_integer_array(&distribution);
|
||||
|
||||
start_height.write(w)?;
|
||||
base.write(w)?;
|
||||
|
|
|
@ -11,10 +11,10 @@ use serde::{Deserialize, Serialize};
|
|||
#[cfg(feature = "epee")]
|
||||
use cuprate_epee_encoding::epee_object;
|
||||
|
||||
use crate::{
|
||||
defaults::{default_string, default_zero},
|
||||
macros::monero_definition_link,
|
||||
};
|
||||
#[cfg(any(feature = "epee", feature = "serde"))]
|
||||
use crate::defaults::default_zero;
|
||||
|
||||
use crate::macros::monero_definition_link;
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Macros
|
||||
/// This macro (local to this file) defines all the misc types.
|
||||
|
@ -110,7 +110,7 @@ define_struct_and_impl_epee! {
|
|||
/// Used in [`crate::json::GetConnectionsResponse`].
|
||||
ConnectionInfo {
|
||||
address: String,
|
||||
address_type: u8,
|
||||
address_type: cuprate_types::AddressType,
|
||||
avg_download: u64,
|
||||
avg_upload: u64,
|
||||
connection_id: String,
|
||||
|
@ -135,7 +135,7 @@ define_struct_and_impl_epee! {
|
|||
// Exists in the original definition, but isn't
|
||||
// used or (de)serialized for RPC purposes.
|
||||
// ssl: bool,
|
||||
state: String,
|
||||
state: cuprate_types::ConnectionState,
|
||||
support_flags: u32,
|
||||
}
|
||||
}
|
||||
|
@ -148,7 +148,7 @@ define_struct_and_impl_epee! {
|
|||
)]
|
||||
/// Used in [`crate::json::SetBansRequest`].
|
||||
SetBan {
|
||||
#[cfg_attr(feature = "serde", serde(default = "default_string"))]
|
||||
#[cfg_attr(feature = "serde", serde(default = "crate::defaults::default_string"))]
|
||||
host: String,
|
||||
#[cfg_attr(feature = "serde", serde(default = "default_zero"))]
|
||||
ip: u32,
|
||||
|
|
|
@ -17,6 +17,7 @@ mod distribution;
|
|||
mod key_image_spent_status;
|
||||
#[expect(clippy::module_inception)]
|
||||
mod misc;
|
||||
mod pool_info;
|
||||
mod pool_info_extent;
|
||||
mod status;
|
||||
mod tx_entry;
|
||||
|
@ -30,6 +31,7 @@ pub use misc::{
|
|||
OutputDistributionData, Peer, PoolTxInfo, PublicNode, SetBan, Span, SpentKeyImageInfo,
|
||||
SyncInfoPeer, TxBacklogEntry, TxInfo, TxOutputIndices, TxpoolHisto, TxpoolStats,
|
||||
};
|
||||
pub use pool_info::PoolInfo;
|
||||
pub use pool_info_extent::PoolInfoExtent;
|
||||
pub use status::Status;
|
||||
pub use tx_entry::TxEntry;
|
||||
|
|
171
rpc/types/src/misc/pool_info.rs
Normal file
171
rpc/types/src/misc/pool_info.rs
Normal file
|
@ -0,0 +1,171 @@
|
|||
#[cfg(feature = "serde")]
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[cfg(feature = "epee")]
|
||||
use crate::misc::PoolInfoExtent;
|
||||
#[cfg(feature = "epee")]
|
||||
use cuprate_epee_encoding::{
|
||||
epee_object, error,
|
||||
macros::bytes::{Buf, BufMut},
|
||||
read_epee_value, write_field, EpeeObject, EpeeObjectBuilder,
|
||||
};
|
||||
|
||||
use cuprate_fixed_bytes::ByteArrayVec;
|
||||
|
||||
use crate::misc::PoolTxInfo;
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- PoolInfo
|
||||
#[doc = crate::macros::monero_definition_link!(
|
||||
cc73fe71162d564ffda8e549b79a350bca53c454,
|
||||
"rpc/core_rpc_server_commands_defs.h",
|
||||
223..=228
|
||||
)]
|
||||
/// Used in [`crate::bin::GetBlocksResponse`].
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
#[repr(u8)]
|
||||
pub enum PoolInfo {
|
||||
#[default]
|
||||
None,
|
||||
Incremental(PoolInfoIncremental),
|
||||
Full(PoolInfoFull),
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Internal data
|
||||
/// Data within [`PoolInfo::Incremental`].
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
pub struct PoolInfoIncremental {
|
||||
pub added_pool_txs: Vec<PoolTxInfo>,
|
||||
pub remaining_added_pool_txids: ByteArrayVec<32>,
|
||||
pub removed_pool_txids: ByteArrayVec<32>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "epee")]
|
||||
epee_object! {
|
||||
PoolInfoIncremental,
|
||||
added_pool_txs: Vec<PoolTxInfo>,
|
||||
remaining_added_pool_txids: ByteArrayVec<32>,
|
||||
removed_pool_txids: ByteArrayVec<32>,
|
||||
}
|
||||
|
||||
/// Data within [`PoolInfo::Full`].
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
pub struct PoolInfoFull {
|
||||
pub added_pool_txs: Vec<PoolTxInfo>,
|
||||
pub remaining_added_pool_txids: ByteArrayVec<32>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "epee")]
|
||||
epee_object! {
|
||||
PoolInfoFull,
|
||||
added_pool_txs: Vec<PoolTxInfo>,
|
||||
remaining_added_pool_txids: ByteArrayVec<32>,
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- PoolInfo epee impl
|
||||
#[cfg(feature = "epee")]
|
||||
/// [`EpeeObjectBuilder`] for [`GetBlocksResponse`].
|
||||
///
|
||||
/// Not for public usage.
|
||||
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub struct __PoolInfoEpeeBuilder {
|
||||
/// This is a distinct field in `monerod`,
|
||||
/// which as represented in this library with [`PoolInfo`]'s `u8` tag.
|
||||
pub pool_info_extent: Option<PoolInfoExtent>,
|
||||
|
||||
pub added_pool_txs: Option<Vec<PoolTxInfo>>,
|
||||
pub remaining_added_pool_txids: Option<ByteArrayVec<32>>,
|
||||
pub removed_pool_txids: Option<ByteArrayVec<32>>,
|
||||
}
|
||||
|
||||
// Custom epee implementation.
|
||||
//
|
||||
// HACK/INVARIANT:
|
||||
// If any data within [`PoolInfo`] changes, the below code should be changed as well.
|
||||
#[cfg(feature = "epee")]
|
||||
impl EpeeObjectBuilder<PoolInfo> for __PoolInfoEpeeBuilder {
|
||||
fn add_field<B: Buf>(&mut self, name: &str, r: &mut B) -> error::Result<bool> {
|
||||
macro_rules! read_epee_field {
|
||||
($($field:ident),*) => {
|
||||
match name {
|
||||
$(
|
||||
stringify!($field) => { self.$field = Some(read_epee_value(r)?); },
|
||||
)*
|
||||
_ => return Ok(false),
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
read_epee_field! {
|
||||
pool_info_extent,
|
||||
added_pool_txs,
|
||||
remaining_added_pool_txids,
|
||||
removed_pool_txids
|
||||
}
|
||||
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn finish(self) -> error::Result<PoolInfo> {
|
||||
// INVARIANT:
|
||||
// `monerod` omits serializing the field itself when a container is empty,
|
||||
// `unwrap_or_default()` is used over `error()` in these cases.
|
||||
// Some of the uses are when values have default fallbacks: `pool_info_extent`.
|
||||
|
||||
let pool_info_extent = self.pool_info_extent.unwrap_or_default();
|
||||
let this = match pool_info_extent {
|
||||
PoolInfoExtent::None => PoolInfo::None,
|
||||
PoolInfoExtent::Incremental => PoolInfo::Incremental(PoolInfoIncremental {
|
||||
added_pool_txs: self.added_pool_txs.unwrap_or_default(),
|
||||
remaining_added_pool_txids: self.remaining_added_pool_txids.unwrap_or_default(),
|
||||
removed_pool_txids: self.removed_pool_txids.unwrap_or_default(),
|
||||
}),
|
||||
PoolInfoExtent::Full => PoolInfo::Full(PoolInfoFull {
|
||||
added_pool_txs: self.added_pool_txs.unwrap_or_default(),
|
||||
remaining_added_pool_txids: self.remaining_added_pool_txids.unwrap_or_default(),
|
||||
}),
|
||||
};
|
||||
|
||||
Ok(this)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "epee")]
|
||||
impl EpeeObject for PoolInfo {
|
||||
type Builder = __PoolInfoEpeeBuilder;
|
||||
|
||||
fn number_of_fields(&self) -> u64 {
|
||||
// Inner struct fields.
|
||||
let inner_fields = match self {
|
||||
Self::None => 0,
|
||||
Self::Incremental(s) => s.number_of_fields(),
|
||||
Self::Full(s) => s.number_of_fields(),
|
||||
};
|
||||
|
||||
// [`PoolInfoExtent`] + inner struct fields
|
||||
1 + inner_fields
|
||||
}
|
||||
|
||||
fn write_fields<B: BufMut>(self, w: &mut B) -> error::Result<()> {
|
||||
const FIELD: &str = "pool_info_extent";
|
||||
|
||||
match self {
|
||||
Self::None => {
|
||||
write_field(PoolInfoExtent::None.to_u8(), FIELD, w)?;
|
||||
}
|
||||
Self::Incremental(s) => {
|
||||
s.write_fields(w)?;
|
||||
write_field(PoolInfoExtent::Incremental.to_u8(), FIELD, w)?;
|
||||
}
|
||||
Self::Full(s) => {
|
||||
s.write_fields(w)?;
|
||||
write_field(PoolInfoExtent::Full.to_u8(), FIELD, w)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
|
@ -2,8 +2,6 @@
|
|||
|
||||
//---------------------------------------------------------------------------------------------------- Use
|
||||
#[cfg(feature = "serde")]
|
||||
use crate::serde::{serde_false, serde_true};
|
||||
#[cfg(feature = "serde")]
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[cfg(feature = "epee")]
|
||||
|
@ -13,6 +11,9 @@ use cuprate_epee_encoding::{
|
|||
EpeeObject, EpeeObjectBuilder,
|
||||
};
|
||||
|
||||
#[cfg(feature = "serde")]
|
||||
use crate::serde::{serde_false, serde_true};
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- TxEntry
|
||||
#[doc = crate::macros::monero_definition_link!(
|
||||
cc73fe71162d564ffda8e549b79a350bca53c454,
|
||||
|
|
|
@ -8,7 +8,6 @@ use serde::{Deserialize, Serialize};
|
|||
|
||||
use crate::{
|
||||
base::{AccessResponseBase, ResponseBase},
|
||||
defaults::{default_false, default_string, default_true, default_vec, default_zero},
|
||||
macros::define_request_and_response,
|
||||
misc::{
|
||||
GetOutputsOut, OutKey, Peer, PublicNode, SpentKeyImageInfo, Status, TxEntry, TxInfo,
|
||||
|
@ -17,6 +16,9 @@ use crate::{
|
|||
RpcCallValue,
|
||||
};
|
||||
|
||||
#[cfg(any(feature = "serde", feature = "epee"))]
|
||||
use crate::defaults::{default_false, default_string, default_true, default_vec, default_zero};
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Macro
|
||||
/// Adds a (de)serialization doc-test to a type in `other.rs`.
|
||||
///
|
||||
|
@ -102,7 +104,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_HEIGHT_RESPONSE => GetHeightResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
hash: "68bb1a1cff8e2a44c3221e8e1aff80bc6ca45d06fa8eff4d2a3a7ac31d4efe3f".into(),
|
||||
height: 3195160,
|
||||
}
|
||||
|
@ -157,7 +159,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_ALT_BLOCKS_HASHES_RESPONSE => GetAltBlocksHashesResponse {
|
||||
base: AccessResponseBase::ok(),
|
||||
base: AccessResponseBase::OK,
|
||||
blks_hashes: vec!["8ee10db35b1baf943f201b303890a29e7d45437bd76c2bd4df0d2f2ee34be109".into()],
|
||||
}
|
||||
)]
|
||||
|
@ -187,7 +189,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
IS_KEY_IMAGE_SPENT_RESPONSE => IsKeyImageSpentResponse {
|
||||
base: AccessResponseBase::ok(),
|
||||
base: AccessResponseBase::OK,
|
||||
spent_status: vec![1, 1],
|
||||
}
|
||||
)]
|
||||
|
@ -283,7 +285,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
START_MINING_RESPONSE => StartMiningResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
}
|
||||
)]
|
||||
ResponseBase {}
|
||||
|
@ -298,7 +300,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
STOP_MINING_RESPONSE => StopMiningResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
}
|
||||
)]
|
||||
ResponseBase {}
|
||||
|
@ -313,7 +315,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
MINING_STATUS_RESPONSE => MiningStatusResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
active: false,
|
||||
address: "".into(),
|
||||
bg_idle_threshold: 0,
|
||||
|
@ -359,7 +361,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
SAVE_BC_RESPONSE => SaveBcResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
}
|
||||
)]
|
||||
ResponseBase {}
|
||||
|
@ -385,7 +387,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_PEER_LIST_RESPONSE => GetPeerListResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
gray_list: vec![
|
||||
Peer {
|
||||
host: "161.97.193.0".into(),
|
||||
|
@ -467,7 +469,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
SET_LOG_HASH_RATE_RESPONSE => SetLogHashRateResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
}
|
||||
)]
|
||||
ResponseBase {}
|
||||
|
@ -492,7 +494,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
SET_LOG_LEVEL_RESPONSE => SetLogLevelResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
}
|
||||
)]
|
||||
ResponseBase {}
|
||||
|
@ -516,7 +518,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
SET_LOG_CATEGORIES_RESPONSE => SetLogCategoriesResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
categories: "*:INFO".into(),
|
||||
}
|
||||
)]
|
||||
|
@ -582,7 +584,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_TRANSACTION_POOL_STATS_RESPONSE => GetTransactionPoolStatsResponse {
|
||||
base: AccessResponseBase::ok(),
|
||||
base: AccessResponseBase::OK,
|
||||
pool_stats: TxpoolStats {
|
||||
bytes_max: 11843,
|
||||
bytes_med: 2219,
|
||||
|
@ -644,7 +646,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_LIMIT_RESPONSE => GetLimitResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
limit_down: 1280000,
|
||||
limit_up: 1280000,
|
||||
}
|
||||
|
@ -676,7 +678,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
SET_LIMIT_RESPONSE => SetLimitResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
limit_down: 1024,
|
||||
limit_up: 128,
|
||||
}
|
||||
|
@ -707,7 +709,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
OUT_PEERS_RESPONSE => OutPeersResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
out_peers: 3232235535,
|
||||
}
|
||||
)]
|
||||
|
@ -740,7 +742,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_NET_STATS_RESPONSE => GetNetStatsResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
start_time: 1721251858,
|
||||
total_bytes_in: 16283817214,
|
||||
total_bytes_out: 34225244079,
|
||||
|
@ -779,7 +781,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_OUTS_RESPONSE => GetOutsResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
outs: vec![
|
||||
OutKey {
|
||||
height: 51941,
|
||||
|
@ -823,7 +825,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
UPDATE_RESPONSE => UpdateResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
auto_uri: "".into(),
|
||||
hash: "".into(),
|
||||
path: "".into(),
|
||||
|
@ -860,7 +862,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
POP_BLOCKS_RESPONSE => PopBlocksResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
height: 76482,
|
||||
}
|
||||
)]
|
||||
|
@ -879,7 +881,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_TRANSACTION_POOL_HASHES_RESPONSE => GetTransactionPoolHashesResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
tx_hashes: vec![
|
||||
"aa928aed888acd6152c60194d50a4df29b0b851be6169acf11b6a8e304dd6c03".into(),
|
||||
"794345f321a98f3135151f3056c0fdf8188646a8dab27de971428acf3551dd11".into(),
|
||||
|
@ -929,7 +931,7 @@ define_request_and_response! {
|
|||
|
||||
#[doc = serde_doc_test!(
|
||||
GET_PUBLIC_NODES_RESPONSE => GetPublicNodesResponse {
|
||||
base: ResponseBase::ok(),
|
||||
base: ResponseBase::OK,
|
||||
gray: vec![],
|
||||
white: vec![
|
||||
PublicNode {
|
||||
|
|
|
@ -9,32 +9,31 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/storage/cuprate-bloc
|
|||
keywords = ["cuprate", "blockchain", "database"]
|
||||
|
||||
[features]
|
||||
default = ["heed", "service"]
|
||||
default = ["heed"]
|
||||
# default = ["redb", "service"]
|
||||
# default = ["redb-memory", "service"]
|
||||
heed = ["cuprate-database/heed"]
|
||||
redb = ["cuprate-database/redb"]
|
||||
redb-memory = ["cuprate-database/redb-memory"]
|
||||
service = ["dep:thread_local", "dep:rayon", "cuprate-helper/thread"]
|
||||
serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"]
|
||||
|
||||
[dependencies]
|
||||
cuprate-database = { workspace = true }
|
||||
cuprate-database-service = { workspace = true }
|
||||
cuprate-helper = { workspace = true, features = ["fs", "map", "crypto"] }
|
||||
cuprate-helper = { workspace = true, features = ["fs", "map", "crypto", "tx", "thread"] }
|
||||
cuprate-types = { workspace = true, features = ["blockchain"] }
|
||||
cuprate-pruning = { workspace = true }
|
||||
|
||||
bitflags = { workspace = true, features = ["std", "serde", "bytemuck"] }
|
||||
bytemuck = { workspace = true, features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] }
|
||||
curve25519-dalek = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
rand = { workspace = true, features = ["std", "std_rng"] }
|
||||
monero-serai = { workspace = true, features = ["std"] }
|
||||
serde = { workspace = true, optional = true }
|
||||
|
||||
# `service` feature.
|
||||
tower = { workspace = true }
|
||||
thread_local = { workspace = true, optional = true }
|
||||
rayon = { workspace = true, optional = true }
|
||||
thread_local = { workspace = true }
|
||||
rayon = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
cuprate-constants = { workspace = true }
|
||||
|
|
|
@ -32,9 +32,6 @@ use cuprate_blockchain::{
|
|||
This ensures the types/traits used from `cuprate_database` are the same ones used by `cuprate_blockchain` internally.
|
||||
|
||||
# Feature flags
|
||||
The `service` module requires the `service` feature to be enabled.
|
||||
See the module for more documentation.
|
||||
|
||||
Different database backends are enabled by the feature flags:
|
||||
- `heed` (LMDB)
|
||||
- `redb`
|
||||
|
@ -45,7 +42,7 @@ The default is `heed`.
|
|||
<!-- FIXME: tracing should be behind a feature flag -->
|
||||
|
||||
# Invariants when not using `service`
|
||||
`cuprate_blockchain` can be used without the `service` feature enabled but
|
||||
`cuprate_blockchain` can be used without the `service` module but
|
||||
there are some things that must be kept in mind when doing so.
|
||||
|
||||
Failing to uphold these invariants may cause panics.
|
||||
|
|
|
@ -29,16 +29,12 @@ pub use free::open;
|
|||
|
||||
pub mod config;
|
||||
pub mod ops;
|
||||
pub mod service;
|
||||
pub mod tables;
|
||||
pub mod types;
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Feature-gated
|
||||
#[cfg(feature = "service")]
|
||||
pub mod service;
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Private
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests;
|
||||
|
||||
#[cfg(feature = "service")] // only needed in `service` for now
|
||||
pub(crate) mod unsafe_sendable;
|
||||
|
|
|
@ -42,7 +42,7 @@ use crate::{
|
|||
/// # Panics
|
||||
/// This function will panic if:
|
||||
/// - `block.height > u32::MAX` (not normally possible)
|
||||
/// - `block.height` is not != [`chain_height`]
|
||||
/// - `block.height` is != [`chain_height`]
|
||||
// no inline, too big.
|
||||
pub fn add_block(
|
||||
block: &VerifiedBlockInformation,
|
||||
|
|
|
@ -10,8 +10,6 @@
|
|||
//!
|
||||
//! The system is managed by this crate, and only requires [`init`] by the user.
|
||||
//!
|
||||
//! This module must be enabled with the `service` feature.
|
||||
//!
|
||||
//! ## Handles
|
||||
//! The 2 handles to the database are:
|
||||
//! - [`BlockchainReadHandle`]
|
||||
|
|
|
@ -121,6 +121,8 @@ fn map_request(
|
|||
R::DatabaseSize => database_size(env),
|
||||
R::OutputHistogram(input) => output_histogram(env, input),
|
||||
R::CoinbaseTxSum { height, count } => coinbase_tx_sum(env, height, count),
|
||||
R::AltChains => alt_chains(env),
|
||||
R::AltChainCount => alt_chain_count(env),
|
||||
}
|
||||
|
||||
/* SOMEDAY: post-request handling, run some code for each request? */
|
||||
|
@ -648,3 +650,13 @@ fn output_histogram(env: &ConcreteEnv, input: OutputHistogramInput) -> ResponseR
|
|||
fn coinbase_tx_sum(env: &ConcreteEnv, height: usize, count: u64) -> ResponseResult {
|
||||
Ok(BlockchainResponse::CoinbaseTxSum(todo!()))
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::AltChains`]
|
||||
fn alt_chains(env: &ConcreteEnv) -> ResponseResult {
|
||||
Ok(BlockchainResponse::AltChains(todo!()))
|
||||
}
|
||||
|
||||
/// [`BlockchainReadRequest::AltChainCount`]
|
||||
fn alt_chain_count(env: &ConcreteEnv) -> ResponseResult {
|
||||
Ok(BlockchainResponse::AltChainCount(todo!()))
|
||||
}
|
||||
|
|
|
@ -9,10 +9,10 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/storage/database"
|
|||
keywords = ["cuprate", "database"]
|
||||
|
||||
[features]
|
||||
# default = ["heed"]
|
||||
default = ["heed"]
|
||||
# default = ["redb"]
|
||||
# default = ["redb-memory"]
|
||||
heed = ["dep:heed"]
|
||||
heed = []
|
||||
redb = ["dep:redb"]
|
||||
redb-memory = ["redb"]
|
||||
|
||||
|
@ -25,7 +25,7 @@ paste = { workspace = true }
|
|||
thiserror = { workspace = true }
|
||||
|
||||
# Optional features.
|
||||
heed = { version = "0.20.5", features = ["read-txn-no-tls"], optional = true }
|
||||
heed = { version = "0.20.5", features = ["read-txn-no-tls"] }
|
||||
redb = { version = "2.1.3", optional = true }
|
||||
serde = { workspace = true, optional = true }
|
||||
|
||||
|
|
|
@ -4,6 +4,8 @@ cfg_if::cfg_if! {
|
|||
// If both backends are enabled, fallback to `heed`.
|
||||
// This is useful when using `--all-features`.
|
||||
if #[cfg(all(feature = "redb", not(feature = "heed")))] {
|
||||
use heed as _;
|
||||
|
||||
mod redb;
|
||||
pub use redb::ConcreteEnv;
|
||||
} else {
|
||||
|
|
|
@ -8,14 +8,20 @@ authors = ["Boog900"]
|
|||
repository = "https://github.com/Cuprate/cuprate/tree/main/storage/service"
|
||||
keywords = ["cuprate", "service", "database"]
|
||||
|
||||
[features]
|
||||
default = ["heed"]
|
||||
heed = ["cuprate-database/heed"]
|
||||
redb = ["cuprate-database/redb"]
|
||||
redb-memorey = ["cuprate-database/redb-memory"]
|
||||
|
||||
[dependencies]
|
||||
cuprate-database = { workspace = true }
|
||||
cuprate-helper = { workspace = true, features = ["fs", "thread", "map"] }
|
||||
cuprate-helper = { workspace = true, features = ["fs", "thread", "map", "asynch"] }
|
||||
|
||||
serde = { workspace = true, optional = true }
|
||||
rayon = { workspace = true }
|
||||
tower = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
futures = { workspace = true, features = ["std"] }
|
||||
crossbeam = { workspace = true, features = ["std"] }
|
||||
|
||||
[lints]
|
||||
|
|
|
@ -30,6 +30,14 @@ pub struct DatabaseWriteHandle<Req, Res> {
|
|||
crossbeam::channel::Sender<(Req, oneshot::Sender<Result<Res, RuntimeError>>)>,
|
||||
}
|
||||
|
||||
impl<Req, Res> Clone for DatabaseWriteHandle<Req, Res> {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
sender: self.sender.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Req, Res> DatabaseWriteHandle<Req, Res>
|
||||
where
|
||||
Req: Send + 'static,
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue