Compare commits

...

2 commits

7b8756fa80
cuprated: P2P protocol request handler (#303)
* add cuprated skeleton

* fmt and add deny exception

* add main chain batch handler

* add blockchain init

* very rough block manager

* misc changes

* move more config values

* add new tables & types

* add function to fully add an alt block

* resolve current todo!s

* add new requests

* WIP: starting re-orgs

* add last service request

* commit Cargo.lock

* add test

* more docs + cleanup + alt blocks request

* clippy + fmt

* document types

* move tx_fee to helper

* more doc updates

* fmt

* fix imports

* remove config files

* fix merge errors

* fix generated coins

* handle more p2p requests + alt blocks

* clean up handler code

* add function for incoming blocks

* add docs to handler functions

* broadcast new blocks + add commands

* add fluffy block handler

* fix new block handling

* small cleanup

* increase outbound peer count

* fix merge

* clean up the blockchain manger

* add more docs + cleanup imports

* fix typo

* fix doc

* remove unrelated changes

* add `get_objects` handler

* add `get_chain` handler

* add `fluffy_missing_txs` handler

* add `new_fluffy_block` handler

* improve interface globals

* manger -> manager

* enums instead of bools

* move chain service to separate file

* more review fixes

* sort imports + docs

* init dandelion integration

* add dandelion start function

* finish incoming tx handler

* Add tx blob hash table

* Add missing txpool requests

* handle duplicate stem txs

* check txpool on incoming block

* add request to remove tx in new blocks from the pool

* tell the txpool about incoming blocks

* fix merge

* typos

* remove blockchain height from txpool

* fix merge

* fix merge

* handle incoming txs in p2p request handler

* Allow `IncomingTxHandler` to be given later

* add p2p clearnet init

* fix build

* misc changes

* doc updates

* more doc updates

* sort imports

* review changes

* Result -> DbResult

* use `NonZero`

* review fixes

* remove `rust-2024-compatibility` lint
2024-12-03 20:21:05 +00:00
ecd077b402
cuprated: config & args (#304)
* init config

* split sections

* finish initial config.

* fix clap

* misc changes

* fix doc

* fix test & clippy

* fix test 2

* try fix windows

* testing

* testing 2

* fix windows test

* fix windows: the remix.

* review comments

* fix imports

* rename & fix default config file

* fix cargo hack

* enable serde on `cuprate-helper`

* changes from matrix chats

* fix ci

* fix doc

* fix doc test

* move Cuprated.toml

* remove default.rs

* `size` -> `bytes`

* `addressbook_path` -> `address_book_path`

* fix config output

* fix ci

* Update binaries/cuprated/src/config/args.rs

Co-authored-by: hinto-janai <hinto.janai@protonmail.com>

---------

Co-authored-by: hinto-janai <hinto.janai@protonmail.com>
2024-12-03 15:17:21 +00:00
48 changed files with 1749 additions and 215 deletions

Cargo.lock (generated)
View file

@@ -446,6 +446,7 @@ checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54"
 dependencies = [
  "anstyle",
  "clap_lex",
+ "terminal_size",
 ]

 [[package]]
@@ -703,6 +704,7 @@ version = "0.0.0"
 dependencies = [
  "bitflags 2.6.0",
  "bytemuck",
+ "bytes",
  "cuprate-constants",
  "cuprate-database",
  "cuprate-database-service",
@@ -933,6 +935,7 @@ dependencies = [
  "libc",
  "monero-serai",
  "rayon",
+ "serde",
  "tokio",
  "windows",
 ]
@@ -1188,7 +1191,6 @@ dependencies = [
  "cuprate-consensus",
  "cuprate-consensus-context",
  "cuprate-consensus-rules",
- "cuprate-constants",
  "cuprate-cryptonight",
  "cuprate-dandelion-tower",
  "cuprate-database",
@@ -1230,6 +1232,7 @@ dependencies = [
  "tokio",
  "tokio-stream",
  "tokio-util",
+ "toml",
  "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)",
  "tracing",
  "tracing-subscriber",
@@ -2904,6 +2907,15 @@ dependencies = [
  "serde",
 ]

+[[package]]
+name = "serde_spanned"
+version = "0.6.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1"
+dependencies = [
+ "serde",
+]
+
 [[package]]
 name = "serde_urlencoded"
 version = "0.7.1"
@@ -3121,6 +3133,16 @@ dependencies = [
  "windows-sys 0.59.0",
 ]

+[[package]]
+name = "terminal_size"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef"
+dependencies = [
+ "rustix",
+ "windows-sys 0.59.0",
+]
+
 [[package]]
 name = "thiserror"
 version = "1.0.66"
@@ -3262,11 +3284,26 @@ dependencies = [
  "tracing",
 ]

+[[package]]
+name = "toml"
+version = "0.8.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e"
+dependencies = [
+ "serde",
+ "serde_spanned",
+ "toml_datetime",
+ "toml_edit",
+]
+
 [[package]]
 name = "toml_datetime"
 version = "0.6.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41"
+dependencies = [
+ "serde",
+]

 [[package]]
 name = "toml_edit"
@@ -3275,6 +3312,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5"
 dependencies = [
  "indexmap",
+ "serde",
+ "serde_spanned",
  "toml_datetime",
  "winnow",
 ]

View file

@@ -55,6 +55,7 @@ members = [
 ]

 [profile.release]
+panic = "abort"
 lto = true           # Build with LTO
 strip = "none"       # Keep panic stack traces
 codegen-units = 1    # Optimize for binary speed over compile times
@@ -144,6 +145,7 @@ tokio-util = { version = "0.7", default-features = false }
 tokio-stream = { version = "0.1", default-features = false }
 tokio = { version = "1", default-features = false }
 tower = { git = "https://github.com/Cuprate/tower.git", rev = "6c7faf0", default-features = false } # <https://github.com/tower-rs/tower/pull/796>
+toml = { version = "0.8", default-features = false }
 tracing-subscriber = { version = "0.3", default-features = false }
 tracing = { version = "0.1", default-features = false }
@@ -374,7 +376,6 @@ unused_lifetimes = "deny"
 unused_macro_rules = "deny"
 ambiguous_glob_imports = "deny"
 unused_unsafe = "deny"
-rust_2024_compatibility = "deny"

 # Warm
 let_underscore = { level = "deny", priority = -1 }

View file

@@ -2,7 +2,7 @@
 name = "cuprated"
 version = "0.0.1"
 edition = "2021"
-description = "The Cuprate Monero Rust node."
+description = "The Cuprate Rust Monero node."
 license = "AGPL-3.0-only"
 authors = ["Boog900", "hinto-janai", "SyntheticBird45"]
 repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated"
@@ -12,29 +12,29 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated"
 cuprate-consensus = { workspace = true }
 cuprate-fast-sync = { workspace = true }
 cuprate-consensus-context = { workspace = true }
 cuprate-consensus-rules = { workspace = true }
-cuprate-constants = { workspace = true }
 cuprate-cryptonight = { workspace = true }
-cuprate-helper = { workspace = true }
+cuprate-helper = { workspace = true, features = ["serde"] }
 cuprate-epee-encoding = { workspace = true }
 cuprate-fixed-bytes = { workspace = true }
 cuprate-levin = { workspace = true }
 cuprate-wire = { workspace = true }
 cuprate-p2p = { workspace = true }
 cuprate-p2p-core = { workspace = true }
 cuprate-dandelion-tower = { workspace = true, features = ["txpool"] }
 cuprate-async-buffer = { workspace = true }
 cuprate-address-book = { workspace = true }
 cuprate-blockchain = { workspace = true }
-cuprate-database-service = { workspace = true }
+cuprate-database-service = { workspace = true, features = ["serde"] }
 cuprate-txpool = { workspace = true }
-cuprate-database = { workspace = true }
+cuprate-database = { workspace = true, features = ["serde"] }
 cuprate-pruning = { workspace = true }
 cuprate-test-utils = { workspace = true }
 cuprate-types = { workspace = true }
 cuprate-json-rpc = { workspace = true }
 cuprate-rpc-interface = { workspace = true }
 cuprate-rpc-types = { workspace = true }

 # TODO: after v1.0.0, remove unneeded dependencies.
 anyhow = { workspace = true }
@@ -44,7 +44,7 @@ borsh = { workspace = true }
 bytemuck = { workspace = true }
 bytes = { workspace = true }
 cfg-if = { workspace = true }
-clap = { workspace = true, features = ["cargo"] }
+clap = { workspace = true, features = ["cargo", "help", "wrap_help"] }
 chrono = { workspace = true }
 crypto-bigint = { workspace = true }
 crossbeam = { workspace = true }
@@ -71,15 +71,10 @@ thread_local = { workspace = true }
 tokio-util = { workspace = true }
 tokio-stream = { workspace = true }
 tokio = { workspace = true }
+toml = { workspace = true, features = ["parse", "display"]}
 tower = { workspace = true }
 tracing-subscriber = { workspace = true, features = ["std", "fmt", "default"] }
-tracing = { workspace = true }
+tracing = { workspace = true, features = ["default"] }

 [lints]
 workspace = true
-
-[profile.dev]
-panic = "abort"
-
-[profile.release]
-panic = "abort"

View file

@@ -0,0 +1,67 @@
#   ____                      _
#  / ___|   _ _ __  _ __ __ _| |_ ___
# | |  | | | | '_ \| '__/ _` | __/ _ \
# | |__| |_| | |_) | | | (_| | || __/
#  \____\__,_| .__/|_|  \__,_|\__\___|
#            |_|
#

## The network to run on, valid values: "Mainnet", "Testnet", "Stagenet".
network = "Mainnet"

## Tracing config.
[tracing]
## The minimum level for log events to be displayed.
level = "info"

## Clear-net config.
[p2p.clear_net]
## The number of outbound connections we should make and maintain.
outbound_connections = 64
## The number of extra connections we should make under load from the rest of Cuprate, i.e. when syncing.
extra_outbound_connections = 8
## The maximum number of incoming connections we should allow.
max_inbound_connections = 128
## The percent of outbound connections that should be to nodes we have not connected to before.
gray_peers_percent = 0.7
## The port to accept connections on, if left `0` no connections will be accepted.
p2p_port = 0
## The IP address to listen to connections on.
listen_on = "0.0.0.0"

## The Clear-net addressbook config.
[p2p.clear_net.address_book_config]
## The size of the white peer list, which contains peers we have made a connection to before.
max_white_list_length = 1_000
## The size of the gray peer list, which contains peers we have not made a connection to before.
max_gray_list_length = 5_000
## The amount of time between address book saves.
peer_save_period = { secs = 90, nanos = 0 }

## The block downloader config.
[p2p.block_downloader]
## The size of the buffer of sequential blocks waiting to be verified and added to the chain (bytes).
buffer_bytes = 50_000_000
## The size of the queue of blocks which are waiting for a parent block to be downloaded (bytes).
in_progress_queue_bytes = 50_000_000
## The target size of a batch of blocks (bytes), must not exceed 100MB.
target_batch_bytes = 5_000_000
## The amount of time between checking the pool of connected peers for free peers to download blocks.
check_client_pool_interval = { secs = 30, nanos = 0 }

## Storage config
[storage]
## The amount of reader threads to spawn.
reader_threads = "OnePerThread"

## Txpool storage config.
[storage.txpool]
## The database sync mode for the txpool.
sync_mode = "Async"
## The maximum size of all the txs in the pool (bytes).
max_txpool_byte_size = 100_000_000

## Blockchain storage config.
[storage.blockchain]
## The database sync mode for the blockchain.
sync_mode = "Async"
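
Since `Config` derives `#[serde(default)]` (see `config.rs` below), every key in this example is optional and documents a default rather than a required value. A minimal sketch of a partial override, assuming `Config` and `Network` are in scope (this mirrors the `generate_config_text_is_valid` test added in `constants.rs` below):

// Sketch: a partial config file parses; omitted keys fall back to defaults.
fn parse_partial_config() {
    let partial = r#"
        network = "Testnet"

        [p2p.clear_net]
        p2p_port = 18080
    "#;
    let config: Config = toml::from_str(partial).unwrap();
    assert_eq!(config.network(), Network::Testnet);
}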

View file

@@ -1 +1,159 @@
//! cuprated config
use std::{
fs::{read_to_string, File},
io,
path::Path,
time::Duration,
};
use clap::Parser;
use serde::{Deserialize, Serialize};
use cuprate_consensus::ContextConfig;
use cuprate_helper::{
fs::{CUPRATE_CONFIG_DIR, DEFAULT_CONFIG_FILE_NAME},
network::Network,
};
use cuprate_p2p::block_downloader::BlockDownloaderConfig;
use cuprate_p2p_core::{ClearNet, ClearNetServerCfg};
mod args;
mod fs;
mod p2p;
mod storage;
mod tracing_config;
use crate::config::fs::FileSystemConfig;
use p2p::P2PConfig;
use storage::StorageConfig;
use tracing_config::TracingConfig;
/// Reads the args & config file, returning a [`Config`].
pub fn read_config_and_args() -> Config {
let args = args::Args::parse();
args.do_quick_requests();
let config: Config = if let Some(config_file) = &args.config_file {
// If a config file was set in the args try to read it and exit if we can't.
match Config::read_from_path(config_file) {
Ok(config) => config,
Err(e) => {
eprintln!("Failed to read config from file: {e}");
std::process::exit(1);
}
}
} else {
// First attempt to read the config file from the current directory.
std::env::current_dir()
.map(|path| path.join(DEFAULT_CONFIG_FILE_NAME))
.map_err(Into::into)
.and_then(Config::read_from_path)
.inspect_err(|e| tracing::debug!("Failed to read config from current dir: {e}"))
// otherwise try the main config directory.
.or_else(|_| {
let file = CUPRATE_CONFIG_DIR.join(DEFAULT_CONFIG_FILE_NAME);
Config::read_from_path(file)
})
.inspect_err(|e| {
tracing::debug!("Failed to read config from config dir: {e}");
eprintln!("Failed to find/read config file, using default config.");
})
.unwrap_or_default()
};
args.apply_args(config)
}
/// The config for all of Cuprate.
#[derive(Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct Config {
/// The network we should run on.
network: Network,
/// [`tracing`] config.
tracing: TracingConfig,
/// The P2P network config.
p2p: P2PConfig,
/// The storage config.
storage: StorageConfig,
fs: FileSystemConfig,
}
impl Config {
/// Attempts to read a config file in [`toml`] format from the given [`Path`].
///
/// # Errors
///
/// Will return an [`Err`] if the file cannot be read or if the file is not a valid [`toml`] config.
fn read_from_path(file: impl AsRef<Path>) -> Result<Self, anyhow::Error> {
let file_text = read_to_string(file.as_ref())?;
Ok(toml::from_str(&file_text)
.inspect(|_| eprintln!("Using config at: {}", file.as_ref().to_string_lossy()))
.inspect_err(|e| {
eprintln!("{e}");
eprintln!(
"Failed to parse config file at: {}",
file.as_ref().to_string_lossy()
);
})?)
}
/// Returns the current [`Network`] we are running on.
pub const fn network(&self) -> Network {
self.network
}
/// The [`ClearNet`], [`cuprate_p2p::P2PConfig`].
pub fn clearnet_p2p_config(&self) -> cuprate_p2p::P2PConfig<ClearNet> {
cuprate_p2p::P2PConfig {
network: self.network,
seeds: p2p::clear_net_seed_nodes(self.network),
outbound_connections: self.p2p.clear_net.general.outbound_connections,
extra_outbound_connections: self.p2p.clear_net.general.extra_outbound_connections,
max_inbound_connections: self.p2p.clear_net.general.max_inbound_connections,
gray_peers_percent: self.p2p.clear_net.general.gray_peers_percent,
server_config: Some(ClearNetServerCfg {
ip: self.p2p.clear_net.listen_on,
}),
p2p_port: self.p2p.clear_net.general.p2p_port,
// TODO: set this if a public RPC server is set.
rpc_port: 0,
address_book_config: self
.p2p
.clear_net
.general
.address_book_config(&self.fs.cache_directory, self.network),
}
}
/// The [`ContextConfig`].
pub const fn context_config(&self) -> ContextConfig {
match self.network {
Network::Mainnet => ContextConfig::main_net(),
Network::Stagenet => ContextConfig::stage_net(),
Network::Testnet => ContextConfig::test_net(),
}
}
/// The [`cuprate_blockchain`] config.
pub fn blockchain_config(&self) -> cuprate_blockchain::config::Config {
let blockchain = &self.storage.blockchain;
// We don't set reader threads as we manually make the reader threadpool.
cuprate_blockchain::config::ConfigBuilder::default()
.network(self.network)
.data_directory(self.fs.data_directory.clone())
.sync_mode(blockchain.shared.sync_mode)
.build()
}
/// The [`BlockDownloaderConfig`].
pub fn block_downloader_config(&self) -> BlockDownloaderConfig {
self.p2p.block_downloader.clone().into()
}
}

View file

@@ -0,0 +1,55 @@
use std::{io::Write, path::PathBuf, process::exit};
use clap::builder::TypedValueParser;
use cuprate_helper::network::Network;
use crate::{config::Config, constants::EXAMPLE_CONFIG};
/// Cuprate Args.
#[derive(clap::Parser, Debug)]
#[command(version, about)]
pub struct Args {
/// The network to run on.
#[arg(
long,
default_value_t = Network::Mainnet,
value_parser = clap::builder::PossibleValuesParser::new(["mainnet", "testnet", "stagenet"])
.map(|s| s.parse::<Network>().unwrap()),
)]
pub network: Network,
/// The amount of outbound clear-net connections to maintain.
#[arg(long)]
pub outbound_connections: Option<usize>,
/// The PATH of the `cuprated` config file.
#[arg(long)]
pub config_file: Option<PathBuf>,
/// Generate a config file and print it to stdout.
#[arg(long)]
pub generate_config: bool,
}
impl Args {
/// Complete any quick requests asked for in [`Args`].
///
/// May cause the process to [`exit`].
pub fn do_quick_requests(&self) {
if self.generate_config {
println!("{EXAMPLE_CONFIG}");
exit(0);
}
}
/// Apply the [`Args`] to the given [`Config`].
///
/// This may exit the program if a config value was set that requires an early exit.
pub const fn apply_args(&self, mut config: Config) -> Config {
config.network = self.network;
if let Some(outbound_connections) = self.outbound_connections {
config.p2p.clear_net.general.outbound_connections = outbound_connections;
}
config
}
}
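
A minimal sketch of how these flags parse, using clap's `parse_from` (the `Args` type above is assumed in scope; the values are illustrative):

// Sketch: exercising the derived parser without touching the real process args.
use clap::Parser;

fn parse_example_args() {
    let args = Args::parse_from([
        "cuprated",
        "--network", "testnet",
        "--outbound-connections", "32",
    ]);
    assert_eq!(args.network, Network::Testnet);
    assert_eq!(args.outbound_connections, Some(32));
    assert!(!args.generate_config);
}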

View file

@@ -0,0 +1,21 @@
use std::path::PathBuf;
use serde::{Deserialize, Serialize};
use cuprate_helper::fs::{CUPRATE_CACHE_DIR, CUPRATE_DATA_DIR};
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct FileSystemConfig {
pub data_directory: PathBuf,
pub cache_directory: PathBuf,
}
impl Default for FileSystemConfig {
fn default() -> Self {
Self {
data_directory: CUPRATE_DATA_DIR.to_path_buf(),
cache_directory: CUPRATE_CACHE_DIR.to_path_buf(),
}
}
}

View file

@@ -0,0 +1,178 @@
use std::{
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
path::Path,
time::Duration,
};
use serde::{Deserialize, Serialize};
use cuprate_helper::{fs::address_book_path, network::Network};
/// P2P config.
#[derive(Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct P2PConfig {
/// Clear-net config.
pub clear_net: ClearNetConfig,
/// Block downloader config.
pub block_downloader: BlockDownloaderConfig,
}
#[derive(Clone, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct BlockDownloaderConfig {
/// The size in bytes of the buffer between the block downloader and the place which
/// is consuming the downloaded blocks.
pub buffer_bytes: usize,
/// The size of the in progress queue (in bytes) at which we stop requesting more blocks.
pub in_progress_queue_bytes: usize,
/// The [`Duration`] between checking the client pool for free peers.
pub check_client_pool_interval: Duration,
/// The target size of a single batch of blocks (in bytes).
pub target_batch_bytes: usize,
}
impl From<BlockDownloaderConfig> for cuprate_p2p::block_downloader::BlockDownloaderConfig {
fn from(value: BlockDownloaderConfig) -> Self {
Self {
buffer_bytes: value.buffer_bytes,
in_progress_queue_bytes: value.in_progress_queue_bytes,
check_client_pool_interval: value.check_client_pool_interval,
target_batch_bytes: value.target_batch_bytes,
initial_batch_len: 1,
}
}
}
impl Default for BlockDownloaderConfig {
fn default() -> Self {
Self {
buffer_bytes: 50_000_000,
in_progress_queue_bytes: 50_000_000,
check_client_pool_interval: Duration::from_secs(30),
target_batch_bytes: 5_000_000,
}
}
}
/// The config values for P2P clear-net.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct ClearNetConfig {
/// The server config.
pub listen_on: IpAddr,
#[serde(flatten)]
pub general: SharedNetConfig,
}
impl Default for ClearNetConfig {
fn default() -> Self {
Self {
listen_on: IpAddr::V4(Ipv4Addr::UNSPECIFIED),
general: Default::default(),
}
}
}
/// Network config values shared between all network zones.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct SharedNetConfig {
/// The number of outbound connections to make and try keep.
pub outbound_connections: usize,
/// The amount of extra connections we can make if we are under load from the rest of Cuprate.
pub extra_outbound_connections: usize,
/// The maximum amount of inbound connections
pub max_inbound_connections: usize,
/// The percent of connections that should be to peers we haven't connected to before.
pub gray_peers_percent: f64,
/// port to use to accept p2p connections.
pub p2p_port: u16,
/// The address book config.
address_book_config: AddressBookConfig,
}
impl SharedNetConfig {
/// Returns the [`AddressBookConfig`].
pub fn address_book_config(
&self,
cache_dir: &Path,
network: Network,
) -> cuprate_address_book::AddressBookConfig {
cuprate_address_book::AddressBookConfig {
max_white_list_length: self.address_book_config.max_white_list_length,
max_gray_list_length: self.address_book_config.max_gray_list_length,
peer_store_directory: address_book_path(cache_dir, network),
peer_save_period: self.address_book_config.peer_save_period,
}
}
}
impl Default for SharedNetConfig {
fn default() -> Self {
Self {
outbound_connections: 64,
extra_outbound_connections: 8,
max_inbound_connections: 128,
gray_peers_percent: 0.7,
p2p_port: 0,
address_book_config: AddressBookConfig::default(),
}
}
}
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct AddressBookConfig {
max_white_list_length: usize,
max_gray_list_length: usize,
peer_save_period: Duration,
}
impl Default for AddressBookConfig {
fn default() -> Self {
Self {
max_white_list_length: 1_000,
max_gray_list_length: 5_000,
peer_save_period: Duration::from_secs(30),
}
}
}
/// Seed nodes for [`ClearNet`](cuprate_p2p_core::ClearNet).
pub fn clear_net_seed_nodes(network: Network) -> Vec<SocketAddr> {
let seeds = match network {
Network::Mainnet => [
"176.9.0.187:18080",
"88.198.163.90:18080",
"66.85.74.134:18080",
"51.79.173.165:18080",
"192.99.8.110:18080",
"37.187.74.171:18080",
"77.172.183.193:18080",
]
.as_slice(),
Network::Stagenet => [
"176.9.0.187:38080",
"51.79.173.165:38080",
"192.99.8.110:38080",
"37.187.74.171:38080",
"77.172.183.193:38080",
]
.as_slice(),
Network::Testnet => [
"176.9.0.187:28080",
"51.79.173.165:28080",
"192.99.8.110:28080",
"37.187.74.171:28080",
"77.172.183.193:28080",
]
.as_slice(),
};
seeds
.iter()
.map(|s| s.parse())
.collect::<Result<_, _>>()
.unwrap()
}

View file

@@ -0,0 +1,67 @@
use std::path::PathBuf;
use serde::{Deserialize, Serialize};
use cuprate_database::config::SyncMode;
use cuprate_database_service::ReaderThreads;
use cuprate_helper::fs::CUPRATE_DATA_DIR;
/// The storage config.
#[derive(Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct StorageConfig {
/// The amount of reader threads to spawn between the tx-pool and blockchain.
pub reader_threads: ReaderThreads,
/// The tx-pool config.
pub txpool: TxpoolConfig,
/// The blockchain config.
pub blockchain: BlockchainConfig,
}
/// The blockchain config.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct BlockchainConfig {
#[serde(flatten)]
pub shared: SharedStorageConfig,
}
impl Default for BlockchainConfig {
fn default() -> Self {
Self {
shared: SharedStorageConfig {
sync_mode: SyncMode::Async,
},
}
}
}
/// The tx-pool config.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct TxpoolConfig {
#[serde(flatten)]
pub shared: SharedStorageConfig,
/// The maximum size of the tx-pool.
pub max_txpool_byte_size: usize,
}
impl Default for TxpoolConfig {
fn default() -> Self {
Self {
shared: SharedStorageConfig {
sync_mode: SyncMode::Async,
},
max_txpool_byte_size: 100_000_000,
}
}
}
/// Config values shared between the tx-pool and blockchain.
#[derive(Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct SharedStorageConfig {
/// The [`SyncMode`] of the database.
pub sync_mode: SyncMode,
}
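
The `#[serde(flatten)]` on `shared` is what lets `sync_mode` sit directly beside `max_txpool_byte_size` under `[storage.txpool]` in the example config above. A minimal sketch, assuming `TxpoolConfig` is in scope:

// Sketch: the flattened `SharedStorageConfig` keys parse at the same TOML level.
fn parse_txpool_section() {
    let section = r#"
        sync_mode = "Async"
        max_txpool_byte_size = 100_000_000
    "#;
    let cfg: TxpoolConfig = toml::from_str(section).unwrap();
    assert_eq!(cfg.max_txpool_byte_size, 100_000_000);
}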

View file

@@ -0,0 +1,42 @@
use serde::{Deserialize, Serialize};
use tracing::level_filters::LevelFilter;
/// [`tracing`] config.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct TracingConfig {
/// The default minimum log level.
#[serde(with = "level_filter_serde")]
level: LevelFilter,
}
impl Default for TracingConfig {
fn default() -> Self {
Self {
level: LevelFilter::INFO,
}
}
}
mod level_filter_serde {
use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serializer};
use tracing::level_filters::LevelFilter;
#[expect(clippy::trivially_copy_pass_by_ref, reason = "serde")]
pub fn serialize<S>(level_filter: &LevelFilter, s: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
s.serialize_str(&level_filter.to_string())
}
pub fn deserialize<'de, D>(d: D) -> Result<LevelFilter, D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(d)?;
LevelFilter::from_str(&s).map_err(serde::de::Error::custom)
}
}
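
A minimal sketch of the round-trip this custom serde module enables (the exact output casing of `LevelFilter`'s `Display` is an assumption; the shipped config uses lowercase `"info"`):

// Sketch: `LevelFilter` crosses the TOML boundary as a string.
fn level_round_trip() {
    let cfg: TracingConfig = toml::from_str(r#"level = "debug""#).unwrap();
    let text = toml::to_string(&cfg).unwrap();
    assert!(text.to_lowercase().contains("debug"));
}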

View file

@@ -18,9 +18,12 @@ pub const VERSION_BUILD: &str = if cfg!(debug_assertions) {
pub const PANIC_CRITICAL_SERVICE_ERROR: &str =
    "A service critical to Cuprate's function returned an unexpected error.";

pub const EXAMPLE_CONFIG: &str = include_str!("../Cuprated.toml");

#[cfg(test)]
mod test {
    use super::*;
    use crate::config::Config;

    #[test]
    fn version() {
@@ -35,4 +38,9 @@ mod test {
        assert_eq!(VERSION_BUILD, "0.0.1-release");
    }

    #[test]
    fn generate_config_text_is_valid() {
        let config: Config = toml::from_str(EXAMPLE_CONFIG).unwrap();
    }
}

View file

@@ -29,6 +29,8 @@ fn main() {
    // Initialize global static `LazyLock` data.
    statics::init_lazylock_statics();

    let _config = config::read_config_and_args();

    // TODO: everything else.
    todo!()
}

View file

@@ -1,8 +1,57 @@
//! P2P
//!
//! Will handle initiating the P2P and contains a protocol request handler.
use futures::{FutureExt, TryFutureExt};
use tokio::sync::oneshot;
use tower::ServiceExt;

use cuprate_blockchain::service::BlockchainReadHandle;
use cuprate_consensus::BlockChainContextService;
use cuprate_p2p::{NetworkInterface, P2PConfig};
use cuprate_p2p_core::ClearNet;
use cuprate_txpool::service::TxpoolReadHandle;

use crate::txpool::IncomingTxHandler;

mod core_sync_service;
mod network_address;
pub mod request_handler;

pub use network_address::CrossNetworkInternalPeerId;

/// Starts the P2P clearnet network, returning a [`NetworkInterface`] to interact with it.
///
/// A [`oneshot::Sender`] is also returned to provide the [`IncomingTxHandler`]; until this is
/// provided, network handshakes cannot be completed.
pub async fn start_clearnet_p2p(
blockchain_read_handle: BlockchainReadHandle,
blockchain_context_service: BlockChainContextService,
txpool_read_handle: TxpoolReadHandle,
config: P2PConfig<ClearNet>,
) -> Result<
(
NetworkInterface<ClearNet>,
oneshot::Sender<IncomingTxHandler>,
),
tower::BoxError,
> {
let (incoming_tx_handler_tx, incoming_tx_handler_rx) = oneshot::channel();
let request_handler_maker = request_handler::P2pProtocolRequestHandlerMaker {
blockchain_read_handle,
blockchain_context_service: blockchain_context_service.clone(),
txpool_read_handle,
incoming_tx_handler: None,
incoming_tx_handler_fut: incoming_tx_handler_rx.shared(),
};
Ok((
cuprate_p2p::initialize_network(
request_handler_maker.map_response(|s| s.map_err(Into::into)),
core_sync_service::CoreSyncService(blockchain_context_service),
config,
)
.await?,
incoming_tx_handler_tx,
))
}
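
A sketch of the intended call pattern (the handles are assumed to come from the rest of cuprated's init code; this is not code from the PR):

// Sketch: start the network first, then wire in the `IncomingTxHandler`
// once the txpool side exists. Handshakes stall until the send happens.
async fn wire_up_clearnet(
    blockchain_read: BlockchainReadHandle,
    context_svc: BlockChainContextService,
    txpool_read: TxpoolReadHandle,
    config: P2PConfig<ClearNet>,
    incoming_tx_handler: IncomingTxHandler,
) -> Result<NetworkInterface<ClearNet>, tower::BoxError> {
    let (net, tx_handler_tx) =
        start_clearnet_p2p(blockchain_read, context_svc, txpool_read, config).await?;

    tx_handler_tx
        .send(incoming_tx_handler)
        .map_err(|_| "clearnet network was dropped")?;

    Ok(net)
}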

View file

@@ -0,0 +1,49 @@
use std::task::{Context, Poll};
use futures::{future::BoxFuture, FutureExt, TryFutureExt};
use tower::Service;
use cuprate_consensus::{
BlockChainContextRequest, BlockChainContextResponse, BlockChainContextService,
};
use cuprate_helper::{cast::usize_to_u64, map::split_u128_into_low_high_bits};
use cuprate_p2p_core::services::{CoreSyncDataRequest, CoreSyncDataResponse};
use cuprate_wire::CoreSyncData;
/// The core sync service.
#[derive(Clone)]
pub struct CoreSyncService(pub BlockChainContextService);
impl Service<CoreSyncDataRequest> for CoreSyncService {
type Response = CoreSyncDataResponse;
type Error = tower::BoxError;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.poll_ready(cx)
}
fn call(&mut self, _: CoreSyncDataRequest) -> Self::Future {
self.0
.call(BlockChainContextRequest::Context)
.map_ok(|res| {
let BlockChainContextResponse::Context(context) = res else {
unreachable!()
};
let context = context.unchecked_blockchain_context();
let (cumulative_difficulty, cumulative_difficulty_top64) =
split_u128_into_low_high_bits(context.cumulative_difficulty);
CoreSyncDataResponse(CoreSyncData {
cumulative_difficulty,
cumulative_difficulty_top64,
current_height: usize_to_u64(context.chain_height),
pruning_seed: 0,
top_id: context.top_hash,
top_version: context.current_hf.as_u8(),
})
})
.boxed()
}
}
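
The cumulative difficulty is a `u128` carried on the wire as two `u64` words; a minimal sketch of the assumed semantics of `split_u128_into_low_high_bits`:

// Sketch: low word first, high word second (an assumption about the helper).
fn split_example() {
    let cumulative_difficulty: u128 = (7u128 << 64) | 42;
    let low = cumulative_difficulty as u64; // low 64 bits
    let high = (cumulative_difficulty >> 64) as u64; // high 64 bits
    assert_eq!((low, high), (42, 7));
}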

View file

@@ -1 +1,422 @@
use std::{
collections::HashSet,
future::{ready, Ready},
hash::Hash,
task::{Context, Poll},
};
use bytes::Bytes;
use futures::{
future::{BoxFuture, Shared},
FutureExt,
};
use monero_serai::{block::Block, transaction::Transaction};
use tokio::sync::{broadcast, oneshot, watch};
use tokio_stream::wrappers::WatchStream;
use tower::{Service, ServiceExt};
use cuprate_blockchain::service::BlockchainReadHandle;
use cuprate_consensus::{
transactions::new_tx_verification_data, BlockChainContextRequest, BlockChainContextResponse,
BlockChainContextService,
};
use cuprate_dandelion_tower::TxState;
use cuprate_fixed_bytes::ByteArrayVec;
use cuprate_helper::cast::u64_to_usize;
use cuprate_helper::{
asynch::rayon_spawn_async,
cast::usize_to_u64,
map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits},
};
use cuprate_p2p::constants::{
MAX_BLOCKS_IDS_IN_CHAIN_ENTRY, MAX_BLOCK_BATCH_LEN, MAX_TRANSACTION_BLOB_SIZE, MEDIUM_BAN,
};
use cuprate_p2p_core::{
client::{InternalPeerID, PeerInformation},
NetZoneAddress, NetworkZone, ProtocolRequest, ProtocolResponse,
};
use cuprate_txpool::service::TxpoolReadHandle;
use cuprate_types::{
blockchain::{BlockchainReadRequest, BlockchainResponse},
BlockCompleteEntry, TransactionBlobs, TxsInBlock,
};
use cuprate_wire::protocol::{
ChainRequest, ChainResponse, FluffyMissingTransactionsRequest, GetObjectsRequest,
GetObjectsResponse, NewFluffyBlock, NewTransactions,
};
use crate::{
blockchain::interface::{self as blockchain_interface, IncomingBlockError},
constants::PANIC_CRITICAL_SERVICE_ERROR,
p2p::CrossNetworkInternalPeerId,
txpool::{IncomingTxError, IncomingTxHandler, IncomingTxs},
};
/// The P2P protocol request handler [`MakeService`](tower::MakeService).
#[derive(Clone)]
pub struct P2pProtocolRequestHandlerMaker {
pub blockchain_read_handle: BlockchainReadHandle,
pub blockchain_context_service: BlockChainContextService,
pub txpool_read_handle: TxpoolReadHandle,
/// The [`IncomingTxHandler`], wrapped in an [`Option`] as there is a cyclic reference between [`P2pProtocolRequestHandlerMaker`]
/// and the [`IncomingTxHandler`].
pub incoming_tx_handler: Option<IncomingTxHandler>,
/// A [`Future`](std::future::Future) that produces the [`IncomingTxHandler`].
pub incoming_tx_handler_fut: Shared<oneshot::Receiver<IncomingTxHandler>>,
}
impl<A: NetZoneAddress> Service<PeerInformation<A>> for P2pProtocolRequestHandlerMaker
where
InternalPeerID<A>: Into<CrossNetworkInternalPeerId>,
{
type Response = P2pProtocolRequestHandler<A>;
type Error = tower::BoxError;
type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.incoming_tx_handler.is_none() {
return self
.incoming_tx_handler_fut
.poll_unpin(cx)
.map(|incoming_tx_handler| {
self.incoming_tx_handler = Some(incoming_tx_handler?);
Ok(())
});
}
Poll::Ready(Ok(()))
}
fn call(&mut self, peer_information: PeerInformation<A>) -> Self::Future {
let Some(incoming_tx_handler) = self.incoming_tx_handler.clone() else {
panic!("poll_ready was not called or did not return `Poll::Ready`")
};
// TODO: check sync info?
let blockchain_read_handle = self.blockchain_read_handle.clone();
let txpool_read_handle = self.txpool_read_handle.clone();
ready(Ok(P2pProtocolRequestHandler {
peer_information,
blockchain_read_handle,
blockchain_context_service: self.blockchain_context_service.clone(),
txpool_read_handle,
incoming_tx_handler,
}))
}
}
/// The P2P protocol request handler.
#[derive(Clone)]
pub struct P2pProtocolRequestHandler<N: NetZoneAddress> {
peer_information: PeerInformation<N>,
blockchain_read_handle: BlockchainReadHandle,
blockchain_context_service: BlockChainContextService,
txpool_read_handle: TxpoolReadHandle,
incoming_tx_handler: IncomingTxHandler,
}
impl<A: NetZoneAddress> Service<ProtocolRequest> for P2pProtocolRequestHandler<A>
where
InternalPeerID<A>: Into<CrossNetworkInternalPeerId>,
{
type Response = ProtocolResponse;
type Error = anyhow::Error;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, request: ProtocolRequest) -> Self::Future {
match request {
ProtocolRequest::GetObjects(r) => {
get_objects(r, self.blockchain_read_handle.clone()).boxed()
}
ProtocolRequest::GetChain(r) => {
get_chain(r, self.blockchain_read_handle.clone()).boxed()
}
ProtocolRequest::FluffyMissingTxs(r) => {
fluffy_missing_txs(r, self.blockchain_read_handle.clone()).boxed()
}
ProtocolRequest::NewBlock(_) => ready(Err(anyhow::anyhow!(
"Peer sent a full block when we support fluffy blocks"
)))
.boxed(),
ProtocolRequest::NewFluffyBlock(r) => new_fluffy_block(
self.peer_information.clone(),
r,
self.blockchain_read_handle.clone(),
self.txpool_read_handle.clone(),
)
.boxed(),
ProtocolRequest::NewTransactions(r) => new_transactions(
self.peer_information.clone(),
r,
self.blockchain_context_service.clone(),
self.incoming_tx_handler.clone(),
)
.boxed(),
ProtocolRequest::GetTxPoolCompliment(_) => ready(Ok(ProtocolResponse::NA)).boxed(), // TODO: should we support this?
}
}
}
//---------------------------------------------------------------------------------------------------- Handler functions
/// [`ProtocolRequest::GetObjects`]
async fn get_objects(
request: GetObjectsRequest,
mut blockchain_read_handle: BlockchainReadHandle,
) -> anyhow::Result<ProtocolResponse> {
if request.blocks.len() > MAX_BLOCK_BATCH_LEN {
anyhow::bail!("Peer requested more blocks than allowed.")
}
let block_hashes: Vec<[u8; 32]> = (&request.blocks).into();
// deallocate the backing `Bytes`.
drop(request);
let BlockchainResponse::BlockCompleteEntries {
blocks,
missing_hashes,
blockchain_height,
} = blockchain_read_handle
.ready()
.await?
.call(BlockchainReadRequest::BlockCompleteEntries(block_hashes))
.await?
else {
unreachable!();
};
Ok(ProtocolResponse::GetObjects(GetObjectsResponse {
blocks,
missed_ids: ByteArrayVec::from(missing_hashes),
current_blockchain_height: usize_to_u64(blockchain_height),
}))
}
/// [`ProtocolRequest::GetChain`]
async fn get_chain(
request: ChainRequest,
mut blockchain_read_handle: BlockchainReadHandle,
) -> anyhow::Result<ProtocolResponse> {
if request.block_ids.len() > MAX_BLOCKS_IDS_IN_CHAIN_ENTRY {
anyhow::bail!("Peer sent too many block hashes in chain request.")
}
let block_hashes: Vec<[u8; 32]> = (&request.block_ids).into();
let want_pruned_data = request.prune;
// deallocate the backing `Bytes`.
drop(request);
let BlockchainResponse::NextChainEntry {
start_height,
chain_height,
block_ids,
block_weights,
cumulative_difficulty,
first_block_blob,
} = blockchain_read_handle
.ready()
.await?
.call(BlockchainReadRequest::NextChainEntry(block_hashes, 10_000))
.await?
else {
unreachable!();
};
let Some(start_height) = start_height else {
anyhow::bail!("The peers chain has a different genesis block than ours.");
};
let (cumulative_difficulty_low64, cumulative_difficulty_top64) =
split_u128_into_low_high_bits(cumulative_difficulty);
Ok(ProtocolResponse::GetChain(ChainResponse {
start_height: usize_to_u64(std::num::NonZero::get(start_height)),
total_height: usize_to_u64(chain_height),
cumulative_difficulty_low64,
cumulative_difficulty_top64,
m_block_ids: ByteArrayVec::from(block_ids),
first_block: first_block_blob.map_or(Bytes::new(), Bytes::from),
// only needed when pruned
m_block_weights: if want_pruned_data {
block_weights.into_iter().map(usize_to_u64).collect()
} else {
vec![]
},
}))
}
/// [`ProtocolRequest::FluffyMissingTxs`]
async fn fluffy_missing_txs(
mut request: FluffyMissingTransactionsRequest,
mut blockchain_read_handle: BlockchainReadHandle,
) -> anyhow::Result<ProtocolResponse> {
let tx_indexes = std::mem::take(&mut request.missing_tx_indices);
let block_hash: [u8; 32] = *request.block_hash;
let current_blockchain_height = request.current_blockchain_height;
// deallocate the backing `Bytes`.
drop(request);
let BlockchainResponse::TxsInBlock(res) = blockchain_read_handle
.ready()
.await?
.call(BlockchainReadRequest::TxsInBlock {
block_hash,
tx_indexes,
})
.await?
else {
unreachable!();
};
let Some(TxsInBlock { block, txs }) = res else {
anyhow::bail!("The peer requested txs out of range.");
};
Ok(ProtocolResponse::NewFluffyBlock(NewFluffyBlock {
b: BlockCompleteEntry {
block: Bytes::from(block),
txs: TransactionBlobs::Normal(txs.into_iter().map(Bytes::from).collect()),
pruned: false,
// only needed for pruned blocks.
block_weight: 0,
},
current_blockchain_height,
}))
}
/// [`ProtocolRequest::NewFluffyBlock`]
async fn new_fluffy_block<A: NetZoneAddress>(
peer_information: PeerInformation<A>,
request: NewFluffyBlock,
mut blockchain_read_handle: BlockchainReadHandle,
mut txpool_read_handle: TxpoolReadHandle,
) -> anyhow::Result<ProtocolResponse> {
// TODO: check context service here and ignore the block?
let current_blockchain_height = request.current_blockchain_height;
peer_information
.core_sync_data
.lock()
.unwrap()
.current_height = current_blockchain_height;
let (block, txs) = rayon_spawn_async(move || -> Result<_, anyhow::Error> {
let block = Block::read(&mut request.b.block.as_ref())?;
let tx_blobs = request
.b
.txs
.take_normal()
.ok_or(anyhow::anyhow!("Peer sent pruned txs in fluffy block"))?;
let txs = tx_blobs
.into_iter()
.map(|tx_blob| {
if tx_blob.len() > MAX_TRANSACTION_BLOB_SIZE {
anyhow::bail!("Peer sent a transaction over the size limit.");
}
let tx = Transaction::read(&mut tx_blob.as_ref())?;
Ok((tx.hash(), tx))
})
.collect::<Result<_, anyhow::Error>>()?;
// The backing `Bytes` will be deallocated when this closure returns.
Ok((block, txs))
})
.await?;
let res = blockchain_interface::handle_incoming_block(
block,
txs,
&mut blockchain_read_handle,
&mut txpool_read_handle,
)
.await;
match res {
Ok(_) => Ok(ProtocolResponse::NA),
Err(IncomingBlockError::UnknownTransactions(block_hash, missing_tx_indices)) => Ok(
ProtocolResponse::FluffyMissingTransactionsRequest(FluffyMissingTransactionsRequest {
block_hash: block_hash.into(),
current_blockchain_height,
missing_tx_indices: missing_tx_indices.into_iter().map(usize_to_u64).collect(),
}),
),
Err(IncomingBlockError::Orphan) => {
// Block's parent was unknown, could be syncing?
Ok(ProtocolResponse::NA)
}
Err(e) => Err(e.into()),
}
}
/// [`ProtocolRequest::NewTransactions`]
async fn new_transactions<A>(
peer_information: PeerInformation<A>,
request: NewTransactions,
mut blockchain_context_service: BlockChainContextService,
mut incoming_tx_handler: IncomingTxHandler,
) -> anyhow::Result<ProtocolResponse>
where
A: NetZoneAddress,
InternalPeerID<A>: Into<CrossNetworkInternalPeerId>,
{
let BlockChainContextResponse::Context(context) = blockchain_context_service
.ready()
.await
.expect(PANIC_CRITICAL_SERVICE_ERROR)
.call(BlockChainContextRequest::Context)
.await
.expect(PANIC_CRITICAL_SERVICE_ERROR)
else {
unreachable!()
};
let context = context.unchecked_blockchain_context();
// If we are more than 2 blocks behind the peer then ignore the txs - we are probably still syncing.
if usize_to_u64(context.chain_height + 2)
< peer_information
.core_sync_data
.lock()
.unwrap()
.current_height
{
return Ok(ProtocolResponse::NA);
}
let state = if request.dandelionpp_fluff {
TxState::Fluff
} else {
TxState::Stem {
from: peer_information.id.into(),
}
};
// Drop all the data except the stuff we still need.
let NewTransactions { txs, .. } = request;
let res = incoming_tx_handler
.ready()
.await
.expect(PANIC_CRITICAL_SERVICE_ERROR)
.call(IncomingTxs { txs, state })
.await;
match res {
Ok(()) => Ok(ProtocolResponse::NA),
Err(e) => Err(e.into()),
}
}

View file

@@ -12,4 +12,4 @@ mod dandelion;
 mod incoming_tx;
 mod txs_being_handled;

-pub use incoming_tx::IncomingTxHandler;
+pub use incoming_tx::{IncomingTxError, IncomingTxHandler, IncomingTxs};

View file

@@ -43,9 +43,13 @@ use crate::{
};

/// An error that can happen handling an incoming tx.
#[derive(Debug, thiserror::Error)]
pub enum IncomingTxError {
    #[error("Error parsing tx: {0}")]
    Parse(std::io::Error),
    #[error(transparent)]
    Consensus(ExtendedConsensusError),
    #[error("Duplicate tx in message")]
    DuplicateTransaction,
}
@@ -67,6 +71,7 @@ pub(super) type TxId = [u8; 32];
/// The service than handles incoming transaction pool transactions.
///
/// This service handles everything including verifying the tx, adding it to the pool and routing it to other nodes.
#[derive(Clone)]
pub struct IncomingTxHandler {
    /// A store of txs currently being handled in incoming tx requests.
    pub(super) txs_being_handled: TxsBeingHandled,

View file

@@ -35,6 +35,8 @@ futures = { workspace = true, optional = true, features = ["std"] }
monero-serai = { workspace = true, optional = true }
rayon = { workspace = true, optional = true }
serde = { workspace = true, optional = true, features = ["derive"] }

# This is kinda a stupid work around.
# [thread] needs to activate one of these libs (windows|libc)
# although it depends on what target we're building for.

View file

@@ -28,7 +28,12 @@
 //! - <https://docs.rs/dirs>

 //---------------------------------------------------------------------------------------------------- Use
-use std::{path::PathBuf, sync::LazyLock};
+use std::{
+    path::{Path, PathBuf},
+    sync::LazyLock,
+};
+
+use crate::network::Network;

 //---------------------------------------------------------------------------------------------------- Const
 /// Cuprate's main directory.
@@ -58,6 +63,9 @@ pub const CUPRATE_DIR: &str = {
     }
 };

+/// The default name of Cuprate's config file.
+pub const DEFAULT_CONFIG_FILE_NAME: &str = "Cuprated.toml";
+
 //---------------------------------------------------------------------------------------------------- Directories
 /// Create a `LazyLock` for common PATHs used by Cuprate.
 ///
@@ -150,32 +158,61 @@ impl_path_lazylock! {
     CUPRATE_DATA_DIR,
     data_dir,
     "",
+}

-    /// Cuprate's blockchain directory.
-    ///
-    /// This is the PATH used for any Cuprate blockchain files.
-    ///
-    /// | OS      | PATH                                                           |
-    /// |---------|----------------------------------------------------------------|
-    /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\blockchain\`           |
-    /// | macOS   | `/Users/Alice/Library/Application Support/Cuprate/blockchain/` |
-    /// | Linux   | `/home/alice/.local/share/cuprate/blockchain/`                 |
-    CUPRATE_BLOCKCHAIN_DIR,
-    data_dir,
-    "blockchain",
-
-    /// Cuprate's transaction pool directory.
-    ///
-    /// This is the PATH used for any Cuprate txpool files.
-    ///
-    /// | OS      | PATH                                                       |
-    /// |---------|------------------------------------------------------------|
-    /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\txpool\`           |
-    /// | macOS   | `/Users/Alice/Library/Application Support/Cuprate/txpool/` |
-    /// | Linux   | `/home/alice/.local/share/cuprate/txpool/`                 |
-    CUPRATE_TXPOOL_DIR,
-    data_dir,
-    "txpool",
-}
+/// Joins the [`Network`] to the [`Path`].
+///
+/// This will keep the path the same for [`Network::Mainnet`].
+fn path_with_network(path: &Path, network: Network) -> PathBuf {
+    match network {
+        Network::Mainnet => path.to_path_buf(),
+        network => path.join(network.to_string()),
+    }
+}
+
+/// Cuprate's blockchain directory.
+///
+/// This is the PATH used for any Cuprate blockchain files.
+///
+/// ```rust
+/// use cuprate_helper::{network::Network, fs::{CUPRATE_DATA_DIR, blockchain_path}};
+///
+/// assert_eq!(blockchain_path(&**CUPRATE_DATA_DIR, Network::Mainnet).as_path(), CUPRATE_DATA_DIR.join("blockchain"));
+/// assert_eq!(blockchain_path(&**CUPRATE_DATA_DIR, Network::Stagenet).as_path(), CUPRATE_DATA_DIR.join(Network::Stagenet.to_string()).join("blockchain"));
+/// assert_eq!(blockchain_path(&**CUPRATE_DATA_DIR, Network::Testnet).as_path(), CUPRATE_DATA_DIR.join(Network::Testnet.to_string()).join("blockchain"));
+/// ```
+pub fn blockchain_path(data_dir: &Path, network: Network) -> PathBuf {
+    path_with_network(data_dir, network).join("blockchain")
+}
+
+/// Cuprate's txpool directory.
+///
+/// This is the PATH used for any Cuprate txpool files.
+///
+/// ```rust
+/// use cuprate_helper::{network::Network, fs::{CUPRATE_DATA_DIR, txpool_path}};
+///
+/// assert_eq!(txpool_path(&**CUPRATE_DATA_DIR, Network::Mainnet).as_path(), CUPRATE_DATA_DIR.join("txpool"));
+/// assert_eq!(txpool_path(&**CUPRATE_DATA_DIR, Network::Stagenet).as_path(), CUPRATE_DATA_DIR.join(Network::Stagenet.to_string()).join("txpool"));
+/// assert_eq!(txpool_path(&**CUPRATE_DATA_DIR, Network::Testnet).as_path(), CUPRATE_DATA_DIR.join(Network::Testnet.to_string()).join("txpool"));
+/// ```
+pub fn txpool_path(data_dir: &Path, network: Network) -> PathBuf {
+    path_with_network(data_dir, network).join("txpool")
+}
+
+/// Cuprate's address-book directory.
+///
+/// This is the PATH used for any Cuprate address-book files.
+///
+/// ```rust
+/// use cuprate_helper::{network::Network, fs::{CUPRATE_CACHE_DIR, address_book_path}};
+///
+/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Mainnet).as_path(), CUPRATE_CACHE_DIR.join("addressbook"));
+/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Stagenet).as_path(), CUPRATE_CACHE_DIR.join(Network::Stagenet.to_string()).join("addressbook"));
+/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Testnet).as_path(), CUPRATE_CACHE_DIR.join(Network::Testnet.to_string()).join("addressbook"));
+/// ```
+pub fn address_book_path(cache_dir: &Path, network: Network) -> PathBuf {
+    path_with_network(cache_dir, network).join("addressbook")
+}

 //---------------------------------------------------------------------------------------------------- Tests
@@ -197,29 +234,21 @@ mod test {
             (&*CUPRATE_CACHE_DIR, ""),
             (&*CUPRATE_CONFIG_DIR, ""),
             (&*CUPRATE_DATA_DIR, ""),
-            (&*CUPRATE_BLOCKCHAIN_DIR, ""),
-            (&*CUPRATE_TXPOOL_DIR, ""),
         ];

         if cfg!(target_os = "windows") {
             array[0].1 = r"AppData\Local\Cuprate";
             array[1].1 = r"AppData\Roaming\Cuprate";
             array[2].1 = r"AppData\Roaming\Cuprate";
-            array[3].1 = r"AppData\Roaming\Cuprate\blockchain";
-            array[4].1 = r"AppData\Roaming\Cuprate\txpool";
         } else if cfg!(target_os = "macos") {
             array[0].1 = "Library/Caches/Cuprate";
             array[1].1 = "Library/Application Support/Cuprate";
             array[2].1 = "Library/Application Support/Cuprate";
-            array[3].1 = "Library/Application Support/Cuprate/blockchain";
-            array[4].1 = "Library/Application Support/Cuprate/txpool";
         } else {
             // Assumes Linux.
             array[0].1 = ".cache/cuprate";
             array[1].1 = ".config/cuprate";
             array[2].1 = ".local/share/cuprate";
-            array[3].1 = ".local/share/cuprate/blockchain";
-            array[4].1 = ".local/share/cuprate/txpool";
         };

         for (path, expected) in array {

View file

@@ -5,6 +5,12 @@
 //! into it's own crate.
 //!
 //! `#[no_std]` compatible.
+// TODO: move to types crate.
+
+use core::{
+    fmt::{Display, Formatter},
+    str::FromStr,
+};

 const MAINNET_NETWORK_ID: [u8; 16] = [
     0x12, 0x30, 0xF1, 0x71, 0x61, 0x04, 0x41, 0x61, 0x17, 0x31, 0x00, 0x82, 0x16, 0xA1, 0xA1, 0x10,
@@ -17,7 +23,8 @@ const STAGENET_NETWORK_ID: [u8; 16] = [
 ];

 /// An enum representing every Monero network.
-#[derive(Debug, Clone, Copy, Default)]
+#[derive(Debug, Clone, Copy, Default, Ord, PartialOrd, Eq, PartialEq)]
+#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
 pub enum Network {
     /// Mainnet
     #[default]
@@ -38,3 +45,28 @@ impl Network {
         }
     }
 }
+
+#[derive(Debug, PartialEq, Eq)]
+pub struct ParseNetworkError;
+
+impl FromStr for Network {
+    type Err = ParseNetworkError;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "mainnet" | "Mainnet" => Ok(Self::Mainnet),
+            "testnet" | "Testnet" => Ok(Self::Testnet),
+            "stagenet" | "Stagenet" => Ok(Self::Stagenet),
+            _ => Err(ParseNetworkError),
+        }
+    }
+}
+
+impl Display for Network {
+    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
+        f.write_str(match self {
+            Self::Mainnet => "mainnet",
+            Self::Testnet => "testnet",
+            Self::Stagenet => "stagenet",
+        })
+    }
+}
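
The `FromStr`/`Display` pair added here round-trips, which is what lets clap map `--network stagenet` back into a `Network`; a minimal sketch:

// Sketch: parse/display round-trip for `Network`.
fn network_round_trip() {
    let net: Network = "stagenet".parse().unwrap();
    assert_eq!(net, Network::Stagenet);
    assert_eq!(net.to_string(), "stagenet");
    assert!("fakenet".parse::<Network>().is_err());
}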

View file

@@ -159,7 +159,7 @@ epee_object!(
     current_blockchain_height: u64,
 );

-/// A request for Txs we are missing from our `TxPool`
+/// A request for txs we are missing from an incoming block.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct FluffyMissingTransactionsRequest {
     /// The Block we are missing the Txs in

View file

@@ -23,7 +23,7 @@ indexmap = { workspace = true, features = ["std"] }
 rand = { workspace = true, features = ["std", "std_rng"] }

-borsh = { workspace = true, features = ["derive", "std"]}
+borsh = { workspace = true, features = ["derive", "std"] }

 [dev-dependencies]
 cuprate-test-utils = { workspace = true }

View file

@@ -15,7 +15,7 @@ fn test_cfg() -> AddressBookConfig {
     AddressBookConfig {
         max_white_list_length: 100,
         max_gray_list_length: 500,
-        peer_store_file: PathBuf::new(),
+        peer_store_directory: PathBuf::new(),
         peer_save_period: Duration::from_secs(60),
     }
 }

View file

@@ -29,8 +29,8 @@ pub struct AddressBookConfig {
     ///
     /// Gray peers are peers we are yet to make a connection to.
     pub max_gray_list_length: usize,
-    /// The location to store the address book.
-    pub peer_store_file: PathBuf,
+    /// The location to store the peer store files.
+    pub peer_store_directory: PathBuf,
     /// The amount of time between saving the address book to disk.
     pub peer_save_period: Duration,
 }
@@ -63,11 +63,6 @@ pub enum AddressBookError {
 pub async fn init_address_book<Z: BorshNetworkZone>(
     cfg: AddressBookConfig,
 ) -> Result<book::AddressBook<Z>, std::io::Error> {
-    tracing::info!(
-        "Loading peers from file: {} ",
-        cfg.peer_store_file.display()
-    );
-
     let (white_list, gray_list) = match store::read_peers_from_disk::<Z>(&cfg).await {
         Ok(res) => res,
         Err(e) if e.kind() == ErrorKind::NotFound => (vec![], vec![]),
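
As a sketch, initialization with the renamed field might look like this (the directory value and the `ClearNet` zone are illustrative, not from the diff):

    use std::{path::PathBuf, time::Duration};

    let cfg = AddressBookConfig {
        max_white_list_length: 1000,
        max_gray_list_length: 5000,
        // A directory now, not a single file; each network zone writes its own
        // `{Z::NAME}_p2p_state` file inside it (see the store changes below).
        peer_store_directory: PathBuf::from("/var/lib/cuprate/addressbook"),
        peer_save_period: Duration::from_secs(90),
    };
    let address_book = init_address_book::<ClearNet>(cfg).await?;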

View file

@@ -39,7 +39,9 @@ pub(crate) fn save_peers_to_disk<Z: BorshNetworkZone>(
     })
     .unwrap();

-    let file = cfg.peer_store_file.clone();
+    let file = cfg
+        .peer_store_directory
+        .join(format!("{}_p2p_state", Z::NAME));

     spawn_blocking(move || fs::write(&file, &data))
 }
@@ -52,7 +54,12 @@ pub(crate) async fn read_peers_from_disk<Z: BorshNetworkZone>(
     ),
     std::io::Error,
 > {
-    let file = cfg.peer_store_file.clone();
+    let file = cfg
+        .peer_store_directory
+        .join(format!("{}_p2p_state", Z::NAME));
+
+    tracing::info!("Loading peers from file: {} ", file.display());
+
     let data = spawn_blocking(move || fs::read(file)).await.unwrap()?;

     let de_ser: DeserPeerDataV1<Z::Addr> = from_slice(&data)?;
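
A minimal sketch of the path both functions now derive (the zone name is illustrative; it stands in for `Z::NAME`):

    use std::path::PathBuf;

    let dir = PathBuf::from("/var/lib/cuprate/addressbook");
    let file = dir.join(format!("{}_p2p_state", "ClearNet"));
    assert_eq!(file, PathBuf::from("/var/lib/cuprate/addressbook/ClearNet_p2p_state"));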

View file

@@ -116,6 +116,7 @@ pub enum ProtocolResponse {
     GetChain(ChainResponse),
     NewFluffyBlock(NewFluffyBlock),
     NewTransactions(NewTransactions),
+    FluffyMissingTransactionsRequest(FluffyMissingTransactionsRequest),
     NA,
 }
@@ -139,6 +140,9 @@ impl PeerResponse {
             ProtocolResponse::GetChain(_) => MessageID::GetChain,
             ProtocolResponse::NewFluffyBlock(_) => MessageID::NewBlock,
             ProtocolResponse::NewTransactions(_) => MessageID::NewFluffyBlock,
+            ProtocolResponse::FluffyMissingTransactionsRequest(_) => {
+                MessageID::FluffyMissingTxs
+            }
             ProtocolResponse::NA => return None,
         },

View file

@@ -71,6 +71,9 @@ impl TryFrom<ProtocolResponse> for ProtocolMessage {
             ProtocolResponse::NewFluffyBlock(val) => Self::NewFluffyBlock(val),
             ProtocolResponse::GetChain(val) => Self::ChainEntryResponse(val),
             ProtocolResponse::GetObjects(val) => Self::GetObjectsResponse(val),
+            ProtocolResponse::FluffyMissingTransactionsRequest(val) => {
+                Self::FluffyMissingTransactionsRequest(val)
+            }
             ProtocolResponse::NA => return Err(MessageConversionError),
         })
     }

View file

@@ -62,15 +62,15 @@ pub struct BlockBatch {
 pub struct BlockDownloaderConfig {
     /// The size in bytes of the buffer between the block downloader and the place which
     /// is consuming the downloaded blocks.
-    pub buffer_size: usize,
+    pub buffer_bytes: usize,
     /// The size of the in progress queue (in bytes) at which we stop requesting more blocks.
-    pub in_progress_queue_size: usize,
+    pub in_progress_queue_bytes: usize,
     /// The [`Duration`] between checking the client pool for free peers.
     pub check_client_pool_interval: Duration,
     /// The target size of a single batch of blocks (in bytes).
-    pub target_batch_size: usize,
+    pub target_batch_bytes: usize,
     /// The initial amount of blocks to request (in number of blocks)
-    pub initial_batch_size: usize,
+    pub initial_batch_len: usize,
 }

 /// An error that occurred in the [`BlockDownloader`].
@@ -145,7 +145,7 @@ where
         + 'static,
     C::Future: Send + 'static,
 {
-    let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_size);
+    let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_bytes);

     let block_downloader = BlockDownloader::new(peer_set, our_chain_svc, buffer_appender, config);
@@ -242,7 +242,7 @@ where
         Self {
             peer_set,
             our_chain_svc,
-            amount_of_blocks_to_request: config.initial_batch_size,
+            amount_of_blocks_to_request: config.initial_batch_len,
             amount_of_blocks_to_request_updated_at: 0,
             amount_of_empty_chain_entries: 0,
             block_download_tasks: JoinSet::new(),
@@ -381,7 +381,7 @@ where
         }

         // If our ready queue is too large send duplicate requests for the blocks we are waiting on.
-        if self.block_queue.size() >= self.config.in_progress_queue_size {
+        if self.block_queue.size() >= self.config.in_progress_queue_bytes {
             return self.request_inflight_batch_again(client);
         }
@@ -565,7 +565,7 @@ where
         self.amount_of_blocks_to_request = calculate_next_block_batch_size(
             block_batch.size,
             block_batch.blocks.len(),
-            self.config.target_batch_size,
+            self.config.target_batch_bytes,
         );

         tracing::debug!(

View file

@@ -66,11 +66,11 @@ proptest! {
                 genesis: *blockchain.blocks.first().unwrap().0
             },
             BlockDownloaderConfig {
-                buffer_size: 1_000,
-                in_progress_queue_size: 10_000,
+                buffer_bytes: 1_000,
+                in_progress_queue_bytes: 10_000,
                 check_client_pool_interval: Duration::from_secs(5),
-                target_batch_size: 5_000,
-                initial_batch_size: 1,
+                target_batch_bytes: 5_000,
+                initial_batch_len: 1,
             });

         let blocks = stream.map(|blocks| blocks.blocks).concat().await;

View file

@@ -52,7 +52,7 @@ pub(crate) const INITIAL_CHAIN_REQUESTS_TO_SEND: usize = 3;
 /// The enforced maximum amount of blocks to request in a batch.
 ///
 /// Requesting more than this will cause the peer to disconnect and potentially lead to bans.
-pub(crate) const MAX_BLOCK_BATCH_LEN: usize = 100;
+pub const MAX_BLOCK_BATCH_LEN: usize = 100;

 /// The timeout that the block downloader will use for requests.
 pub(crate) const BLOCK_DOWNLOADER_REQUEST_TIMEOUT: Duration = Duration::from_secs(30);
@@ -61,13 +61,13 @@ pub(crate) const BLOCK_DOWNLOADER_REQUEST_TIMEOUT: Duration = Duration::from_sec
 /// be less than.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/transactions.html#transaction-size>
-pub(crate) const MAX_TRANSACTION_BLOB_SIZE: usize = 1_000_000;
+pub const MAX_TRANSACTION_BLOB_SIZE: usize = 1_000_000;

 /// The maximum amount of block IDs allowed in a chain entry response.
 ///
 /// ref: <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/cryptonote_config.h#L97>
 // TODO: link to the protocol book when this section is added.
-pub(crate) const MAX_BLOCKS_IDS_IN_CHAIN_ENTRY: usize = 25_000;
+pub const MAX_BLOCKS_IDS_IN_CHAIN_ENTRY: usize = 25_000;

 /// The amount of failures downloading a specific batch before we stop attempting to download it.
 pub(crate) const MAX_DOWNLOAD_FAILURES: usize = 5;

View file

@@ -15,7 +15,7 @@ default = ["heed"]
 heed = ["cuprate-database/heed"]
 redb = ["cuprate-database/redb"]
 redb-memory = ["cuprate-database/redb-memory"]
-serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"]
+serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde", "cuprate-helper/serde"]

 [dependencies]
 cuprate-database = { workspace = true }
@@ -34,6 +34,7 @@ serde = { workspace = true, optional = true }
 tower = { workspace = true }
 thread_local = { workspace = true }
 rayon = { workspace = true }
+bytes = { workspace = true }

 [dev-dependencies]
 cuprate-constants = { workspace = true }

View file

@@ -76,7 +76,7 @@ use cuprate_blockchain::{
 let tmp_dir = tempfile::tempdir()?;
 let db_dir = tmp_dir.path().to_owned();
 let config = ConfigBuilder::new()
-    .db_directory(db_dir.into())
+    .data_directory(db_dir.into())
     .build();

 // Initialize the database environment.

View file

@@ -25,7 +25,7 @@
 //!
 //! let config = ConfigBuilder::new()
 //!     // Use a custom database directory.
-//!     .db_directory(db_dir.into())
+//!     .data_directory(db_dir.into())
 //!     // Use as many reader threads as possible (when using `service`).
 //!     .reader_threads(ReaderThreads::OnePerThread)
 //!     // Use the fastest sync mode.
@@ -41,13 +41,16 @@
 //! ```

 //---------------------------------------------------------------------------------------------------- Import
-use std::{borrow::Cow, path::Path};
+use std::{borrow::Cow, path::PathBuf};

 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};

 use cuprate_database::{config::SyncMode, resize::ResizeAlgorithm};
-use cuprate_helper::fs::CUPRATE_BLOCKCHAIN_DIR;
+use cuprate_helper::{
+    fs::{blockchain_path, CUPRATE_DATA_DIR},
+    network::Network,
+};

 // re-exports
 pub use cuprate_database_service::ReaderThreads;
@@ -59,8 +62,9 @@ pub use cuprate_database_service::ReaderThreads;
 #[derive(Debug, Clone, PartialEq, PartialOrd)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub struct ConfigBuilder {
-    /// [`Config::db_directory`].
-    db_directory: Option<Cow<'static, Path>>,
+    network: Network,
+
+    data_dir: Option<PathBuf>,

     /// [`Config::cuprate_database_config`].
     db_config: cuprate_database::config::ConfigBuilder,
@@ -76,10 +80,12 @@ impl ConfigBuilder {
     /// after this function to use default values.
     pub fn new() -> Self {
         Self {
-            db_directory: None,
-            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(
-                &*CUPRATE_BLOCKCHAIN_DIR,
-            )),
+            network: Network::default(),
+            data_dir: None,
+            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(blockchain_path(
+                &CUPRATE_DATA_DIR,
+                Network::Mainnet,
+            ))),
             reader_threads: None,
         }
     }
@@ -87,21 +93,21 @@ impl ConfigBuilder {
     /// Build into a [`Config`].
     ///
     /// # Default values
-    /// If [`ConfigBuilder::db_directory`] was not called,
-    /// the default [`CUPRATE_BLOCKCHAIN_DIR`] will be used.
+    /// If [`ConfigBuilder::data_directory`] was not called,
+    /// [`blockchain_path`] with [`CUPRATE_DATA_DIR`] and [`Network::Mainnet`] will be used.
     ///
     /// For all other values, [`Default::default`] is used.
     pub fn build(self) -> Config {
         // INVARIANT: all PATH safety checks are done
         // in `helper::fs`. No need to do them here.
-        let db_directory = self
-            .db_directory
-            .unwrap_or_else(|| Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR));
+        let data_dir = self
+            .data_dir
+            .unwrap_or_else(|| CUPRATE_DATA_DIR.to_path_buf());

         let reader_threads = self.reader_threads.unwrap_or_default();
         let db_config = self
             .db_config
-            .db_directory(db_directory)
+            .db_directory(Cow::Owned(blockchain_path(&data_dir, self.network)))
             .reader_threads(reader_threads.as_threads())
             .build();
@@ -111,10 +117,17 @@ impl ConfigBuilder {
         }
     }

-    /// Set a custom database directory (and file) [`Path`].
+    /// Change the network this blockchain database is for.
     #[must_use]
-    pub fn db_directory(mut self, db_directory: Cow<'static, Path>) -> Self {
-        self.db_directory = Some(db_directory);
+    pub const fn network(mut self, network: Network) -> Self {
+        self.network = network;
+        self
+    }
+
+    /// Set a custom database directory (and file) [`PathBuf`].
+    #[must_use]
+    pub fn data_directory(mut self, db_directory: PathBuf) -> Self {
+        self.data_dir = Some(db_directory);
         self
     }
@@ -145,9 +158,7 @@ impl ConfigBuilder {
     /// Good default for testing, and resource-available machines.
     #[must_use]
     pub fn fast(mut self) -> Self {
-        self.db_config =
-            cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR))
-                .fast();
+        self.db_config = self.db_config.fast();

         self.reader_threads = Some(ReaderThreads::OnePerThread);
         self
@@ -159,9 +170,7 @@ impl ConfigBuilder {
     /// Good default for resource-limited machines, e.g. a cheap VPS.
     #[must_use]
     pub fn low_power(mut self) -> Self {
-        self.db_config =
-            cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR))
-                .low_power();
+        self.db_config = self.db_config.low_power();

         self.reader_threads = Some(ReaderThreads::One);
         self
@@ -170,10 +179,13 @@ impl ConfigBuilder {
 impl Default for ConfigBuilder {
     fn default() -> Self {
-        let db_directory = Cow::Borrowed(&**CUPRATE_BLOCKCHAIN_DIR);
         Self {
-            db_directory: Some(db_directory.clone()),
-            db_config: cuprate_database::config::ConfigBuilder::new(db_directory),
+            network: Network::default(),
+            data_dir: Some(CUPRATE_DATA_DIR.to_path_buf()),
+            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(blockchain_path(
+                &CUPRATE_DATA_DIR,
+                Network::default(),
+            ))),
             reader_threads: Some(ReaderThreads::default()),
         }
     }
@@ -201,7 +213,7 @@ impl Config {
     /// Create a new [`Config`] with sane default settings.
     ///
     /// The [`cuprate_database::config::Config::db_directory`]
-    /// will be set to [`CUPRATE_BLOCKCHAIN_DIR`].
+    /// will be set to [`blockchain_path`] with [`CUPRATE_DATA_DIR`] and [`Network::Mainnet`].
     ///
     /// All other values will be [`Default::default`].
     ///
@@ -213,14 +225,14 @@ impl Config {
     ///     resize::ResizeAlgorithm,
     ///     DATABASE_DATA_FILENAME,
     /// };
-    /// use cuprate_helper::fs::*;
+    /// use cuprate_helper::{fs::*, network::Network};
     ///
     /// use cuprate_blockchain::config::*;
     ///
     /// let config = Config::new();
     ///
-    /// assert_eq!(config.db_config.db_directory(), &*CUPRATE_BLOCKCHAIN_DIR);
-    /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_BLOCKCHAIN_DIR));
+    /// assert_eq!(config.db_config.db_directory().as_ref(), blockchain_path(&CUPRATE_DATA_DIR, Network::Mainnet).as_path());
+    /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_DATA_DIR));
     /// assert!(config.db_config.db_file().ends_with(DATABASE_DATA_FILENAME));
     /// assert_eq!(config.db_config.sync_mode, SyncMode::default());
     /// assert_eq!(config.db_config.resize_algorithm, ResizeAlgorithm::default());
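
Put together, a sketch of the new builder flow (the network and directory values are illustrative):

    use cuprate_blockchain::config::ConfigBuilder;
    use cuprate_helper::network::Network;

    // The database now lives under a per-network path derived by `blockchain_path`,
    // so switching networks no longer requires hand-building the directory.
    let config = ConfigBuilder::new()
        .network(Network::Stagenet)                 // defaults to `Network::Mainnet`
        .data_directory("/tmp/cuprate-test".into()) // defaults to `CUPRATE_DATA_DIR`
        .build();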

View file

@@ -2,21 +2,23 @@
 //---------------------------------------------------------------------------------------------------- Import
 use bytemuck::TransparentWrapper;
+use bytes::Bytes;
 use monero_serai::{
     block::{Block, BlockHeader},
     transaction::Transaction,
 };

 use cuprate_database::{
-    DbResult, RuntimeError, StorableVec, {DatabaseRo, DatabaseRw},
+    DbResult, RuntimeError, StorableVec, {DatabaseIter, DatabaseRo, DatabaseRw},
 };
+use cuprate_helper::cast::usize_to_u64;
 use cuprate_helper::{
     map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits},
     tx::tx_fee,
 };
 use cuprate_types::{
-    AltBlockInformation, ChainId, ExtendedBlockHeader, HardFork, VerifiedBlockInformation,
-    VerifiedTransactionInformation,
+    AltBlockInformation, BlockCompleteEntry, ChainId, ExtendedBlockHeader, HardFork,
+    TransactionBlobs, VerifiedBlockInformation, VerifiedTransactionInformation,
 };

 use crate::{
@@ -27,7 +29,7 @@ use crate::{
         output::get_rct_num_outputs,
         tx::{add_tx, remove_tx},
     },
-    tables::{BlockHeights, BlockInfos, Tables, TablesMut},
+    tables::{BlockHeights, BlockInfos, Tables, TablesIter, TablesMut},
     types::{BlockHash, BlockHeight, BlockInfo},
 };
@@ -222,6 +224,64 @@ pub fn pop_block(
     Ok((block_height, block_info.block_hash, block))
 }

+//---------------------------------------------------------------------------------------------------- `get_block_blob_with_tx_indexes`
+/// Retrieve a block's raw bytes, the index of the miner transaction and the number of non miner-txs in the block.
+///
+#[doc = doc_error!()]
+pub fn get_block_blob_with_tx_indexes(
+    block_height: &BlockHeight,
+    tables: &impl Tables,
+) -> Result<(Vec<u8>, u64, usize), RuntimeError> {
+    let miner_tx_idx = tables.block_infos().get(block_height)?.mining_tx_index;
+
+    let block_txs = tables.block_txs_hashes().get(block_height)?.0;
+    let numb_txs = block_txs.len();
+
+    // Get the block header.
+    let mut block = tables.block_header_blobs().get(block_height)?.0;
+
+    // Add the miner tx to the blob.
+    let mut miner_tx_blob = tables.tx_blobs().get(&miner_tx_idx)?.0;
+    block.append(&mut miner_tx_blob);
+
+    // Add the block's tx hashes.
+    monero_serai::io::write_varint(&block_txs.len(), &mut block)
+        .expect("The number of txs per block will not exceed u64::MAX");
+
+    let block_txs_bytes = bytemuck::must_cast_slice(&block_txs);
+    block.extend_from_slice(block_txs_bytes);
+
+    Ok((block, miner_tx_idx, numb_txs))
+}
+
+//---------------------------------------------------------------------------------------------------- `get_block_complete_entry`
+/// Retrieve a [`BlockCompleteEntry`] from the database.
+///
+#[doc = doc_error!()]
+pub fn get_block_complete_entry(
+    block_hash: &BlockHash,
+    tables: &impl TablesIter,
+) -> Result<BlockCompleteEntry, RuntimeError> {
+    let block_height = tables.block_heights().get(block_hash)?;
+    let (block_blob, miner_tx_idx, numb_non_miner_txs) =
+        get_block_blob_with_tx_indexes(&block_height, tables)?;
+
+    let first_tx_idx = miner_tx_idx + 1;
+
+    let tx_blobs = tables
+        .tx_blobs_iter()
+        .get_range(first_tx_idx..(usize_to_u64(numb_non_miner_txs) + first_tx_idx))?
+        .map(|tx_blob| Ok(Bytes::from(tx_blob?.0)))
+        .collect::<Result<_, RuntimeError>>()?;
+
+    Ok(BlockCompleteEntry {
+        block: Bytes::from(block_blob),
+        txs: TransactionBlobs::Normal(tx_blobs),
+        pruned: false,
+        block_weight: 0,
+    })
+}
+
 //---------------------------------------------------------------------------------------------------- `get_block_extended_header_*`
 /// Retrieve a [`ExtendedBlockHeader`] from the database.
 ///
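
A rough usage sketch for the new function, inside a read transaction (environment setup elided; `open_tables` is the crate's existing table-opening helper, as used in the service changes below):

    use cuprate_blockchain::{ops::block::get_block_complete_entry, tables::OpenTables};
    use cuprate_database::{Env, EnvInner, RuntimeError};

    fn fetch_entry(env: &impl Env, block_hash: &[u8; 32]) -> Result<(), RuntimeError> {
        let env_inner = env.env_inner();
        let tx_ro = env_inner.tx_ro()?;
        let tables = env_inner.open_tables(&tx_ro)?;

        // The serialized block plus its non-miner tx blobs, never pruned.
        let entry = get_block_complete_entry(block_hash, &tables)?;
        assert!(!entry.pruned);
        Ok(())
    }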

View file

@@ -4,9 +4,9 @@
 use cuprate_database::{DatabaseRo, DbResult, RuntimeError};

 use crate::{
-    ops::macros::doc_error,
+    ops::{block::block_exists, macros::doc_error},
     tables::{BlockHeights, BlockInfos},
-    types::BlockHeight,
+    types::{BlockHash, BlockHeight},
 };

 //---------------------------------------------------------------------------------------------------- Free Functions
@@ -76,6 +76,44 @@ pub fn cumulative_generated_coins(
     }
 }

+/// Find the split point between our chain and a list of [`BlockHash`]s from another chain.
+///
+/// This function accepts chains in chronological and reverse chronological order, however
+/// if the wrong order is specified the return value is meaningless.
+///
+/// For chronologically ordered chains this will return the index of the first unknown, for reverse
+/// chronologically ordered chains this will return the index of the first known.
+///
+/// If all blocks are known for chronologically ordered chains or unknown for reverse chronologically
+/// ordered chains then the length of the chain will be returned.
+#[doc = doc_error!()]
+#[inline]
+pub fn find_split_point(
+    block_ids: &[BlockHash],
+    chronological_order: bool,
+    table_block_heights: &impl DatabaseRo<BlockHeights>,
+) -> Result<usize, RuntimeError> {
+    let mut err = None;
+
+    // Do a binary search to find the first unknown/known block in the batch.
+    let idx = block_ids.partition_point(|block_id| {
+        match block_exists(block_id, table_block_heights) {
+            Ok(exists) => exists == chronological_order,
+            Err(e) => {
+                err.get_or_insert(e);
+                // if this happens the search is scrapped, just return `false` back.
+                false
+            }
+        }
+    });
+
+    if let Some(e) = err {
+        return Err(e);
+    }
+
+    Ok(idx)
+}
+
 //---------------------------------------------------------------------------------------------------- Tests
 #[cfg(test)]
 mod test {
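
To make the two orderings concrete, here is the `partition_point` behavior on plain strings (a stand-in for block hashes and `block_exists`):

    use std::collections::HashSet;

    let known: HashSet<&str> = ["A", "B", "C"].into();

    // Chronological: known blocks form the prefix, so the index of the
    // first unknown block ("X") is returned.
    let chronological = ["A", "B", "X", "Y"];
    assert_eq!(chronological.partition_point(|id| known.contains(id)), 2);

    // Reverse chronological: unknown blocks form the prefix, so the index
    // of the first known block ("B") is returned.
    let reverse = ["Y", "X", "B", "A"];
    assert_eq!(reverse.partition_point(|id| !known.contains(id)), 2);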

View file

@@ -71,7 +71,7 @@
 //! let tmp_dir = tempfile::tempdir()?;
 //! let db_dir = tmp_dir.path().to_owned();
 //! let config = ConfigBuilder::new()
-//!     .db_directory(db_dir.into())
+//!     .data_directory(db_dir.into())
 //!     .build();
 //!
 //! // Initialize the database environment.

View file

@@ -77,7 +77,7 @@
 //! let tmp_dir = tempfile::tempdir()?;
 //! let db_dir = tmp_dir.path().to_owned();
 //! let config = ConfigBuilder::new()
-//!     .db_directory(db_dir.into())
+//!     .data_directory(db_dir.into())
 //!     .build();
 //!
 //! // Initialize the database thread-pool.

View file

@@ -10,23 +10,26 @@
 //---------------------------------------------------------------------------------------------------- Import
 use std::{
+    cmp::min,
     collections::{HashMap, HashSet},
     sync::Arc,
 };

 use rayon::{
-    iter::{IntoParallelIterator, ParallelIterator},
+    iter::{Either, IntoParallelIterator, ParallelIterator},
     prelude::*,
     ThreadPool,
 };
 use thread_local::ThreadLocal;

-use cuprate_database::{ConcreteEnv, DatabaseRo, DbResult, Env, EnvInner, RuntimeError};
+use cuprate_database::{
+    ConcreteEnv, DatabaseIter, DatabaseRo, DbResult, Env, EnvInner, RuntimeError,
+};
 use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads};
 use cuprate_helper::map::combine_low_high_bits_to_u128;
 use cuprate_types::{
     blockchain::{BlockchainReadRequest, BlockchainResponse},
-    Chain, ChainId, ExtendedBlockHeader, OutputHistogramInput, OutputOnChain,
+    Chain, ChainId, ExtendedBlockHeader, OutputHistogramInput, OutputOnChain, TxsInBlock,
 };

 use crate::{
@@ -36,9 +39,10 @@ use crate::{
         get_alt_chain_history_ranges,
     },
     block::{
-        block_exists, get_block_extended_header_from_height, get_block_height, get_block_info,
+        block_exists, get_block_blob_with_tx_indexes, get_block_complete_entry,
+        get_block_extended_header_from_height, get_block_height, get_block_info,
     },
-    blockchain::{cumulative_generated_coins, top_block_height},
+    blockchain::{cumulative_generated_coins, find_split_point, top_block_height},
     key_image::key_image_exists,
     output::id_to_output_on_chain,
     },
@@ -46,7 +50,7 @@ use crate::{
     free::{compact_history_genesis_not_included, compact_history_index_to_height_offset},
     types::{BlockchainReadHandle, ResponseResult},
     },
-    tables::{AltBlockHeights, BlockHeights, BlockInfos, OpenTables, Tables},
+    tables::{AltBlockHeights, BlockHeights, BlockInfos, OpenTables, Tables, TablesIter},
     types::{
         AltBlockHeight, Amount, AmountIndex, BlockHash, BlockHeight, KeyImage, PreRctOutputId,
     },
@@ -100,6 +104,7 @@ fn map_request(
     /* SOMEDAY: pre-request handling, run some code for each request? */

     match request {
+        R::BlockCompleteEntries(block_hashes) => block_complete_entries(env, block_hashes),
         R::BlockExtendedHeader(block) => block_extended_header(env, block),
         R::BlockHash(block, chain) => block_hash(env, block, chain),
         R::FindBlock(block_hash) => find_block(env, block_hash),
@@ -113,7 +118,12 @@ fn map_request(
         R::NumberOutputsWithAmount(vec) => number_outputs_with_amount(env, vec),
         R::KeyImagesSpent(set) => key_images_spent(env, set),
         R::CompactChainHistory => compact_chain_history(env),
+        R::NextChainEntry(block_hashes, amount) => next_chain_entry(env, &block_hashes, amount),
         R::FindFirstUnknown(block_ids) => find_first_unknown(env, &block_ids),
+        R::TxsInBlock {
+            block_hash,
+            tx_indexes,
+        } => txs_in_block(env, block_hash, tx_indexes),
         R::AltBlocksInChain(chain_id) => alt_blocks_in_chain(env, chain_id),
         R::Block { height } => block(env, height),
         R::BlockByHash(hash) => block_by_hash(env, hash),
@@ -198,6 +208,38 @@ macro_rules! get_tables {
 // TODO: The overhead of parallelism may be too much for every request, perfomace test to find optimal
 // amount of parallelism.

+/// [`BlockchainReadRequest::BlockCompleteEntries`].
+fn block_complete_entries(env: &ConcreteEnv, block_hashes: Vec<BlockHash>) -> ResponseResult {
+    // Prepare tx/tables in `ThreadLocal`.
+    let env_inner = env.env_inner();
+    let tx_ro = thread_local(env);
+    let tables = thread_local(env);
+
+    let (missing_hashes, blocks) = block_hashes
+        .into_par_iter()
+        .map(|block_hash| {
+            let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
+            let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
+
+            match get_block_complete_entry(&block_hash, tables) {
+                Err(RuntimeError::KeyNotFound) => Ok(Either::Left(block_hash)),
+                res => res.map(Either::Right),
+            }
+        })
+        .collect::<DbResult<_>>()?;
+
+    let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
+    let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
+
+    let blockchain_height = crate::ops::blockchain::chain_height(tables.block_heights())?;
+
+    Ok(BlockchainResponse::BlockCompleteEntries {
+        blocks,
+        missing_hashes,
+        blockchain_height,
+    })
+}
+
 /// [`BlockchainReadRequest::BlockExtendedHeader`].
 #[inline]
 fn block_extended_header(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult {
@@ -335,7 +377,7 @@ fn block_extended_header_in_range(
                 }
             })
         })
-        .collect::<Result<Vec<_>, _>>()?
+        .collect::<DbResult<Vec<_>>>()?
     }
 };
@@ -534,6 +576,76 @@ fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult {
     })
 }

+/// [`BlockchainReadRequest::NextChainEntry`]
+///
+/// # Invariant
+/// `block_ids` must be sorted in reverse chronological block order, or else
+/// the returned result is unspecified and meaningless, as this function
+/// performs a binary search.
+fn next_chain_entry(
+    env: &ConcreteEnv,
+    block_ids: &[BlockHash],
+    next_entry_size: usize,
+) -> ResponseResult {
+    // Single-threaded, no `ThreadLocal` required.
+    let env_inner = env.env_inner();
+    let tx_ro = env_inner.tx_ro()?;
+    let tables = env_inner.open_tables(&tx_ro)?;
+    let table_block_heights = tables.block_heights();
+    let table_block_infos = tables.block_infos_iter();
+
+    let idx = find_split_point(block_ids, false, table_block_heights)?;
+
+    // This will happen if we have a different genesis block.
+    if idx == block_ids.len() {
+        return Ok(BlockchainResponse::NextChainEntry {
+            start_height: None,
+            chain_height: 0,
+            block_ids: vec![],
+            block_weights: vec![],
+            cumulative_difficulty: 0,
+            first_block_blob: None,
+        });
+    }
+
+    // The returned chain entry must overlap with one of the blocks we were told about.
+    let first_known_block_hash = block_ids[idx];
+    let first_known_height = table_block_heights.get(&first_known_block_hash)?;
+
+    let chain_height = crate::ops::blockchain::chain_height(table_block_heights)?;
+    let last_height_in_chain_entry = min(first_known_height + next_entry_size, chain_height);
+
+    let (block_ids, block_weights) = table_block_infos
+        .get_range(first_known_height..last_height_in_chain_entry)?
+        .map(|block_info| {
+            let block_info = block_info?;
+
+            Ok((block_info.block_hash, block_info.weight))
+        })
+        .collect::<DbResult<(Vec<_>, Vec<_>)>>()?;
+
+    let top_block_info = table_block_infos.get(&(chain_height - 1))?;
+
+    let first_block_blob = if block_ids.len() >= 2 {
+        Some(get_block_blob_with_tx_indexes(&(first_known_height + 1), &tables)?.0)
+    } else {
+        None
+    };
+
+    Ok(BlockchainResponse::NextChainEntry {
+        start_height: std::num::NonZero::new(first_known_height),
+        chain_height,
+        block_ids,
+        block_weights,
+        cumulative_difficulty: combine_low_high_bits_to_u128(
+            top_block_info.cumulative_difficulty_low,
+            top_block_info.cumulative_difficulty_high,
+        ),
+        first_block_blob,
+    })
+}
+
 /// [`BlockchainReadRequest::FindFirstUnknown`]
 ///
 /// # Invariant
@@ -546,24 +658,7 @@ fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseRes
     let table_block_heights = env_inner.open_db_ro::<BlockHeights>(&tx_ro)?;

-    let mut err = None;
-
-    // Do a binary search to find the first unknown block in the batch.
-    let idx =
-        block_ids.partition_point(
-            |block_id| match block_exists(block_id, &table_block_heights) {
-                Ok(exists) => exists,
-                Err(e) => {
-                    err.get_or_insert(e);
-                    // if this happens the search is scrapped, just return `false` back.
-                    false
-                }
-            },
-        );
-
-    if let Some(e) = err {
-        return Err(e);
-    }
+    let idx = find_split_point(block_ids, true, &table_block_heights)?;

     Ok(if idx == block_ids.len() {
         BlockchainResponse::FindFirstUnknown(None)
@@ -576,6 +671,33 @@ fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseRes
     })
 }

+/// [`BlockchainReadRequest::TxsInBlock`]
+fn txs_in_block(env: &ConcreteEnv, block_hash: [u8; 32], missing_txs: Vec<u64>) -> ResponseResult {
+    // Single-threaded, no `ThreadLocal` required.
+    let env_inner = env.env_inner();
+    let tx_ro = env_inner.tx_ro()?;
+    let tables = env_inner.open_tables(&tx_ro)?;
+
+    let block_height = tables.block_heights().get(&block_hash)?;
+
+    let (block, miner_tx_index, numb_txs) = get_block_blob_with_tx_indexes(&block_height, &tables)?;
+    let first_tx_index = miner_tx_index + 1;
+
+    if numb_txs < missing_txs.len() {
+        return Ok(BlockchainResponse::TxsInBlock(None));
+    }
+
+    let txs = missing_txs
+        .into_iter()
+        .map(|index_offset| Ok(tables.tx_blobs().get(&(first_tx_index + index_offset))?.0))
+        .collect::<DbResult<_>>()?;
+
+    Ok(BlockchainResponse::TxsInBlock(Some(TxsInBlock {
+        block,
+        txs,
+    })))
+}
+
 /// [`BlockchainReadRequest::AltBlocksInChain`]
 fn alt_blocks_in_chain(env: &ConcreteEnv, chain_id: ChainId) -> ResponseResult {
     // Prepare tx/tables in `ThreadLocal`.
@@ -613,7 +735,7 @@ fn alt_blocks_in_chain(env: &ConcreteEnv, chain_id: ChainId) -> ResponseResult {
             )
         })
     })
-    .collect::<Result<_, _>>()?;
+    .collect::<DbResult<_>>()?;

     Ok(BlockchainResponse::AltBlocksInChain(blocks))
 }
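
A hedged sketch of driving one of the new requests through the read handle returned by `init` (see the test changes below); the `tower` plumbing mirrors how cuprate services are called elsewhere, and the handle path is assumed:

    use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse};
    use tower::{Service, ServiceExt};

    async fn demo(
        mut reader: cuprate_blockchain::service::BlockchainReadHandle, // from `init(config)`
        hashes: Vec<[u8; 32]>,
    ) {
        let response = reader
            .ready()
            .await
            .expect("service closed")
            .call(BlockchainReadRequest::BlockCompleteEntries(hashes))
            .await
            .expect("database error");

        if let BlockchainResponse::BlockCompleteEntries { blocks, missing_hashes, blockchain_height } = response {
            println!("{} found, {} missing, height {blockchain_height}", blocks.len(), missing_hashes.len());
        }
    }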

View file

@@ -7,7 +7,6 @@
 //---------------------------------------------------------------------------------------------------- Use
 use std::{
-    borrow::Cow,
     collections::{HashMap, HashSet},
     sync::Arc,
 };
@@ -46,7 +45,7 @@ fn init_service() -> (
 ) {
     let tempdir = tempfile::tempdir().unwrap();
     let config = ConfigBuilder::new()
-        .db_directory(Cow::Owned(tempdir.path().into()))
+        .data_directory(tempdir.path().into())
         .low_power()
         .build();
     let (reader, writer, env) = init(config).unwrap();

View file

@@ -5,7 +5,7 @@
 //! - only used internally

 //---------------------------------------------------------------------------------------------------- Import
-use std::{borrow::Cow, fmt::Debug};
+use std::fmt::Debug;

 use pretty_assertions::assert_eq;
@@ -74,7 +74,7 @@ impl AssertTableLen {
 pub(crate) fn tmp_concrete_env() -> (impl Env, tempfile::TempDir) {
     let tempdir = tempfile::tempdir().unwrap();
     let config = ConfigBuilder::new()
-        .db_directory(Cow::Owned(tempdir.path().into()))
+        .data_directory(tempdir.path().into())
         .low_power()
         .build();
     let env = crate::open(config).unwrap();

View file

@@ -15,7 +15,7 @@ default = ["heed"]
 heed = ["cuprate-database/heed"]
 redb = ["cuprate-database/redb"]
 redb-memory = ["cuprate-database/redb-memory"]
-serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"]
+serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde", "cuprate-helper/serde"]

 [dependencies]
 cuprate-database = { workspace = true, features = ["heed"] }

View file

@@ -78,7 +78,7 @@ use cuprate_txpool::{
 let tmp_dir = tempfile::tempdir()?;
 let db_dir = tmp_dir.path().to_owned();
 let config = ConfigBuilder::new()
-    .db_directory(db_dir.into())
+    .data_directory(db_dir.into())
     .build();

 // Initialize the database environment.

View file

@@ -1,15 +1,18 @@
 //! The transaction pool [`Config`].
-use std::{borrow::Cow, path::Path};
+use std::{borrow::Cow, path::PathBuf};
+
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Serialize};

 use cuprate_database::{
     config::{Config as DbConfig, SyncMode},
     resize::ResizeAlgorithm,
 };
 use cuprate_database_service::ReaderThreads;
-use cuprate_helper::fs::CUPRATE_TXPOOL_DIR;
-
-#[cfg(feature = "serde")]
-use serde::{Deserialize, Serialize};
+use cuprate_helper::{
+    fs::{txpool_path, CUPRATE_DATA_DIR},
+    network::Network,
+};

 /// The default transaction pool weight limit.
 const DEFAULT_TXPOOL_WEIGHT_LIMIT: usize = 600 * 1024 * 1024;
@@ -21,8 +24,9 @@ const DEFAULT_TXPOOL_WEIGHT_LIMIT: usize = 600 * 1024 * 1024;
 #[derive(Debug, Clone, PartialEq, PartialOrd)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub struct ConfigBuilder {
-    /// [`Config::db_directory`].
-    db_directory: Option<Cow<'static, Path>>,
+    network: Network,
+
+    data_dir: Option<PathBuf>,

     /// [`Config::cuprate_database_config`].
     db_config: cuprate_database::config::ConfigBuilder,
@@ -41,10 +45,12 @@ impl ConfigBuilder {
     /// after this function to use default values.
     pub fn new() -> Self {
         Self {
-            db_directory: None,
-            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(
-                &*CUPRATE_TXPOOL_DIR,
-            )),
+            network: Network::default(),
+            data_dir: None,
+            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(txpool_path(
+                &CUPRATE_DATA_DIR,
+                Network::Mainnet,
+            ))),
             reader_threads: None,
             max_txpool_weight: None,
         }
@@ -53,16 +59,16 @@ impl ConfigBuilder {
     /// Build into a [`Config`].
     ///
     /// # Default values
-    /// If [`ConfigBuilder::db_directory`] was not called,
-    /// the default [`CUPRATE_TXPOOL_DIR`] will be used.
+    /// If [`ConfigBuilder::data_directory`] was not called,
+    /// [`txpool_path`] with [`CUPRATE_DATA_DIR`] and [`Network::Mainnet`] will be used.
     ///
     /// For all other values, [`Default::default`] is used.
     pub fn build(self) -> Config {
         // INVARIANT: all PATH safety checks are done
         // in `helper::fs`. No need to do them here.
-        let db_directory = self
-            .db_directory
-            .unwrap_or_else(|| Cow::Borrowed(&*CUPRATE_TXPOOL_DIR));
+        let data_dir = self
+            .data_dir
+            .unwrap_or_else(|| CUPRATE_DATA_DIR.to_path_buf());

         let reader_threads = self.reader_threads.unwrap_or_default();
@@ -72,7 +78,7 @@ impl ConfigBuilder {
         let db_config = self
             .db_config
-            .db_directory(db_directory)
+            .db_directory(Cow::Owned(txpool_path(&data_dir, self.network)))
             .reader_threads(reader_threads.as_threads())
             .build();
@@ -83,6 +89,13 @@ impl ConfigBuilder {
         }
     }

+    /// Change the network this database is for.
+    #[must_use]
+    pub const fn network(mut self, network: Network) -> Self {
+        self.network = network;
+        self
+    }
+
     /// Sets a new maximum weight for the transaction pool.
     #[must_use]
     pub const fn max_txpool_weight(mut self, max_txpool_weight: usize) -> Self {
@@ -90,10 +103,10 @@ impl ConfigBuilder {
         self
     }

-    /// Set a custom database directory (and file) [`Path`].
+    /// Set a custom data directory [`PathBuf`].
     #[must_use]
-    pub fn db_directory(mut self, db_directory: Cow<'static, Path>) -> Self {
-        self.db_directory = Some(db_directory);
+    pub fn data_directory(mut self, db_directory: PathBuf) -> Self {
+        self.data_dir = Some(db_directory);
         self
     }
@@ -124,9 +137,7 @@ impl ConfigBuilder {
     /// Good default for testing, and resource-available machines.
     #[must_use]
     pub fn fast(mut self) -> Self {
-        self.db_config =
-            cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR))
-                .fast();
+        self.db_config = self.db_config.fast();

         self.reader_threads = Some(ReaderThreads::OnePerThread);
         self
@@ -138,9 +149,7 @@ impl ConfigBuilder {
     /// Good default for resource-limited machines, e.g. a cheap VPS.
     #[must_use]
     pub fn low_power(mut self) -> Self {
-        self.db_config =
-            cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR))
-                .low_power();
+        self.db_config = self.db_config.low_power();

         self.reader_threads = Some(ReaderThreads::One);
         self
@@ -149,10 +158,13 @@ impl ConfigBuilder {
 impl Default for ConfigBuilder {
     fn default() -> Self {
-        let db_directory = Cow::Borrowed(CUPRATE_TXPOOL_DIR.as_path());
         Self {
-            db_directory: Some(db_directory.clone()),
-            db_config: cuprate_database::config::ConfigBuilder::new(db_directory),
+            network: Network::default(),
+            data_dir: Some(CUPRATE_DATA_DIR.to_path_buf()),
+            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(txpool_path(
+                &CUPRATE_DATA_DIR,
+                Network::Mainnet,
+            ))),
             reader_threads: Some(ReaderThreads::default()),
             max_txpool_weight: Some(DEFAULT_TXPOOL_WEIGHT_LIMIT),
         }
@@ -184,7 +196,7 @@ impl Config {
     /// Create a new [`Config`] with sane default settings.
     ///
     /// The [`DbConfig::db_directory`]
-    /// will be set to [`CUPRATE_TXPOOL_DIR`].
+    /// will be set to [`txpool_path`] with [`CUPRATE_DATA_DIR`] and [`Network::Mainnet`].
     ///
     /// All other values will be [`Default::default`].
     ///
@@ -197,25 +209,21 @@ impl Config {
     ///     DATABASE_DATA_FILENAME,
     /// };
     /// use cuprate_database_service::ReaderThreads;
-    /// use cuprate_helper::fs::*;
+    /// use cuprate_helper::{fs::*, network::Network};
     ///
     /// use cuprate_txpool::Config;
     ///
     /// let config = Config::new();
     ///
-    /// assert_eq!(config.db_config.db_directory(), &*CUPRATE_TXPOOL_DIR);
-    /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_TXPOOL_DIR));
+    /// assert_eq!(config.db_config.db_directory(), txpool_path(&CUPRATE_DATA_DIR, Network::Mainnet).as_path());
+    /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_DATA_DIR));
     /// assert!(config.db_config.db_file().ends_with(DATABASE_DATA_FILENAME));
     /// assert_eq!(config.db_config.sync_mode, SyncMode::default());
     /// assert_eq!(config.db_config.resize_algorithm, ResizeAlgorithm::default());
     /// assert_eq!(config.reader_threads, ReaderThreads::default());
     /// ```
     pub fn new() -> Self {
-        Self {
-            db_config: DbConfig::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)),
-            reader_threads: ReaderThreads::default(),
-            max_txpool_weight: 0,
-        }
+        ConfigBuilder::new().build()
     }
 }
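
The txpool builder now mirrors the blockchain one; a sketch with illustrative values:

    use cuprate_txpool::config::ConfigBuilder;
    use cuprate_helper::network::Network;

    let config = ConfigBuilder::new()
        .network(Network::Testnet)
        .max_txpool_weight(300 * 1024 * 1024) // default is 600 MiB
        .build();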

View file

@@ -51,7 +51,7 @@
 //! let tmp_dir = tempfile::tempdir()?;
 //! let db_dir = tmp_dir.path().to_owned();
 //! let config = ConfigBuilder::new()
-//!     .db_directory(db_dir.into())
+//!     .data_directory(db_dir.into())
 //!     .build();
 //!
 //! // Initialize the database environment.

View file

@@ -83,7 +83,7 @@
 //! let tmp_dir = tempfile::tempdir()?;
 //! let db_dir = tmp_dir.path().to_owned();
 //! let config = ConfigBuilder::new()
-//!     .db_directory(db_dir.into())
+//!     .data_directory(db_dir.into())
 //!     .build();
 //!
 //! // Initialize the database thread-pool.

View file

@@ -11,9 +11,9 @@ use std::{
 use monero_serai::block::Block;

 use crate::{
-    types::{Chain, ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation},
-    AltBlockInformation, ChainId, ChainInfo, CoinbaseTxSum, OutputHistogramEntry,
-    OutputHistogramInput,
+    types::{Chain, ExtendedBlockHeader, OutputOnChain, TxsInBlock, VerifiedBlockInformation},
+    AltBlockInformation, BlockCompleteEntry, ChainId, ChainInfo, CoinbaseTxSum,
+    OutputHistogramEntry, OutputHistogramInput,
 };

 //---------------------------------------------------------------------------------------------------- ReadRequest
@@ -27,6 +27,11 @@ use crate::{
 /// See `Response` for the expected responses per `Request`.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum BlockchainReadRequest {
+    /// Request [`BlockCompleteEntry`]s.
+    ///
+    /// The input is the block hashes.
+    BlockCompleteEntries(Vec<[u8; 32]>),
+
     /// Request a block's extended header.
     ///
     /// The input is the block's height.
@@ -96,6 +101,16 @@ pub enum BlockchainReadRequest {
     /// A request for the compact chain history.
     CompactChainHistory,

+    /// A request for the next chain entry.
+    ///
+    /// Input is a list of block hashes and the amount of block hashes to return in the next chain entry.
+    ///
+    /// # Invariant
+    /// The [`Vec`] containing the block IDs must be sorted in reverse chronological block
+    /// order, or else the returned response is unspecified and meaningless,
+    /// as this request performs a binary search.
+    NextChainEntry(Vec<[u8; 32]>, usize),
+
     /// A request to find the first unknown block ID in a list of block IDs.
     ///
     /// # Invariant
@@ -104,6 +119,16 @@ pub enum BlockchainReadRequest {
     /// as this request performs a binary search.
     FindFirstUnknown(Vec<[u8; 32]>),

+    /// A request for transactions from a specific block.
+    TxsInBlock {
+        /// The block to get transactions from.
+        block_hash: [u8; 32],
+        /// The indexes of the transactions from the block.
+        /// This is not the global index of the txs, instead it is the local index as they appear in
+        /// the block.
+        tx_indexes: Vec<u64>,
+    },
+
     /// A request for all alt blocks in the chain with the given [`ChainId`].
     AltBlocksInChain(ChainId),
@@ -182,6 +207,16 @@ pub enum BlockchainWriteRequest {
 #[expect(clippy::large_enum_variant)]
 pub enum BlockchainResponse {
     //------------------------------------------------------ Reads
+    /// Response to [`BlockchainReadRequest::BlockCompleteEntries`].
+    BlockCompleteEntries {
+        /// The [`BlockCompleteEntry`]s that we had.
+        blocks: Vec<BlockCompleteEntry>,
+        /// The hashes of blocks that were requested, but we don't have.
+        missing_hashes: Vec<[u8; 32]>,
+        /// Our blockchain height.
+        blockchain_height: usize,
+    },
+
     /// Response to [`BlockchainReadRequest::BlockExtendedHeader`].
     ///
     /// Inner value is the extended headed of the requested block.
@@ -248,6 +283,24 @@ pub enum BlockchainResponse {
         cumulative_difficulty: u128,
     },

+    /// Response to [`BlockchainReadRequest::NextChainEntry`].
+    ///
+    /// If all blocks were unknown `start_height` will be [`None`], the other fields will be meaningless.
+    NextChainEntry {
+        /// The start height of this entry, [`None`] if we could not find the split point.
+        start_height: Option<std::num::NonZero<usize>>,
+        /// The current chain height.
+        chain_height: usize,
+        /// The next block hashes in the entry.
+        block_ids: Vec<[u8; 32]>,
+        /// The block weights of the next blocks.
+        block_weights: Vec<usize>,
+        /// The current cumulative difficulty of our chain.
+        cumulative_difficulty: u128,
+        /// The block blob of the 2nd block in `block_ids`, if there is one.
+        first_block_blob: Option<Vec<u8>>,
+    },
+
     /// Response to [`BlockchainReadRequest::FindFirstUnknown`].
     ///
     /// Contains the index of the first unknown block and its expected height.
@@ -255,7 +308,12 @@ pub enum BlockchainResponse {
     /// This will be [`None`] if all blocks were known.
     FindFirstUnknown(Option<(usize, usize)>),

-    /// Response to [`BlockchainReadRequest::AltBlocksInChain`].
+    /// The response for [`BlockchainReadRequest::TxsInBlock`].
+    ///
+    /// Will return [`None`] if the request contained an index out of range.
+    TxsInBlock(Option<TxsInBlock>),
+
+    /// The response for [`BlockchainReadRequest::AltBlocksInChain`].
     ///
     /// Contains all the alt blocks in the alt-chain in chronological order.
     AltBlocksInChain(Vec<AltBlockInformation>),
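
A sketch of how the new request/response variants pair up (the hash values are illustrative):

    use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse};

    let request = BlockchainReadRequest::TxsInBlock {
        block_hash: [0; 32],    // illustrative hash
        tx_indexes: vec![0, 2], // local, in-block indexes, not global tx ids
    };

    // After the read service handles it, the matching arms are:
    // BlockchainResponse::TxsInBlock(Some(txs)) => use `txs.block` and `txs.txs`,
    // BlockchainResponse::TxsInBlock(None)      => an index was out of range.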

View file

@@ -26,8 +26,8 @@ pub use transaction_verification_data::{
 pub use types::{
     AddAuxPow, AltBlockInformation, AuxPow, Chain, ChainId, ChainInfo, CoinbaseTxSum,
     ExtendedBlockHeader, FeeEstimate, HardForkInfo, MinerData, MinerDataTxBacklogEntry,
-    OutputHistogramEntry, OutputHistogramInput, OutputOnChain, VerifiedBlockInformation,
-    VerifiedTransactionInformation,
+    OutputHistogramEntry, OutputHistogramInput, OutputOnChain, TxsInBlock,
+    VerifiedBlockInformation, VerifiedTransactionInformation,
 };

 //---------------------------------------------------------------------------------------------------- Feature-gated
//---------------------------------------------------------------------------------------------------- Feature-gated //---------------------------------------------------------------------------------------------------- Feature-gated

View file

@@ -259,6 +259,13 @@ pub struct AddAuxPow {
     pub aux_pow: Vec<AuxPow>,
 }

+/// The inner response for a request for txs in a block.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct TxsInBlock {
+    pub block: Vec<u8>,
+    pub txs: Vec<Vec<u8>>,
+}
+
 //---------------------------------------------------------------------------------------------------- Tests
 #[cfg(test)]
 mod test {