Compare commits — cuprate (mirror of https://github.com/hinto-janai/cuprate.git, synced 2025-03-27 01:29:06 +00:00)

No commits in common. "7b8756fa80e386fb04173d8220c15c86bf9f9888" and "38541dbfda781277d48dd218bb676aede2f8b8a2" have entirely different histories.

7b8756fa80 ... 38541dbfda

48 changed files with 215 additions and 1749 deletions
Cargo.lock (generated) — 41 lines changed
@@ -446,7 +446,6 @@ checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54"
dependencies = [
 "anstyle",
 "clap_lex",
- "terminal_size",
]

[[package]]
@@ -704,7 +703,6 @@ version = "0.0.0"
dependencies = [
 "bitflags 2.6.0",
 "bytemuck",
 "bytes",
 "cuprate-constants",
 "cuprate-database",
 "cuprate-database-service",
@@ -935,7 +933,6 @@ dependencies = [
 "libc",
 "monero-serai",
 "rayon",
 "serde",
 "tokio",
 "windows",
]
@@ -1191,6 +1188,7 @@ dependencies = [
 "cuprate-consensus",
 "cuprate-consensus-context",
 "cuprate-consensus-rules",
+ "cuprate-constants",
 "cuprate-cryptonight",
 "cuprate-dandelion-tower",
 "cuprate-database",
@@ -1232,7 +1230,6 @@ dependencies = [
 "tokio",
 "tokio-stream",
 "tokio-util",
- "toml",
 "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)",
 "tracing",
 "tracing-subscriber",
@@ -2907,15 +2904,6 @@ dependencies = [
 "serde",
]
-
-[[package]]
-name = "serde_spanned"
-version = "0.6.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1"
-dependencies = [
- "serde",
-]

[[package]]
name = "serde_urlencoded"
version = "0.7.1"
@@ -3133,16 +3121,6 @@ dependencies = [
 "windows-sys 0.59.0",
]
-
-[[package]]
-name = "terminal_size"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef"
-dependencies = [
- "rustix",
- "windows-sys 0.59.0",
-]

[[package]]
name = "thiserror"
version = "1.0.66"
@@ -3284,26 +3262,11 @@ dependencies = [
 "tracing",
]
-
-[[package]]
-name = "toml"
-version = "0.8.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e"
-dependencies = [
- "serde",
- "serde_spanned",
- "toml_datetime",
- "toml_edit",
-]

[[package]]
name = "toml_datetime"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41"
dependencies = [
 "serde",
]

[[package]]
name = "toml_edit"
@@ -3312,8 +3275,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5"
dependencies = [
 "indexmap",
- "serde",
- "serde_spanned",
 "toml_datetime",
 "winnow",
]
----

@@ -55,7 +55,6 @@ members = [
]

[profile.release]
panic = "abort"
lto = true # Build with LTO
strip = "none" # Keep panic stack traces
codegen-units = 1 # Optimize for binary speed over compile times

@@ -145,7 +144,6 @@ tokio-util = { version = "0.7", default-features = false }
tokio-stream = { version = "0.1", default-features = false }
tokio = { version = "1", default-features = false }
tower = { git = "https://github.com/Cuprate/tower.git", rev = "6c7faf0", default-features = false } # <https://github.com/tower-rs/tower/pull/796>
-toml = { version = "0.8", default-features = false }
tracing-subscriber = { version = "0.3", default-features = false }
tracing = { version = "0.1", default-features = false }

@@ -376,6 +374,7 @@ unused_lifetimes = "deny"
unused_macro_rules = "deny"
ambiguous_glob_imports = "deny"
unused_unsafe = "deny"
rust_2024_compatibility = "deny"

# Warm
let_underscore = { level = "deny", priority = -1 }
----

@@ -2,7 +2,7 @@
name = "cuprated"
version = "0.0.1"
edition = "2021"
-description = "The Cuprate Rust Monero node."
+description = "The Cuprate Monero Rust node."
license = "AGPL-3.0-only"
authors = ["Boog900", "hinto-janai", "SyntheticBird45"]
repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated"
@@ -12,29 +12,29 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated"
cuprate-consensus = { workspace = true }
-cuprate-fast-sync = { workspace = true }
-cuprate-consensus-context = { workspace = true }
-cuprate-consensus-rules = { workspace = true }
-cuprate-cryptonight = { workspace = true }
-cuprate-helper = { workspace = true, features = ["serde"] }
-cuprate-epee-encoding = { workspace = true }
-cuprate-fixed-bytes = { workspace = true }
-cuprate-levin = { workspace = true }
-cuprate-wire = { workspace = true }
-cuprate-p2p = { workspace = true }
-cuprate-p2p-core = { workspace = true }
-cuprate-dandelion-tower = { workspace = true, features = ["txpool"] }
-cuprate-async-buffer = { workspace = true }
-cuprate-address-book = { workspace = true }
-cuprate-blockchain = { workspace = true }
-cuprate-database-service = { workspace = true, features = ["serde"] }
-cuprate-txpool = { workspace = true }
-cuprate-database = { workspace = true, features = ["serde"] }
-cuprate-pruning = { workspace = true }
-cuprate-test-utils = { workspace = true }
-cuprate-types = { workspace = true }
-cuprate-json-rpc = { workspace = true }
-cuprate-rpc-interface = { workspace = true }
-cuprate-rpc-types = { workspace = true }
+cuprate-consensus-rules = { workspace = true }
+cuprate-constants = { workspace = true }
+cuprate-cryptonight = { workspace = true }
+cuprate-helper = { workspace = true }
+cuprate-epee-encoding = { workspace = true }
+cuprate-fixed-bytes = { workspace = true }
+cuprate-levin = { workspace = true }
+cuprate-wire = { workspace = true }
+cuprate-p2p = { workspace = true }
+cuprate-p2p-core = { workspace = true }
+cuprate-dandelion-tower = { workspace = true, features = ["txpool"] }
+cuprate-async-buffer = { workspace = true }
+cuprate-address-book = { workspace = true }
+cuprate-blockchain = { workspace = true }
+cuprate-database-service = { workspace = true }
+cuprate-txpool = { workspace = true }
+cuprate-database = { workspace = true }
+cuprate-pruning = { workspace = true }
+cuprate-test-utils = { workspace = true }
+cuprate-types = { workspace = true }
+cuprate-json-rpc = { workspace = true }
+cuprate-rpc-interface = { workspace = true }
+cuprate-rpc-types = { workspace = true }

# TODO: after v1.0.0, remove unneeded dependencies.
anyhow = { workspace = true }
@@ -44,7 +44,7 @@ borsh = { workspace = true }
bytemuck = { workspace = true }
bytes = { workspace = true }
cfg-if = { workspace = true }
-clap = { workspace = true, features = ["cargo", "help", "wrap_help"] }
+clap = { workspace = true, features = ["cargo"] }
chrono = { workspace = true }
crypto-bigint = { workspace = true }
crossbeam = { workspace = true }
@@ -71,10 +71,15 @@ thread_local = { workspace = true }
tokio-util = { workspace = true }
tokio-stream = { workspace = true }
tokio = { workspace = true }
-toml = { workspace = true, features = ["parse", "display"]}
tower = { workspace = true }
tracing-subscriber = { workspace = true, features = ["std", "fmt", "default"] }
-tracing = { workspace = true, features = ["default"] }
+tracing = { workspace = true }

[lints]
workspace = true

[profile.dev]
panic = "abort"

[profile.release]
panic = "abort"
----

@@ -1,67 +0,0 @@
-#    ____                      _
-#   / ___|   _ _ __  _ __ __ _| |_ ___
-#  | |  | | | | '_ \| '__/ _` | __/ _ \
-#  | |__| |_| | |_) | | | (_| | || __/
-#   \____\__,_| .__/|_|  \__,_|\__\___|
-#             |_|
-#
-
-## The network to run on, valid values: "Mainnet", "Testnet", "Stagenet".
-network = "Mainnet"
-
-## Tracing config.
-[tracing]
-## The minimum level for log events to be displayed.
-level = "info"
-
-## Clear-net config.
-[p2p.clear_net]
-## The number of outbound connections we should make and maintain.
-outbound_connections = 64
-## The number of extra connections we should make under load from the rest of Cuprate, i.e. when syncing.
-extra_outbound_connections = 8
-## The maximum number of incoming we should allow.
-max_inbound_connections = 128
-## The percent of outbound connections that should be to nodes we have not connected to before.
-gray_peers_percent = 0.7
-## The port to accept connections on, if left `0` no connections will be accepted.
-p2p_port = 0
-## The IP address to listen to connections on.
-listen_on = "0.0.0.0"
-
-## The Clear-net addressbook config.
-[p2p.clear_net.address_book_config]
-## The size of the white peer list, which contains peers we have made a connection to before.
-max_white_list_length = 1_000
-## The size of the gray peer list, which contains peers we have not made a connection to before.
-max_gray_list_length = 5_000
-## The amount of time between address book saves.
-peer_save_period = { secs = 90, nanos = 0 }
-
-## The block downloader config.
-[p2p.block_downloader]
-## The size of the buffer of sequential blocks waiting to be verified and added to the chain (bytes).
-buffer_bytes = 50_000_000
-## The size of the queue of blocks which are waiting for a parent block to be downloaded (bytes).
-in_progress_queue_bytes = 50_000_000
-## The target size of a batch of blocks (bytes), must not exceed 100MB.
-target_batch_bytes= 5_000_000
-## The amount of time between checking the pool of connected peers for free peers to download blocks.
-check_client_pool_interval = { secs = 30, nanos = 0 }
-
-## Storage config
-[storage]
-## The amount of reader threads to spawn.
-reader_threads = "OnePerThread"
-
-## Txpool storage config.
-[storage.txpool]
-## The database sync mode for the txpool.
-sync_mode = "Async"
-## The maximum size of all the txs in the pool (bytes).
-max_txpool_byte_size = 100_000_000
-
-## Blockchain storage config.
-[storage.blockchain]
-## The database sync mode for the blockchain.
-sync_mode = "Async"
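Note on the `{ secs = 90, nanos = 0 }` syntax above: this is serde's default representation of `std::time::Duration` (a struct with `secs` and `nanos` fields) rendered as a TOML inline table. A minimal sketch, assuming only the `serde` and `toml` crates as dependencies:

    use std::time::Duration;
    use serde::Deserialize;

    #[derive(Deserialize)]
    struct Cfg {
        // Deserialized via serde's built-in `Duration` impl.
        peer_save_period: Duration,
    }

    fn main() {
        let cfg: Cfg = toml::from_str("peer_save_period = { secs = 90, nanos = 0 }").unwrap();
        assert_eq!(cfg.peer_save_period, Duration::from_secs(90));
    }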
----

@@ -1,159 +1 @@
//! cuprated config
use std::{
    fs::{read_to_string, File},
    io,
    path::Path,
    time::Duration,
};

use clap::Parser;
use serde::{Deserialize, Serialize};

use cuprate_consensus::ContextConfig;
use cuprate_helper::{
    fs::{CUPRATE_CONFIG_DIR, DEFAULT_CONFIG_FILE_NAME},
    network::Network,
};
use cuprate_p2p::block_downloader::BlockDownloaderConfig;
use cuprate_p2p_core::{ClearNet, ClearNetServerCfg};

mod args;
mod fs;
mod p2p;
mod storage;
mod tracing_config;

use crate::config::fs::FileSystemConfig;
use p2p::P2PConfig;
use storage::StorageConfig;
use tracing_config::TracingConfig;

/// Reads the args & config file, returning a [`Config`].
pub fn read_config_and_args() -> Config {
    let args = args::Args::parse();
    args.do_quick_requests();

    let config: Config = if let Some(config_file) = &args.config_file {
        // If a config file was set in the args try to read it and exit if we can't.
        match Config::read_from_path(config_file) {
            Ok(config) => config,
            Err(e) => {
                eprintln!("Failed to read config from file: {e}");
                std::process::exit(1);
            }
        }
    } else {
        // First attempt to read the config file from the current directory.
        std::env::current_dir()
            .map(|path| path.join(DEFAULT_CONFIG_FILE_NAME))
            .map_err(Into::into)
            .and_then(Config::read_from_path)
            .inspect_err(|e| tracing::debug!("Failed to read config from current dir: {e}"))
            // otherwise try the main config directory.
            .or_else(|_| {
                let file = CUPRATE_CONFIG_DIR.join(DEFAULT_CONFIG_FILE_NAME);
                Config::read_from_path(file)
            })
            .inspect_err(|e| {
                tracing::debug!("Failed to read config from config dir: {e}");
                eprintln!("Failed to find/read config file, using default config.");
            })
            .unwrap_or_default()
    };

    args.apply_args(config)
}

/// The config for all of Cuprate.
#[derive(Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct Config {
    /// The network we should run on.
    network: Network,

    /// [`tracing`] config.
    tracing: TracingConfig,

    /// The P2P network config.
    p2p: P2PConfig,

    /// The storage config.
    storage: StorageConfig,

    fs: FileSystemConfig,
}

impl Config {
    /// Attempts to read a config file in [`toml`] format from the given [`Path`].
    ///
    /// # Errors
    ///
    /// Will return an [`Err`] if the file cannot be read or if the file is not a valid [`toml`] config.
    fn read_from_path(file: impl AsRef<Path>) -> Result<Self, anyhow::Error> {
        let file_text = read_to_string(file.as_ref())?;

        Ok(toml::from_str(&file_text)
            .inspect(|_| eprintln!("Using config at: {}", file.as_ref().to_string_lossy()))
            .inspect_err(|e| {
                eprintln!("{e}");
                eprintln!(
                    "Failed to parse config file at: {}",
                    file.as_ref().to_string_lossy()
                );
            })?)
    }

    /// Returns the current [`Network`] we are running on.
    pub const fn network(&self) -> Network {
        self.network
    }

    /// The [`ClearNet`], [`cuprate_p2p::P2PConfig`].
    pub fn clearnet_p2p_config(&self) -> cuprate_p2p::P2PConfig<ClearNet> {
        cuprate_p2p::P2PConfig {
            network: self.network,
            seeds: p2p::clear_net_seed_nodes(self.network),
            outbound_connections: self.p2p.clear_net.general.outbound_connections,
            extra_outbound_connections: self.p2p.clear_net.general.extra_outbound_connections,
            max_inbound_connections: self.p2p.clear_net.general.max_inbound_connections,
            gray_peers_percent: self.p2p.clear_net.general.gray_peers_percent,
            server_config: Some(ClearNetServerCfg {
                ip: self.p2p.clear_net.listen_on,
            }),
            p2p_port: self.p2p.clear_net.general.p2p_port,
            // TODO: set this if a public RPC server is set.
            rpc_port: 0,
            address_book_config: self
                .p2p
                .clear_net
                .general
                .address_book_config(&self.fs.cache_directory, self.network),
        }
    }

    /// The [`ContextConfig`].
    pub const fn context_config(&self) -> ContextConfig {
        match self.network {
            Network::Mainnet => ContextConfig::main_net(),
            Network::Stagenet => ContextConfig::stage_net(),
            Network::Testnet => ContextConfig::test_net(),
        }
    }

    /// The [`cuprate_blockchain`] config.
    pub fn blockchain_config(&self) -> cuprate_blockchain::config::Config {
        let blockchain = &self.storage.blockchain;

        // We don't set reader threads as we manually make the reader threadpool.
        cuprate_blockchain::config::ConfigBuilder::default()
            .network(self.network)
            .data_directory(self.fs.data_directory.clone())
            .sync_mode(blockchain.shared.sync_mode)
            .build()
    }

    /// The [`BlockDownloaderConfig`].
    pub fn block_downloader_config(&self) -> BlockDownloaderConfig {
        self.p2p.block_downloader.clone().into()
    }
}
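In short, the lookup order implemented by `read_config_and_args` above is: `--config-file PATH` from the args (exits on failure), then `./Cuprated.toml` in the current directory, then `Cuprated.toml` in `CUPRATE_CONFIG_DIR`, then `Config::default()` with a warning on stderr; CLI args are applied on top of whichever config was loaded.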
----

@@ -1,55 +0,0 @@
-use std::{io::Write, path::PathBuf, process::exit};
-
-use clap::builder::TypedValueParser;
-
-use cuprate_helper::network::Network;
-
-use crate::{config::Config, constants::EXAMPLE_CONFIG};
-
-/// Cuprate Args.
-#[derive(clap::Parser, Debug)]
-#[command(version, about)]
-pub struct Args {
-    /// The network to run on.
-    #[arg(
-        long,
-        default_value_t = Network::Mainnet,
-        value_parser = clap::builder::PossibleValuesParser::new(["mainnet", "testnet", "stagenet"])
-            .map(|s| s.parse::<Network>().unwrap()),
-    )]
-    pub network: Network,
-    /// The amount of outbound clear-net connections to maintain.
-    #[arg(long)]
-    pub outbound_connections: Option<usize>,
-    /// The PATH of the `cuprated` config file.
-    #[arg(long)]
-    pub config_file: Option<PathBuf>,
-    /// Generate a config file and print it to stdout.
-    #[arg(long)]
-    pub generate_config: bool,
-}
-
-impl Args {
-    /// Complete any quick requests asked for in [`Args`].
-    ///
-    /// May cause the process to [`exit`].
-    pub fn do_quick_requests(&self) {
-        if self.generate_config {
-            println!("{EXAMPLE_CONFIG}");
-            exit(0);
-        }
-    }
-
-    /// Apply the [`Args`] to the given [`Config`].
-    ///
-    /// This may exit the program if a config value was set that requires an early exit.
-    pub const fn apply_args(&self, mut config: Config) -> Config {
-        config.network = self.network;
-
-        if let Some(outbound_connections) = self.outbound_connections {
-            config.p2p.clear_net.general.outbound_connections = outbound_connections;
-        }
-
-        config
-    }
-}
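Given clap's derive conventions (long flags are the kebab-cased field names), invocations of the args above would look like the following; the exact flag spellings are an inference from the derive, not taken from the diff:

    cuprated --network testnet --outbound-connections 32
    cuprated --generate-config > Cuprated.toml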
----

@@ -1,21 +0,0 @@
-use std::path::PathBuf;
-
-use serde::{Deserialize, Serialize};
-
-use cuprate_helper::fs::{CUPRATE_CACHE_DIR, CUPRATE_DATA_DIR};
-
-#[derive(Deserialize, Serialize)]
-#[serde(deny_unknown_fields, default)]
-pub struct FileSystemConfig {
-    pub data_directory: PathBuf,
-    pub cache_directory: PathBuf,
-}
-
-impl Default for FileSystemConfig {
-    fn default() -> Self {
-        Self {
-            data_directory: CUPRATE_DATA_DIR.to_path_buf(),
-            cache_directory: CUPRATE_CACHE_DIR.to_path_buf(),
-        }
-    }
-}
----

@@ -1,178 +0,0 @@
-use std::{
-    net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
-    path::Path,
-    time::Duration,
-};
-
-use serde::{Deserialize, Serialize};
-
-use cuprate_helper::{fs::address_book_path, network::Network};
-
-/// P2P config.
-#[derive(Default, Deserialize, Serialize)]
-#[serde(deny_unknown_fields, default)]
-pub struct P2PConfig {
-    /// Clear-net config.
-    pub clear_net: ClearNetConfig,
-    /// Block downloader config.
-    pub block_downloader: BlockDownloaderConfig,
-}
-
-#[derive(Clone, Deserialize, Serialize)]
-#[serde(deny_unknown_fields, default)]
-pub struct BlockDownloaderConfig {
-    /// The size in bytes of the buffer between the block downloader and the place which
-    /// is consuming the downloaded blocks.
-    pub buffer_bytes: usize,
-    /// The size of the in progress queue (in bytes) at which we stop requesting more blocks.
-    pub in_progress_queue_bytes: usize,
-    /// The [`Duration`] between checking the client pool for free peers.
-    pub check_client_pool_interval: Duration,
-    /// The target size of a single batch of blocks (in bytes).
-    pub target_batch_bytes: usize,
-}
-
-impl From<BlockDownloaderConfig> for cuprate_p2p::block_downloader::BlockDownloaderConfig {
-    fn from(value: BlockDownloaderConfig) -> Self {
-        Self {
-            buffer_bytes: value.buffer_bytes,
-            in_progress_queue_bytes: value.in_progress_queue_bytes,
-            check_client_pool_interval: value.check_client_pool_interval,
-            target_batch_bytes: value.target_batch_bytes,
-            initial_batch_len: 1,
-        }
-    }
-}
-
-impl Default for BlockDownloaderConfig {
-    fn default() -> Self {
-        Self {
-            buffer_bytes: 50_000_000,
-            in_progress_queue_bytes: 50_000_000,
-            check_client_pool_interval: Duration::from_secs(30),
-            target_batch_bytes: 5_000_000,
-        }
-    }
-}
-
-/// The config values for P2P clear-net.
-#[derive(Deserialize, Serialize)]
-#[serde(deny_unknown_fields, default)]
-pub struct ClearNetConfig {
-    /// The server config.
-    pub listen_on: IpAddr,
-    #[serde(flatten)]
-    pub general: SharedNetConfig,
-}
-
-impl Default for ClearNetConfig {
-    fn default() -> Self {
-        Self {
-            listen_on: IpAddr::V4(Ipv4Addr::UNSPECIFIED),
-            general: Default::default(),
-        }
-    }
-}
-
-/// Network config values shared between all network zones.
-#[derive(Deserialize, Serialize)]
-#[serde(deny_unknown_fields, default)]
-pub struct SharedNetConfig {
-    /// The number of outbound connections to make and try keep.
-    pub outbound_connections: usize,
-    /// The amount of extra connections we can make if we are under load from the rest of Cuprate.
-    pub extra_outbound_connections: usize,
-    /// The maximum amount of inbound connections
-    pub max_inbound_connections: usize,
-    /// The percent of connections that should be to peers we haven't connected to before.
-    pub gray_peers_percent: f64,
-    /// port to use to accept p2p connections.
-    pub p2p_port: u16,
-    /// The address book config.
-    address_book_config: AddressBookConfig,
-}
-
-impl SharedNetConfig {
-    /// Returns the [`AddressBookConfig`].
-    pub fn address_book_config(
-        &self,
-        cache_dir: &Path,
-        network: Network,
-    ) -> cuprate_address_book::AddressBookConfig {
-        cuprate_address_book::AddressBookConfig {
-            max_white_list_length: self.address_book_config.max_white_list_length,
-            max_gray_list_length: self.address_book_config.max_gray_list_length,
-            peer_store_directory: address_book_path(cache_dir, network),
-            peer_save_period: self.address_book_config.peer_save_period,
-        }
-    }
-}
-
-impl Default for SharedNetConfig {
-    fn default() -> Self {
-        Self {
-            outbound_connections: 64,
-            extra_outbound_connections: 8,
-            max_inbound_connections: 128,
-            gray_peers_percent: 0.7,
-            p2p_port: 0,
-            address_book_config: AddressBookConfig::default(),
-        }
-    }
-}
-
-#[derive(Deserialize, Serialize)]
-#[serde(deny_unknown_fields, default)]
-pub struct AddressBookConfig {
-    max_white_list_length: usize,
-    max_gray_list_length: usize,
-    peer_save_period: Duration,
-}
-
-impl Default for AddressBookConfig {
-    fn default() -> Self {
-        Self {
-            max_white_list_length: 1_000,
-            max_gray_list_length: 5_000,
-            peer_save_period: Duration::from_secs(30),
-        }
-    }
-}
-
-/// Seed nodes for [`ClearNet`](cuprate_p2p_core::ClearNet).
-pub fn clear_net_seed_nodes(network: Network) -> Vec<SocketAddr> {
-    let seeds = match network {
-        Network::Mainnet => [
-            "176.9.0.187:18080",
-            "88.198.163.90:18080",
-            "66.85.74.134:18080",
-            "51.79.173.165:18080",
-            "192.99.8.110:18080",
-            "37.187.74.171:18080",
-            "77.172.183.193:18080",
-        ]
-        .as_slice(),
-        Network::Stagenet => [
-            "176.9.0.187:38080",
-            "51.79.173.165:38080",
-            "192.99.8.110:38080",
-            "37.187.74.171:38080",
-            "77.172.183.193:38080",
-        ]
-        .as_slice(),
-        Network::Testnet => [
-            "176.9.0.187:28080",
-            "51.79.173.165:28080",
-            "192.99.8.110:28080",
-            "37.187.74.171:28080",
-            "77.172.183.193:28080",
-        ]
-        .as_slice(),
-    };
-
-    seeds
-        .iter()
-        .map(|s| s.parse())
-        .collect::<Result<_, _>>()
-        .unwrap()
-}
----

@@ -1,67 +0,0 @@
-use std::path::PathBuf;
-
-use serde::{Deserialize, Serialize};
-
-use cuprate_database::config::SyncMode;
-use cuprate_database_service::ReaderThreads;
-use cuprate_helper::fs::CUPRATE_DATA_DIR;
-
-/// The storage config.
-#[derive(Default, Deserialize, Serialize)]
-#[serde(deny_unknown_fields, default)]
-pub struct StorageConfig {
-    /// The amount of reader threads to spawn between the tx-pool and blockchain.
-    pub reader_threads: ReaderThreads,
-    /// The tx-pool config.
-    pub txpool: TxpoolConfig,
-    /// The blockchain config.
-    pub blockchain: BlockchainConfig,
-}
-
-/// The blockchain config.
-#[derive(Deserialize, Serialize)]
-#[serde(deny_unknown_fields, default)]
-pub struct BlockchainConfig {
-    #[serde(flatten)]
-    pub shared: SharedStorageConfig,
-}
-
-impl Default for BlockchainConfig {
-    fn default() -> Self {
-        Self {
-            shared: SharedStorageConfig {
-                sync_mode: SyncMode::Async,
-            },
-        }
-    }
-}
-
-/// The tx-pool config.
-#[derive(Deserialize, Serialize)]
-#[serde(deny_unknown_fields, default)]
-pub struct TxpoolConfig {
-    #[serde(flatten)]
-    pub shared: SharedStorageConfig,
-
-    /// The maximum size of the tx-pool.
-    pub max_txpool_byte_size: usize,
-}
-
-impl Default for TxpoolConfig {
-    fn default() -> Self {
-        Self {
-            shared: SharedStorageConfig {
-                sync_mode: SyncMode::Async,
-            },
-            max_txpool_byte_size: 100_000_000,
-        }
-    }
-}
-
-/// Config values shared between the tx-pool and blockchain.
-#[derive(Default, Deserialize, Serialize)]
-#[serde(deny_unknown_fields, default)]
-pub struct SharedStorageConfig {
-    /// The [`SyncMode`] of the database.
-    pub sync_mode: SyncMode,
-}
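Because `SharedStorageConfig` is embedded with `#[serde(flatten)]`, its `sync_mode` key sits directly under each section rather than in a nested table — matching the example `Cuprated.toml` earlier in this diff:

    [storage.blockchain]
    sync_mode = "Async"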
----

@@ -1,42 +0,0 @@
-use serde::{Deserialize, Serialize};
-use tracing::level_filters::LevelFilter;
-
-/// [`tracing`] config.
-#[derive(Deserialize, Serialize)]
-#[serde(deny_unknown_fields, default)]
-pub struct TracingConfig {
-    /// The default minimum log level.
-    #[serde(with = "level_filter_serde")]
-    level: LevelFilter,
-}
-
-impl Default for TracingConfig {
-    fn default() -> Self {
-        Self {
-            level: LevelFilter::INFO,
-        }
-    }
-}
-
-mod level_filter_serde {
-    use std::str::FromStr;
-
-    use serde::{Deserialize, Deserializer, Serializer};
-    use tracing::level_filters::LevelFilter;
-
-    #[expect(clippy::trivially_copy_pass_by_ref, reason = "serde")]
-    pub fn serialize<S>(level_filter: &LevelFilter, s: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        s.serialize_str(&level_filter.to_string())
-    }
-
-    pub fn deserialize<'de, D>(d: D) -> Result<LevelFilter, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        let s = String::deserialize(d)?;
-        LevelFilter::from_str(&s).map_err(serde::de::Error::custom)
-    }
-}
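A hedged usage sketch (not repo code): the `level_filter_serde` module above makes `LevelFilter` round-trip as a plain string, which is what lets the example config write `level = "info"`:

    // Accepted names mirror `LevelFilter::from_str`:
    // "trace", "debug", "info", "warn", "error", "off" (case-insensitive).
    let cfg: TracingConfig = toml::from_str(r#"level = "debug""#).unwrap();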
----

@@ -18,12 +18,9 @@ pub const VERSION_BUILD: &str = if cfg!(debug_assertions) {
pub const PANIC_CRITICAL_SERVICE_ERROR: &str =
    "A service critical to Cuprate's function returned an unexpected error.";

-pub const EXAMPLE_CONFIG: &str = include_str!("../Cuprated.toml");
-
#[cfg(test)]
mod test {
    use super::*;
-    use crate::config::Config;

    #[test]
    fn version() {

@@ -38,9 +35,4 @@ mod test {
        assert_eq!(VERSION_BUILD, "0.0.1-release");
    }
}

-    #[test]
-    fn generate_config_text_is_valid() {
-        let config: Config = toml::from_str(EXAMPLE_CONFIG).unwrap();
-    }
}
----

@@ -29,8 +29,6 @@ fn main() {
    // Initialize global static `LazyLock` data.
    statics::init_lazylock_statics();

-    let _config = config::read_config_and_args();
-
    // TODO: everything else.
    todo!()
}
----

@@ -1,57 +1,8 @@
//! P2P
//!
//! Will handle initiating the P2P and contains a protocol request handler.
use futures::{FutureExt, TryFutureExt};
use tokio::sync::oneshot;
use tower::ServiceExt;

use cuprate_blockchain::service::BlockchainReadHandle;
use cuprate_consensus::BlockChainContextService;
use cuprate_p2p::{NetworkInterface, P2PConfig};
use cuprate_p2p_core::ClearNet;
use cuprate_txpool::service::TxpoolReadHandle;

use crate::txpool::IncomingTxHandler;

mod core_sync_service;
mod network_address;
pub mod request_handler;

pub use network_address::CrossNetworkInternalPeerId;

/// Starts the P2P clearnet network, returning a [`NetworkInterface`] to interact with it.
///
/// A [`oneshot::Sender`] is also returned to provide the [`IncomingTxHandler`], until this is provided network
/// handshakes can not be completed.
pub async fn start_clearnet_p2p(
    blockchain_read_handle: BlockchainReadHandle,
    blockchain_context_service: BlockChainContextService,
    txpool_read_handle: TxpoolReadHandle,
    config: P2PConfig<ClearNet>,
) -> Result<
    (
        NetworkInterface<ClearNet>,
        oneshot::Sender<IncomingTxHandler>,
    ),
    tower::BoxError,
> {
    let (incoming_tx_handler_tx, incoming_tx_handler_rx) = oneshot::channel();

    let request_handler_maker = request_handler::P2pProtocolRequestHandlerMaker {
        blockchain_read_handle,
        blockchain_context_service: blockchain_context_service.clone(),
        txpool_read_handle,
        incoming_tx_handler: None,
        incoming_tx_handler_fut: incoming_tx_handler_rx.shared(),
    };

    Ok((
        cuprate_p2p::initialize_network(
            request_handler_maker.map_response(|s| s.map_err(Into::into)),
            core_sync_service::CoreSyncService(blockchain_context_service),
            config,
        )
        .await?,
        incoming_tx_handler_tx,
    ))
}
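The `incoming_tx_handler_rx.shared()` call above is what breaks the cyclic dependency: `FutureExt::shared` turns the one-shot receiver into a cloneable future, so every clone of the request-handler maker (see the handler file later in this diff) can await the same `IncomingTxHandler` once it is finally sent. A minimal standalone sketch of the pattern, not repo code:

    use futures::FutureExt;
    use tokio::sync::oneshot;

    async fn demo() {
        let (tx, rx) = oneshot::channel::<u8>();
        let shared = rx.shared();   // cloneable future over a single receiver
        let also = shared.clone();
        tx.send(1).unwrap();
        assert_eq!(shared.await.unwrap(), 1);
        assert_eq!(also.await.unwrap(), 1);
    }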
----

@@ -1,49 +0,0 @@
-use std::task::{Context, Poll};
-
-use futures::{future::BoxFuture, FutureExt, TryFutureExt};
-use tower::Service;
-
-use cuprate_consensus::{
-    BlockChainContextRequest, BlockChainContextResponse, BlockChainContextService,
-};
-use cuprate_helper::{cast::usize_to_u64, map::split_u128_into_low_high_bits};
-use cuprate_p2p_core::services::{CoreSyncDataRequest, CoreSyncDataResponse};
-use cuprate_wire::CoreSyncData;
-
-/// The core sync service.
-#[derive(Clone)]
-pub struct CoreSyncService(pub BlockChainContextService);
-
-impl Service<CoreSyncDataRequest> for CoreSyncService {
-    type Response = CoreSyncDataResponse;
-    type Error = tower::BoxError;
-    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
-
-    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
-        self.0.poll_ready(cx)
-    }
-
-    fn call(&mut self, _: CoreSyncDataRequest) -> Self::Future {
-        self.0
-            .call(BlockChainContextRequest::Context)
-            .map_ok(|res| {
-                let BlockChainContextResponse::Context(context) = res else {
-                    unreachable!()
-                };
-
-                let context = context.unchecked_blockchain_context();
-                let (cumulative_difficulty, cumulative_difficulty_top64) =
-                    split_u128_into_low_high_bits(context.cumulative_difficulty);
-
-                CoreSyncDataResponse(CoreSyncData {
-                    cumulative_difficulty,
-                    cumulative_difficulty_top64,
-                    current_height: usize_to_u64(context.chain_height),
-                    pruning_seed: 0,
-                    top_id: context.top_hash,
-                    top_version: context.current_hf.as_u8(),
-                })
-            })
-            .boxed()
-    }
-}
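A hedged sketch of what `split_u128_into_low_high_bits` does, inferred from its use with `cumulative_difficulty`/`cumulative_difficulty_top64` (the wire format carries the 128-bit cumulative difficulty as two u64 halves):

    fn split_u128_into_low_high_bits(v: u128) -> (u64, u64) {
        (v as u64, (v >> 64) as u64) // (low 64 bits, high 64 bits)
    }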
----

@@ -1,422 +1 @@
use std::{
    collections::HashSet,
    future::{ready, Ready},
    hash::Hash,
    task::{Context, Poll},
};

use bytes::Bytes;
use futures::{
    future::{BoxFuture, Shared},
    FutureExt,
};
use monero_serai::{block::Block, transaction::Transaction};
use tokio::sync::{broadcast, oneshot, watch};
use tokio_stream::wrappers::WatchStream;
use tower::{Service, ServiceExt};

use cuprate_blockchain::service::BlockchainReadHandle;
use cuprate_consensus::{
    transactions::new_tx_verification_data, BlockChainContextRequest, BlockChainContextResponse,
    BlockChainContextService,
};
use cuprate_dandelion_tower::TxState;
use cuprate_fixed_bytes::ByteArrayVec;
use cuprate_helper::cast::u64_to_usize;
use cuprate_helper::{
    asynch::rayon_spawn_async,
    cast::usize_to_u64,
    map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits},
};
use cuprate_p2p::constants::{
    MAX_BLOCKS_IDS_IN_CHAIN_ENTRY, MAX_BLOCK_BATCH_LEN, MAX_TRANSACTION_BLOB_SIZE, MEDIUM_BAN,
};
use cuprate_p2p_core::{
    client::{InternalPeerID, PeerInformation},
    NetZoneAddress, NetworkZone, ProtocolRequest, ProtocolResponse,
};
use cuprate_txpool::service::TxpoolReadHandle;
use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainResponse},
    BlockCompleteEntry, TransactionBlobs, TxsInBlock,
};
use cuprate_wire::protocol::{
    ChainRequest, ChainResponse, FluffyMissingTransactionsRequest, GetObjectsRequest,
    GetObjectsResponse, NewFluffyBlock, NewTransactions,
};

use crate::{
    blockchain::interface::{self as blockchain_interface, IncomingBlockError},
    constants::PANIC_CRITICAL_SERVICE_ERROR,
    p2p::CrossNetworkInternalPeerId,
    txpool::{IncomingTxError, IncomingTxHandler, IncomingTxs},
};

/// The P2P protocol request handler [`MakeService`](tower::MakeService).
#[derive(Clone)]
pub struct P2pProtocolRequestHandlerMaker {
    pub blockchain_read_handle: BlockchainReadHandle,
    pub blockchain_context_service: BlockChainContextService,
    pub txpool_read_handle: TxpoolReadHandle,

    /// The [`IncomingTxHandler`], wrapped in an [`Option`] as there is a cyclic reference between [`P2pProtocolRequestHandlerMaker`]
    /// and the [`IncomingTxHandler`].
    pub incoming_tx_handler: Option<IncomingTxHandler>,

    /// A [`Future`](std::future::Future) that produces the [`IncomingTxHandler`].
    pub incoming_tx_handler_fut: Shared<oneshot::Receiver<IncomingTxHandler>>,
}

impl<A: NetZoneAddress> Service<PeerInformation<A>> for P2pProtocolRequestHandlerMaker
where
    InternalPeerID<A>: Into<CrossNetworkInternalPeerId>,
{
    type Response = P2pProtocolRequestHandler<A>;
    type Error = tower::BoxError;
    type Future = Ready<Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        if self.incoming_tx_handler.is_none() {
            return self
                .incoming_tx_handler_fut
                .poll_unpin(cx)
                .map(|incoming_tx_handler| {
                    self.incoming_tx_handler = Some(incoming_tx_handler?);
                    Ok(())
                });
        }

        Poll::Ready(Ok(()))
    }

    fn call(&mut self, peer_information: PeerInformation<A>) -> Self::Future {
        let Some(incoming_tx_handler) = self.incoming_tx_handler.clone() else {
            panic!("poll_ready was not called or did not return `Poll::Ready`")
        };

        // TODO: check sync info?

        let blockchain_read_handle = self.blockchain_read_handle.clone();
        let txpool_read_handle = self.txpool_read_handle.clone();

        ready(Ok(P2pProtocolRequestHandler {
            peer_information,
            blockchain_read_handle,
            blockchain_context_service: self.blockchain_context_service.clone(),
            txpool_read_handle,
            incoming_tx_handler,
        }))
    }
}

/// The P2P protocol request handler.
#[derive(Clone)]
pub struct P2pProtocolRequestHandler<N: NetZoneAddress> {
    peer_information: PeerInformation<N>,
    blockchain_read_handle: BlockchainReadHandle,
    blockchain_context_service: BlockChainContextService,
    txpool_read_handle: TxpoolReadHandle,
    incoming_tx_handler: IncomingTxHandler,
}

impl<A: NetZoneAddress> Service<ProtocolRequest> for P2pProtocolRequestHandler<A>
where
    InternalPeerID<A>: Into<CrossNetworkInternalPeerId>,
{
    type Response = ProtocolResponse;
    type Error = anyhow::Error;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, request: ProtocolRequest) -> Self::Future {
        match request {
            ProtocolRequest::GetObjects(r) => {
                get_objects(r, self.blockchain_read_handle.clone()).boxed()
            }
            ProtocolRequest::GetChain(r) => {
                get_chain(r, self.blockchain_read_handle.clone()).boxed()
            }
            ProtocolRequest::FluffyMissingTxs(r) => {
                fluffy_missing_txs(r, self.blockchain_read_handle.clone()).boxed()
            }
            ProtocolRequest::NewBlock(_) => ready(Err(anyhow::anyhow!(
                "Peer sent a full block when we support fluffy blocks"
            )))
            .boxed(),
            ProtocolRequest::NewFluffyBlock(r) => new_fluffy_block(
                self.peer_information.clone(),
                r,
                self.blockchain_read_handle.clone(),
                self.txpool_read_handle.clone(),
            )
            .boxed(),
            ProtocolRequest::NewTransactions(r) => new_transactions(
                self.peer_information.clone(),
                r,
                self.blockchain_context_service.clone(),
                self.incoming_tx_handler.clone(),
            )
            .boxed(),
            ProtocolRequest::GetTxPoolCompliment(_) => ready(Ok(ProtocolResponse::NA)).boxed(), // TODO: should we support this?
        }
    }
}

//---------------------------------------------------------------------------------------------------- Handler functions

/// [`ProtocolRequest::GetObjects`]
async fn get_objects(
    request: GetObjectsRequest,
    mut blockchain_read_handle: BlockchainReadHandle,
) -> anyhow::Result<ProtocolResponse> {
    if request.blocks.len() > MAX_BLOCK_BATCH_LEN {
        anyhow::bail!("Peer requested more blocks than allowed.")
    }

    let block_hashes: Vec<[u8; 32]> = (&request.blocks).into();
    // deallocate the backing `Bytes`.
    drop(request);

    let BlockchainResponse::BlockCompleteEntries {
        blocks,
        missing_hashes,
        blockchain_height,
    } = blockchain_read_handle
        .ready()
        .await?
        .call(BlockchainReadRequest::BlockCompleteEntries(block_hashes))
        .await?
    else {
        unreachable!();
    };

    Ok(ProtocolResponse::GetObjects(GetObjectsResponse {
        blocks,
        missed_ids: ByteArrayVec::from(missing_hashes),
        current_blockchain_height: usize_to_u64(blockchain_height),
    }))
}

/// [`ProtocolRequest::GetChain`]
async fn get_chain(
    request: ChainRequest,
    mut blockchain_read_handle: BlockchainReadHandle,
) -> anyhow::Result<ProtocolResponse> {
    if request.block_ids.len() > MAX_BLOCKS_IDS_IN_CHAIN_ENTRY {
        anyhow::bail!("Peer sent too many block hashes in chain request.")
    }

    let block_hashes: Vec<[u8; 32]> = (&request.block_ids).into();
    let want_pruned_data = request.prune;
    // deallocate the backing `Bytes`.
    drop(request);

    let BlockchainResponse::NextChainEntry {
        start_height,
        chain_height,
        block_ids,
        block_weights,
        cumulative_difficulty,
        first_block_blob,
    } = blockchain_read_handle
        .ready()
        .await?
        .call(BlockchainReadRequest::NextChainEntry(block_hashes, 10_000))
        .await?
    else {
        unreachable!();
    };

    let Some(start_height) = start_height else {
        anyhow::bail!("The peers chain has a different genesis block than ours.");
    };

    let (cumulative_difficulty_low64, cumulative_difficulty_top64) =
        split_u128_into_low_high_bits(cumulative_difficulty);

    Ok(ProtocolResponse::GetChain(ChainResponse {
        start_height: usize_to_u64(std::num::NonZero::get(start_height)),
        total_height: usize_to_u64(chain_height),
        cumulative_difficulty_low64,
        cumulative_difficulty_top64,
        m_block_ids: ByteArrayVec::from(block_ids),
        first_block: first_block_blob.map_or(Bytes::new(), Bytes::from),
        // only needed when pruned
        m_block_weights: if want_pruned_data {
            block_weights.into_iter().map(usize_to_u64).collect()
        } else {
            vec![]
        },
    }))
}

/// [`ProtocolRequest::FluffyMissingTxs`]
async fn fluffy_missing_txs(
    mut request: FluffyMissingTransactionsRequest,
    mut blockchain_read_handle: BlockchainReadHandle,
) -> anyhow::Result<ProtocolResponse> {
    let tx_indexes = std::mem::take(&mut request.missing_tx_indices);
    let block_hash: [u8; 32] = *request.block_hash;
    let current_blockchain_height = request.current_blockchain_height;

    // deallocate the backing `Bytes`.
    drop(request);

    let BlockchainResponse::TxsInBlock(res) = blockchain_read_handle
        .ready()
        .await?
        .call(BlockchainReadRequest::TxsInBlock {
            block_hash,
            tx_indexes,
        })
        .await?
    else {
        unreachable!();
    };

    let Some(TxsInBlock { block, txs }) = res else {
        anyhow::bail!("The peer requested txs out of range.");
    };

    Ok(ProtocolResponse::NewFluffyBlock(NewFluffyBlock {
        b: BlockCompleteEntry {
            block: Bytes::from(block),
            txs: TransactionBlobs::Normal(txs.into_iter().map(Bytes::from).collect()),
            pruned: false,
            // only needed for pruned blocks.
            block_weight: 0,
        },
        current_blockchain_height,
    }))
}

/// [`ProtocolRequest::NewFluffyBlock`]
async fn new_fluffy_block<A: NetZoneAddress>(
    peer_information: PeerInformation<A>,
    request: NewFluffyBlock,
    mut blockchain_read_handle: BlockchainReadHandle,
    mut txpool_read_handle: TxpoolReadHandle,
) -> anyhow::Result<ProtocolResponse> {
    // TODO: check context service here and ignore the block?
    let current_blockchain_height = request.current_blockchain_height;

    peer_information
        .core_sync_data
        .lock()
        .unwrap()
        .current_height = current_blockchain_height;

    let (block, txs) = rayon_spawn_async(move || -> Result<_, anyhow::Error> {
        let block = Block::read(&mut request.b.block.as_ref())?;

        let tx_blobs = request
            .b
            .txs
            .take_normal()
            .ok_or(anyhow::anyhow!("Peer sent pruned txs in fluffy block"))?;

        let txs = tx_blobs
            .into_iter()
            .map(|tx_blob| {
                if tx_blob.len() > MAX_TRANSACTION_BLOB_SIZE {
                    anyhow::bail!("Peer sent a transaction over the size limit.");
                }

                let tx = Transaction::read(&mut tx_blob.as_ref())?;

                Ok((tx.hash(), tx))
            })
            .collect::<Result<_, anyhow::Error>>()?;

        // The backing `Bytes` will be deallocated when this closure returns.

        Ok((block, txs))
    })
    .await?;

    let res = blockchain_interface::handle_incoming_block(
        block,
        txs,
        &mut blockchain_read_handle,
        &mut txpool_read_handle,
    )
    .await;

    match res {
        Ok(_) => Ok(ProtocolResponse::NA),
        Err(IncomingBlockError::UnknownTransactions(block_hash, missing_tx_indices)) => Ok(
            ProtocolResponse::FluffyMissingTransactionsRequest(FluffyMissingTransactionsRequest {
                block_hash: block_hash.into(),
                current_blockchain_height,
                missing_tx_indices: missing_tx_indices.into_iter().map(usize_to_u64).collect(),
            }),
        ),
        Err(IncomingBlockError::Orphan) => {
            // Block's parent was unknown, could be syncing?
            Ok(ProtocolResponse::NA)
        }
        Err(e) => Err(e.into()),
    }
}

/// [`ProtocolRequest::NewTransactions`]
async fn new_transactions<A>(
    peer_information: PeerInformation<A>,
    request: NewTransactions,
    mut blockchain_context_service: BlockChainContextService,
    mut incoming_tx_handler: IncomingTxHandler,
) -> anyhow::Result<ProtocolResponse>
where
    A: NetZoneAddress,
    InternalPeerID<A>: Into<CrossNetworkInternalPeerId>,
{
    let BlockChainContextResponse::Context(context) = blockchain_context_service
        .ready()
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
        .call(BlockChainContextRequest::Context)
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
    else {
        unreachable!()
    };

    let context = context.unchecked_blockchain_context();

    // If we are more than 2 blocks behind the peer then ignore the txs - we are probably still syncing.
    if usize_to_u64(context.chain_height + 2)
        < peer_information
            .core_sync_data
            .lock()
            .unwrap()
            .current_height
    {
        return Ok(ProtocolResponse::NA);
    }

    let state = if request.dandelionpp_fluff {
        TxState::Fluff
    } else {
        TxState::Stem {
            from: peer_information.id.into(),
        }
    };

    // Drop all the data except the stuff we still need.
    let NewTransactions { txs, .. } = request;

    let res = incoming_tx_handler
        .ready()
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
        .call(IncomingTxs { txs, state })
        .await;

    match res {
        Ok(()) => Ok(ProtocolResponse::NA),
        Err(e) => Err(e.into()),
    }
}
----

@@ -12,4 +12,4 @@ mod dandelion;
mod incoming_tx;
mod txs_being_handled;

-pub use incoming_tx::{IncomingTxError, IncomingTxHandler, IncomingTxs};
+pub use incoming_tx::IncomingTxHandler;
@@ -43,13 +43,9 @@ use crate::{
};

/// An error that can happen handling an incoming tx.
#[derive(Debug, thiserror::Error)]
pub enum IncomingTxError {
    #[error("Error parsing tx: {0}")]
    Parse(std::io::Error),
    #[error(transparent)]
    Consensus(ExtendedConsensusError),
    #[error("Duplicate tx in message")]
    DuplicateTransaction,
}

@@ -71,7 +67,6 @@ pub(super) type TxId = [u8; 32];
/// The service than handles incoming transaction pool transactions.
///
/// This service handles everything including verifying the tx, adding it to the pool and routing it to other nodes.
#[derive(Clone)]
pub struct IncomingTxHandler {
    /// A store of txs currently being handled in incoming tx requests.
    pub(super) txs_being_handled: TxsBeingHandled,
----

@@ -35,8 +35,6 @@ futures = { workspace = true, optional = true, features = ["std"] }
monero-serai = { workspace = true, optional = true }
rayon = { workspace = true, optional = true }

serde = { workspace = true, optional = true, features = ["derive"] }

# This is kinda a stupid work around.
# [thread] needs to activate one of these libs (windows|libc)
# although it depends on what target we're building for.
----

@@ -28,12 +28,7 @@
//! - <https://docs.rs/dirs>

//---------------------------------------------------------------------------------------------------- Use
-use std::{
-    path::{Path, PathBuf},
-    sync::LazyLock,
-};
-
-use crate::network::Network;
+use std::{path::PathBuf, sync::LazyLock};

//---------------------------------------------------------------------------------------------------- Const
/// Cuprate's main directory.

@@ -63,9 +58,6 @@ pub const CUPRATE_DIR: &str = {
    }
};

-/// The default name of Cuprate's config file.
-pub const DEFAULT_CONFIG_FILE_NAME: &str = "Cuprated.toml";
-
//---------------------------------------------------------------------------------------------------- Directories
/// Create a `LazyLock` for common PATHs used by Cuprate.
///
@@ -158,61 +150,32 @@ impl_path_lazylock! {
    CUPRATE_DATA_DIR,
    data_dir,
    "",
-}
-
-/// Joins the [`Network`] to the [`Path`].
-///
-/// This will keep the path the same for [`Network::Mainnet`].
-fn path_with_network(path: &Path, network: Network) -> PathBuf {
-    match network {
-        Network::Mainnet => path.to_path_buf(),
-        network => path.join(network.to_string()),
-    }
-}
+    /// Cuprate's blockchain directory.
+    ///
+    /// This is the PATH used for any Cuprate blockchain files.
+    ///
+    /// | OS      | PATH                                                            |
+    /// |---------|-----------------------------------------------------------------|
+    /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\blockchain\`            |
+    /// | macOS   | `/Users/Alice/Library/Application Support/Cuprate/blockchain/`  |
+    /// | Linux   | `/home/alice/.local/share/cuprate/blockchain/`                  |
+    CUPRATE_BLOCKCHAIN_DIR,
+    data_dir,
+    "blockchain",

-/// Cuprate's blockchain directory.
-///
-/// This is the PATH used for any Cuprate blockchain files.
-///
-/// ```rust
-/// use cuprate_helper::{network::Network, fs::{CUPRATE_DATA_DIR, blockchain_path}};
-///
-/// assert_eq!(blockchain_path(&**CUPRATE_DATA_DIR, Network::Mainnet).as_path(), CUPRATE_DATA_DIR.join("blockchain"));
-/// assert_eq!(blockchain_path(&**CUPRATE_DATA_DIR, Network::Stagenet).as_path(), CUPRATE_DATA_DIR.join(Network::Stagenet.to_string()).join("blockchain"));
-/// assert_eq!(blockchain_path(&**CUPRATE_DATA_DIR, Network::Testnet).as_path(), CUPRATE_DATA_DIR.join(Network::Testnet.to_string()).join("blockchain"));
-/// ```
-pub fn blockchain_path(data_dir: &Path, network: Network) -> PathBuf {
-    path_with_network(data_dir, network).join("blockchain")
-}
-
-/// Cuprate's txpool directory.
-///
-/// This is the PATH used for any Cuprate txpool files.
-///
-/// ```rust
-/// use cuprate_helper::{network::Network, fs::{CUPRATE_DATA_DIR, txpool_path}};
-///
-/// assert_eq!(txpool_path(&**CUPRATE_DATA_DIR, Network::Mainnet).as_path(), CUPRATE_DATA_DIR.join("txpool"));
-/// assert_eq!(txpool_path(&**CUPRATE_DATA_DIR, Network::Stagenet).as_path(), CUPRATE_DATA_DIR.join(Network::Stagenet.to_string()).join("txpool"));
-/// assert_eq!(txpool_path(&**CUPRATE_DATA_DIR, Network::Testnet).as_path(), CUPRATE_DATA_DIR.join(Network::Testnet.to_string()).join("txpool"));
-/// ```
-pub fn txpool_path(data_dir: &Path, network: Network) -> PathBuf {
-    path_with_network(data_dir, network).join("txpool")
-}
-
-/// Cuprate's address-book directory.
-///
-/// This is the PATH used for any Cuprate address-book files.
-///
-/// ```rust
-/// use cuprate_helper::{network::Network, fs::{CUPRATE_CACHE_DIR, address_book_path}};
-///
-/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Mainnet).as_path(), CUPRATE_CACHE_DIR.join("addressbook"));
-/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Stagenet).as_path(), CUPRATE_CACHE_DIR.join(Network::Stagenet.to_string()).join("addressbook"));
-/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Testnet).as_path(), CUPRATE_CACHE_DIR.join(Network::Testnet.to_string()).join("addressbook"));
-/// ```
-pub fn address_book_path(cache_dir: &Path, network: Network) -> PathBuf {
-    path_with_network(cache_dir, network).join("addressbook")
+    /// Cuprate's transaction pool directory.
+    ///
+    /// This is the PATH used for any Cuprate txpool files.
+    ///
+    /// | OS      | PATH                                                        |
+    /// |---------|-------------------------------------------------------------|
+    /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\txpool\`            |
+    /// | macOS   | `/Users/Alice/Library/Application Support/Cuprate/txpool/`  |
+    /// | Linux   | `/home/alice/.local/share/cuprate/txpool/`                  |
+    CUPRATE_TXPOOL_DIR,
+    data_dir,
+    "txpool",
}

//---------------------------------------------------------------------------------------------------- Tests
@@ -234,21 +197,29 @@ mod test {
        (&*CUPRATE_CACHE_DIR, ""),
        (&*CUPRATE_CONFIG_DIR, ""),
        (&*CUPRATE_DATA_DIR, ""),
+        (&*CUPRATE_BLOCKCHAIN_DIR, ""),
+        (&*CUPRATE_TXPOOL_DIR, ""),
    ];

    if cfg!(target_os = "windows") {
        array[0].1 = r"AppData\Local\Cuprate";
        array[1].1 = r"AppData\Roaming\Cuprate";
        array[2].1 = r"AppData\Roaming\Cuprate";
+        array[3].1 = r"AppData\Roaming\Cuprate\blockchain";
+        array[4].1 = r"AppData\Roaming\Cuprate\txpool";
    } else if cfg!(target_os = "macos") {
        array[0].1 = "Library/Caches/Cuprate";
        array[1].1 = "Library/Application Support/Cuprate";
        array[2].1 = "Library/Application Support/Cuprate";
+        array[3].1 = "Library/Application Support/Cuprate/blockchain";
+        array[4].1 = "Library/Application Support/Cuprate/txpool";
    } else {
        // Assumes Linux.
        array[0].1 = ".cache/cuprate";
        array[1].1 = ".config/cuprate";
        array[2].1 = ".local/share/cuprate";
+        array[3].1 = ".local/share/cuprate/blockchain";
+        array[4].1 = ".local/share/cuprate/txpool";
    };

    for (path, expected) in array {
----

@@ -5,12 +5,6 @@
//! into it's own crate.
//!
//! `#[no_std]` compatible.
-// TODO: move to types crate.
-
-use core::{
-    fmt::{Display, Formatter},
-    str::FromStr,
-};

const MAINNET_NETWORK_ID: [u8; 16] = [
    0x12, 0x30, 0xF1, 0x71, 0x61, 0x04, 0x41, 0x61, 0x17, 0x31, 0x00, 0x82, 0x16, 0xA1, 0xA1, 0x10,

@@ -23,8 +17,7 @@ const STAGENET_NETWORK_ID: [u8; 16] = [
];

/// An enum representing every Monero network.
-#[derive(Debug, Clone, Copy, Default, Ord, PartialOrd, Eq, PartialEq)]
-#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
+#[derive(Debug, Clone, Copy, Default)]
pub enum Network {
    /// Mainnet
    #[default]

@@ -45,28 +38,3 @@ impl Network {
        }
    }
}
-
-#[derive(Debug, PartialEq, Eq)]
-pub struct ParseNetworkError;
-
-impl FromStr for Network {
-    type Err = ParseNetworkError;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        match s {
-            "mainnet" | "Mainnet" => Ok(Self::Mainnet),
-            "testnet" | "Testnet" => Ok(Self::Testnet),
-            "stagenet" | "Stagenet" => Ok(Self::Stagenet),
-            _ => Err(ParseNetworkError),
-        }
-    }
-}
-
-impl Display for Network {
-    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
-        f.write_str(match self {
-            Self::Mainnet => "mainnet",
-            Self::Testnet => "testnet",
-            Self::Stagenet => "stagenet",
-        })
-    }
-}
|
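Given the `FromStr` and `Display` impls above, parsing and printing round-trip. A quick sketch, assuming the `Network` type and the derives shown in the hunk above are in scope:

```rust
use core::str::FromStr;

fn main() {
    for s in ["mainnet", "testnet", "stagenet"] {
        let net = Network::from_str(s).unwrap();
        // `Display` writes the lowercase name back out.
        assert_eq!(net.to_string(), s);
    }
    // Capitalized spellings parse too, but anything else is rejected.
    assert_eq!(Network::from_str("Mainnet"), Ok(Network::Mainnet));
    assert!(Network::from_str("fakenet").is_err());
}
```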
@@ -159,7 +159,7 @@ epee_object!(
    current_blockchain_height: u64,
);

/// A request for txs we are missing from an incoming block.
/// A request for Txs we are missing from our `TxPool`
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FluffyMissingTransactionsRequest {
    /// The Block we are missing the Txs in
@@ -23,7 +23,7 @@ indexmap = { workspace = true, features = ["std"] }

rand = { workspace = true, features = ["std", "std_rng"] }

borsh = { workspace = true, features = ["derive", "std"] }
borsh = { workspace = true, features = ["derive", "std"]}

[dev-dependencies]
cuprate-test-utils = { workspace = true }
@@ -15,7 +15,7 @@ fn test_cfg() -> AddressBookConfig {
    AddressBookConfig {
        max_white_list_length: 100,
        max_gray_list_length: 500,
        peer_store_directory: PathBuf::new(),
        peer_store_file: PathBuf::new(),
        peer_save_period: Duration::from_secs(60),
    }
}
@@ -29,8 +29,8 @@ pub struct AddressBookConfig {
    ///
    /// Gray peers are peers we are yet to make a connection to.
    pub max_gray_list_length: usize,
    /// The location to store the peer store files.
    pub peer_store_directory: PathBuf,
    /// The location to store the address book.
    pub peer_store_file: PathBuf,
    /// The amount of time between saving the address book to disk.
    pub peer_save_period: Duration,
}
@@ -63,6 +63,11 @@ pub enum AddressBookError {
pub async fn init_address_book<Z: BorshNetworkZone>(
    cfg: AddressBookConfig,
) -> Result<book::AddressBook<Z>, std::io::Error> {
    tracing::info!(
        "Loading peers from file: {} ",
        cfg.peer_store_file.display()
    );

    let (white_list, gray_list) = match store::read_peers_from_disk::<Z>(&cfg).await {
        Ok(res) => res,
        Err(e) if e.kind() == ErrorKind::NotFound => (vec![], vec![]),
@@ -39,9 +39,7 @@ pub(crate) fn save_peers_to_disk<Z: BorshNetworkZone>(
    })
    .unwrap();

    let file = cfg
        .peer_store_directory
        .join(format!("{}_p2p_state", Z::NAME));
    let file = cfg.peer_store_file.clone();
    spawn_blocking(move || fs::write(&file, &data))
}

@@ -54,12 +52,7 @@ pub(crate) async fn read_peers_from_disk<Z: BorshNetworkZone>(
    ),
    std::io::Error,
> {
    let file = cfg
        .peer_store_directory
        .join(format!("{}_p2p_state", Z::NAME));

    tracing::info!("Loading peers from file: {} ", file.display());

    let file = cfg.peer_store_file.clone();
    let data = spawn_blocking(move || fs::read(file)).await.unwrap()?;

    let de_ser: DeserPeerDataV1<Z::Addr> = from_slice(&data)?;
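The change above swaps a derived path (`peer_store_directory` joined with a per-zone file name) for a single `peer_store_file` taken straight from the config. A small sketch of the two styles; the `"ClearNet"` zone name here is only an example, not taken from this diff:

```rust
use std::path::{Path, PathBuf};

// Old style: directory + "{ZONE}_p2p_state" file name.
fn old_store_path(peer_store_directory: &Path, zone: &str) -> PathBuf {
    peer_store_directory.join(format!("{zone}_p2p_state"))
}

fn main() {
    let dir = PathBuf::from("/home/alice/.cache/cuprate");
    assert_eq!(
        old_store_path(&dir, "ClearNet"),
        PathBuf::from("/home/alice/.cache/cuprate/ClearNet_p2p_state")
    );

    // New style: the full file path comes directly from the config.
    let peer_store_file = dir.join("addressbook");
    assert_eq!(
        peer_store_file.file_name(),
        Some(std::ffi::OsStr::new("addressbook"))
    );
}
```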
@@ -116,7 +116,6 @@ pub enum ProtocolResponse {
    GetChain(ChainResponse),
    NewFluffyBlock(NewFluffyBlock),
    NewTransactions(NewTransactions),
    FluffyMissingTransactionsRequest(FluffyMissingTransactionsRequest),
    NA,
}

@@ -140,9 +139,6 @@ impl PeerResponse {
    ProtocolResponse::GetChain(_) => MessageID::GetChain,
    ProtocolResponse::NewFluffyBlock(_) => MessageID::NewBlock,
    ProtocolResponse::NewTransactions(_) => MessageID::NewFluffyBlock,
    ProtocolResponse::FluffyMissingTransactionsRequest(_) => {
        MessageID::FluffyMissingTxs
    }

    ProtocolResponse::NA => return None,
},
@@ -71,9 +71,6 @@ impl TryFrom<ProtocolResponse> for ProtocolMessage {
    ProtocolResponse::NewFluffyBlock(val) => Self::NewFluffyBlock(val),
    ProtocolResponse::GetChain(val) => Self::ChainEntryResponse(val),
    ProtocolResponse::GetObjects(val) => Self::GetObjectsResponse(val),
    ProtocolResponse::FluffyMissingTransactionsRequest(val) => {
        Self::FluffyMissingTransactionsRequest(val)
    }
    ProtocolResponse::NA => return Err(MessageConversionError),
})
}
@@ -62,15 +62,15 @@ pub struct BlockBatch {
pub struct BlockDownloaderConfig {
    /// The size in bytes of the buffer between the block downloader and the place which
    /// is consuming the downloaded blocks.
    pub buffer_bytes: usize,
    pub buffer_size: usize,
    /// The size of the in progress queue (in bytes) at which we stop requesting more blocks.
    pub in_progress_queue_bytes: usize,
    pub in_progress_queue_size: usize,
    /// The [`Duration`] between checking the client pool for free peers.
    pub check_client_pool_interval: Duration,
    /// The target size of a single batch of blocks (in bytes).
    pub target_batch_bytes: usize,
    pub target_batch_size: usize,
    /// The initial amount of blocks to request (in number of blocks)
    pub initial_batch_len: usize,
    pub initial_batch_size: usize,
}

/// An error that occurred in the [`BlockDownloader`].

@@ -145,7 +145,7 @@ where
    + 'static,
    C::Future: Send + 'static,
{
    let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_bytes);
    let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_size);

    let block_downloader = BlockDownloader::new(peer_set, our_chain_svc, buffer_appender, config);

@@ -242,7 +242,7 @@ where
    Self {
        peer_set,
        our_chain_svc,
        amount_of_blocks_to_request: config.initial_batch_len,
        amount_of_blocks_to_request: config.initial_batch_size,
        amount_of_blocks_to_request_updated_at: 0,
        amount_of_empty_chain_entries: 0,
        block_download_tasks: JoinSet::new(),

@@ -381,7 +381,7 @@ where
    }

    // If our ready queue is too large send duplicate requests for the blocks we are waiting on.
    if self.block_queue.size() >= self.config.in_progress_queue_bytes {
    if self.block_queue.size() >= self.config.in_progress_queue_size {
        return self.request_inflight_batch_again(client);
    }

@@ -565,7 +565,7 @@ where
    self.amount_of_blocks_to_request = calculate_next_block_batch_size(
        block_batch.size,
        block_batch.blocks.len(),
        self.config.target_batch_bytes,
        self.config.target_batch_size,
    );

    tracing::debug!(
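The renamed fields make the units explicit: `target_batch_bytes` is a byte budget, while the request length itself is a block count. `calculate_next_block_batch_size` is not shown in this diff; the following is a purely hypothetical sketch of how such a function could derive the next request length from an observed batch:

```rust
// All names below are illustrative; only `MAX_BLOCK_BATCH_LEN` appears in the diff.
const MAX_BLOCK_BATCH_LEN: usize = 100;

fn calculate_next_block_batch_size(
    batch_bytes: usize,  // byte size of the batch just received
    batch_len: usize,    // number of blocks in it
    target_bytes: usize, // desired byte size of the next batch
) -> usize {
    // Average size of the blocks just received (guard against zero).
    let avg_block_bytes = (batch_bytes / batch_len.max(1)).max(1);
    // How many average-sized blocks fit in the target, clamped to protocol limits.
    (target_bytes / avg_block_bytes).clamp(1, MAX_BLOCK_BATCH_LEN)
}

fn main() {
    // 5_000-byte blocks against a 5_000-byte target: ask for one block.
    assert_eq!(calculate_next_block_batch_size(250_000, 50, 5_000), 1);
    // Tiny blocks would allow 250, but the cap is 100.
    assert_eq!(calculate_next_block_batch_size(1_000, 50, 5_000), 100);
}
```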
@@ -66,11 +66,11 @@ proptest! {
        genesis: *blockchain.blocks.first().unwrap().0
    },
    BlockDownloaderConfig {
        buffer_bytes: 1_000,
        in_progress_queue_bytes: 10_000,
        buffer_size: 1_000,
        in_progress_queue_size: 10_000,
        check_client_pool_interval: Duration::from_secs(5),
        target_batch_bytes: 5_000,
        initial_batch_len: 1,
        target_batch_size: 5_000,
        initial_batch_size: 1,
    });

let blocks = stream.map(|blocks| blocks.blocks).concat().await;
@@ -52,7 +52,7 @@ pub(crate) const INITIAL_CHAIN_REQUESTS_TO_SEND: usize = 3;
/// The enforced maximum amount of blocks to request in a batch.
///
/// Requesting more than this will cause the peer to disconnect and potentially lead to bans.
pub const MAX_BLOCK_BATCH_LEN: usize = 100;
pub(crate) const MAX_BLOCK_BATCH_LEN: usize = 100;

/// The timeout that the block downloader will use for requests.
pub(crate) const BLOCK_DOWNLOADER_REQUEST_TIMEOUT: Duration = Duration::from_secs(30);

@@ -61,13 +61,13 @@ pub(crate) const BLOCK_DOWNLOADER_REQUEST_TIMEOUT: Duration = Duration::from_sec
/// be less than.
///
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions.html#transaction-size>
pub const MAX_TRANSACTION_BLOB_SIZE: usize = 1_000_000;
pub(crate) const MAX_TRANSACTION_BLOB_SIZE: usize = 1_000_000;

/// The maximum amount of block IDs allowed in a chain entry response.
///
/// ref: <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/cryptonote_config.h#L97>
// TODO: link to the protocol book when this section is added.
pub const MAX_BLOCKS_IDS_IN_CHAIN_ENTRY: usize = 25_000;
pub(crate) const MAX_BLOCKS_IDS_IN_CHAIN_ENTRY: usize = 25_000;

/// The amount of failures downloading a specific batch before we stop attempting to download it.
pub(crate) const MAX_DOWNLOAD_FAILURES: usize = 5;
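These constants are limits enforced against peer responses. A minimal sketch of the kind of sanity check they enable; the validation functions here are illustrative, not from the diff, and only the constant values come from the code above:

```rust
const MAX_BLOCKS_IDS_IN_CHAIN_ENTRY: usize = 25_000;
const MAX_TRANSACTION_BLOB_SIZE: usize = 1_000_000;

// A chain entry response with too many block IDs is rejected outright.
fn chain_entry_len_ok(block_ids_len: usize) -> bool {
    block_ids_len <= MAX_BLOCKS_IDS_IN_CHAIN_ENTRY
}

// A relayed transaction blob over the consensus size limit is invalid.
fn tx_blob_size_ok(blob_len: usize) -> bool {
    blob_len <= MAX_TRANSACTION_BLOB_SIZE
}

fn main() {
    assert!(chain_entry_len_ok(10_000));
    assert!(!chain_entry_len_ok(30_000));
    assert!(tx_blob_size_ok(150_000));
    assert!(!tx_blob_size_ok(2_000_000));
}
```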
@@ -15,7 +15,7 @@ default = ["heed"]
heed = ["cuprate-database/heed"]
redb = ["cuprate-database/redb"]
redb-memory = ["cuprate-database/redb-memory"]
serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde", "cuprate-helper/serde"]
serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"]

[dependencies]
cuprate-database = { workspace = true }

@@ -34,7 +34,6 @@ serde = { workspace = true, optional = true }
tower = { workspace = true }
thread_local = { workspace = true }
rayon = { workspace = true }
bytes = { workspace = true }

[dev-dependencies]
cuprate-constants = { workspace = true }
@@ -76,7 +76,7 @@ use cuprate_blockchain::{
let tmp_dir = tempfile::tempdir()?;
let db_dir = tmp_dir.path().to_owned();
let config = ConfigBuilder::new()
    .data_directory(db_dir.into())
    .db_directory(db_dir.into())
    .build();

// Initialize the database environment.
@@ -25,7 +25,7 @@
//!
//! let config = ConfigBuilder::new()
//!     // Use a custom database directory.
//!     .data_directory(db_dir.into())
//!     .db_directory(db_dir.into())
//!     // Use as many reader threads as possible (when using `service`).
//!     .reader_threads(ReaderThreads::OnePerThread)
//!     // Use the fastest sync mode.
@@ -41,16 +41,13 @@
//! ```

//---------------------------------------------------------------------------------------------------- Import
use std::{borrow::Cow, path::PathBuf};
use std::{borrow::Cow, path::Path};

#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

use cuprate_database::{config::SyncMode, resize::ResizeAlgorithm};
use cuprate_helper::{
    fs::{blockchain_path, CUPRATE_DATA_DIR},
    network::Network,
};
use cuprate_helper::fs::CUPRATE_BLOCKCHAIN_DIR;

// re-exports
pub use cuprate_database_service::ReaderThreads;
@@ -62,9 +59,8 @@ pub use cuprate_database_service::ReaderThreads;
#[derive(Debug, Clone, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ConfigBuilder {
    network: Network,

    data_dir: Option<PathBuf>,
    /// [`Config::db_directory`].
    db_directory: Option<Cow<'static, Path>>,

    /// [`Config::cuprate_database_config`].
    db_config: cuprate_database::config::ConfigBuilder,

@@ -80,12 +76,10 @@ impl ConfigBuilder {
    /// after this function to use default values.
    pub fn new() -> Self {
        Self {
            network: Network::default(),
            data_dir: None,
            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(blockchain_path(
                &CUPRATE_DATA_DIR,
                Network::Mainnet,
            ))),
            db_directory: None,
            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(
                &*CUPRATE_BLOCKCHAIN_DIR,
            )),
            reader_threads: None,
        }
    }
|
@ -93,21 +87,21 @@ impl ConfigBuilder {
|
|||
/// Build into a [`Config`].
|
||||
///
|
||||
/// # Default values
|
||||
/// If [`ConfigBuilder::data_directory`] was not called,
|
||||
/// [`blockchain_path`] with [`CUPRATE_DATA_DIR`] [`Network::Mainnet`] will be used.
|
||||
/// If [`ConfigBuilder::db_directory`] was not called,
|
||||
/// the default [`CUPRATE_BLOCKCHAIN_DIR`] will be used.
|
||||
///
|
||||
/// For all other values, [`Default::default`] is used.
|
||||
pub fn build(self) -> Config {
|
||||
// INVARIANT: all PATH safety checks are done
|
||||
// in `helper::fs`. No need to do them here.
|
||||
let data_dir = self
|
||||
.data_dir
|
||||
.unwrap_or_else(|| CUPRATE_DATA_DIR.to_path_buf());
|
||||
let db_directory = self
|
||||
.db_directory
|
||||
.unwrap_or_else(|| Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR));
|
||||
|
||||
let reader_threads = self.reader_threads.unwrap_or_default();
|
||||
let db_config = self
|
||||
.db_config
|
||||
.db_directory(Cow::Owned(blockchain_path(&data_dir, self.network)))
|
||||
.db_directory(db_directory)
|
||||
.reader_threads(reader_threads.as_threads())
|
||||
.build();
|
||||
|
||||
|
@@ -117,17 +111,10 @@ impl ConfigBuilder {
    }
}

/// Change the network this blockchain database is for.
/// Set a custom database directory (and file) [`Path`].
#[must_use]
pub const fn network(mut self, network: Network) -> Self {
    self.network = network;
    self
}

/// Set a custom database directory (and file) [`PathBuf`].
#[must_use]
pub fn data_directory(mut self, db_directory: PathBuf) -> Self {
    self.data_dir = Some(db_directory);
pub fn db_directory(mut self, db_directory: Cow<'static, Path>) -> Self {
    self.db_directory = Some(db_directory);
    self
}
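Putting the two builder shapes side by side: on the `network()`/`data_directory()` side of the diff, the final database directory is derived as `blockchain_path(<data dir>, <network>)`. A short usage fragment in the style of the crate's own doc examples (assumed flow, mirroring the methods shown above; not itself taken from this diff):

```rust
// Assumes the blockchain `ConfigBuilder` and `Network` from the hunks above.
let config = ConfigBuilder::new()
    .network(Network::Stagenet)
    .data_directory("/tmp/cuprate-test".into())
    .build();
// The db directory resolves to blockchain_path("/tmp/cuprate-test", Network::Stagenet).
```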
@@ -158,7 +145,9 @@ impl ConfigBuilder {
    /// Good default for testing, and resource-available machines.
    #[must_use]
    pub fn fast(mut self) -> Self {
        self.db_config = self.db_config.fast();
        self.db_config =
            cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR))
                .fast();

        self.reader_threads = Some(ReaderThreads::OnePerThread);
        self

@@ -170,7 +159,9 @@ impl ConfigBuilder {
    /// Good default for resource-limited machines, e.g. a cheap VPS.
    #[must_use]
    pub fn low_power(mut self) -> Self {
        self.db_config = self.db_config.low_power();
        self.db_config =
            cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR))
                .low_power();

        self.reader_threads = Some(ReaderThreads::One);
        self
@@ -179,13 +170,10 @@ impl ConfigBuilder {

impl Default for ConfigBuilder {
    fn default() -> Self {
        let db_directory = Cow::Borrowed(&**CUPRATE_BLOCKCHAIN_DIR);
        Self {
            network: Network::default(),
            data_dir: Some(CUPRATE_DATA_DIR.to_path_buf()),
            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(blockchain_path(
                &CUPRATE_DATA_DIR,
                Network::default(),
            ))),
            db_directory: Some(db_directory.clone()),
            db_config: cuprate_database::config::ConfigBuilder::new(db_directory),
            reader_threads: Some(ReaderThreads::default()),
        }
    }
@@ -213,7 +201,7 @@ impl Config {
    /// Create a new [`Config`] with sane default settings.
    ///
    /// The [`cuprate_database::config::Config::db_directory`]
    /// will be set to [`blockchain_path`] with [`CUPRATE_DATA_DIR`] and [`Network::Mainnet`].
    /// will be set to [`CUPRATE_BLOCKCHAIN_DIR`].
    ///
    /// All other values will be [`Default::default`].
    ///

@@ -225,14 +213,14 @@ impl Config {
    /// resize::ResizeAlgorithm,
    /// DATABASE_DATA_FILENAME,
    /// };
    /// use cuprate_helper::{fs::*, network::Network};
    /// use cuprate_helper::fs::*;
    ///
    /// use cuprate_blockchain::config::*;
    ///
    /// let config = Config::new();
    ///
    /// assert_eq!(config.db_config.db_directory().as_ref(), blockchain_path(&CUPRATE_DATA_DIR, Network::Mainnet).as_path());
    /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_DATA_DIR));
    /// assert_eq!(config.db_config.db_directory(), &*CUPRATE_BLOCKCHAIN_DIR);
    /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_BLOCKCHAIN_DIR));
    /// assert!(config.db_config.db_file().ends_with(DATABASE_DATA_FILENAME));
    /// assert_eq!(config.db_config.sync_mode, SyncMode::default());
    /// assert_eq!(config.db_config.resize_algorithm, ResizeAlgorithm::default());
@@ -2,23 +2,21 @@

//---------------------------------------------------------------------------------------------------- Import
use bytemuck::TransparentWrapper;
use bytes::Bytes;
use monero_serai::{
    block::{Block, BlockHeader},
    transaction::Transaction,
};

use cuprate_database::{
    DbResult, RuntimeError, StorableVec, {DatabaseIter, DatabaseRo, DatabaseRw},
    DbResult, RuntimeError, StorableVec, {DatabaseRo, DatabaseRw},
};
use cuprate_helper::cast::usize_to_u64;
use cuprate_helper::{
    map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits},
    tx::tx_fee,
};
use cuprate_types::{
    AltBlockInformation, BlockCompleteEntry, ChainId, ExtendedBlockHeader, HardFork,
    TransactionBlobs, VerifiedBlockInformation, VerifiedTransactionInformation,
    AltBlockInformation, ChainId, ExtendedBlockHeader, HardFork, VerifiedBlockInformation,
    VerifiedTransactionInformation,
};

use crate::{

@@ -29,7 +27,7 @@ use crate::{
        output::get_rct_num_outputs,
        tx::{add_tx, remove_tx},
    },
    tables::{BlockHeights, BlockInfos, Tables, TablesIter, TablesMut},
    tables::{BlockHeights, BlockInfos, Tables, TablesMut},
    types::{BlockHash, BlockHeight, BlockInfo},
};
@@ -224,64 +222,6 @@ pub fn pop_block(
    Ok((block_height, block_info.block_hash, block))
}

//---------------------------------------------------------------------------------------------------- `get_block_blob_with_tx_indexes`
/// Retrieve a block's raw bytes, the index of the miner transaction and the number of non-miner txs in the block.
///
#[doc = doc_error!()]
pub fn get_block_blob_with_tx_indexes(
    block_height: &BlockHeight,
    tables: &impl Tables,
) -> Result<(Vec<u8>, u64, usize), RuntimeError> {
    let miner_tx_idx = tables.block_infos().get(block_height)?.mining_tx_index;

    let block_txs = tables.block_txs_hashes().get(block_height)?.0;
    let numb_txs = block_txs.len();

    // Get the block header.
    let mut block = tables.block_header_blobs().get(block_height)?.0;

    // Add the miner tx to the blob.
    let mut miner_tx_blob = tables.tx_blobs().get(&miner_tx_idx)?.0;
    block.append(&mut miner_tx_blob);

    // Add the block's tx hashes.
    monero_serai::io::write_varint(&block_txs.len(), &mut block)
        .expect("The number of txs per block will not exceed u64::MAX");

    let block_txs_bytes = bytemuck::must_cast_slice(&block_txs);
    block.extend_from_slice(block_txs_bytes);

    Ok((block, miner_tx_idx, numb_txs))
}

//---------------------------------------------------------------------------------------------------- `get_block_complete_entry`
/// Retrieve a [`BlockCompleteEntry`] from the database.
///
#[doc = doc_error!()]
pub fn get_block_complete_entry(
    block_hash: &BlockHash,
    tables: &impl TablesIter,
) -> Result<BlockCompleteEntry, RuntimeError> {
    let block_height = tables.block_heights().get(block_hash)?;
    let (block_blob, miner_tx_idx, numb_non_miner_txs) =
        get_block_blob_with_tx_indexes(&block_height, tables)?;

    let first_tx_idx = miner_tx_idx + 1;

    let tx_blobs = tables
        .tx_blobs_iter()
        .get_range(first_tx_idx..(usize_to_u64(numb_non_miner_txs) + first_tx_idx))?
        .map(|tx_blob| Ok(Bytes::from(tx_blob?.0)))
        .collect::<Result<_, RuntimeError>>()?;

    Ok(BlockCompleteEntry {
        block: Bytes::from(block_blob),
        txs: TransactionBlobs::Normal(tx_blobs),
        pruned: false,
        block_weight: 0,
    })
}
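`get_block_blob_with_tx_indexes` reassembles the wire-format block blob: header bytes, then the miner tx blob, then a varint count of tx hashes, then the raw 32-byte hashes. A toy illustration of that layout, with a minimal LEB128-style varint writer standing in for `monero_serai::io::write_varint`:

```rust
// Minimal 7-bits-per-byte varint: little-endian groups, high bit = continue.
// This is a sketch, not monero-serai's implementation.
fn write_varint(mut n: u64, out: &mut Vec<u8>) {
    loop {
        let byte = (n & 0x7f) as u8;
        n >>= 7;
        if n == 0 {
            out.push(byte);
            break;
        }
        out.push(byte | 0x80);
    }
}

fn main() {
    let header = vec![1u8, 2, 3]; // stand-in for the block header blob
    let miner_tx = vec![9u8, 9]; // stand-in for the miner tx blob
    let tx_hashes: [[u8; 32]; 2] = [[0xaa; 32], [0xbb; 32]];

    let mut blob = header.clone();
    blob.extend_from_slice(&miner_tx);
    write_varint(tx_hashes.len() as u64, &mut blob);
    for hash in &tx_hashes {
        blob.extend_from_slice(hash);
    }

    // header (3) + miner tx (2) + varint count (1) + 2 hashes (64).
    assert_eq!(blob.len(), 3 + 2 + 1 + 64);
}
```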

//---------------------------------------------------------------------------------------------------- `get_block_extended_header_*`
/// Retrieve an [`ExtendedBlockHeader`] from the database.
///
@@ -4,9 +4,9 @@
use cuprate_database::{DatabaseRo, DbResult, RuntimeError};

use crate::{
    ops::{block::block_exists, macros::doc_error},
    ops::macros::doc_error,
    tables::{BlockHeights, BlockInfos},
    types::{BlockHash, BlockHeight},
    types::BlockHeight,
};

//---------------------------------------------------------------------------------------------------- Free Functions
@@ -76,44 +76,6 @@ pub fn cumulative_generated_coins(
    }
}

/// Find the split point between our chain and a list of [`BlockHash`]s from another chain.
///
/// This function accepts chains in chronological and reverse chronological order; however,
/// if the wrong order is specified the return value is meaningless.
///
/// For chronologically ordered chains this will return the index of the first unknown block; for reverse
/// chronologically ordered chains this will return the index of the first known block.
///
/// If all blocks are known for chronologically ordered chains, or unknown for reverse chronologically
/// ordered chains, then the length of the chain will be returned.
#[doc = doc_error!()]
#[inline]
pub fn find_split_point(
    block_ids: &[BlockHash],
    chronological_order: bool,
    table_block_heights: &impl DatabaseRo<BlockHeights>,
) -> Result<usize, RuntimeError> {
    let mut err = None;

    // Do a binary search to find the first unknown/known block in the batch.
    let idx = block_ids.partition_point(|block_id| {
        match block_exists(block_id, table_block_heights) {
            Ok(exists) => exists == chronological_order,
            Err(e) => {
                err.get_or_insert(e);
                // If this happens the search is scrapped; just return `false` back.
                false
            }
        }
    });

    if let Some(e) = err {
        return Err(e);
    }

    Ok(idx)
}
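The binary search in `find_split_point` hinges on `partition_point`: the predicate holds for the leading run of the slice, and the returned index is the first element where it flips. A standalone sketch of the reverse-chronological case:

```rust
fn main() {
    // Reverse-chronological block IDs reduced to "is this block in our chain?".
    // A peer's newest blocks (which we don't have) come first.
    let known = [false, false, true, true, true];

    // For reverse chronological order the predicate is "unknown",
    // so `partition_point` returns the index of the first *known* block.
    let idx = known.partition_point(|&k| !k);
    assert_eq!(idx, 2);

    // If every block is unknown, the whole length comes back,
    // which callers treat as "no split point found".
    let all_unknown = [false, false, false];
    assert_eq!(all_unknown.partition_point(|&k| !k), 3);
}
```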
//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
mod test {
@@ -71,7 +71,7 @@
//! let tmp_dir = tempfile::tempdir()?;
//! let db_dir = tmp_dir.path().to_owned();
//! let config = ConfigBuilder::new()
//!     .data_directory(db_dir.into())
//!     .db_directory(db_dir.into())
//!     .build();
//!
//! // Initialize the database environment.
@@ -77,7 +77,7 @@
//! let tmp_dir = tempfile::tempdir()?;
//! let db_dir = tmp_dir.path().to_owned();
//! let config = ConfigBuilder::new()
//!     .data_directory(db_dir.into())
//!     .db_directory(db_dir.into())
//!     .build();
//!
//! // Initialize the database thread-pool.
@@ -10,26 +10,23 @@

//---------------------------------------------------------------------------------------------------- Import
use std::{
    cmp::min,
    collections::{HashMap, HashSet},
    sync::Arc,
};

use rayon::{
    iter::{Either, IntoParallelIterator, ParallelIterator},
    iter::{IntoParallelIterator, ParallelIterator},
    prelude::*,
    ThreadPool,
};
use thread_local::ThreadLocal;

use cuprate_database::{
    ConcreteEnv, DatabaseIter, DatabaseRo, DbResult, Env, EnvInner, RuntimeError,
};
use cuprate_database::{ConcreteEnv, DatabaseRo, DbResult, Env, EnvInner, RuntimeError};
use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads};
use cuprate_helper::map::combine_low_high_bits_to_u128;
use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainResponse},
    Chain, ChainId, ExtendedBlockHeader, OutputHistogramInput, OutputOnChain, TxsInBlock,
    Chain, ChainId, ExtendedBlockHeader, OutputHistogramInput, OutputOnChain,
};

use crate::{

@@ -39,10 +36,9 @@ use crate::{
        get_alt_chain_history_ranges,
    },
    block::{
        block_exists, get_block_blob_with_tx_indexes, get_block_complete_entry,
        get_block_extended_header_from_height, get_block_height, get_block_info,
        block_exists, get_block_extended_header_from_height, get_block_height, get_block_info,
    },
    blockchain::{cumulative_generated_coins, find_split_point, top_block_height},
    blockchain::{cumulative_generated_coins, top_block_height},
    key_image::key_image_exists,
    output::id_to_output_on_chain,
},

@@ -50,7 +46,7 @@ use crate::{
    free::{compact_history_genesis_not_included, compact_history_index_to_height_offset},
    types::{BlockchainReadHandle, ResponseResult},
},
tables::{AltBlockHeights, BlockHeights, BlockInfos, OpenTables, Tables, TablesIter},
tables::{AltBlockHeights, BlockHeights, BlockInfos, OpenTables, Tables},
types::{
    AltBlockHeight, Amount, AmountIndex, BlockHash, BlockHeight, KeyImage, PreRctOutputId,
},
@@ -104,7 +100,6 @@ fn map_request(
    /* SOMEDAY: pre-request handling, run some code for each request? */

    match request {
        R::BlockCompleteEntries(block_hashes) => block_complete_entries(env, block_hashes),
        R::BlockExtendedHeader(block) => block_extended_header(env, block),
        R::BlockHash(block, chain) => block_hash(env, block, chain),
        R::FindBlock(block_hash) => find_block(env, block_hash),

@@ -118,12 +113,7 @@ fn map_request(
        R::NumberOutputsWithAmount(vec) => number_outputs_with_amount(env, vec),
        R::KeyImagesSpent(set) => key_images_spent(env, set),
        R::CompactChainHistory => compact_chain_history(env),
        R::NextChainEntry(block_hashes, amount) => next_chain_entry(env, &block_hashes, amount),
        R::FindFirstUnknown(block_ids) => find_first_unknown(env, &block_ids),
        R::TxsInBlock {
            block_hash,
            tx_indexes,
        } => txs_in_block(env, block_hash, tx_indexes),
        R::AltBlocksInChain(chain_id) => alt_blocks_in_chain(env, chain_id),
        R::Block { height } => block(env, height),
        R::BlockByHash(hash) => block_by_hash(env, hash),
@@ -208,38 +198,6 @@ macro_rules! get_tables {
// TODO: The overhead of parallelism may be too much for every request; performance test to find the optimal
// amount of parallelism.

/// [`BlockchainReadRequest::BlockCompleteEntries`].
fn block_complete_entries(env: &ConcreteEnv, block_hashes: Vec<BlockHash>) -> ResponseResult {
    // Prepare tx/tables in `ThreadLocal`.
    let env_inner = env.env_inner();
    let tx_ro = thread_local(env);
    let tables = thread_local(env);

    let (missing_hashes, blocks) = block_hashes
        .into_par_iter()
        .map(|block_hash| {
            let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
            let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();

            match get_block_complete_entry(&block_hash, tables) {
                Err(RuntimeError::KeyNotFound) => Ok(Either::Left(block_hash)),
                res => res.map(Either::Right),
            }
        })
        .collect::<DbResult<_>>()?;

    let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
    let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();

    let blockchain_height = crate::ops::blockchain::chain_height(tables.block_heights())?;

    Ok(BlockchainResponse::BlockCompleteEntries {
        blocks,
        missing_hashes,
        blockchain_height,
    })
}

/// [`BlockchainReadRequest::BlockExtendedHeader`].
#[inline]
fn block_extended_header(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult {
@@ -377,7 +335,7 @@ fn block_extended_header_in_range(
            }
        })
    })
    .collect::<DbResult<Vec<_>>>()?
    .collect::<Result<Vec<_>, _>>()?
    }
};
@@ -576,76 +534,6 @@ fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult {
    })
}

/// [`BlockchainReadRequest::NextChainEntry`]
///
/// # Invariant
/// `block_ids` must be sorted in reverse chronological block order, or else
/// the returned result is unspecified and meaningless, as this function
/// performs a binary search.
fn next_chain_entry(
    env: &ConcreteEnv,
    block_ids: &[BlockHash],
    next_entry_size: usize,
) -> ResponseResult {
    // Single-threaded, no `ThreadLocal` required.
    let env_inner = env.env_inner();
    let tx_ro = env_inner.tx_ro()?;

    let tables = env_inner.open_tables(&tx_ro)?;
    let table_block_heights = tables.block_heights();
    let table_block_infos = tables.block_infos_iter();

    let idx = find_split_point(block_ids, false, table_block_heights)?;

    // This will happen if we have a different genesis block.
    if idx == block_ids.len() {
        return Ok(BlockchainResponse::NextChainEntry {
            start_height: None,
            chain_height: 0,
            block_ids: vec![],
            block_weights: vec![],
            cumulative_difficulty: 0,
            first_block_blob: None,
        });
    }

    // The returned chain entry must overlap with one of the blocks we were told about.
    let first_known_block_hash = block_ids[idx];
    let first_known_height = table_block_heights.get(&first_known_block_hash)?;

    let chain_height = crate::ops::blockchain::chain_height(table_block_heights)?;
    let last_height_in_chain_entry = min(first_known_height + next_entry_size, chain_height);

    let (block_ids, block_weights) = table_block_infos
        .get_range(first_known_height..last_height_in_chain_entry)?
        .map(|block_info| {
            let block_info = block_info?;

            Ok((block_info.block_hash, block_info.weight))
        })
        .collect::<DbResult<(Vec<_>, Vec<_>)>>()?;

    let top_block_info = table_block_infos.get(&(chain_height - 1))?;

    let first_block_blob = if block_ids.len() >= 2 {
        Some(get_block_blob_with_tx_indexes(&(first_known_height + 1), &tables)?.0)
    } else {
        None
    };

    Ok(BlockchainResponse::NextChainEntry {
        start_height: std::num::NonZero::new(first_known_height),
        chain_height,
        block_ids,
        block_weights,
        cumulative_difficulty: combine_low_high_bits_to_u128(
            top_block_info.cumulative_difficulty_low,
            top_block_info.cumulative_difficulty_high,
        ),
        first_block_blob,
    })
}
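Two details of `next_chain_entry` worth pinning down: the entry is clamped so it never runs past the chain tip, and `start_height` travels as `NonZero<usize>`, so a height of zero folds into `None` (matching the "no split point" early return above). A small sketch of both, under those assumptions:

```rust
use std::cmp::min;
use std::num::NonZero;

// Mirrors the clamp computed above: the entry ends at the chain tip at most.
fn last_height_in_entry(first_known: usize, entry_size: usize, chain_height: usize) -> usize {
    min(first_known + entry_size, chain_height)
}

fn main() {
    // Near the tip the entry is truncated...
    assert_eq!(last_height_in_entry(95, 10, 100), 100);
    // ...otherwise it spans the full requested size.
    assert_eq!(last_height_in_entry(50, 10, 100), 60);

    // `NonZero::new(0)` is `None`, which is how a zero start height is encoded.
    assert!(NonZero::new(0usize).is_none());
    assert!(NonZero::new(50usize).is_some());
}
```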
/// [`BlockchainReadRequest::FindFirstUnknown`]
///
/// # Invariant
@@ -658,7 +546,24 @@ fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseRes

    let table_block_heights = env_inner.open_db_ro::<BlockHeights>(&tx_ro)?;

    let idx = find_split_point(block_ids, true, &table_block_heights)?;
    let mut err = None;

    // Do a binary search to find the first unknown block in the batch.
    let idx =
        block_ids.partition_point(
            |block_id| match block_exists(block_id, &table_block_heights) {
                Ok(exists) => exists,
                Err(e) => {
                    err.get_or_insert(e);
                    // If this happens the search is scrapped; just return `false` back.
                    false
                }
            },
        );

    if let Some(e) = err {
        return Err(e);
    }

    Ok(if idx == block_ids.len() {
        BlockchainResponse::FindFirstUnknown(None)
@@ -671,33 +576,6 @@ fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseRes
    })
}

/// [`BlockchainReadRequest::TxsInBlock`]
fn txs_in_block(env: &ConcreteEnv, block_hash: [u8; 32], missing_txs: Vec<u64>) -> ResponseResult {
    // Single-threaded, no `ThreadLocal` required.
    let env_inner = env.env_inner();
    let tx_ro = env_inner.tx_ro()?;
    let tables = env_inner.open_tables(&tx_ro)?;

    let block_height = tables.block_heights().get(&block_hash)?;

    let (block, miner_tx_index, numb_txs) = get_block_blob_with_tx_indexes(&block_height, &tables)?;
    let first_tx_index = miner_tx_index + 1;

    if numb_txs < missing_txs.len() {
        return Ok(BlockchainResponse::TxsInBlock(None));
    }

    let txs = missing_txs
        .into_iter()
        .map(|index_offset| Ok(tables.tx_blobs().get(&(first_tx_index + index_offset))?.0))
        .collect::<DbResult<_>>()?;

    Ok(BlockchainResponse::TxsInBlock(Some(TxsInBlock {
        block,
        txs,
    })))
}

/// [`BlockchainReadRequest::AltBlocksInChain`]
fn alt_blocks_in_chain(env: &ConcreteEnv, chain_id: ChainId) -> ResponseResult {
    // Prepare tx/tables in `ThreadLocal`.

@@ -735,7 +613,7 @@ fn alt_blocks_in_chain(env: &ConcreteEnv, chain_id: ChainId) -> ResponseResult {
            )
        })
    })
    .collect::<DbResult<_>>()?;
    .collect::<Result<_, _>>()?;

    Ok(BlockchainResponse::AltBlocksInChain(blocks))
}
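The index arithmetic in `txs_in_block` above: the miner tx sits at global index `miner_tx_index`, so an in-block offset N (miner tx excluded) maps to global blob index `miner_tx_index + 1 + N`. A sketch of just that mapping:

```rust
// First non-miner tx lives right after the miner tx in the global tx table.
fn global_tx_index(miner_tx_index: u64, in_block_offset: u64) -> u64 {
    miner_tx_index + 1 + in_block_offset
}

fn main() {
    assert_eq!(global_tx_index(1_000, 0), 1_001);
    assert_eq!(global_tx_index(1_000, 3), 1_004);
}
```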
@@ -7,6 +7,7 @@

//---------------------------------------------------------------------------------------------------- Use
use std::{
    borrow::Cow,
    collections::{HashMap, HashSet},
    sync::Arc,
};
@@ -45,7 +46,7 @@ fn init_service() -> (
) {
    let tempdir = tempfile::tempdir().unwrap();
    let config = ConfigBuilder::new()
        .data_directory(tempdir.path().into())
        .db_directory(Cow::Owned(tempdir.path().into()))
        .low_power()
        .build();
    let (reader, writer, env) = init(config).unwrap();
@@ -5,7 +5,7 @@
//! - only used internally

//---------------------------------------------------------------------------------------------------- Import
use std::fmt::Debug;
use std::{borrow::Cow, fmt::Debug};

use pretty_assertions::assert_eq;

@@ -74,7 +74,7 @@ impl AssertTableLen {
pub(crate) fn tmp_concrete_env() -> (impl Env, tempfile::TempDir) {
    let tempdir = tempfile::tempdir().unwrap();
    let config = ConfigBuilder::new()
        .data_directory(tempdir.path().into())
        .db_directory(Cow::Owned(tempdir.path().into()))
        .low_power()
        .build();
    let env = crate::open(config).unwrap();
@@ -15,7 +15,7 @@ default = ["heed"]
heed = ["cuprate-database/heed"]
redb = ["cuprate-database/redb"]
redb-memory = ["cuprate-database/redb-memory"]
serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde", "cuprate-helper/serde"]
serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"]

[dependencies]
cuprate-database = { workspace = true, features = ["heed"] }
@@ -78,7 +78,7 @@ use cuprate_txpool::{
let tmp_dir = tempfile::tempdir()?;
let db_dir = tmp_dir.path().to_owned();
let config = ConfigBuilder::new()
    .data_directory(db_dir.into())
    .db_directory(db_dir.into())
    .build();

// Initialize the database environment.
@@ -1,18 +1,15 @@
//! The transaction pool [`Config`].
use std::{borrow::Cow, path::PathBuf};

#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use std::{borrow::Cow, path::Path};

use cuprate_database::{
    config::{Config as DbConfig, SyncMode},
    resize::ResizeAlgorithm,
};
use cuprate_database_service::ReaderThreads;
use cuprate_helper::{
    fs::{txpool_path, CUPRATE_DATA_DIR},
    network::Network,
};
use cuprate_helper::fs::CUPRATE_TXPOOL_DIR;

#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

/// The default transaction pool weight limit.
const DEFAULT_TXPOOL_WEIGHT_LIMIT: usize = 600 * 1024 * 1024;
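For scale: `600 * 1024 * 1024` is 600 MiB, so by default the pool refuses to grow past roughly 629 MB of transaction weight. The arithmetic, as a one-assert check:

```rust
const DEFAULT_TXPOOL_WEIGHT_LIMIT: usize = 600 * 1024 * 1024;

fn main() {
    // 600 MiB expressed in bytes.
    assert_eq!(DEFAULT_TXPOOL_WEIGHT_LIMIT, 629_145_600);
}
```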
@@ -24,9 +21,8 @@ const DEFAULT_TXPOOL_WEIGHT_LIMIT: usize = 600 * 1024 * 1024;
#[derive(Debug, Clone, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ConfigBuilder {
    network: Network,

    data_dir: Option<PathBuf>,
    /// [`Config::db_directory`].
    db_directory: Option<Cow<'static, Path>>,

    /// [`Config::cuprate_database_config`].
    db_config: cuprate_database::config::ConfigBuilder,

@@ -45,12 +41,10 @@ impl ConfigBuilder {
    /// after this function to use default values.
    pub fn new() -> Self {
        Self {
            network: Network::default(),
            data_dir: None,
            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(txpool_path(
                &CUPRATE_DATA_DIR,
                Network::Mainnet,
            ))),
            db_directory: None,
            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(
                &*CUPRATE_TXPOOL_DIR,
            )),
            reader_threads: None,
            max_txpool_weight: None,
        }
@@ -59,16 +53,16 @@ impl ConfigBuilder {
    /// Build into a [`Config`].
    ///
    /// # Default values
    /// If [`ConfigBuilder::data_directory`] was not called,
    /// [`txpool_path`] with [`CUPRATE_DATA_DIR`] and [`Network::Mainnet`] will be used.
    /// If [`ConfigBuilder::db_directory`] was not called,
    /// the default [`CUPRATE_TXPOOL_DIR`] will be used.
    ///
    /// For all other values, [`Default::default`] is used.
    pub fn build(self) -> Config {
        // INVARIANT: all PATH safety checks are done
        // in `helper::fs`. No need to do them here.
        let data_dir = self
            .data_dir
            .unwrap_or_else(|| CUPRATE_DATA_DIR.to_path_buf());
        let db_directory = self
            .db_directory
            .unwrap_or_else(|| Cow::Borrowed(&*CUPRATE_TXPOOL_DIR));

        let reader_threads = self.reader_threads.unwrap_or_default();
@@ -78,7 +72,7 @@ impl ConfigBuilder {

    let db_config = self
        .db_config
        .db_directory(Cow::Owned(txpool_path(&data_dir, self.network)))
        .db_directory(db_directory)
        .reader_threads(reader_threads.as_threads())
        .build();
@@ -89,13 +83,6 @@ impl ConfigBuilder {
    }
}

/// Change the network this database is for.
#[must_use]
pub const fn network(mut self, network: Network) -> Self {
    self.network = network;
    self
}

/// Sets a new maximum weight for the transaction pool.
#[must_use]
pub const fn max_txpool_weight(mut self, max_txpool_weight: usize) -> Self {
@@ -103,10 +90,10 @@ impl ConfigBuilder {
    self
}

/// Set a custom data directory [`PathBuf`].
/// Set a custom database directory (and file) [`Path`].
#[must_use]
pub fn data_directory(mut self, db_directory: PathBuf) -> Self {
    self.data_dir = Some(db_directory);
pub fn db_directory(mut self, db_directory: Cow<'static, Path>) -> Self {
    self.db_directory = Some(db_directory);
    self
}
@@ -137,7 +124,9 @@ impl ConfigBuilder {
    /// Good default for testing, and resource-available machines.
    #[must_use]
    pub fn fast(mut self) -> Self {
        self.db_config = self.db_config.fast();
        self.db_config =
            cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR))
                .fast();

        self.reader_threads = Some(ReaderThreads::OnePerThread);
        self

@@ -149,7 +138,9 @@ impl ConfigBuilder {
    /// Good default for resource-limited machines, e.g. a cheap VPS.
    #[must_use]
    pub fn low_power(mut self) -> Self {
        self.db_config = self.db_config.low_power();
        self.db_config =
            cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR))
                .low_power();

        self.reader_threads = Some(ReaderThreads::One);
        self
@@ -158,13 +149,10 @@ impl ConfigBuilder {

impl Default for ConfigBuilder {
    fn default() -> Self {
        let db_directory = Cow::Borrowed(CUPRATE_TXPOOL_DIR.as_path());
        Self {
            network: Network::default(),
            data_dir: Some(CUPRATE_DATA_DIR.to_path_buf()),
            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(txpool_path(
                &CUPRATE_DATA_DIR,
                Network::Mainnet,
            ))),
            db_directory: Some(db_directory.clone()),
            db_config: cuprate_database::config::ConfigBuilder::new(db_directory),
            reader_threads: Some(ReaderThreads::default()),
            max_txpool_weight: Some(DEFAULT_TXPOOL_WEIGHT_LIMIT),
        }
@@ -196,7 +184,7 @@ impl Config {
    /// Create a new [`Config`] with sane default settings.
    ///
    /// The [`DbConfig::db_directory`]
    /// will be set to [`txpool_path`] with [`CUPRATE_DATA_DIR`] and [`Network::Mainnet`].
    /// will be set to [`CUPRATE_TXPOOL_DIR`].
    ///
    /// All other values will be [`Default::default`].
    ///
@@ -209,21 +197,25 @@ impl Config {
    /// DATABASE_DATA_FILENAME,
    /// };
    /// use cuprate_database_service::ReaderThreads;
    /// use cuprate_helper::{fs::*, network::Network};
    /// use cuprate_helper::fs::*;
    ///
    /// use cuprate_txpool::Config;
    ///
    /// let config = Config::new();
    ///
    /// assert_eq!(config.db_config.db_directory(), txpool_path(&CUPRATE_DATA_DIR, Network::Mainnet).as_path());
    /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_DATA_DIR));
    /// assert_eq!(config.db_config.db_directory(), &*CUPRATE_TXPOOL_DIR);
    /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_TXPOOL_DIR));
    /// assert!(config.db_config.db_file().ends_with(DATABASE_DATA_FILENAME));
    /// assert_eq!(config.db_config.sync_mode, SyncMode::default());
    /// assert_eq!(config.db_config.resize_algorithm, ResizeAlgorithm::default());
    /// assert_eq!(config.reader_threads, ReaderThreads::default());
    /// ```
    pub fn new() -> Self {
        ConfigBuilder::new().build()
        Self {
            db_config: DbConfig::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)),
            reader_threads: ReaderThreads::default(),
            max_txpool_weight: 0,
        }
    }
}
@@ -51,7 +51,7 @@
//! let tmp_dir = tempfile::tempdir()?;
//! let db_dir = tmp_dir.path().to_owned();
//! let config = ConfigBuilder::new()
//!     .data_directory(db_dir.into())
//!     .db_directory(db_dir.into())
//!     .build();
//!
//! // Initialize the database environment.
@@ -83,7 +83,7 @@
//! let tmp_dir = tempfile::tempdir()?;
//! let db_dir = tmp_dir.path().to_owned();
//! let config = ConfigBuilder::new()
//!     .data_directory(db_dir.into())
//!     .db_directory(db_dir.into())
//!     .build();
//!
//! // Initialize the database thread-pool.
@@ -11,9 +11,9 @@ use std::{
use monero_serai::block::Block;

use crate::{
    types::{Chain, ExtendedBlockHeader, OutputOnChain, TxsInBlock, VerifiedBlockInformation},
    AltBlockInformation, BlockCompleteEntry, ChainId, ChainInfo, CoinbaseTxSum,
    OutputHistogramEntry, OutputHistogramInput,
    types::{Chain, ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation},
    AltBlockInformation, ChainId, ChainInfo, CoinbaseTxSum, OutputHistogramEntry,
    OutputHistogramInput,
};

//---------------------------------------------------------------------------------------------------- ReadRequest
@@ -27,11 +27,6 @@ use crate::{
/// See `Response` for the expected responses per `Request`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum BlockchainReadRequest {
    /// Request [`BlockCompleteEntry`]s.
    ///
    /// The input is the block hashes.
    BlockCompleteEntries(Vec<[u8; 32]>),

    /// Request a block's extended header.
    ///
    /// The input is the block's height.
@@ -101,16 +96,6 @@ pub enum BlockchainReadRequest {
    /// A request for the compact chain history.
    CompactChainHistory,

    /// A request for the next chain entry.
    ///
    /// Input is a list of block hashes and the amount of block hashes to return in the next chain entry.
    ///
    /// # Invariant
    /// The [`Vec`] containing the block IDs must be sorted in reverse chronological block
    /// order, or else the returned response is unspecified and meaningless,
    /// as this request performs a binary search.
    NextChainEntry(Vec<[u8; 32]>, usize),

    /// A request to find the first unknown block ID in a list of block IDs.
    ///
    /// # Invariant
@@ -119,16 +104,6 @@ pub enum BlockchainReadRequest {
    /// as this request performs a binary search.
    FindFirstUnknown(Vec<[u8; 32]>),

    /// A request for transactions from a specific block.
    TxsInBlock {
        /// The block to get transactions from.
        block_hash: [u8; 32],
        /// The indexes of the transactions from the block.
        /// This is not the global index of the txs; instead, it is the local index as they appear in
        /// the block.
        tx_indexes: Vec<u64>,
    },

    /// A request for all alt blocks in the chain with the given [`ChainId`].
    AltBlocksInChain(ChainId),
@@ -207,16 +182,6 @@ pub enum BlockchainWriteRequest {
#[expect(clippy::large_enum_variant)]
pub enum BlockchainResponse {
    //------------------------------------------------------ Reads
    /// Response to [`BlockchainReadRequest::BlockCompleteEntries`].
    BlockCompleteEntries {
        /// The [`BlockCompleteEntry`]s that we had.
        blocks: Vec<BlockCompleteEntry>,
        /// The hashes of blocks that were requested, but we don't have.
        missing_hashes: Vec<[u8; 32]>,
        /// Our blockchain height.
        blockchain_height: usize,
    },

    /// Response to [`BlockchainReadRequest::BlockExtendedHeader`].
    ///
    /// Inner value is the extended header of the requested block.
@@ -283,24 +248,6 @@ pub enum BlockchainResponse {
        cumulative_difficulty: u128,
    },

    /// Response to [`BlockchainReadRequest::NextChainEntry`].
    ///
    /// If all blocks were unknown, `start_height` will be [`None`] and the other fields will be meaningless.
    NextChainEntry {
        /// The start height of this entry, [`None`] if we could not find the split point.
        start_height: Option<std::num::NonZero<usize>>,
        /// The current chain height.
        chain_height: usize,
        /// The next block hashes in the entry.
        block_ids: Vec<[u8; 32]>,
        /// The block weights of the next blocks.
        block_weights: Vec<usize>,
        /// The current cumulative difficulty of our chain.
        cumulative_difficulty: u128,
        /// The block blob of the 2nd block in `block_ids`, if there is one.
        first_block_blob: Option<Vec<u8>>,
    },

    /// Response to [`BlockchainReadRequest::FindFirstUnknown`].
    ///
    /// Contains the index of the first unknown block and its expected height.
@@ -308,12 +255,7 @@ pub enum BlockchainResponse {
    /// This will be [`None`] if all blocks were known.
    FindFirstUnknown(Option<(usize, usize)>),

    /// The response for [`BlockchainReadRequest::TxsInBlock`].
    ///
    /// Will return [`None`] if the request contained an index out of range.
    TxsInBlock(Option<TxsInBlock>),

    /// The response for [`BlockchainReadRequest::AltBlocksInChain`].
    /// Response to [`BlockchainReadRequest::AltBlocksInChain`].
    ///
    /// Contains all the alt blocks in the alt-chain in chronological order.
    AltBlocksInChain(Vec<AltBlockInformation>),
@@ -26,8 +26,8 @@ pub use transaction_verification_data::{
pub use types::{
    AddAuxPow, AltBlockInformation, AuxPow, Chain, ChainId, ChainInfo, CoinbaseTxSum,
    ExtendedBlockHeader, FeeEstimate, HardForkInfo, MinerData, MinerDataTxBacklogEntry,
    OutputHistogramEntry, OutputHistogramInput, OutputOnChain, TxsInBlock,
    VerifiedBlockInformation, VerifiedTransactionInformation,
    OutputHistogramEntry, OutputHistogramInput, OutputOnChain, VerifiedBlockInformation,
    VerifiedTransactionInformation,
};

//---------------------------------------------------------------------------------------------------- Feature-gated
@@ -259,13 +259,6 @@ pub struct AddAuxPow {
    pub aux_pow: Vec<AuxPow>,
}

/// The inner response for a request for txs in a block.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TxsInBlock {
    pub block: Vec<u8>,
    pub txs: Vec<Vec<u8>>,
}

//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
mod test {