Mirror of https://github.com/Cuprate/cuprate.git (synced 2024-12-22 19:49:28 +00:00)

Merge branch 'main' into get-range-fix

Commit 72c9db39db: 99 changed files with 2007 additions and 526 deletions

Cargo.lock (generated, 41 lines changed)
@@ -446,6 +446,7 @@ checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54"
dependencies = [
 "anstyle",
 "clap_lex",
 "terminal_size",
]

[[package]]

@@ -703,6 +704,7 @@ version = "0.0.0"
dependencies = [
 "bitflags 2.6.0",
 "bytemuck",
 "bytes",
 "cuprate-constants",
 "cuprate-database",
 "cuprate-database-service",

@@ -933,6 +935,7 @@ dependencies = [
 "libc",
 "monero-serai",
 "rayon",
 "serde",
 "tokio",
 "windows",
]

@@ -1188,7 +1191,6 @@ dependencies = [
 "cuprate-consensus",
 "cuprate-consensus-context",
 "cuprate-consensus-rules",
 "cuprate-constants",
 "cuprate-cryptonight",
 "cuprate-dandelion-tower",
 "cuprate-database",

@@ -1230,6 +1232,7 @@ dependencies = [
 "tokio",
 "tokio-stream",
 "tokio-util",
 "toml",
 "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)",
 "tracing",
 "tracing-subscriber",

@@ -2904,6 +2907,15 @@ dependencies = [
 "serde",
]

[[package]]
name = "serde_spanned"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1"
dependencies = [
 "serde",
]

[[package]]
name = "serde_urlencoded"
version = "0.7.1"

@@ -3121,6 +3133,16 @@ dependencies = [
 "windows-sys 0.59.0",
]

[[package]]
name = "terminal_size"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef"
dependencies = [
 "rustix",
 "windows-sys 0.59.0",
]

[[package]]
name = "thiserror"
version = "1.0.66"

@@ -3262,11 +3284,26 @@ dependencies = [
 "tracing",
]

[[package]]
name = "toml"
version = "0.8.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e"
dependencies = [
 "serde",
 "serde_spanned",
 "toml_datetime",
 "toml_edit",
]

[[package]]
name = "toml_datetime"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41"
dependencies = [
 "serde",
]

[[package]]
name = "toml_edit"

@@ -3275,6 +3312,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5"
dependencies = [
 "indexmap",
 "serde",
 "serde_spanned",
 "toml_datetime",
 "winnow",
]
@@ -55,6 +55,7 @@ members = [
]

[profile.release]
panic = "abort"
lto = true          # Build with LTO
strip = "none"      # Keep panic stack traces
codegen-units = 1   # Optimize for binary speed over compile times

@@ -144,6 +145,7 @@ tokio-util = { version = "0.7", default-features = false }
tokio-stream = { version = "0.1", default-features = false }
tokio = { version = "1", default-features = false }
tower = { git = "https://github.com/Cuprate/tower.git", rev = "6c7faf0", default-features = false } # <https://github.com/tower-rs/tower/pull/796>
toml = { version = "0.8", default-features = false }
tracing-subscriber = { version = "0.3", default-features = false }
tracing = { version = "0.1", default-features = false }

@@ -279,6 +281,9 @@ rest_pat_in_fully_bound_structs = "deny"
redundant_type_annotations = "deny"
infinite_loop = "deny"
zero_repeat_side_effects = "deny"
non_zero_suggestions = "deny"
manual_is_power_of_two = "deny"
used_underscore_items = "deny"

# Warm
cast_possible_truncation = "deny"
@@ -82,7 +82,7 @@ impl_from_str_benchmark! {
macro_rules! impl_to_string_pretty_benchmark {
    (
        $(
            $fn_name:ident => $request_constructor:expr,
            $fn_name:ident => $request_constructor:expr_2021,
        )*
    ) => {
        $(
@@ -2,7 +2,7 @@
name = "cuprated"
version = "0.0.1"
edition = "2021"
description = "The Cuprate Monero Rust node."
description = "The Cuprate Rust Monero node."
license = "AGPL-3.0-only"
authors = ["Boog900", "hinto-janai", "SyntheticBird45"]
repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated"

@@ -13,9 +13,8 @@ cuprate-consensus = { workspace = true }
cuprate-fast-sync = { workspace = true }
cuprate-consensus-context = { workspace = true }
cuprate-consensus-rules = { workspace = true }
cuprate-constants = { workspace = true }
cuprate-cryptonight = { workspace = true }
cuprate-helper = { workspace = true }
cuprate-helper = { workspace = true, features = ["serde"] }
cuprate-epee-encoding = { workspace = true }
cuprate-fixed-bytes = { workspace = true }
cuprate-levin = { workspace = true }

@@ -26,9 +25,9 @@ cuprate-dandelion-tower = { workspace = true, features = ["txpool"] }
cuprate-async-buffer = { workspace = true }
cuprate-address-book = { workspace = true }
cuprate-blockchain = { workspace = true }
cuprate-database-service = { workspace = true }
cuprate-database-service = { workspace = true, features = ["serde"] }
cuprate-txpool = { workspace = true }
cuprate-database = { workspace = true }
cuprate-database = { workspace = true, features = ["serde"] }
cuprate-pruning = { workspace = true }
cuprate-test-utils = { workspace = true }
cuprate-types = { workspace = true }

@@ -36,6 +35,7 @@ cuprate-json-rpc = { workspace = true }
cuprate-rpc-interface = { workspace = true }
cuprate-rpc-types = { workspace = true }

# TODO: after v1.0.0, remove unneeded dependencies.
anyhow = { workspace = true }
async-trait = { workspace = true }

@@ -44,7 +44,7 @@ borsh = { workspace = true }
bytemuck = { workspace = true }
bytes = { workspace = true }
cfg-if = { workspace = true }
clap = { workspace = true, features = ["cargo"] }
clap = { workspace = true, features = ["cargo", "help", "wrap_help"] }
chrono = { workspace = true }
crypto-bigint = { workspace = true }
crossbeam = { workspace = true }

@@ -71,15 +71,10 @@ thread_local = { workspace = true }
tokio-util = { workspace = true }
tokio-stream = { workspace = true }
tokio = { workspace = true }
toml = { workspace = true, features = ["parse", "display"] }
tower = { workspace = true }
tracing-subscriber = { workspace = true, features = ["std", "fmt", "default"] }
tracing = { workspace = true }
tracing = { workspace = true, features = ["default"] }

[lints]
workspace = true

[profile.dev]
panic = "abort"

[profile.release]
panic = "abort"
binaries/cuprated/Cuprated.toml (new file, 67 lines)

@@ -0,0 +1,67 @@
#     ____                      _
#    / ___|   _ _ __  _ __ __ _| |_ ___
#   | |  | | | | '_ \| '__/ _` | __/ _ \
#   | |__| |_| | |_) | | | (_| | || __/
#    \____\__,_| .__/|_|  \__,_|\__\___|
#              |_|
#

## The network to run on, valid values: "Mainnet", "Testnet", "Stagenet".
network = "Mainnet"

## Tracing config.
[tracing]
## The minimum level for log events to be displayed.
level = "info"

## Clear-net config.
[p2p.clear_net]
## The number of outbound connections we should make and maintain.
outbound_connections = 64
## The number of extra connections we should make under load from the rest of Cuprate, i.e. when syncing.
extra_outbound_connections = 8
## The maximum number of inbound connections we should allow.
max_inbound_connections = 128
## The percent of outbound connections that should be to nodes we have not connected to before.
gray_peers_percent = 0.7
## The port to accept connections on; if left as `0`, no connections will be accepted.
p2p_port = 0
## The IP address to listen for connections on.
listen_on = "0.0.0.0"

## The clear-net address book config.
[p2p.clear_net.address_book_config]
## The size of the white peer list, which contains peers we have made a connection to before.
max_white_list_length = 1_000
## The size of the gray peer list, which contains peers we have not made a connection to before.
max_gray_list_length = 5_000
## The amount of time between address book saves.
peer_save_period = { secs = 90, nanos = 0 }

## The block downloader config.
[p2p.block_downloader]
## The size of the buffer of sequential blocks waiting to be verified and added to the chain (bytes).
buffer_bytes = 50_000_000
## The size of the queue of blocks which are waiting for a parent block to be downloaded (bytes).
in_progress_queue_bytes = 50_000_000
## The target size of a batch of blocks (bytes); must not exceed 100MB.
target_batch_bytes = 5_000_000
## The amount of time between checking the pool of connected peers for free peers to download blocks.
check_client_pool_interval = { secs = 30, nanos = 0 }

## Storage config.
[storage]
## The number of reader threads to spawn.
reader_threads = "OnePerThread"

## Txpool storage config.
[storage.txpool]
## The database sync mode for the txpool.
sync_mode = "Async"
## The maximum size of all the txs in the pool (bytes).
max_txpool_byte_size = 100_000_000

## Blockchain storage config.
[storage.blockchain]
## The database sync mode for the blockchain.
sync_mode = "Async"
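A note on the duration syntax above: `{ secs = 90, nanos = 0 }` is serde's derived encoding of Rust's `std::time::Duration`, a two-field struct, which TOML writes as an inline table. A minimal sketch, not part of the diff, showing the round trip:

use serde::Deserialize;
use std::time::Duration;

#[derive(Deserialize)]
struct AddressBookConfig {
    peer_save_period: Duration,
}

fn main() {
    // serde's `Duration` impl expects exactly the `secs`/`nanos` fields.
    let cfg: AddressBookConfig =
        toml::from_str("peer_save_period = { secs = 90, nanos = 0 }").unwrap();
    assert_eq!(cfg.peer_save_period, Duration::from_secs(90));
}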
@@ -1 +1,159 @@
//! cuprated config
use std::{
    fs::{read_to_string, File},
    io,
    path::Path,
    time::Duration,
};

use clap::Parser;
use serde::{Deserialize, Serialize};

use cuprate_consensus::ContextConfig;
use cuprate_helper::{
    fs::{CUPRATE_CONFIG_DIR, DEFAULT_CONFIG_FILE_NAME},
    network::Network,
};
use cuprate_p2p::block_downloader::BlockDownloaderConfig;
use cuprate_p2p_core::{ClearNet, ClearNetServerCfg};

mod args;
mod fs;
mod p2p;
mod storage;
mod tracing_config;

use crate::config::fs::FileSystemConfig;
use p2p::P2PConfig;
use storage::StorageConfig;
use tracing_config::TracingConfig;

/// Reads the args & config file, returning a [`Config`].
pub fn read_config_and_args() -> Config {
    let args = args::Args::parse();
    args.do_quick_requests();

    let config: Config = if let Some(config_file) = &args.config_file {
        // If a config file was set in the args try to read it and exit if we can't.
        match Config::read_from_path(config_file) {
            Ok(config) => config,
            Err(e) => {
                eprintln!("Failed to read config from file: {e}");
                std::process::exit(1);
            }
        }
    } else {
        // First attempt to read the config file from the current directory.
        std::env::current_dir()
            .map(|path| path.join(DEFAULT_CONFIG_FILE_NAME))
            .map_err(Into::into)
            .and_then(Config::read_from_path)
            .inspect_err(|e| tracing::debug!("Failed to read config from current dir: {e}"))
            // otherwise try the main config directory.
            .or_else(|_| {
                let file = CUPRATE_CONFIG_DIR.join(DEFAULT_CONFIG_FILE_NAME);
                Config::read_from_path(file)
            })
            .inspect_err(|e| {
                tracing::debug!("Failed to read config from config dir: {e}");
                eprintln!("Failed to find/read config file, using default config.");
            })
            .unwrap_or_default()
    };

    args.apply_args(config)
}

/// The config for all of Cuprate.
#[derive(Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct Config {
    /// The network we should run on.
    network: Network,

    /// [`tracing`] config.
    tracing: TracingConfig,

    /// The P2P network config.
    p2p: P2PConfig,

    /// The storage config.
    storage: StorageConfig,

    /// The file system config.
    fs: FileSystemConfig,
}

impl Config {
    /// Attempts to read a config file in [`toml`] format from the given [`Path`].
    ///
    /// # Errors
    ///
    /// Will return an [`Err`] if the file cannot be read or if the file is not a valid [`toml`] config.
    fn read_from_path(file: impl AsRef<Path>) -> Result<Self, anyhow::Error> {
        let file_text = read_to_string(file.as_ref())?;

        Ok(toml::from_str(&file_text)
            .inspect(|_| eprintln!("Using config at: {}", file.as_ref().to_string_lossy()))
            .inspect_err(|e| {
                eprintln!("{e}");
                eprintln!(
                    "Failed to parse config file at: {}",
                    file.as_ref().to_string_lossy()
                );
            })?)
    }

    /// Returns the current [`Network`] we are running on.
    pub const fn network(&self) -> Network {
        self.network
    }

    /// The [`ClearNet`] [`cuprate_p2p::P2PConfig`].
    pub fn clearnet_p2p_config(&self) -> cuprate_p2p::P2PConfig<ClearNet> {
        cuprate_p2p::P2PConfig {
            network: self.network,
            seeds: p2p::clear_net_seed_nodes(self.network),
            outbound_connections: self.p2p.clear_net.general.outbound_connections,
            extra_outbound_connections: self.p2p.clear_net.general.extra_outbound_connections,
            max_inbound_connections: self.p2p.clear_net.general.max_inbound_connections,
            gray_peers_percent: self.p2p.clear_net.general.gray_peers_percent,
            server_config: Some(ClearNetServerCfg {
                ip: self.p2p.clear_net.listen_on,
            }),
            p2p_port: self.p2p.clear_net.general.p2p_port,
            // TODO: set this if a public RPC server is set.
            rpc_port: 0,
            address_book_config: self
                .p2p
                .clear_net
                .general
                .address_book_config(&self.fs.cache_directory, self.network),
        }
    }

    /// The [`ContextConfig`].
    pub const fn context_config(&self) -> ContextConfig {
        match self.network {
            Network::Mainnet => ContextConfig::main_net(),
            Network::Stagenet => ContextConfig::stage_net(),
            Network::Testnet => ContextConfig::test_net(),
        }
    }

    /// The [`cuprate_blockchain`] config.
    pub fn blockchain_config(&self) -> cuprate_blockchain::config::Config {
        let blockchain = &self.storage.blockchain;

        // We don't set reader threads as we manually make the reader threadpool.
        cuprate_blockchain::config::ConfigBuilder::default()
            .network(self.network)
            .data_directory(self.fs.data_directory.clone())
            .sync_mode(blockchain.shared.sync_mode)
            .build()
    }

    /// The [`BlockDownloaderConfig`].
    pub fn block_downloader_config(&self) -> BlockDownloaderConfig {
        self.p2p.block_downloader.clone().into()
    }
}
binaries/cuprated/src/config/args.rs (new file, 55 lines)

@@ -0,0 +1,55 @@
use std::{io::Write, path::PathBuf, process::exit};

use clap::builder::TypedValueParser;

use cuprate_helper::network::Network;

use crate::{config::Config, constants::EXAMPLE_CONFIG};

/// Cuprate Args.
#[derive(clap::Parser, Debug)]
#[command(version, about)]
pub struct Args {
    /// The network to run on.
    #[arg(
        long,
        default_value_t = Network::Mainnet,
        value_parser = clap::builder::PossibleValuesParser::new(["mainnet", "testnet", "stagenet"])
            .map(|s| s.parse::<Network>().unwrap()),
    )]
    pub network: Network,
    /// The amount of outbound clear-net connections to maintain.
    #[arg(long)]
    pub outbound_connections: Option<usize>,
    /// The PATH of the `cuprated` config file.
    #[arg(long)]
    pub config_file: Option<PathBuf>,
    /// Generate a config file and print it to stdout.
    #[arg(long)]
    pub generate_config: bool,
}

impl Args {
    /// Complete any quick requests asked for in [`Args`].
    ///
    /// May cause the process to [`exit`].
    pub fn do_quick_requests(&self) {
        if self.generate_config {
            println!("{EXAMPLE_CONFIG}");
            exit(0);
        }
    }

    /// Apply the [`Args`] to the given [`Config`].
    ///
    /// This may exit the program if a config value was set that requires an early exit.
    pub const fn apply_args(&self, mut config: Config) -> Config {
        config.network = self.network;

        if let Some(outbound_connections) = self.outbound_connections {
            config.p2p.clear_net.general.outbound_connections = outbound_connections;
        }

        config
    }
}
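Since `Args` derives `clap::Parser`, field names become `--kebab-case` flags and `Option` fields stay optional. A standalone sketch, simplified from the `Args` above (the real `network` field uses the custom value parser shown in the diff; `DemoArgs` here is a hypothetical stand-in):

use clap::Parser;

#[derive(Parser, Debug)]
struct DemoArgs {
    /// Simplified stand-in for the real `--network` arg.
    #[arg(long, default_value = "mainnet")]
    network: String,
    #[arg(long)]
    outbound_connections: Option<usize>,
}

fn main() {
    let args =
        DemoArgs::parse_from(["cuprated", "--network", "testnet", "--outbound-connections", "32"]);
    assert_eq!(args.network, "testnet");
    assert_eq!(args.outbound_connections, Some(32));
}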
binaries/cuprated/src/config/fs.rs (new file, 21 lines)

@@ -0,0 +1,21 @@
use std::path::PathBuf;

use serde::{Deserialize, Serialize};

use cuprate_helper::fs::{CUPRATE_CACHE_DIR, CUPRATE_DATA_DIR};

#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct FileSystemConfig {
    pub data_directory: PathBuf,
    pub cache_directory: PathBuf,
}

impl Default for FileSystemConfig {
    fn default() -> Self {
        Self {
            data_directory: CUPRATE_DATA_DIR.to_path_buf(),
            cache_directory: CUPRATE_CACHE_DIR.to_path_buf(),
        }
    }
}
binaries/cuprated/src/config/p2p.rs (new file, 178 lines)

@@ -0,0 +1,178 @@
use std::{
    net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
    path::Path,
    time::Duration,
};

use serde::{Deserialize, Serialize};

use cuprate_helper::{fs::address_book_path, network::Network};

/// P2P config.
#[derive(Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct P2PConfig {
    /// Clear-net config.
    pub clear_net: ClearNetConfig,
    /// Block downloader config.
    pub block_downloader: BlockDownloaderConfig,
}

#[derive(Clone, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct BlockDownloaderConfig {
    /// The size in bytes of the buffer between the block downloader and the place which
    /// is consuming the downloaded blocks.
    pub buffer_bytes: usize,
    /// The size of the in-progress queue (in bytes) at which we stop requesting more blocks.
    pub in_progress_queue_bytes: usize,
    /// The [`Duration`] between checking the client pool for free peers.
    pub check_client_pool_interval: Duration,
    /// The target size of a single batch of blocks (in bytes).
    pub target_batch_bytes: usize,
}

impl From<BlockDownloaderConfig> for cuprate_p2p::block_downloader::BlockDownloaderConfig {
    fn from(value: BlockDownloaderConfig) -> Self {
        Self {
            buffer_bytes: value.buffer_bytes,
            in_progress_queue_bytes: value.in_progress_queue_bytes,
            check_client_pool_interval: value.check_client_pool_interval,
            target_batch_bytes: value.target_batch_bytes,
            initial_batch_len: 1,
        }
    }
}

impl Default for BlockDownloaderConfig {
    fn default() -> Self {
        Self {
            buffer_bytes: 50_000_000,
            in_progress_queue_bytes: 50_000_000,
            check_client_pool_interval: Duration::from_secs(30),
            target_batch_bytes: 5_000_000,
        }
    }
}

/// The config values for P2P clear-net.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct ClearNetConfig {
    /// The IP address to listen for connections on.
    pub listen_on: IpAddr,
    #[serde(flatten)]
    pub general: SharedNetConfig,
}

impl Default for ClearNetConfig {
    fn default() -> Self {
        Self {
            listen_on: IpAddr::V4(Ipv4Addr::UNSPECIFIED),
            general: Default::default(),
        }
    }
}

/// Network config values shared between all network zones.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct SharedNetConfig {
    /// The number of outbound connections to make and try to keep.
    pub outbound_connections: usize,
    /// The number of extra connections we can make if we are under load from the rest of Cuprate.
    pub extra_outbound_connections: usize,
    /// The maximum number of inbound connections.
    pub max_inbound_connections: usize,
    /// The percent of connections that should be to peers we haven't connected to before.
    pub gray_peers_percent: f64,
    /// The port to use to accept P2P connections.
    pub p2p_port: u16,
    /// The address book config.
    address_book_config: AddressBookConfig,
}

impl SharedNetConfig {
    /// Returns the [`AddressBookConfig`].
    pub fn address_book_config(
        &self,
        cache_dir: &Path,
        network: Network,
    ) -> cuprate_address_book::AddressBookConfig {
        cuprate_address_book::AddressBookConfig {
            max_white_list_length: self.address_book_config.max_white_list_length,
            max_gray_list_length: self.address_book_config.max_gray_list_length,
            peer_store_directory: address_book_path(cache_dir, network),
            peer_save_period: self.address_book_config.peer_save_period,
        }
    }
}

impl Default for SharedNetConfig {
    fn default() -> Self {
        Self {
            outbound_connections: 64,
            extra_outbound_connections: 8,
            max_inbound_connections: 128,
            gray_peers_percent: 0.7,
            p2p_port: 0,
            address_book_config: AddressBookConfig::default(),
        }
    }
}

#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct AddressBookConfig {
    max_white_list_length: usize,
    max_gray_list_length: usize,
    peer_save_period: Duration,
}

impl Default for AddressBookConfig {
    fn default() -> Self {
        Self {
            max_white_list_length: 1_000,
            max_gray_list_length: 5_000,
            peer_save_period: Duration::from_secs(30),
        }
    }
}

/// Seed nodes for [`ClearNet`](cuprate_p2p_core::ClearNet).
pub fn clear_net_seed_nodes(network: Network) -> Vec<SocketAddr> {
    let seeds = match network {
        Network::Mainnet => [
            "176.9.0.187:18080",
            "88.198.163.90:18080",
            "66.85.74.134:18080",
            "51.79.173.165:18080",
            "192.99.8.110:18080",
            "37.187.74.171:18080",
            "77.172.183.193:18080",
        ]
        .as_slice(),
        Network::Stagenet => [
            "176.9.0.187:38080",
            "51.79.173.165:38080",
            "192.99.8.110:38080",
            "37.187.74.171:38080",
            "77.172.183.193:38080",
        ]
        .as_slice(),
        Network::Testnet => [
            "176.9.0.187:28080",
            "51.79.173.165:28080",
            "192.99.8.110:28080",
            "37.187.74.171:28080",
            "77.172.183.193:28080",
        ]
        .as_slice(),
    };

    seeds
        .iter()
        .map(|s| s.parse())
        .collect::<Result<_, _>>()
        .unwrap()
}
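The `#[serde(flatten)]` on `general` is what lets `Cuprated.toml` set fields such as `p2p_port` directly under `[p2p.clear_net]` rather than in a nested table. A minimal sketch, assuming simplified stand-in types:

use serde::Deserialize;

#[derive(Deserialize)]
struct Shared {
    p2p_port: u16,
}

#[derive(Deserialize)]
struct ClearNet {
    listen_on: std::net::IpAddr,
    // Lifts `Shared`'s fields to the same TOML level as `listen_on`.
    #[serde(flatten)]
    general: Shared,
}

fn main() {
    let c: ClearNet = toml::from_str("listen_on = \"0.0.0.0\"\np2p_port = 18080").unwrap();
    assert_eq!(c.general.p2p_port, 18080);
}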
binaries/cuprated/src/config/storage.rs (new file, 67 lines)

@@ -0,0 +1,67 @@
use std::path::PathBuf;

use serde::{Deserialize, Serialize};

use cuprate_database::config::SyncMode;
use cuprate_database_service::ReaderThreads;
use cuprate_helper::fs::CUPRATE_DATA_DIR;

/// The storage config.
#[derive(Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct StorageConfig {
    /// The number of reader threads to spawn between the tx-pool and blockchain.
    pub reader_threads: ReaderThreads,
    /// The tx-pool config.
    pub txpool: TxpoolConfig,
    /// The blockchain config.
    pub blockchain: BlockchainConfig,
}

/// The blockchain config.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct BlockchainConfig {
    #[serde(flatten)]
    pub shared: SharedStorageConfig,
}

impl Default for BlockchainConfig {
    fn default() -> Self {
        Self {
            shared: SharedStorageConfig {
                sync_mode: SyncMode::Async,
            },
        }
    }
}

/// The tx-pool config.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct TxpoolConfig {
    #[serde(flatten)]
    pub shared: SharedStorageConfig,

    /// The maximum size of the tx-pool (bytes).
    pub max_txpool_byte_size: usize,
}

impl Default for TxpoolConfig {
    fn default() -> Self {
        Self {
            shared: SharedStorageConfig {
                sync_mode: SyncMode::Async,
            },
            max_txpool_byte_size: 100_000_000,
        }
    }
}

/// Config values shared between the tx-pool and blockchain.
#[derive(Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct SharedStorageConfig {
    /// The [`SyncMode`] of the database.
    pub sync_mode: SyncMode,
}
binaries/cuprated/src/config/tracing_config.rs (new file, 42 lines)

@@ -0,0 +1,42 @@
use serde::{Deserialize, Serialize};
use tracing::level_filters::LevelFilter;

/// [`tracing`] config.
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct TracingConfig {
    /// The default minimum log level.
    #[serde(with = "level_filter_serde")]
    level: LevelFilter,
}

impl Default for TracingConfig {
    fn default() -> Self {
        Self {
            level: LevelFilter::INFO,
        }
    }
}

mod level_filter_serde {
    use std::str::FromStr;

    use serde::{Deserialize, Deserializer, Serializer};
    use tracing::level_filters::LevelFilter;

    #[expect(clippy::trivially_copy_pass_by_ref, reason = "serde")]
    pub fn serialize<S>(level_filter: &LevelFilter, s: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        s.serialize_str(&level_filter.to_string())
    }

    pub fn deserialize<'de, D>(d: D) -> Result<LevelFilter, D::Error>
    where
        D: Deserializer<'de>,
    {
        let s = String::deserialize(d)?;
        LevelFilter::from_str(&s).map_err(serde::de::Error::custom)
    }
}
@@ -18,9 +18,12 @@ pub const VERSION_BUILD: &str = if cfg!(debug_assertions) {
pub const PANIC_CRITICAL_SERVICE_ERROR: &str =
    "A service critical to Cuprate's function returned an unexpected error.";

pub const EXAMPLE_CONFIG: &str = include_str!("../Cuprated.toml");

#[cfg(test)]
mod test {
    use super::*;
    use crate::config::Config;

    #[test]
    fn version() {

@@ -35,4 +38,9 @@ mod test {
        assert_eq!(VERSION_BUILD, "0.0.1-release");
    }
}

    #[test]
    fn generate_config_text_is_valid() {
        let config: Config = toml::from_str(EXAMPLE_CONFIG).unwrap();
    }
}
@@ -29,6 +29,8 @@ fn main() {
    // Initialize global static `LazyLock` data.
    statics::init_lazylock_statics();

    let _config = config::read_config_and_args();

    // TODO: everything else.
    todo!()
}
@@ -1,8 +1,57 @@
//! P2P
//!
//! Will handle initiating the P2P and contains a protocol request handler.
use futures::{FutureExt, TryFutureExt};
use tokio::sync::oneshot;
use tower::ServiceExt;

use cuprate_blockchain::service::BlockchainReadHandle;
use cuprate_consensus::BlockChainContextService;
use cuprate_p2p::{NetworkInterface, P2PConfig};
use cuprate_p2p_core::ClearNet;
use cuprate_txpool::service::TxpoolReadHandle;

use crate::txpool::IncomingTxHandler;

mod core_sync_service;
mod network_address;
pub mod request_handler;

pub use network_address::CrossNetworkInternalPeerId;

/// Starts the P2P clearnet network, returning a [`NetworkInterface`] to interact with it.
///
/// A [`oneshot::Sender`] is also returned to provide the [`IncomingTxHandler`]; until it is
/// provided, network handshakes cannot be completed.
pub async fn start_clearnet_p2p(
    blockchain_read_handle: BlockchainReadHandle,
    blockchain_context_service: BlockChainContextService,
    txpool_read_handle: TxpoolReadHandle,
    config: P2PConfig<ClearNet>,
) -> Result<
    (
        NetworkInterface<ClearNet>,
        oneshot::Sender<IncomingTxHandler>,
    ),
    tower::BoxError,
> {
    let (incoming_tx_handler_tx, incoming_tx_handler_rx) = oneshot::channel();

    let request_handler_maker = request_handler::P2pProtocolRequestHandlerMaker {
        blockchain_read_handle,
        blockchain_context_service: blockchain_context_service.clone(),
        txpool_read_handle,
        incoming_tx_handler: None,
        incoming_tx_handler_fut: incoming_tx_handler_rx.shared(),
    };

    Ok((
        cuprate_p2p::initialize_network(
            request_handler_maker.map_response(|s| s.map_err(Into::into)),
            core_sync_service::CoreSyncService(blockchain_context_service),
            config,
        )
        .await?,
        incoming_tx_handler_tx,
    ))
}
binaries/cuprated/src/p2p/core_sync_service.rs (new file, 49 lines)

@@ -0,0 +1,49 @@
use std::task::{Context, Poll};

use futures::{future::BoxFuture, FutureExt, TryFutureExt};
use tower::Service;

use cuprate_consensus::{
    BlockChainContextRequest, BlockChainContextResponse, BlockChainContextService,
};
use cuprate_helper::{cast::usize_to_u64, map::split_u128_into_low_high_bits};
use cuprate_p2p_core::services::{CoreSyncDataRequest, CoreSyncDataResponse};
use cuprate_wire::CoreSyncData;

/// The core sync service.
#[derive(Clone)]
pub struct CoreSyncService(pub BlockChainContextService);

impl Service<CoreSyncDataRequest> for CoreSyncService {
    type Response = CoreSyncDataResponse;
    type Error = tower::BoxError;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.0.poll_ready(cx)
    }

    fn call(&mut self, _: CoreSyncDataRequest) -> Self::Future {
        self.0
            .call(BlockChainContextRequest::Context)
            .map_ok(|res| {
                let BlockChainContextResponse::Context(context) = res else {
                    unreachable!()
                };

                let context = context.unchecked_blockchain_context();
                let (cumulative_difficulty, cumulative_difficulty_top64) =
                    split_u128_into_low_high_bits(context.cumulative_difficulty);

                CoreSyncDataResponse(CoreSyncData {
                    cumulative_difficulty,
                    cumulative_difficulty_top64,
                    current_height: usize_to_u64(context.chain_height),
                    pruning_seed: 0,
                    top_id: context.top_hash,
                    top_version: context.current_hf.as_u8(),
                })
            })
            .boxed()
    }
}
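The low/high split above exists because Monero's wire format carries the 128-bit cumulative difficulty as two u64 fields. A hedged sketch of the arithmetic (the real helper lives in `cuprate_helper::map`; `split` here is a hypothetical re-derivation):

fn split(x: u128) -> (u64, u64) {
    // Low 64 bits, then high 64 bits.
    (x as u64, (x >> 64) as u64)
}

fn main() {
    // 2^64 + 1 splits into low = 1, high = 1.
    let (low, high) = split(u128::from(u64::MAX) + 2);
    assert_eq!((low, high), (1, 1));
}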
@@ -1 +1,422 @@
use std::{
    collections::HashSet,
    future::{ready, Ready},
    hash::Hash,
    task::{Context, Poll},
};

use bytes::Bytes;
use futures::{
    future::{BoxFuture, Shared},
    FutureExt,
};
use monero_serai::{block::Block, transaction::Transaction};
use tokio::sync::{broadcast, oneshot, watch};
use tokio_stream::wrappers::WatchStream;
use tower::{Service, ServiceExt};

use cuprate_blockchain::service::BlockchainReadHandle;
use cuprate_consensus::{
    transactions::new_tx_verification_data, BlockChainContextRequest, BlockChainContextResponse,
    BlockChainContextService,
};
use cuprate_dandelion_tower::TxState;
use cuprate_fixed_bytes::ByteArrayVec;
use cuprate_helper::cast::u64_to_usize;
use cuprate_helper::{
    asynch::rayon_spawn_async,
    cast::usize_to_u64,
    map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits},
};
use cuprate_p2p::constants::{
    MAX_BLOCKS_IDS_IN_CHAIN_ENTRY, MAX_BLOCK_BATCH_LEN, MAX_TRANSACTION_BLOB_SIZE, MEDIUM_BAN,
};
use cuprate_p2p_core::{
    client::{InternalPeerID, PeerInformation},
    NetZoneAddress, NetworkZone, ProtocolRequest, ProtocolResponse,
};
use cuprate_txpool::service::TxpoolReadHandle;
use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainResponse},
    BlockCompleteEntry, TransactionBlobs, TxsInBlock,
};
use cuprate_wire::protocol::{
    ChainRequest, ChainResponse, FluffyMissingTransactionsRequest, GetObjectsRequest,
    GetObjectsResponse, NewFluffyBlock, NewTransactions,
};

use crate::{
    blockchain::interface::{self as blockchain_interface, IncomingBlockError},
    constants::PANIC_CRITICAL_SERVICE_ERROR,
    p2p::CrossNetworkInternalPeerId,
    txpool::{IncomingTxError, IncomingTxHandler, IncomingTxs},
};

/// The P2P protocol request handler [`MakeService`](tower::MakeService).
#[derive(Clone)]
pub struct P2pProtocolRequestHandlerMaker {
    pub blockchain_read_handle: BlockchainReadHandle,
    pub blockchain_context_service: BlockChainContextService,
    pub txpool_read_handle: TxpoolReadHandle,

    /// The [`IncomingTxHandler`], wrapped in an [`Option`] as there is a cyclic reference
    /// between [`P2pProtocolRequestHandlerMaker`] and the [`IncomingTxHandler`].
    pub incoming_tx_handler: Option<IncomingTxHandler>,

    /// A [`Future`](std::future::Future) that produces the [`IncomingTxHandler`].
    pub incoming_tx_handler_fut: Shared<oneshot::Receiver<IncomingTxHandler>>,
}

impl<A: NetZoneAddress> Service<PeerInformation<A>> for P2pProtocolRequestHandlerMaker
where
    InternalPeerID<A>: Into<CrossNetworkInternalPeerId>,
{
    type Response = P2pProtocolRequestHandler<A>;
    type Error = tower::BoxError;
    type Future = Ready<Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        if self.incoming_tx_handler.is_none() {
            return self
                .incoming_tx_handler_fut
                .poll_unpin(cx)
                .map(|incoming_tx_handler| {
                    self.incoming_tx_handler = Some(incoming_tx_handler?);
                    Ok(())
                });
        }

        Poll::Ready(Ok(()))
    }

    fn call(&mut self, peer_information: PeerInformation<A>) -> Self::Future {
        let Some(incoming_tx_handler) = self.incoming_tx_handler.clone() else {
            panic!("poll_ready was not called or did not return `Poll::Ready`")
        };

        // TODO: check sync info?

        let blockchain_read_handle = self.blockchain_read_handle.clone();
        let txpool_read_handle = self.txpool_read_handle.clone();

        ready(Ok(P2pProtocolRequestHandler {
            peer_information,
            blockchain_read_handle,
            blockchain_context_service: self.blockchain_context_service.clone(),
            txpool_read_handle,
            incoming_tx_handler,
        }))
    }
}

/// The P2P protocol request handler.
#[derive(Clone)]
pub struct P2pProtocolRequestHandler<N: NetZoneAddress> {
    peer_information: PeerInformation<N>,
    blockchain_read_handle: BlockchainReadHandle,
    blockchain_context_service: BlockChainContextService,
    txpool_read_handle: TxpoolReadHandle,
    incoming_tx_handler: IncomingTxHandler,
}

impl<A: NetZoneAddress> Service<ProtocolRequest> for P2pProtocolRequestHandler<A>
where
    InternalPeerID<A>: Into<CrossNetworkInternalPeerId>,
{
    type Response = ProtocolResponse;
    type Error = anyhow::Error;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, request: ProtocolRequest) -> Self::Future {
        match request {
            ProtocolRequest::GetObjects(r) => {
                get_objects(r, self.blockchain_read_handle.clone()).boxed()
            }
            ProtocolRequest::GetChain(r) => {
                get_chain(r, self.blockchain_read_handle.clone()).boxed()
            }
            ProtocolRequest::FluffyMissingTxs(r) => {
                fluffy_missing_txs(r, self.blockchain_read_handle.clone()).boxed()
            }
            ProtocolRequest::NewBlock(_) => ready(Err(anyhow::anyhow!(
                "Peer sent a full block when we support fluffy blocks"
            )))
            .boxed(),
            ProtocolRequest::NewFluffyBlock(r) => new_fluffy_block(
                self.peer_information.clone(),
                r,
                self.blockchain_read_handle.clone(),
                self.txpool_read_handle.clone(),
            )
            .boxed(),
            ProtocolRequest::NewTransactions(r) => new_transactions(
                self.peer_information.clone(),
                r,
                self.blockchain_context_service.clone(),
                self.incoming_tx_handler.clone(),
            )
            .boxed(),
            ProtocolRequest::GetTxPoolCompliment(_) => ready(Ok(ProtocolResponse::NA)).boxed(), // TODO: should we support this?
        }
    }
}

//---------------------------------------------------------------------------------------------------- Handler functions

/// [`ProtocolRequest::GetObjects`]
async fn get_objects(
    request: GetObjectsRequest,
    mut blockchain_read_handle: BlockchainReadHandle,
) -> anyhow::Result<ProtocolResponse> {
    if request.blocks.len() > MAX_BLOCK_BATCH_LEN {
        anyhow::bail!("Peer requested more blocks than allowed.")
    }

    let block_hashes: Vec<[u8; 32]> = (&request.blocks).into();
    // deallocate the backing `Bytes`.
    drop(request);

    let BlockchainResponse::BlockCompleteEntries {
        blocks,
        missing_hashes,
        blockchain_height,
    } = blockchain_read_handle
        .ready()
        .await?
        .call(BlockchainReadRequest::BlockCompleteEntries(block_hashes))
        .await?
    else {
        unreachable!();
    };

    Ok(ProtocolResponse::GetObjects(GetObjectsResponse {
        blocks,
        missed_ids: ByteArrayVec::from(missing_hashes),
        current_blockchain_height: usize_to_u64(blockchain_height),
    }))
}

/// [`ProtocolRequest::GetChain`]
async fn get_chain(
    request: ChainRequest,
    mut blockchain_read_handle: BlockchainReadHandle,
) -> anyhow::Result<ProtocolResponse> {
    if request.block_ids.len() > MAX_BLOCKS_IDS_IN_CHAIN_ENTRY {
        anyhow::bail!("Peer sent too many block hashes in chain request.")
    }

    let block_hashes: Vec<[u8; 32]> = (&request.block_ids).into();
    let want_pruned_data = request.prune;
    // deallocate the backing `Bytes`.
    drop(request);

    let BlockchainResponse::NextChainEntry {
        start_height,
        chain_height,
        block_ids,
        block_weights,
        cumulative_difficulty,
        first_block_blob,
    } = blockchain_read_handle
        .ready()
        .await?
        .call(BlockchainReadRequest::NextChainEntry(block_hashes, 10_000))
        .await?
    else {
        unreachable!();
    };

    let Some(start_height) = start_height else {
        anyhow::bail!("The peer's chain has a different genesis block than ours.");
    };

    let (cumulative_difficulty_low64, cumulative_difficulty_top64) =
        split_u128_into_low_high_bits(cumulative_difficulty);

    Ok(ProtocolResponse::GetChain(ChainResponse {
        start_height: usize_to_u64(std::num::NonZero::get(start_height)),
        total_height: usize_to_u64(chain_height),
        cumulative_difficulty_low64,
        cumulative_difficulty_top64,
        m_block_ids: ByteArrayVec::from(block_ids),
        first_block: first_block_blob.map_or(Bytes::new(), Bytes::from),
        // only needed when pruned
        m_block_weights: if want_pruned_data {
            block_weights.into_iter().map(usize_to_u64).collect()
        } else {
            vec![]
        },
    }))
}

/// [`ProtocolRequest::FluffyMissingTxs`]
async fn fluffy_missing_txs(
    mut request: FluffyMissingTransactionsRequest,
    mut blockchain_read_handle: BlockchainReadHandle,
) -> anyhow::Result<ProtocolResponse> {
    let tx_indexes = std::mem::take(&mut request.missing_tx_indices);
    let block_hash: [u8; 32] = *request.block_hash;
    let current_blockchain_height = request.current_blockchain_height;

    // deallocate the backing `Bytes`.
    drop(request);

    let BlockchainResponse::TxsInBlock(res) = blockchain_read_handle
        .ready()
        .await?
        .call(BlockchainReadRequest::TxsInBlock {
            block_hash,
            tx_indexes,
        })
        .await?
    else {
        unreachable!();
    };

    let Some(TxsInBlock { block, txs }) = res else {
        anyhow::bail!("The peer requested txs out of range.");
    };

    Ok(ProtocolResponse::NewFluffyBlock(NewFluffyBlock {
        b: BlockCompleteEntry {
            block: Bytes::from(block),
            txs: TransactionBlobs::Normal(txs.into_iter().map(Bytes::from).collect()),
            pruned: false,
            // only needed for pruned blocks.
            block_weight: 0,
        },
        current_blockchain_height,
    }))
}

/// [`ProtocolRequest::NewFluffyBlock`]
async fn new_fluffy_block<A: NetZoneAddress>(
    peer_information: PeerInformation<A>,
    request: NewFluffyBlock,
    mut blockchain_read_handle: BlockchainReadHandle,
    mut txpool_read_handle: TxpoolReadHandle,
) -> anyhow::Result<ProtocolResponse> {
    // TODO: check context service here and ignore the block?
    let current_blockchain_height = request.current_blockchain_height;

    peer_information
        .core_sync_data
        .lock()
        .unwrap()
        .current_height = current_blockchain_height;

    let (block, txs) = rayon_spawn_async(move || -> Result<_, anyhow::Error> {
        let block = Block::read(&mut request.b.block.as_ref())?;

        let tx_blobs = request
            .b
            .txs
            .take_normal()
            .ok_or(anyhow::anyhow!("Peer sent pruned txs in fluffy block"))?;

        let txs = tx_blobs
            .into_iter()
            .map(|tx_blob| {
                if tx_blob.len() > MAX_TRANSACTION_BLOB_SIZE {
                    anyhow::bail!("Peer sent a transaction over the size limit.");
                }

                let tx = Transaction::read(&mut tx_blob.as_ref())?;

                Ok((tx.hash(), tx))
            })
            .collect::<Result<_, anyhow::Error>>()?;

        // The backing `Bytes` will be deallocated when this closure returns.

        Ok((block, txs))
    })
    .await?;

    let res = blockchain_interface::handle_incoming_block(
        block,
        txs,
        &mut blockchain_read_handle,
        &mut txpool_read_handle,
    )
    .await;

    match res {
        Ok(_) => Ok(ProtocolResponse::NA),
        Err(IncomingBlockError::UnknownTransactions(block_hash, missing_tx_indices)) => Ok(
            ProtocolResponse::FluffyMissingTransactionsRequest(FluffyMissingTransactionsRequest {
                block_hash: block_hash.into(),
                current_blockchain_height,
                missing_tx_indices: missing_tx_indices.into_iter().map(usize_to_u64).collect(),
            }),
        ),
        Err(IncomingBlockError::Orphan) => {
            // Block's parent was unknown, could be syncing?
            Ok(ProtocolResponse::NA)
        }
        Err(e) => Err(e.into()),
    }
}

/// [`ProtocolRequest::NewTransactions`]
async fn new_transactions<A>(
    peer_information: PeerInformation<A>,
    request: NewTransactions,
    mut blockchain_context_service: BlockChainContextService,
    mut incoming_tx_handler: IncomingTxHandler,
) -> anyhow::Result<ProtocolResponse>
where
    A: NetZoneAddress,
    InternalPeerID<A>: Into<CrossNetworkInternalPeerId>,
{
    let BlockChainContextResponse::Context(context) = blockchain_context_service
        .ready()
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
        .call(BlockChainContextRequest::Context)
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
    else {
        unreachable!()
    };

    let context = context.unchecked_blockchain_context();

    // If we are more than 2 blocks behind the peer then ignore the txs - we are probably still syncing.
    if usize_to_u64(context.chain_height + 2)
        < peer_information
            .core_sync_data
            .lock()
            .unwrap()
            .current_height
    {
        return Ok(ProtocolResponse::NA);
    }

    let state = if request.dandelionpp_fluff {
        TxState::Fluff
    } else {
        TxState::Stem {
            from: peer_information.id.into(),
        }
    };

    // Drop all the data except the stuff we still need.
    let NewTransactions { txs, .. } = request;

    let res = incoming_tx_handler
        .ready()
        .await
        .expect(PANIC_CRITICAL_SERVICE_ERROR)
        .call(IncomingTxs { txs, state })
        .await;

    match res {
        Ok(()) => Ok(ProtocolResponse::NA),
        Err(e) => Err(e.into()),
    }
}
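The `Option` plus `Shared<oneshot::Receiver<_>>` pair above breaks the cyclic dependency: every clone of the maker polls the same shared future in `poll_ready` and caches the handler once it resolves. A minimal, standalone sketch of the shared-oneshot trick (assuming tokio and futures as dependencies):

use futures::FutureExt;
use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
    let (tx, rx) = oneshot::channel::<u32>();
    // `shared()` lets many owners await one oneshot result.
    let fut = rx.shared();
    let (a, b) = (fut.clone(), fut);

    tx.send(42).unwrap();
    assert_eq!(a.await.unwrap(), 42);
    assert_eq!(b.await.unwrap(), 42);
}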
@@ -13,7 +13,7 @@ use std::{
macro_rules! define_init_lazylock_statics {
    ($(
        $( #[$attr:meta] )*
        $name:ident: $t:ty = $init_fn:expr;
        $name:ident: $t:ty = $init_fn:expr_2021;
    )*) => {
        /// Initialize global static `LazyLock` data.
        pub fn init_lazylock_statics() {
@@ -12,4 +12,4 @@ mod dandelion;
mod incoming_tx;
mod txs_being_handled;

pub use incoming_tx::IncomingTxHandler;
pub use incoming_tx::{IncomingTxError, IncomingTxHandler, IncomingTxs};
@@ -43,9 +43,13 @@ use crate::{
};

/// An error that can happen handling an incoming tx.
#[derive(Debug, thiserror::Error)]
pub enum IncomingTxError {
    #[error("Error parsing tx: {0}")]
    Parse(std::io::Error),
    #[error(transparent)]
    Consensus(ExtendedConsensusError),
    #[error("Duplicate tx in message")]
    DuplicateTransaction,
}

@@ -67,6 +71,7 @@ pub(super) type TxId = [u8; 32];
/// The service that handles incoming transaction pool transactions.
///
/// This service handles everything including verifying the tx, adding it to the pool and routing it to other nodes.
#[derive(Clone)]
pub struct IncomingTxHandler {
    /// A store of txs currently being handled in incoming tx requests.
    pub(super) txs_being_handled: TxsBeingHandled,
@@ -328,8 +328,8 @@ fn next_difficulty(
        time_span = 1;
    }

    // TODO: do checked operations here and unwrap so we don't silently overflow?
    (windowed_work * u128::from(hf.block_time().as_secs()) + time_span - 1) / time_span
    // TODO: do `checked_mul` here and unwrap so we don't silently overflow?
    (windowed_work * u128::from(hf.block_time().as_secs())).div_ceil(time_span)
}

/// Get the start and end of the window to calculate difficulty.
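The old and new expressions compute the same ceiling division; `div_ceil` just avoids the extra `+ time_span - 1` step, which could itself overflow near `u128::MAX`. A quick check, not from the diff, with made-up numbers:

fn main() {
    let (windowed_work, block_time, time_span): (u128, u128, u128) = (1_000_000, 120, 7);
    let product = windowed_work * block_time;

    // Hand-rolled ceiling division vs. the standard-library helper.
    assert_eq!((product + time_span - 1) / time_span, product.div_ceil(time_span));
    assert_eq!(product.div_ceil(time_span), 17_142_858); // ceil(120_000_000 / 7)
}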
@@ -9,7 +9,7 @@ use clap::Parser;
use tower::{Service, ServiceExt};

use cuprate_blockchain::{
    config::ConfigBuilder, cuprate_database::RuntimeError, service::BlockchainReadHandle,
    config::ConfigBuilder, cuprate_database::DbResult, service::BlockchainReadHandle,
};
use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainResponse},

@@ -23,7 +23,7 @@ const BATCH_SIZE: usize = 512;
async fn read_batch(
    handle: &mut BlockchainReadHandle,
    height_from: usize,
) -> Result<Vec<BlockId>, RuntimeError> {
) -> DbResult<Vec<BlockId>> {
    let mut block_ids = Vec::<BlockId>::with_capacity(BATCH_SIZE);

    for height in height_from..(height_from + BATCH_SIZE) {
@@ -35,6 +35,8 @@ futures = { workspace = true, optional = true, features = ["std"] }
monero-serai = { workspace = true, optional = true }
rayon = { workspace = true, optional = true }

serde = { workspace = true, optional = true, features = ["derive"] }

# This is kind of a stupid workaround.
# [thread] needs to activate one of these libs (windows|libc)
# although it depends on what target we're building for.
@@ -28,7 +28,12 @@
//! - <https://docs.rs/dirs>

//---------------------------------------------------------------------------------------------------- Use
use std::{path::PathBuf, sync::LazyLock};
use std::{
    path::{Path, PathBuf},
    sync::LazyLock,
};

use crate::network::Network;

//---------------------------------------------------------------------------------------------------- Const
/// Cuprate's main directory.

@@ -58,6 +63,9 @@ pub const CUPRATE_DIR: &str = {
    }
};

/// The default name of Cuprate's config file.
pub const DEFAULT_CONFIG_FILE_NAME: &str = "Cuprated.toml";

//---------------------------------------------------------------------------------------------------- Directories
/// Create a `LazyLock` for common PATHs used by Cuprate.
///

@@ -150,32 +158,61 @@ impl_path_lazylock! {
    CUPRATE_DATA_DIR,
    data_dir,
    "",
}

/// Cuprate's blockchain directory.
///
/// This is the PATH used for any Cuprate blockchain files.
///
/// | OS      | PATH                                                            |
/// |---------|-----------------------------------------------------------------|
/// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\blockchain\`            |
/// | macOS   | `/Users/Alice/Library/Application Support/Cuprate/blockchain/`  |
/// | Linux   | `/home/alice/.local/share/cuprate/blockchain/`                  |
CUPRATE_BLOCKCHAIN_DIR,
data_dir,
"blockchain",

/// Joins the [`Network`] to the [`Path`].
///
/// This will keep the path the same for [`Network::Mainnet`].
fn path_with_network(path: &Path, network: Network) -> PathBuf {
    match network {
        Network::Mainnet => path.to_path_buf(),
        network => path.join(network.to_string()),
    }
}

/// Cuprate's transaction pool directory.
///
/// This is the PATH used for any Cuprate txpool files.
///
/// | OS      | PATH                                                        |
/// |---------|--------------------------------------------------------------|
/// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\txpool\`             |
/// | macOS   | `/Users/Alice/Library/Application Support/Cuprate/txpool/`   |
/// | Linux   | `/home/alice/.local/share/cuprate/txpool/`                   |
CUPRATE_TXPOOL_DIR,
data_dir,
"txpool",

/// Cuprate's blockchain directory.
///
/// This is the PATH used for any Cuprate blockchain files.
///
/// ```rust
/// use cuprate_helper::{network::Network, fs::{CUPRATE_DATA_DIR, blockchain_path}};
///
/// assert_eq!(blockchain_path(&**CUPRATE_DATA_DIR, Network::Mainnet).as_path(), CUPRATE_DATA_DIR.join("blockchain"));
/// assert_eq!(blockchain_path(&**CUPRATE_DATA_DIR, Network::Stagenet).as_path(), CUPRATE_DATA_DIR.join(Network::Stagenet.to_string()).join("blockchain"));
/// assert_eq!(blockchain_path(&**CUPRATE_DATA_DIR, Network::Testnet).as_path(), CUPRATE_DATA_DIR.join(Network::Testnet.to_string()).join("blockchain"));
/// ```
pub fn blockchain_path(data_dir: &Path, network: Network) -> PathBuf {
    path_with_network(data_dir, network).join("blockchain")
}

/// Cuprate's txpool directory.
///
/// This is the PATH used for any Cuprate txpool files.
///
/// ```rust
/// use cuprate_helper::{network::Network, fs::{CUPRATE_DATA_DIR, txpool_path}};
///
/// assert_eq!(txpool_path(&**CUPRATE_DATA_DIR, Network::Mainnet).as_path(), CUPRATE_DATA_DIR.join("txpool"));
/// assert_eq!(txpool_path(&**CUPRATE_DATA_DIR, Network::Stagenet).as_path(), CUPRATE_DATA_DIR.join(Network::Stagenet.to_string()).join("txpool"));
/// assert_eq!(txpool_path(&**CUPRATE_DATA_DIR, Network::Testnet).as_path(), CUPRATE_DATA_DIR.join(Network::Testnet.to_string()).join("txpool"));
/// ```
pub fn txpool_path(data_dir: &Path, network: Network) -> PathBuf {
    path_with_network(data_dir, network).join("txpool")
}

/// Cuprate's address-book directory.
///
/// This is the PATH used for any Cuprate address-book files.
///
/// ```rust
/// use cuprate_helper::{network::Network, fs::{CUPRATE_CACHE_DIR, address_book_path}};
///
/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Mainnet).as_path(), CUPRATE_CACHE_DIR.join("addressbook"));
/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Stagenet).as_path(), CUPRATE_CACHE_DIR.join(Network::Stagenet.to_string()).join("addressbook"));
/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Testnet).as_path(), CUPRATE_CACHE_DIR.join(Network::Testnet.to_string()).join("addressbook"));
/// ```
pub fn address_book_path(cache_dir: &Path, network: Network) -> PathBuf {
    path_with_network(cache_dir, network).join("addressbook")
}

//---------------------------------------------------------------------------------------------------- Tests

@@ -197,29 +234,21 @@ mod test {
            (&*CUPRATE_CACHE_DIR, ""),
            (&*CUPRATE_CONFIG_DIR, ""),
            (&*CUPRATE_DATA_DIR, ""),
            (&*CUPRATE_BLOCKCHAIN_DIR, ""),
            (&*CUPRATE_TXPOOL_DIR, ""),
        ];

        if cfg!(target_os = "windows") {
            array[0].1 = r"AppData\Local\Cuprate";
            array[1].1 = r"AppData\Roaming\Cuprate";
            array[2].1 = r"AppData\Roaming\Cuprate";
            array[3].1 = r"AppData\Roaming\Cuprate\blockchain";
            array[4].1 = r"AppData\Roaming\Cuprate\txpool";
        } else if cfg!(target_os = "macos") {
            array[0].1 = "Library/Caches/Cuprate";
            array[1].1 = "Library/Application Support/Cuprate";
            array[2].1 = "Library/Application Support/Cuprate";
            array[3].1 = "Library/Application Support/Cuprate/blockchain";
            array[4].1 = "Library/Application Support/Cuprate/txpool";
        } else {
            // Assumes Linux.
            array[0].1 = ".cache/cuprate";
            array[1].1 = ".config/cuprate";
            array[2].1 = ".local/share/cuprate";
            array[3].1 = ".local/share/cuprate/blockchain";
            array[4].1 = ".local/share/cuprate/txpool";
        };

        for (path, expected) in array {
@ -5,6 +5,12 @@
|
|||
//! into it's own crate.
|
||||
//!
|
||||
//! `#[no_std]` compatible.
|
||||
// TODO: move to types crate.
|
||||
|
||||
use core::{
|
||||
fmt::{Display, Formatter},
|
||||
str::FromStr,
|
||||
};
|
||||
|
||||
const MAINNET_NETWORK_ID: [u8; 16] = [
|
||||
0x12, 0x30, 0xF1, 0x71, 0x61, 0x04, 0x41, 0x61, 0x17, 0x31, 0x00, 0x82, 0x16, 0xA1, 0xA1, 0x10,
|
||||
|
@ -17,7 +23,8 @@ const STAGENET_NETWORK_ID: [u8; 16] = [
|
|||
];
|
||||
|
||||
/// An enum representing every Monero network.
|
||||
#[derive(Debug, Clone, Copy, Default)]
|
||||
#[derive(Debug, Clone, Copy, Default, Ord, PartialOrd, Eq, PartialEq)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
|
||||
pub enum Network {
|
||||
/// Mainnet
|
||||
#[default]
|
||||
|
@ -38,3 +45,28 @@ impl Network {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub struct ParseNetworkError;
|
||||
|
||||
impl FromStr for Network {
|
||||
type Err = ParseNetworkError;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
match s {
|
||||
"mainnet" | "Mainnet" => Ok(Self::Mainnet),
|
||||
"testnet" | "Testnet" => Ok(Self::Testnet),
|
||||
"stagenet" | "Stagenet" => Ok(Self::Stagenet),
|
||||
_ => Err(ParseNetworkError),
|
||||
}
|
||||
}
|
||||
}
|
||||
impl Display for Network {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
|
||||
f.write_str(match self {
|
||||
Self::Mainnet => "mainnet",
|
||||
Self::Testnet => "testnet",
|
||||
Self::Stagenet => "stagenet",
|
||||
})
|
||||
}
|
||||
}
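As a quick sanity check of the `FromStr`/`Display` pair added here: parsing is case-tolerant while display is always lowercase, which is the form the `*_path` helpers join into directories. A minimal round-trip sketch, assuming the `cuprate_helper::network` module path used elsewhere in this diff:

```rust
use std::str::FromStr;

use cuprate_helper::network::Network;

fn main() {
    // Parsing accepts both capitalizations...
    assert_eq!(Network::from_str("Stagenet"), Ok(Network::Stagenet));
    assert_eq!(Network::from_str("stagenet"), Ok(Network::Stagenet));
    // ...while `Display` always emits the lowercase form, which is what
    // `blockchain_path()`/`txpool_path()` join into the data directory.
    assert_eq!(Network::Stagenet.to_string(), "stagenet");
    // Unknown names yield the new error type.
    assert!(Network::from_str("fakenet").is_err());
}
```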
@@ -76,14 +76,14 @@ macro_rules! epee_object {
// All this does is return the second (right) arg if present otherwise the left is returned.
(
    @internal_try_right_then_left
    $a:expr, $b:expr
    $a:expr_2021, $b:expr_2021
) => {
    $b
};

(
    @internal_try_right_then_left
    $a:expr,
    $a:expr_2021,
) => {
    $a
};

@@ -122,7 +122,7 @@ macro_rules! epee_object {
// ------------------------------------------------------------------------ Entry Point
(
    $obj:ident,
    $($field: ident $(($alt_name: literal))?: $ty:ty $(as $ty_as:ty )? $(= $default:expr)? $(=> $read_fn:expr, $write_fn:expr, $should_write_fn:expr)?, )*
    $($field: ident $(($alt_name: literal))?: $ty:ty $(as $ty_as:ty )? $(= $default:expr_2021)? $(=> $read_fn:expr_2021, $write_fn:expr_2021, $should_write_fn:expr_2021)?, )*
    $(!flatten: $flat_field: ident: $flat_ty:ty ,)*

) => {
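The `$x:expr` → `$x:expr_2021` renames here (and in the RPC macros later in this diff) pin the fragment to the Rust 2021 expression grammar, so these macros keep matching exactly the same input once the crate moves to edition 2024, where a plain `expr` fragment also accepts new forms such as `const { ... }` blocks. A minimal illustration:

```rust
// `expr_2021` matches what `expr` matched before edition 2024.
macro_rules! takes_expr {
    ($e:expr_2021) => {
        $e
    };
}

fn main() {
    assert_eq!(takes_expr!(1 + 2), 3);
}
```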
@@ -159,7 +159,7 @@ epee_object!(
    current_blockchain_height: u64,
);

/// A request for Txs we are missing from our `TxPool`
/// A request for txs we are missing from an incoming block.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FluffyMissingTransactionsRequest {
    /// The Block we are missing the Txs in

@@ -23,7 +23,7 @@ indexmap = { workspace = true, features = ["std"] }

rand = { workspace = true, features = ["std", "std_rng"] }

borsh = { workspace = true, features = ["derive", "std"]}
borsh = { workspace = true, features = ["derive", "std"] }

[dev-dependencies]
cuprate-test-utils = { workspace = true }

@@ -15,7 +15,7 @@ fn test_cfg() -> AddressBookConfig {
    AddressBookConfig {
        max_white_list_length: 100,
        max_gray_list_length: 500,
        peer_store_file: PathBuf::new(),
        peer_store_directory: PathBuf::new(),
        peer_save_period: Duration::from_secs(60),
    }
}

@@ -29,8 +29,8 @@ pub struct AddressBookConfig {
    ///
    /// Gray peers are peers we are yet to make a connection to.
    pub max_gray_list_length: usize,
    /// The location to store the address book.
    pub peer_store_file: PathBuf,
    /// The location to store the peer store files.
    pub peer_store_directory: PathBuf,
    /// The amount of time between saving the address book to disk.
    pub peer_save_period: Duration,
}

@@ -63,11 +63,6 @@ pub enum AddressBookError {
pub async fn init_address_book<Z: BorshNetworkZone>(
    cfg: AddressBookConfig,
) -> Result<book::AddressBook<Z>, std::io::Error> {
    tracing::info!(
        "Loading peers from file: {} ",
        cfg.peer_store_file.display()
    );

    let (white_list, gray_list) = match store::read_peers_from_disk::<Z>(&cfg).await {
        Ok(res) => res,
        Err(e) if e.kind() == ErrorKind::NotFound => (vec![], vec![]),

@@ -39,7 +39,9 @@ pub(crate) fn save_peers_to_disk<Z: BorshNetworkZone>(
    })
    .unwrap();

    let file = cfg.peer_store_file.clone();
    let file = cfg
        .peer_store_directory
        .join(format!("{}_p2p_state", Z::NAME));
    spawn_blocking(move || fs::write(&file, &data))
}
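With `peer_store_file` replaced by `peer_store_directory`, each network zone now derives its own state file from `Z::NAME`. A standalone sketch of the naming scheme (the zone name `"ClearNet"` and the directory are hypothetical stand-ins):

```rust
use std::path::PathBuf;

fn main() {
    let peer_store_directory = PathBuf::from("/home/alice/.cache/cuprate");
    let zone_name = "ClearNet"; // stands in for `Z::NAME`

    let file = peer_store_directory.join(format!("{zone_name}_p2p_state"));
    assert_eq!(
        file,
        PathBuf::from("/home/alice/.cache/cuprate/ClearNet_p2p_state")
    );
}
```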
@@ -52,7 +54,12 @@ pub(crate) async fn read_peers_from_disk<Z: BorshNetworkZone>(
    ),
    std::io::Error,
> {
    let file = cfg.peer_store_file.clone();
    let file = cfg
        .peer_store_directory
        .join(format!("{}_p2p_state", Z::NAME));

    tracing::info!("Loading peers from file: {} ", file.display());

    let data = spawn_blocking(move || fs::read(file)).await.unwrap()?;

    let de_ser: DeserPeerDataV1<Z::Addr> = from_slice(&data)?;

@@ -157,7 +157,7 @@ pub struct BufferSinkSend<'a, T> {
    item: Option<T>,
}

impl<'a, T> Future for BufferSinkSend<'a, T> {
impl<T> Future for BufferSinkSend<'_, T> {
    type Output = Result<(), BufferError>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {

@@ -183,7 +183,7 @@ pub struct BufferSinkReady<'a, T> {
    size_needed: usize,
}

impl<'a, T> Future for BufferSinkReady<'a, T> {
impl<T> Future for BufferSinkReady<'_, T> {
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {

@@ -12,6 +12,7 @@ use crate::{
    OutboundPeer, State,
};

#[expect(clippy::type_complexity)]
pub(crate) fn mock_discover_svc<Req: Send + 'static>() -> (
    impl Stream<
        Item = Result<

@@ -121,7 +121,6 @@ pub trait NetZoneAddress:
    ///
    /// - TODO: IP zone banning?
    /// - TODO: rename this to Host.

    type BanID: Debug + Hash + Eq + Clone + Copy + Send + 'static;

    /// Changes the port of this address to `port`.

@@ -116,6 +116,7 @@ pub enum ProtocolResponse {
    GetChain(ChainResponse),
    NewFluffyBlock(NewFluffyBlock),
    NewTransactions(NewTransactions),
    FluffyMissingTransactionsRequest(FluffyMissingTransactionsRequest),
    NA,
}

@@ -139,6 +140,9 @@ impl PeerResponse {
    ProtocolResponse::GetChain(_) => MessageID::GetChain,
    ProtocolResponse::NewFluffyBlock(_) => MessageID::NewBlock,
    ProtocolResponse::NewTransactions(_) => MessageID::NewFluffyBlock,
    ProtocolResponse::FluffyMissingTransactionsRequest(_) => {
        MessageID::FluffyMissingTxs
    }

    ProtocolResponse::NA => return None,
},

@@ -71,6 +71,9 @@ impl TryFrom<ProtocolResponse> for ProtocolMessage {
    ProtocolResponse::NewFluffyBlock(val) => Self::NewFluffyBlock(val),
    ProtocolResponse::GetChain(val) => Self::ChainEntryResponse(val),
    ProtocolResponse::GetObjects(val) => Self::GetObjectsResponse(val),
    ProtocolResponse::FluffyMissingTransactionsRequest(val) => {
        Self::FluffyMissingTransactionsRequest(val)
    }
    ProtocolResponse::NA => return Err(MessageConversionError),
})
}

@@ -62,15 +62,15 @@ pub struct BlockBatch {
pub struct BlockDownloaderConfig {
    /// The size in bytes of the buffer between the block downloader and the place which
    /// is consuming the downloaded blocks.
    pub buffer_size: usize,
    pub buffer_bytes: usize,
    /// The size of the in progress queue (in bytes) at which we stop requesting more blocks.
    pub in_progress_queue_size: usize,
    pub in_progress_queue_bytes: usize,
    /// The [`Duration`] between checking the client pool for free peers.
    pub check_client_pool_interval: Duration,
    /// The target size of a single batch of blocks (in bytes).
    pub target_batch_size: usize,
    pub target_batch_bytes: usize,
    /// The initial amount of blocks to request (in number of blocks)
    pub initial_batch_size: usize,
    pub initial_batch_len: usize,
}
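The renames make each field's unit explicit: the `*_bytes` fields are sizes in bytes, while `initial_batch_len` is a block count. A sketch of constructing the renamed config, mirroring the values the proptest further below uses (the exact module path of `BlockDownloaderConfig` inside `cuprate_p2p` is assumed here):

```rust
use std::time::Duration;

let config = BlockDownloaderConfig {
    buffer_bytes: 1_000,             // was `buffer_size`
    in_progress_queue_bytes: 10_000, // was `in_progress_queue_size`
    check_client_pool_interval: Duration::from_secs(5),
    target_batch_bytes: 5_000,       // was `target_batch_size`
    initial_batch_len: 1,            // was `initial_batch_size`: a count, not bytes
};
```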
/// An error that occurred in the [`BlockDownloader`].

@@ -145,7 +145,7 @@ where
    + 'static,
    C::Future: Send + 'static,
{
    let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_size);
    let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_bytes);

    let block_downloader = BlockDownloader::new(peer_set, our_chain_svc, buffer_appender, config);

@@ -242,7 +242,7 @@ where
    Self {
        peer_set,
        our_chain_svc,
        amount_of_blocks_to_request: config.initial_batch_size,
        amount_of_blocks_to_request: config.initial_batch_len,
        amount_of_blocks_to_request_updated_at: 0,
        amount_of_empty_chain_entries: 0,
        block_download_tasks: JoinSet::new(),

@@ -381,7 +381,7 @@ where
    }

    // If our ready queue is too large send duplicate requests for the blocks we are waiting on.
    if self.block_queue.size() >= self.config.in_progress_queue_size {
    if self.block_queue.size() >= self.config.in_progress_queue_bytes {
        return self.request_inflight_batch_again(client);
    }

@@ -565,7 +565,7 @@ where
    self.amount_of_blocks_to_request = calculate_next_block_batch_size(
        block_batch.size,
        block_batch.blocks.len(),
        self.config.target_batch_size,
        self.config.target_batch_bytes,
    );

    tracing::debug!(

@@ -146,9 +146,9 @@ fn deserialize_batch(

    // Check the height lines up as expected.
    // This must happen after the hash check.
    if !block
    if block
        .number()
        .is_some_and(|height| height == expected_height)
        .is_none_or(|height| height != expected_height)
    {
        tracing::warn!(
            "Invalid chain, expected height: {expected_height}, got height: {:?}",
@@ -66,11 +66,11 @@ proptest! {
    genesis: *blockchain.blocks.first().unwrap().0
},
BlockDownloaderConfig {
    buffer_size: 1_000,
    in_progress_queue_size: 10_000,
    buffer_bytes: 1_000,
    in_progress_queue_bytes: 10_000,
    check_client_pool_interval: Duration::from_secs(5),
    target_batch_size: 5_000,
    initial_batch_size: 1,
    target_batch_bytes: 5_000,
    initial_batch_len: 1,
});

let blocks = stream.map(|blocks| blocks.blocks).concat().await;

@@ -57,6 +57,7 @@ impl Default for BroadcastConfig {
/// - The [`BroadcastSvc`]
/// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **outbound** peers.
/// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **inbound** peers.
#[expect(clippy::type_complexity)]
pub(crate) fn init_broadcast_channels<N: NetworkZone>(
    config: BroadcastConfig,
) -> (

@@ -52,7 +52,7 @@ pub(crate) const INITIAL_CHAIN_REQUESTS_TO_SEND: usize = 3;
/// The enforced maximum amount of blocks to request in a batch.
///
/// Requesting more than this will cause the peer to disconnect and potentially lead to bans.
pub(crate) const MAX_BLOCK_BATCH_LEN: usize = 100;
pub const MAX_BLOCK_BATCH_LEN: usize = 100;

/// The timeout that the block downloader will use for requests.
pub(crate) const BLOCK_DOWNLOADER_REQUEST_TIMEOUT: Duration = Duration::from_secs(30);

@@ -61,13 +61,13 @@ pub(crate) const BLOCK_DOWNLOADER_REQUEST_TIMEOUT: Duration = Duration::from_sec
/// be less than.
///
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions.html#transaction-size>
pub(crate) const MAX_TRANSACTION_BLOB_SIZE: usize = 1_000_000;
pub const MAX_TRANSACTION_BLOB_SIZE: usize = 1_000_000;

/// The maximum amount of block IDs allowed in a chain entry response.
///
/// ref: <https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/cryptonote_config.h#L97>
// TODO: link to the protocol book when this section is added.
pub(crate) const MAX_BLOCKS_IDS_IN_CHAIN_ENTRY: usize = 25_000;
pub const MAX_BLOCKS_IDS_IN_CHAIN_ENTRY: usize = 25_000;

/// The amount of failures downloading a specific batch before we stop attempting to download it.
pub(crate) const MAX_DOWNLOAD_FAILURES: usize = 5;

@@ -327,7 +327,7 @@ impl DecompressedPruningSeed {
///
/// This function will also error if `block_height` > `blockchain_height`
///
pub fn get_next_unpruned_block(
pub const fn get_next_unpruned_block(
    &self,
    block_height: usize,
    blockchain_height: usize,

@@ -68,7 +68,7 @@ macro_rules! generate_endpoints_with_no_input {
/// - [`generate_endpoints_with_input`]
/// - [`generate_endpoints_with_no_input`]
macro_rules! generate_endpoints_inner {
    ($variant:ident, $handler:ident, $request:expr) => {
    ($variant:ident, $handler:ident, $request:expr_2021) => {
        paste::paste! {
            {
                // Check if restricted.

@@ -71,7 +71,7 @@ macro_rules! generate_endpoints_with_no_input {
/// - [`generate_endpoints_with_input`]
/// - [`generate_endpoints_with_no_input`]
macro_rules! generate_endpoints_inner {
    ($variant:ident, $handler:ident, $request:expr) => {
    ($variant:ident, $handler:ident, $request:expr_2021) => {
        paste::paste! {
            {
                // Check if restricted.

@@ -37,7 +37,7 @@ macro_rules! serde_doc_test {
(
    // `const` string from `cuprate_test_utils::rpc::data`
    // v
    $cuprate_test_utils_rpc_const:ident => $expected:expr
    $cuprate_test_utils_rpc_const:ident => $expected:expr_2021
    // ^
    // Expected value as an expression
) => {

@@ -77,7 +77,7 @@ macro_rules! define_request_and_response {
    $( #[$request_field_attr:meta] )* // Field attribute.
    $request_field:ident: $request_field_type:ty // field_name: field type
    $(as $request_field_type_as:ty)? // (optional) alternative type (de)serialization
    $(= $request_field_type_default:expr, $request_field_type_default_string:literal)?, // (optional) default value
    $(= $request_field_type_default:expr_2021, $request_field_type_default_string:literal)?, // (optional) default value
)*
},

@@ -89,7 +89,7 @@ macro_rules! define_request_and_response {
    $( #[$response_field_attr:meta] )*
    $response_field:ident: $response_field_type:ty
    $(as $response_field_type_as:ty)?
    $(= $response_field_type_default:expr, $response_field_type_default_string:literal)?,
    $(= $response_field_type_default:expr_2021, $response_field_type_default_string:literal)?,
)*
}
) => { paste::paste! {

@@ -229,7 +229,7 @@ macro_rules! define_request {
    // field_name: FieldType
    $field:ident: $field_type:ty
    $(as $field_as:ty)?
    $(= $field_default:expr, $field_default_string:literal)?,
    $(= $field_default:expr_2021, $field_default_string:literal)?,
    // The $field_default is an optional extra token that represents
    // a default value to pass to [`cuprate_epee_encoding::epee_object`],
    // see it for usage.

@@ -286,7 +286,7 @@ macro_rules! define_response {
    $( #[$field_attr:meta] )*
    $field:ident: $field_type:ty
    $(as $field_as:ty)?
    $(= $field_default:expr, $field_default_string:literal)?,
    $(= $field_default:expr_2021, $field_default_string:literal)?,
)*
}
) => {

@@ -323,7 +323,7 @@ macro_rules! define_response {
    $( #[$field_attr:meta] )*
    $field:ident: $field_type:ty
    $(as $field_as:ty)?
    $(= $field_default:expr, $field_default_string:literal)?,
    $(= $field_default:expr_2021, $field_default_string:literal)?,
)*
}
) => {

@@ -37,7 +37,7 @@ macro_rules! define_struct_and_impl_epee {
    $(
        $( #[$field_attr:meta] )* // Field attributes
        // Field name => the type => optional `epee_object` default value.
        $field_name:ident: $field_type:ty $(= $field_default:expr)?,
        $field_name:ident: $field_type:ty $(= $field_default:expr_2021)?,
    )*
}
) => {

@@ -65,7 +65,7 @@ macro_rules! serde_doc_test {
(
    // `const` string from `cuprate_test_utils::rpc::data`
    // v
    $cuprate_test_utils_rpc_const:ident => $expected:expr
    $cuprate_test_utils_rpc_const:ident => $expected:expr_2021
    // ^
    // Expected value as an expression
) => {

@@ -15,7 +15,7 @@ default = ["heed"]
heed = ["cuprate-database/heed"]
redb = ["cuprate-database/redb"]
redb-memory = ["cuprate-database/redb-memory"]
serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"]
serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde", "cuprate-helper/serde"]

[dependencies]
cuprate-database = { workspace = true }

@@ -34,6 +34,7 @@ serde = { workspace = true, optional = true }
tower = { workspace = true }
thread_local = { workspace = true }
rayon = { workspace = true }
bytes = { workspace = true }

[dev-dependencies]
cuprate-constants = { workspace = true }

@@ -76,7 +76,7 @@ use cuprate_blockchain::{
let tmp_dir = tempfile::tempdir()?;
let db_dir = tmp_dir.path().to_owned();
let config = ConfigBuilder::new()
    .db_directory(db_dir.into())
    .data_directory(db_dir.into())
    .build();

// Initialize the database environment.

@@ -25,7 +25,7 @@
//!
//! let config = ConfigBuilder::new()
//!     // Use a custom database directory.
//!     .db_directory(db_dir.into())
//!     .data_directory(db_dir.into())
//!     // Use as many reader threads as possible (when using `service`).
//!     .reader_threads(ReaderThreads::OnePerThread)
//!     // Use the fastest sync mode.

@@ -41,13 +41,16 @@
//! ```

//---------------------------------------------------------------------------------------------------- Import
use std::{borrow::Cow, path::Path};
use std::{borrow::Cow, path::PathBuf};

#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

use cuprate_database::{config::SyncMode, resize::ResizeAlgorithm};
use cuprate_helper::fs::CUPRATE_BLOCKCHAIN_DIR;
use cuprate_helper::{
    fs::{blockchain_path, CUPRATE_DATA_DIR},
    network::Network,
};

// re-exports
pub use cuprate_database_service::ReaderThreads;

@@ -59,8 +62,9 @@ pub use cuprate_database_service::ReaderThreads;
#[derive(Debug, Clone, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ConfigBuilder {
    /// [`Config::db_directory`].
    db_directory: Option<Cow<'static, Path>>,
    network: Network,

    data_dir: Option<PathBuf>,

    /// [`Config::cuprate_database_config`].
    db_config: cuprate_database::config::ConfigBuilder,

@@ -76,10 +80,12 @@ impl ConfigBuilder {
    /// after this function to use default values.
    pub fn new() -> Self {
        Self {
            db_directory: None,
            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(
                &*CUPRATE_BLOCKCHAIN_DIR,
            )),
            network: Network::default(),
            data_dir: None,
            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(blockchain_path(
                &CUPRATE_DATA_DIR,
                Network::Mainnet,
            ))),
            reader_threads: None,
        }
    }

@@ -87,21 +93,21 @@ impl ConfigBuilder {
    /// Build into a [`Config`].
    ///
    /// # Default values
    /// If [`ConfigBuilder::db_directory`] was not called,
    /// the default [`CUPRATE_BLOCKCHAIN_DIR`] will be used.
    /// If [`ConfigBuilder::data_directory`] was not called,
    /// [`blockchain_path`] with [`CUPRATE_DATA_DIR`] [`Network::Mainnet`] will be used.
    ///
    /// For all other values, [`Default::default`] is used.
    pub fn build(self) -> Config {
        // INVARIANT: all PATH safety checks are done
        // in `helper::fs`. No need to do them here.
        let db_directory = self
            .db_directory
            .unwrap_or_else(|| Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR));
        let data_dir = self
            .data_dir
            .unwrap_or_else(|| CUPRATE_DATA_DIR.to_path_buf());

        let reader_threads = self.reader_threads.unwrap_or_default();
        let db_config = self
            .db_config
            .db_directory(db_directory)
            .db_directory(Cow::Owned(blockchain_path(&data_dir, self.network)))
            .reader_threads(reader_threads.as_threads())
            .build();

@@ -111,10 +117,17 @@ impl ConfigBuilder {
        }
    }

    /// Set a custom database directory (and file) [`Path`].
    /// Change the network this blockchain database is for.
    #[must_use]
    pub fn db_directory(mut self, db_directory: Cow<'static, Path>) -> Self {
        self.db_directory = Some(db_directory);
    pub const fn network(mut self, network: Network) -> Self {
        self.network = network;
        self
    }

    /// Set a custom database directory (and file) [`PathBuf`].
    #[must_use]
    pub fn data_directory(mut self, db_directory: PathBuf) -> Self {
        self.data_dir = Some(db_directory);
        self
    }
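Taken together, the two setters above replace `db_directory`: the caller picks a data directory and a network, and the builder derives the database path via `blockchain_path`. A hedged usage sketch (imports and paths follow the doc examples elsewhere in this diff):

```rust
use cuprate_blockchain::config::ConfigBuilder;
use cuprate_helper::network::Network;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let tmp_dir = tempfile::tempdir()?;
    let config = ConfigBuilder::new()
        .network(Network::Stagenet)
        .data_directory(tmp_dir.path().to_owned())
        .build();

    // The database files now land under `<data_dir>/stagenet/blockchain/`.
    assert!(config.db_config.db_directory().ends_with("blockchain"));
    Ok(())
}
```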
@@ -145,9 +158,7 @@ impl ConfigBuilder {
    /// Good default for testing, and resource-available machines.
    #[must_use]
    pub fn fast(mut self) -> Self {
        self.db_config =
            cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR))
                .fast();
        self.db_config = self.db_config.fast();

        self.reader_threads = Some(ReaderThreads::OnePerThread);
        self

@@ -159,9 +170,7 @@ impl ConfigBuilder {
    /// Good default for resource-limited machines, e.g. a cheap VPS.
    #[must_use]
    pub fn low_power(mut self) -> Self {
        self.db_config =
            cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR))
                .low_power();
        self.db_config = self.db_config.low_power();

        self.reader_threads = Some(ReaderThreads::One);
        self

@@ -170,10 +179,13 @@ impl ConfigBuilder {

impl Default for ConfigBuilder {
    fn default() -> Self {
        let db_directory = Cow::Borrowed(&**CUPRATE_BLOCKCHAIN_DIR);
        Self {
            db_directory: Some(db_directory.clone()),
            db_config: cuprate_database::config::ConfigBuilder::new(db_directory),
            network: Network::default(),
            data_dir: Some(CUPRATE_DATA_DIR.to_path_buf()),
            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(blockchain_path(
                &CUPRATE_DATA_DIR,
                Network::default(),
            ))),
            reader_threads: Some(ReaderThreads::default()),
        }
    }

@@ -201,7 +213,7 @@ impl Config {
    /// Create a new [`Config`] with sane default settings.
    ///
    /// The [`cuprate_database::config::Config::db_directory`]
    /// will be set to [`CUPRATE_BLOCKCHAIN_DIR`].
    /// will be set to [`blockchain_path`] with [`CUPRATE_DATA_DIR`] [`Network::Mainnet`].
    ///
    /// All other values will be [`Default::default`].
    ///

@@ -213,14 +225,14 @@ impl Config {
    /// resize::ResizeAlgorithm,
    /// DATABASE_DATA_FILENAME,
    /// };
    /// use cuprate_helper::fs::*;
    /// use cuprate_helper::{fs::*, network::Network};
    ///
    /// use cuprate_blockchain::config::*;
    ///
    /// let config = Config::new();
    ///
    /// assert_eq!(config.db_config.db_directory(), &*CUPRATE_BLOCKCHAIN_DIR);
    /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_BLOCKCHAIN_DIR));
    /// assert_eq!(config.db_config.db_directory().as_ref(), blockchain_path(&CUPRATE_DATA_DIR, Network::Mainnet).as_path());
    /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_DATA_DIR));
    /// assert!(config.db_config.db_file().ends_with(DATABASE_DATA_FILENAME));
    /// assert_eq!(config.db_config.sync_mode, SyncMode::default());
    /// assert_eq!(config.db_config.resize_algorithm, ResizeAlgorithm::default());

@@ -1,7 +1,7 @@
use bytemuck::TransparentWrapper;
use monero_serai::block::{Block, BlockHeader};

use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec};
use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, StorableVec};
use cuprate_helper::map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits};
use cuprate_types::{AltBlockInformation, Chain, ChainId, ExtendedBlockHeader, HardFork};
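From here on the diff repeatedly rewrites `Result<T, RuntimeError>` into `DbResult<T>`. The alias itself is defined in `cuprate_database` and is not shown in this diff; it is presumably equivalent to:

```rust
// Hypothetical shape of the alias; the real definition lives in
// `cuprate_database` and is assumed to be exactly this.
pub type DbResult<T> = Result<T, RuntimeError>;
```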
@@ -21,7 +21,7 @@ use crate::{
pub fn flush_alt_blocks<'a, E: cuprate_database::EnvInner<'a>>(
    env_inner: &E,
    tx_rw: &mut E::Rw<'_>,
) -> Result<(), RuntimeError> {
) -> DbResult<()> {
    use crate::tables::{
        AltBlockBlobs, AltBlockHeights, AltBlocksInfo, AltChainInfos, AltTransactionBlobs,
        AltTransactionInfos,

@@ -47,10 +47,7 @@ pub fn flush_alt_blocks<'a, E: cuprate_database::EnvInner<'a>>(
/// - `alt_block.height` is == `0`
/// - `alt_block.txs.len()` != `alt_block.block.transactions.len()`
///
pub fn add_alt_block(
    alt_block: &AltBlockInformation,
    tables: &mut impl TablesMut,
) -> Result<(), RuntimeError> {
pub fn add_alt_block(alt_block: &AltBlockInformation, tables: &mut impl TablesMut) -> DbResult<()> {
    let alt_block_height = AltBlockHeight {
        chain_id: alt_block.chain_id.into(),
        height: alt_block.height,

@@ -100,7 +97,7 @@ pub fn add_alt_block(
pub fn get_alt_block(
    alt_block_height: &AltBlockHeight,
    tables: &impl Tables,
) -> Result<AltBlockInformation, RuntimeError> {
) -> DbResult<AltBlockInformation> {
    let block_info = tables.alt_blocks_info().get(alt_block_height)?;

    let block_blob = tables.alt_block_blobs().get(alt_block_height)?.0;

@@ -111,7 +108,7 @@ pub fn get_alt_block(
        .transactions
        .iter()
        .map(|tx_hash| get_alt_transaction(tx_hash, tables))
        .collect::<Result<_, RuntimeError>>()?;
        .collect::<DbResult<_>>()?;

    Ok(AltBlockInformation {
        block,

@@ -141,7 +138,7 @@ pub fn get_alt_block_hash(
    block_height: &BlockHeight,
    alt_chain: ChainId,
    tables: &impl Tables,
) -> Result<BlockHash, RuntimeError> {
) -> DbResult<BlockHash> {
    let alt_chains = tables.alt_chain_infos();

    // First find what [`ChainId`] this block would be stored under.

@@ -188,7 +185,7 @@ pub fn get_alt_block_hash(
pub fn get_alt_block_extended_header_from_height(
    height: &AltBlockHeight,
    table: &impl Tables,
) -> Result<ExtendedBlockHeader, RuntimeError> {
) -> DbResult<ExtendedBlockHeader> {
    let block_info = table.alt_blocks_info().get(height)?;

    let block_blob = table.alt_block_blobs().get(height)?.0;

@@ -1,6 +1,6 @@
use std::cmp::{max, min};

use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError};
use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, RuntimeError};
use cuprate_types::{Chain, ChainId};

use crate::{

@@ -21,7 +21,7 @@ pub fn update_alt_chain_info(
    alt_block_height: &AltBlockHeight,
    prev_hash: &BlockHash,
    tables: &mut impl TablesMut,
) -> Result<(), RuntimeError> {
) -> DbResult<()> {
    let parent_chain = match tables.alt_block_heights().get(prev_hash) {
        Ok(alt_parent_height) => Chain::Alt(alt_parent_height.chain_id.into()),
        Err(RuntimeError::KeyNotFound) => Chain::Main,

@@ -74,7 +74,7 @@ pub fn get_alt_chain_history_ranges(
    range: std::ops::Range<BlockHeight>,
    alt_chain: ChainId,
    alt_chain_infos: &impl DatabaseRo<AltChainInfos>,
) -> Result<Vec<(Chain, std::ops::Range<BlockHeight>)>, RuntimeError> {
) -> DbResult<Vec<(Chain, std::ops::Range<BlockHeight>)>> {
    let mut ranges = Vec::with_capacity(5);

    let mut i = range.end;

@@ -1,7 +1,7 @@
use bytemuck::TransparentWrapper;
use monero_serai::transaction::Transaction;

use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec};
use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, RuntimeError, StorableVec};
use cuprate_types::VerifiedTransactionInformation;

use crate::{

@@ -22,7 +22,7 @@ use crate::{
pub fn add_alt_transaction_blob(
    tx: &VerifiedTransactionInformation,
    tables: &mut impl TablesMut,
) -> Result<(), RuntimeError> {
) -> DbResult<()> {
    tables.alt_transaction_infos_mut().put(
        &tx.tx_hash,
        &AltTransactionInfo {

@@ -51,7 +51,7 @@ pub fn add_alt_transaction_blob(
pub fn get_alt_transaction(
    tx_hash: &TxHash,
    tables: &impl Tables,
) -> Result<VerifiedTransactionInformation, RuntimeError> {
) -> DbResult<VerifiedTransactionInformation> {
    let tx_info = tables.alt_transaction_infos().get(tx_hash)?;

    let tx_blob = match tables.alt_transaction_blobs().get(tx_hash) {

@@ -2,21 +2,23 @@

//---------------------------------------------------------------------------------------------------- Import
use bytemuck::TransparentWrapper;
use bytes::Bytes;
use monero_serai::{
    block::{Block, BlockHeader},
    transaction::Transaction,
};

use cuprate_database::{
    RuntimeError, StorableVec, {DatabaseRo, DatabaseRw},
    DbResult, RuntimeError, StorableVec, {DatabaseIter, DatabaseRo, DatabaseRw},
};
use cuprate_helper::cast::usize_to_u64;
use cuprate_helper::{
    map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits},
    tx::tx_fee,
};
use cuprate_types::{
    AltBlockInformation, ChainId, ExtendedBlockHeader, HardFork, VerifiedBlockInformation,
    VerifiedTransactionInformation,
    AltBlockInformation, BlockCompleteEntry, ChainId, ExtendedBlockHeader, HardFork,
    TransactionBlobs, VerifiedBlockInformation, VerifiedTransactionInformation,
};

use crate::{

@@ -27,7 +29,7 @@ use crate::{
        output::get_rct_num_outputs,
        tx::{add_tx, remove_tx},
    },
    tables::{BlockHeights, BlockInfos, Tables, TablesMut},
    tables::{BlockHeights, BlockInfos, Tables, TablesIter, TablesMut},
    types::{BlockHash, BlockHeight, BlockInfo},
};

@@ -44,10 +46,7 @@ use crate::{
/// - `block.height > u32::MAX` (not normally possible)
/// - `block.height` is != [`chain_height`]
// no inline, too big.
pub fn add_block(
    block: &VerifiedBlockInformation,
    tables: &mut impl TablesMut,
) -> Result<(), RuntimeError> {
pub fn add_block(block: &VerifiedBlockInformation, tables: &mut impl TablesMut) -> DbResult<()> {
    //------------------------------------------------------ Check preconditions first

    // Cast height to `u32` for storage (handled at top of function).

@@ -153,7 +152,7 @@ pub fn add_block(
pub fn pop_block(
    move_to_alt_chain: Option<ChainId>,
    tables: &mut impl TablesMut,
) -> Result<(BlockHeight, BlockHash, Block), RuntimeError> {
) -> DbResult<(BlockHeight, BlockHash, Block)> {
    //------------------------------------------------------ Block Info
    // Remove block data from tables.
    let (block_height, block_info) = tables.block_infos_mut().pop_last()?;

@@ -195,7 +194,7 @@ pub fn pop_block(
            tx,
        })
    })
    .collect::<Result<Vec<VerifiedTransactionInformation>, RuntimeError>>()?;
    .collect::<DbResult<Vec<VerifiedTransactionInformation>>>()?;

    alt_block::add_alt_block(
        &AltBlockInformation {

@@ -225,6 +224,64 @@ pub fn pop_block(
    Ok((block_height, block_info.block_hash, block))
}

//---------------------------------------------------------------------------------------------------- `get_block_blob_with_tx_indexes`
/// Retrieve a block's raw bytes, the index of the miner transaction and the number of non miner-txs in the block.
///
#[doc = doc_error!()]
pub fn get_block_blob_with_tx_indexes(
    block_height: &BlockHeight,
    tables: &impl Tables,
) -> Result<(Vec<u8>, u64, usize), RuntimeError> {
    let miner_tx_idx = tables.block_infos().get(block_height)?.mining_tx_index;

    let block_txs = tables.block_txs_hashes().get(block_height)?.0;
    let numb_txs = block_txs.len();

    // Get the block header
    let mut block = tables.block_header_blobs().get(block_height)?.0;

    // Add the miner tx to the blob.
    let mut miner_tx_blob = tables.tx_blobs().get(&miner_tx_idx)?.0;
    block.append(&mut miner_tx_blob);

    // Add the blocks tx hashes.
    monero_serai::io::write_varint(&block_txs.len(), &mut block)
        .expect("The number of txs per block will not exceed u64::MAX");

    let block_txs_bytes = bytemuck::must_cast_slice(&block_txs);
    block.extend_from_slice(block_txs_bytes);

    Ok((block, miner_tx_idx, numb_txs))
}

//---------------------------------------------------------------------------------------------------- `get_block_extended_header_*`
/// Retrieve a [`BlockCompleteEntry`] from the database.
///
#[doc = doc_error!()]
pub fn get_block_complete_entry(
    block_hash: &BlockHash,
    tables: &impl TablesIter,
) -> Result<BlockCompleteEntry, RuntimeError> {
    let block_height = tables.block_heights().get(block_hash)?;
    let (block_blob, miner_tx_idx, numb_non_miner_txs) =
        get_block_blob_with_tx_indexes(&block_height, tables)?;

    let first_tx_idx = miner_tx_idx + 1;

    let tx_blobs = tables
        .tx_blobs_iter()
        .get_range(first_tx_idx..(usize_to_u64(numb_non_miner_txs) + first_tx_idx))?
        .map(|tx_blob| Ok(Bytes::from(tx_blob?.0)))
        .collect::<Result<_, RuntimeError>>()?;

    Ok(BlockCompleteEntry {
        block: Bytes::from(block_blob),
        txs: TransactionBlobs::Normal(tx_blobs),
        pruned: false,
        block_weight: 0,
    })
}
//---------------------------------------------------------------------------------------------------- `get_block_extended_header_*`
/// Retrieve a [`ExtendedBlockHeader`] from the database.
///

@@ -239,7 +296,7 @@ pub fn pop_block(
pub fn get_block_extended_header(
    block_hash: &BlockHash,
    tables: &impl Tables,
) -> Result<ExtendedBlockHeader, RuntimeError> {
) -> DbResult<ExtendedBlockHeader> {
    get_block_extended_header_from_height(&tables.block_heights().get(block_hash)?, tables)
}

@@ -253,7 +310,7 @@ pub fn get_block_extended_header(
pub fn get_block_extended_header_from_height(
    block_height: &BlockHeight,
    tables: &impl Tables,
) -> Result<ExtendedBlockHeader, RuntimeError> {
) -> DbResult<ExtendedBlockHeader> {
    let block_info = tables.block_infos().get(block_height)?;
    let block_header_blob = tables.block_header_blobs().get(block_height)?.0;
    let block_header = BlockHeader::read(&mut block_header_blob.as_slice())?;

@@ -279,7 +336,7 @@ pub fn get_block_extended_header_from_height(
#[inline]
pub fn get_block_extended_header_top(
    tables: &impl Tables,
) -> Result<(ExtendedBlockHeader, BlockHeight), RuntimeError> {
) -> DbResult<(ExtendedBlockHeader, BlockHeight)> {
    let height = chain_height(tables.block_heights())?.saturating_sub(1);
    let header = get_block_extended_header_from_height(&height, tables)?;
    Ok((header, height))

@@ -292,7 +349,7 @@ pub fn get_block_extended_header_top(
pub fn get_block_info(
    block_height: &BlockHeight,
    table_block_infos: &impl DatabaseRo<BlockInfos>,
) -> Result<BlockInfo, RuntimeError> {
) -> DbResult<BlockInfo> {
    table_block_infos.get(block_height)
}

@@ -302,7 +359,7 @@ pub fn get_block_info(
pub fn get_block_height(
    block_hash: &BlockHash,
    table_block_heights: &impl DatabaseRo<BlockHeights>,
) -> Result<BlockHeight, RuntimeError> {
) -> DbResult<BlockHeight> {
    table_block_heights.get(block_hash)
}

@@ -317,7 +374,7 @@ pub fn get_block_height(
pub fn block_exists(
    block_hash: &BlockHash,
    table_block_heights: &impl DatabaseRo<BlockHeights>,
) -> Result<bool, RuntimeError> {
) -> DbResult<bool> {
    table_block_heights.contains(block_hash)
}

@@ -1,12 +1,12 @@
//! Blockchain functions - chain height, generated coins, etc.

//---------------------------------------------------------------------------------------------------- Import
use cuprate_database::{DatabaseRo, RuntimeError};
use cuprate_database::{DatabaseRo, DbResult, RuntimeError};

use crate::{
    ops::macros::doc_error,
    ops::{block::block_exists, macros::doc_error},
    tables::{BlockHeights, BlockInfos},
    types::BlockHeight,
    types::{BlockHash, BlockHeight},
};

//---------------------------------------------------------------------------------------------------- Free Functions

@@ -22,9 +22,7 @@ use crate::{
/// So the height of a new block would be `chain_height()`.
#[doc = doc_error!()]
#[inline]
pub fn chain_height(
    table_block_heights: &impl DatabaseRo<BlockHeights>,
) -> Result<BlockHeight, RuntimeError> {
pub fn chain_height(table_block_heights: &impl DatabaseRo<BlockHeights>) -> DbResult<BlockHeight> {
    #[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")]
    table_block_heights.len().map(|height| height as usize)
}

@@ -45,7 +43,7 @@ pub fn chain_height(
#[inline]
pub fn top_block_height(
    table_block_heights: &impl DatabaseRo<BlockHeights>,
) -> Result<BlockHeight, RuntimeError> {
) -> DbResult<BlockHeight> {
    match table_block_heights.len()? {
        0 => Err(RuntimeError::KeyNotFound),
        #[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")]

@@ -70,7 +68,7 @@ pub fn top_block_height(
pub fn cumulative_generated_coins(
    block_height: &BlockHeight,
    table_block_infos: &impl DatabaseRo<BlockInfos>,
) -> Result<u64, RuntimeError> {
) -> DbResult<u64> {
    match table_block_infos.get(block_height) {
        Ok(block_info) => Ok(block_info.cumulative_generated_coins),
        Err(RuntimeError::KeyNotFound) if block_height == &0 => Ok(0),

@@ -78,6 +76,44 @@ pub fn cumulative_generated_coins(
    }
}

/// Find the split point between our chain and a list of [`BlockHash`]s from another chain.
///
/// This function accepts chains in chronological and reverse chronological order, however
/// if the wrong order is specified the return value is meaningless.
///
/// For chronologically ordered chains this will return the index of the first unknown, for reverse
/// chronologically ordered chains this will return the index of the first known.
///
/// If all blocks are known for chronologically ordered chains or unknown for reverse chronologically
/// ordered chains then the length of the chain will be returned.
#[doc = doc_error!()]
#[inline]
pub fn find_split_point(
    block_ids: &[BlockHash],
    chronological_order: bool,
    table_block_heights: &impl DatabaseRo<BlockHeights>,
) -> Result<usize, RuntimeError> {
    let mut err = None;

    // Do a binary search to find the first unknown/known block in the batch.
    let idx = block_ids.partition_point(|block_id| {
        match block_exists(block_id, table_block_heights) {
            Ok(exists) => exists == chronological_order,
            Err(e) => {
                err.get_or_insert(e);
                // if this happens the search is scrapped, just return `false` back.
                false
            }
        }
    });

    if let Some(e) = err {
        return Err(e);
    }

    Ok(idx)
}
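The doc comment above is easiest to see with concrete data: in the chronological case, `partition_point` scans past the prefix of blocks we already know and returns the index of the first unknown one. A standalone sketch with a `HashSet` standing in for the `BlockHeights` table (hypothetical data):

```rust
use std::collections::HashSet;

fn main() {
    let known: HashSet<u8> = HashSet::from([1, 2, 3]);

    // Chronological order: known blocks first, unknown blocks after.
    let chain = [1_u8, 2, 3, 4, 5];
    let chronological_order = true;
    let split = chain.partition_point(|id| known.contains(id) == chronological_order);
    assert_eq!(split, 3); // index of the first unknown block

    // Reverse chronological order: unknown blocks first, known blocks after.
    let rev_chain = [5_u8, 4, 3, 2, 1];
    let chronological_order = false;
    let split = rev_chain.partition_point(|id| known.contains(id) == chronological_order);
    assert_eq!(split, 2); // index of the first known block
}
```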
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Tests
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
//! Key image functions.
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Import
|
||||
use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError};
|
||||
use cuprate_database::{DatabaseRo, DatabaseRw, DbResult};
|
||||
|
||||
use crate::{
|
||||
ops::macros::{doc_add_block_inner_invariant, doc_error},
|
||||
|
@ -17,7 +17,7 @@ use crate::{
|
|||
pub fn add_key_image(
|
||||
key_image: &KeyImage,
|
||||
table_key_images: &mut impl DatabaseRw<KeyImages>,
|
||||
) -> Result<(), RuntimeError> {
|
||||
) -> DbResult<()> {
|
||||
table_key_images.put(key_image, &())
|
||||
}
|
||||
|
||||
|
@ -28,7 +28,7 @@ pub fn add_key_image(
|
|||
pub fn remove_key_image(
|
||||
key_image: &KeyImage,
|
||||
table_key_images: &mut impl DatabaseRw<KeyImages>,
|
||||
) -> Result<(), RuntimeError> {
|
||||
) -> DbResult<()> {
|
||||
table_key_images.delete(key_image)
|
||||
}
|
||||
|
||||
|
@ -38,7 +38,7 @@ pub fn remove_key_image(
|
|||
pub fn key_image_exists(
|
||||
key_image: &KeyImage,
|
||||
table_key_images: &impl DatabaseRo<KeyImages>,
|
||||
) -> Result<bool, RuntimeError> {
|
||||
) -> DbResult<bool> {
|
||||
table_key_images.contains(key_image)
|
||||
}
|
||||
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
macro_rules! doc_error {
|
||||
() => {
|
||||
r#"# Errors
|
||||
This function returns [`RuntimeError::KeyNotFound`] if the input (if applicable) doesn't exist or other `RuntimeError`'s on database errors."#
|
||||
This function returns [`cuprate_database::RuntimeError::KeyNotFound`] if the input (if applicable) doesn't exist or other `RuntimeError`'s on database errors."#
|
||||
};
|
||||
}
|
||||
pub(super) use doc_error;
|
||||
|
|
|
@ -71,7 +71,7 @@
|
|||
//! let tmp_dir = tempfile::tempdir()?;
|
||||
//! let db_dir = tmp_dir.path().to_owned();
|
||||
//! let config = ConfigBuilder::new()
|
||||
//! .db_directory(db_dir.into())
|
||||
//! .data_directory(db_dir.into())
|
||||
//! .build();
|
||||
//!
|
||||
//! // Initialize the database environment.
|
||||
|
|
|
@ -5,7 +5,7 @@ use curve25519_dalek::edwards::CompressedEdwardsY;
|
|||
use monero_serai::transaction::Timelock;
|
||||
|
||||
use cuprate_database::{
|
||||
RuntimeError, {DatabaseRo, DatabaseRw},
|
||||
DbResult, RuntimeError, {DatabaseRo, DatabaseRw},
|
||||
};
|
||||
use cuprate_helper::crypto::compute_zero_commitment;
|
||||
use cuprate_helper::map::u64_to_timelock;
|
||||
|
@ -30,7 +30,7 @@ pub fn add_output(
|
|||
amount: Amount,
|
||||
output: &Output,
|
||||
tables: &mut impl TablesMut,
|
||||
) -> Result<PreRctOutputId, RuntimeError> {
|
||||
) -> DbResult<PreRctOutputId> {
|
||||
// FIXME: this would be much better expressed with a
|
||||
// `btree_map::Entry`-like API, fix `trait DatabaseRw`.
|
||||
let num_outputs = match tables.num_outputs().get(&amount) {
|
||||
|
@ -61,7 +61,7 @@ pub fn add_output(
|
|||
pub fn remove_output(
|
||||
pre_rct_output_id: &PreRctOutputId,
|
||||
tables: &mut impl TablesMut,
|
||||
) -> Result<(), RuntimeError> {
|
||||
) -> DbResult<()> {
|
||||
// Decrement the amount index by 1, or delete the entry out-right.
|
||||
// FIXME: this would be much better expressed with a
|
||||
// `btree_map::Entry`-like API, fix `trait DatabaseRw`.
|
||||
|
@ -86,7 +86,7 @@ pub fn remove_output(
|
|||
pub fn get_output(
|
||||
pre_rct_output_id: &PreRctOutputId,
|
||||
table_outputs: &impl DatabaseRo<Outputs>,
|
||||
) -> Result<Output, RuntimeError> {
|
||||
) -> DbResult<Output> {
|
||||
table_outputs.get(pre_rct_output_id)
|
||||
}
|
||||
|
||||
|
@ -95,7 +95,7 @@ pub fn get_output(
|
|||
/// This returns the amount of pre-RCT outputs currently stored.
|
||||
#[doc = doc_error!()]
|
||||
#[inline]
|
||||
pub fn get_num_outputs(table_outputs: &impl DatabaseRo<Outputs>) -> Result<u64, RuntimeError> {
|
||||
pub fn get_num_outputs(table_outputs: &impl DatabaseRo<Outputs>) -> DbResult<u64> {
|
||||
table_outputs.len()
|
||||
}
|
||||
|
||||
|
@ -110,7 +110,7 @@ pub fn get_num_outputs(table_outputs: &impl DatabaseRo<Outputs>) -> Result<u64,
|
|||
pub fn add_rct_output(
|
||||
rct_output: &RctOutput,
|
||||
table_rct_outputs: &mut impl DatabaseRw<RctOutputs>,
|
||||
) -> Result<AmountIndex, RuntimeError> {
|
||||
) -> DbResult<AmountIndex> {
|
||||
let amount_index = get_rct_num_outputs(table_rct_outputs)?;
|
||||
table_rct_outputs.put(&amount_index, rct_output)?;
|
||||
Ok(amount_index)
|
||||
|
@ -123,7 +123,7 @@ pub fn add_rct_output(
|
|||
pub fn remove_rct_output(
|
||||
amount_index: &AmountIndex,
|
||||
table_rct_outputs: &mut impl DatabaseRw<RctOutputs>,
|
||||
) -> Result<(), RuntimeError> {
|
||||
) -> DbResult<()> {
|
||||
table_rct_outputs.delete(amount_index)
|
||||
}
|
||||
|
||||
|
@ -133,7 +133,7 @@ pub fn remove_rct_output(
|
|||
pub fn get_rct_output(
|
||||
amount_index: &AmountIndex,
|
||||
table_rct_outputs: &impl DatabaseRo<RctOutputs>,
|
||||
) -> Result<RctOutput, RuntimeError> {
|
||||
) -> DbResult<RctOutput> {
|
||||
table_rct_outputs.get(amount_index)
|
||||
}
|
||||
|
||||
|
@ -142,9 +142,7 @@ pub fn get_rct_output(
|
|||
/// This returns the amount of RCT outputs currently stored.
|
||||
#[doc = doc_error!()]
|
||||
#[inline]
|
||||
pub fn get_rct_num_outputs(
|
||||
table_rct_outputs: &impl DatabaseRo<RctOutputs>,
|
||||
) -> Result<u64, RuntimeError> {
|
||||
pub fn get_rct_num_outputs(table_rct_outputs: &impl DatabaseRo<RctOutputs>) -> DbResult<u64> {
|
||||
table_rct_outputs.len()
|
||||
}
|
||||
|
||||
|
@ -155,7 +153,7 @@ pub fn output_to_output_on_chain(
|
|||
output: &Output,
|
||||
amount: Amount,
|
||||
table_tx_unlock_time: &impl DatabaseRo<TxUnlockTime>,
|
||||
) -> Result<OutputOnChain, RuntimeError> {
|
||||
) -> DbResult<OutputOnChain> {
|
||||
let commitment = compute_zero_commitment(amount);
|
||||
|
||||
let time_lock = if output
|
||||
|
@ -191,7 +189,7 @@ pub fn output_to_output_on_chain(
|
|||
pub fn rct_output_to_output_on_chain(
|
||||
rct_output: &RctOutput,
|
||||
table_tx_unlock_time: &impl DatabaseRo<TxUnlockTime>,
|
||||
) -> Result<OutputOnChain, RuntimeError> {
|
||||
) -> DbResult<OutputOnChain> {
|
||||
// INVARIANT: Commitments stored are valid when stored by the database.
|
||||
let commitment = CompressedEdwardsY::from_slice(&rct_output.commitment)
|
||||
.unwrap()
|
||||
|
@ -223,10 +221,7 @@ pub fn rct_output_to_output_on_chain(
|
|||
///
|
||||
/// Note that this still support RCT outputs, in that case, [`PreRctOutputId::amount`] should be `0`.
|
||||
#[doc = doc_error!()]
|
||||
pub fn id_to_output_on_chain(
|
||||
id: &PreRctOutputId,
|
||||
tables: &impl Tables,
|
||||
) -> Result<OutputOnChain, RuntimeError> {
|
||||
pub fn id_to_output_on_chain(id: &PreRctOutputId, tables: &impl Tables) -> DbResult<OutputOnChain> {
|
||||
// v2 transactions.
|
||||
if id.amount == 0 {
|
||||
let rct_output = get_rct_output(&id.amount_index, tables.rct_outputs())?;
|
||||
|
|
|
@ -3,10 +3,9 @@
|
|||
//! SOMEDAY: the database `properties` table is not yet implemented.
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Import
|
||||
use cuprate_database::DbResult;
|
||||
use cuprate_pruning::PruningSeed;
|
||||
|
||||
use cuprate_database::RuntimeError;
|
||||
|
||||
use crate::ops::macros::doc_error;
|
||||
|
||||
//---------------------------------------------------------------------------------------------------- Free Functions
|
||||
|
@ -20,7 +19,7 @@ use crate::ops::macros::doc_error;
|
|||
/// // SOMEDAY
|
||||
/// ```
|
||||
#[inline]
|
||||
pub const fn get_blockchain_pruning_seed() -> Result<PruningSeed, RuntimeError> {
|
||||
pub const fn get_blockchain_pruning_seed() -> DbResult<PruningSeed> {
|
||||
// SOMEDAY: impl pruning.
|
||||
// We need a DB properties table.
|
||||
Ok(PruningSeed::NotPruned)
|
||||
|
@ -36,7 +35,7 @@ pub const fn get_blockchain_pruning_seed() -> Result<PruningSeed, RuntimeError>
|
|||
/// // SOMEDAY
|
||||
/// ```
|
||||
#[inline]
|
||||
pub const fn db_version() -> Result<u64, RuntimeError> {
|
||||
pub const fn db_version() -> DbResult<u64> {
|
||||
// SOMEDAY: We need a DB properties table.
|
||||
Ok(crate::constants::DATABASE_VERSION)
|
||||
}
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
use bytemuck::TransparentWrapper;
|
||||
use monero_serai::transaction::{Input, Timelock, Transaction};
|
||||
|
||||
use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec};
|
||||
use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, RuntimeError, StorableVec};
|
||||
use cuprate_helper::crypto::compute_zero_commitment;
|
||||
|
||||
use crate::{
|
||||
|
@ -52,7 +52,7 @@ pub fn add_tx(
|
|||
tx_hash: &TxHash,
|
||||
block_height: &BlockHeight,
|
||||
tables: &mut impl TablesMut,
|
||||
) -> Result<TxId, RuntimeError> {
|
||||
) -> DbResult<TxId> {
|
||||
let tx_id = get_num_tx(tables.tx_ids_mut())?;
|
||||
|
||||
//------------------------------------------------------ Transaction data
|
||||
|
@ -129,7 +129,7 @@ pub fn add_tx(
|
|||
)?
|
||||
.amount_index)
|
||||
})
|
||||
.collect::<Result<Vec<_>, RuntimeError>>()?,
|
||||
.collect::<DbResult<Vec<_>>>()?,
|
||||
Transaction::V2 { prefix, proofs } => prefix
|
||||
.outputs
|
||||
.iter()
|
||||
|
@ -186,10 +186,7 @@ pub fn add_tx(
|
|||
///
|
||||
#[doc = doc_error!()]
|
||||
#[inline]
|
||||
pub fn remove_tx(
|
||||
tx_hash: &TxHash,
|
||||
tables: &mut impl TablesMut,
|
||||
) -> Result<(TxId, Transaction), RuntimeError> {
|
||||
pub fn remove_tx(tx_hash: &TxHash, tables: &mut impl TablesMut) -> DbResult<(TxId, Transaction)> {
|
||||
//------------------------------------------------------ Transaction data
|
||||
let tx_id = tables.tx_ids_mut().take(tx_hash)?;
|
||||
let tx_blob = tables.tx_blobs_mut().take(&tx_id)?;
|
||||
|
@ -267,7 +264,7 @@ pub fn get_tx(
|
|||
tx_hash: &TxHash,
|
||||
table_tx_ids: &impl DatabaseRo<TxIds>,
|
||||
table_tx_blobs: &impl DatabaseRo<TxBlobs>,
|
||||
) -> Result<Transaction, RuntimeError> {
|
||||
) -> DbResult<Transaction> {
|
||||
get_tx_from_id(&table_tx_ids.get(tx_hash)?, table_tx_blobs)
|
||||
}
|
||||
|
||||
|
@ -277,7 +274,7 @@ pub fn get_tx(
|
|||
pub fn get_tx_from_id(
|
||||
tx_id: &TxId,
|
||||
table_tx_blobs: &impl DatabaseRo<TxBlobs>,
|
||||
) -> Result<Transaction, RuntimeError> {
|
||||
) -> DbResult<Transaction> {
|
||||
let tx_blob = table_tx_blobs.get(tx_id)?.0;
|
||||
Ok(Transaction::read(&mut tx_blob.as_slice())?)
|
||||
}
|
||||
|
@ -294,7 +291,7 @@ pub fn get_tx_from_id(
|
|||
/// - etc
|
||||
#[doc = doc_error!()]
|
||||
#[inline]
|
||||
pub fn get_num_tx(table_tx_ids: &impl DatabaseRo<TxIds>) -> Result<u64, RuntimeError> {
|
||||
pub fn get_num_tx(table_tx_ids: &impl DatabaseRo<TxIds>) -> DbResult<u64> {
|
||||
table_tx_ids.len()
|
||||
}
|
||||
|
||||
|
@ -304,10 +301,7 @@ pub fn get_num_tx(table_tx_ids: &impl DatabaseRo<TxIds>) -> Result<u64, RuntimeE
|
|||
/// Returns `true` if it does, else `false`.
|
||||
#[doc = doc_error!()]
|
||||
#[inline]
|
||||
pub fn tx_exists(
|
||||
tx_hash: &TxHash,
|
||||
table_tx_ids: &impl DatabaseRo<TxIds>,
|
||||
) -> Result<bool, RuntimeError> {
|
||||
pub fn tx_exists(tx_hash: &TxHash, table_tx_ids: &impl DatabaseRo<TxIds>) -> DbResult<bool> {
|
||||
table_tx_ids.contains(tx_hash)
|
||||
}
|
||||
|
||||
|
|
|
@ -77,7 +77,7 @@
|
|||
//! let tmp_dir = tempfile::tempdir()?;
|
||||
//! let db_dir = tmp_dir.path().to_owned();
|
||||
//! let config = ConfigBuilder::new()
|
||||
//! .db_directory(db_dir.into())
|
||||
//! .data_directory(db_dir.into())
|
||||
//! .build();
|
||||
//!
|
||||
//! // Initialize the database thread-pool.
|
||||
|
|
|
@ -10,23 +10,26 @@
|
|||
|
||||
//---------------------------------------------------------------------------------------------------- Import
|
||||
use std::{
|
||||
cmp::min,
|
||||
collections::{HashMap, HashSet},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use rayon::{
|
||||
iter::{IntoParallelIterator, ParallelIterator},
|
||||
iter::{Either, IntoParallelIterator, ParallelIterator},
|
||||
prelude::*,
|
||||
ThreadPool,
|
||||
};
|
||||
use thread_local::ThreadLocal;
|
||||
|
||||
use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError};
|
||||
use cuprate_database::{
|
||||
ConcreteEnv, DatabaseIter, DatabaseRo, DbResult, Env, EnvInner, RuntimeError,
|
||||
};
|
||||
use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads};
|
||||
use cuprate_helper::map::combine_low_high_bits_to_u128;
|
||||
use cuprate_types::{
|
||||
blockchain::{BlockchainReadRequest, BlockchainResponse},
|
||||
Chain, ChainId, ExtendedBlockHeader, OutputHistogramInput, OutputOnChain,
|
||||
Chain, ChainId, ExtendedBlockHeader, OutputHistogramInput, OutputOnChain, TxsInBlock,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
|
@ -36,9 +39,10 @@ use crate::{
|
|||
get_alt_chain_history_ranges,
|
||||
},
|
||||
block::{
|
||||
block_exists, get_block_extended_header_from_height, get_block_height, get_block_info,
|
||||
block_exists, get_block_blob_with_tx_indexes, get_block_complete_entry,
|
||||
get_block_extended_header_from_height, get_block_height, get_block_info,
|
||||
},
|
||||
blockchain::{cumulative_generated_coins, top_block_height},
|
||||
blockchain::{cumulative_generated_coins, find_split_point, top_block_height},
|
||||
key_image::key_image_exists,
|
||||
output::id_to_output_on_chain,
|
||||
},
|
||||
|
@ -46,7 +50,7 @@ use crate::{
|
|||
free::{compact_history_genesis_not_included, compact_history_index_to_height_offset},
|
||||
types::{BlockchainReadHandle, ResponseResult},
|
||||
},
|
||||
tables::{AltBlockHeights, BlockHeights, BlockInfos, OpenTables, Tables},
|
||||
tables::{AltBlockHeights, BlockHeights, BlockInfos, OpenTables, Tables, TablesIter},
|
||||
types::{
|
||||
AltBlockHeight, Amount, AmountIndex, BlockHash, BlockHeight, KeyImage, PreRctOutputId,
|
||||
},
|
||||
|
@@ -100,6 +104,7 @@ fn map_request(
     /* SOMEDAY: pre-request handling, run some code for each request? */

     match request {
+        R::BlockCompleteEntries(block_hashes) => block_complete_entries(env, block_hashes),
         R::BlockExtendedHeader(block) => block_extended_header(env, block),
         R::BlockHash(block, chain) => block_hash(env, block, chain),
         R::FindBlock(block_hash) => find_block(env, block_hash),

@@ -113,7 +118,12 @@ fn map_request(
         R::NumberOutputsWithAmount(vec) => number_outputs_with_amount(env, vec),
         R::KeyImagesSpent(set) => key_images_spent(env, set),
         R::CompactChainHistory => compact_chain_history(env),
+        R::NextChainEntry(block_hashes, amount) => next_chain_entry(env, &block_hashes, amount),
         R::FindFirstUnknown(block_ids) => find_first_unknown(env, &block_ids),
+        R::TxsInBlock {
+            block_hash,
+            tx_indexes,
+        } => txs_in_block(env, block_hash, tx_indexes),
         R::AltBlocksInChain(chain_id) => alt_blocks_in_chain(env, chain_id),
         R::Block { height } => block(env, height),
         R::BlockByHash(hash) => block_by_hash(env, hash),
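`map_request` stays a single `match` from request variant to handler function; the new `NextChainEntry` and `TxsInBlock` arms simply follow the existing pattern. A minimal sketch of the same dispatch shape, with stand-in enums rather than the real `BlockchainReadRequest`/`BlockchainResponse`:

// Stand-ins for the real request/response enums.
#[derive(Debug)]
enum ReadRequest {
    TxsInBlock { block_hash: [u8; 32], tx_indexes: Vec<u64> },
    CompactChainHistory,
}

#[derive(Debug)]
enum Response {
    TxsInBlock(Option<Vec<Vec<u8>>>),
    CompactChainHistory,
}

type DbResult<T> = Result<T, ()>;

// One match, one handler per variant, exactly the shape of `map_request`.
fn map_request(request: ReadRequest) -> DbResult<Response> {
    match request {
        ReadRequest::TxsInBlock { block_hash, tx_indexes } => txs_in_block(block_hash, tx_indexes),
        ReadRequest::CompactChainHistory => Ok(Response::CompactChainHistory),
    }
}

fn txs_in_block(_block_hash: [u8; 32], _tx_indexes: Vec<u64>) -> DbResult<Response> {
    Ok(Response::TxsInBlock(None))
}

fn main() {
    let requests = [
        ReadRequest::TxsInBlock { block_hash: [0; 32], tx_indexes: vec![0, 2] },
        ReadRequest::CompactChainHistory,
    ];
    for req in requests {
        println!("{:?}", map_request(req));
    }
}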
@@ -198,6 +208,38 @@ macro_rules! get_tables {
 // TODO: The overhead of parallelism may be too much for every request, performance test to find optimal
 // amount of parallelism.

+/// [`BlockchainReadRequest::BlockCompleteEntries`].
+fn block_complete_entries(env: &ConcreteEnv, block_hashes: Vec<BlockHash>) -> ResponseResult {
+    // Prepare tx/tables in `ThreadLocal`.
+    let env_inner = env.env_inner();
+    let tx_ro = thread_local(env);
+    let tables = thread_local(env);
+
+    let (missing_hashes, blocks) = block_hashes
+        .into_par_iter()
+        .map(|block_hash| {
+            let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
+            let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
+
+            match get_block_complete_entry(&block_hash, tables) {
+                Err(RuntimeError::KeyNotFound) => Ok(Either::Left(block_hash)),
+                res => res.map(Either::Right),
+            }
+        })
+        .collect::<DbResult<_>>()?;
+
+    let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
+    let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
+
+    let blockchain_height = crate::ops::blockchain::chain_height(tables.block_heights())?;
+
+    Ok(BlockchainResponse::BlockCompleteEntries {
+        blocks,
+        missing_hashes,
+        blockchain_height,
+    })
+}
+
 /// [`BlockchainReadRequest::BlockExtendedHeader`].
 #[inline]
 fn block_extended_header(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult {
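The interesting step in `block_complete_entries` is the collection: each hash maps to `Either::Left` (missing) or `Either::Right` (found), and rayon collects the two cases into two containers in one parallel pass, with `DbResult` short-circuiting wrapped around it in the real code. A runnable sketch of just the partitioning pattern, assuming the `rayon` crate; results are sorted before asserting since this sketch does not rely on ordering guarantees:

use rayon::iter::{Either, IntoParallelIterator, ParallelIterator};

fn main() {
    // Map each item to `Either::Left` (a "missing" case) or `Either::Right`
    // (a "found" case), then collect straight into two vectors, the same
    // shape `block_complete_entries` uses for `(missing_hashes, blocks)`.
    let (mut odd, mut even): (Vec<u32>, Vec<u32>) = (0u32..10)
        .into_par_iter()
        .map(|n| if n % 2 == 1 { Either::Left(n) } else { Either::Right(n) })
        .collect();

    odd.sort_unstable();
    even.sort_unstable();
    assert_eq!(odd, vec![1, 3, 5, 7, 9]);
    assert_eq!(even, vec![0, 2, 4, 6, 8]);
}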
@@ -305,7 +347,7 @@ fn block_extended_header_in_range(
                 let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
                 get_block_extended_header_from_height(&block_height, tables)
             })
-            .collect::<Result<Vec<ExtendedBlockHeader>, RuntimeError>>()?,
+            .collect::<DbResult<Vec<ExtendedBlockHeader>>>()?,
         Chain::Alt(chain_id) => {
             let ranges = {
                 let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;

@@ -335,7 +377,7 @@ fn block_extended_header_in_range(
                     }
                 })
             })
-            .collect::<Result<Vec<_>, _>>()?
+            .collect::<DbResult<Vec<_>>>()?
         }
     };
@@ -381,7 +423,7 @@ fn outputs(env: &ConcreteEnv, outputs: HashMap<Amount, HashSet<AmountIndex>>) ->

     // The 2nd mapping function.
     // This is pulled out from the below `map()` for readability.
-    let inner_map = |amount, amount_index| -> Result<(AmountIndex, OutputOnChain), RuntimeError> {
+    let inner_map = |amount, amount_index| -> DbResult<(AmountIndex, OutputOnChain)> {
         let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
         let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();

@@ -404,10 +446,10 @@ fn outputs(env: &ConcreteEnv, outputs: HashMap<Amount, HashSet<AmountIndex>>) ->
                 amount_index_set
                     .into_par_iter()
                     .map(|amount_index| inner_map(amount, amount_index))
-                    .collect::<Result<HashMap<AmountIndex, OutputOnChain>, RuntimeError>>()?,
+                    .collect::<DbResult<HashMap<AmountIndex, OutputOnChain>>>()?,
             ))
         })
-        .collect::<Result<HashMap<Amount, HashMap<AmountIndex, OutputOnChain>>, RuntimeError>>()?;
+        .collect::<DbResult<HashMap<Amount, HashMap<AmountIndex, OutputOnChain>>>>()?;

     Ok(BlockchainResponse::Outputs(map))
 }
@@ -456,7 +498,7 @@ fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec<Amount>) -> Respon
                 }
             }
         })
-        .collect::<Result<HashMap<Amount, usize>, RuntimeError>>()?;
+        .collect::<DbResult<HashMap<Amount, usize>>>()?;

     Ok(BlockchainResponse::NumberOutputsWithAmount(map))
 }

@@ -522,7 +564,7 @@ fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult {
         .map(compact_history_index_to_height_offset::<INITIAL_BLOCKS>)
         .map_while(|i| top_block_height.checked_sub(i))
         .map(|height| Ok(get_block_info(&height, &table_block_infos)?.block_hash))
-        .collect::<Result<Vec<_>, RuntimeError>>()?;
+        .collect::<DbResult<Vec<_>>>()?;

     if compact_history_genesis_not_included::<INITIAL_BLOCKS>(top_block_height) {
         block_ids.push(get_block_info(&0, &table_block_infos)?.block_hash);

@@ -534,6 +576,76 @@ fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult {
     })
 }
+
+/// [`BlockchainReadRequest::NextChainEntry`]
+///
+/// # Invariant
+/// `block_ids` must be sorted in reverse chronological block order, or else
+/// the returned result is unspecified and meaningless, as this function
+/// performs a binary search.
+fn next_chain_entry(
+    env: &ConcreteEnv,
+    block_ids: &[BlockHash],
+    next_entry_size: usize,
+) -> ResponseResult {
+    // Single-threaded, no `ThreadLocal` required.
+    let env_inner = env.env_inner();
+    let tx_ro = env_inner.tx_ro()?;
+
+    let tables = env_inner.open_tables(&tx_ro)?;
+    let table_block_heights = tables.block_heights();
+    let table_block_infos = tables.block_infos_iter();
+
+    let idx = find_split_point(block_ids, false, table_block_heights)?;
+
+    // This will happen if we have a different genesis block.
+    if idx == block_ids.len() {
+        return Ok(BlockchainResponse::NextChainEntry {
+            start_height: None,
+            chain_height: 0,
+            block_ids: vec![],
+            block_weights: vec![],
+            cumulative_difficulty: 0,
+            first_block_blob: None,
+        });
+    }
+
+    // The returned chain entry must overlap with one of the blocks we were told about.
+    let first_known_block_hash = block_ids[idx];
+    let first_known_height = table_block_heights.get(&first_known_block_hash)?;
+
+    let chain_height = crate::ops::blockchain::chain_height(table_block_heights)?;
+    let last_height_in_chain_entry = min(first_known_height + next_entry_size, chain_height);
+
+    let (block_ids, block_weights) = table_block_infos
+        .get_range(first_known_height..last_height_in_chain_entry)?
+        .map(|block_info| {
+            let block_info = block_info?;
+
+            Ok((block_info.block_hash, block_info.weight))
+        })
+        .collect::<DbResult<(Vec<_>, Vec<_>)>>()?;
+
+    let top_block_info = table_block_infos.get(&(chain_height - 1))?;
+
+    let first_block_blob = if block_ids.len() >= 2 {
+        Some(get_block_blob_with_tx_indexes(&(first_known_height + 1), &tables)?.0)
+    } else {
+        None
+    };
+
+    Ok(BlockchainResponse::NextChainEntry {
+        start_height: std::num::NonZero::new(first_known_height),
+        chain_height,
+        block_ids,
+        block_weights,
+        cumulative_difficulty: combine_low_high_bits_to_u128(
+            top_block_info.cumulative_difficulty_low,
+            top_block_info.cumulative_difficulty_high,
+        ),
+        first_block_blob,
+    })
+}
+
 /// [`BlockchainReadRequest::FindFirstUnknown`]
 ///
 /// # Invariant
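Two small details in the added `next_chain_entry` are easy to miss: the entry is clamped so it never reads past the chain tip, and `std::num::NonZero::new` turns a zero `start_height` into `None` for free. A std-only sketch of both:

use std::cmp::min;
use std::num::NonZero;

fn main() {
    let chain_height: usize = 100;
    let first_known_height: usize = 95;
    let next_entry_size: usize = 20;

    // Clamp the entry so it never reads past the chain tip.
    let last_height_in_chain_entry = min(first_known_height + next_entry_size, chain_height);
    assert_eq!(last_height_in_chain_entry, 100);

    // `NonZero::new(0)` is `None`, so a zero start height becomes the
    // "no entry" case without any explicit branch.
    assert!(NonZero::new(0usize).is_none());
    assert_eq!(NonZero::new(first_known_height).map(NonZero::get), Some(95));
}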
@@ -546,24 +658,7 @@ fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseRes
     let table_block_heights = env_inner.open_db_ro::<BlockHeights>(&tx_ro)?;

-    let mut err = None;
-
-    // Do a binary search to find the first unknown block in the batch.
-    let idx =
-        block_ids.partition_point(
-            |block_id| match block_exists(block_id, &table_block_heights) {
-                Ok(exists) => exists,
-                Err(e) => {
-                    err.get_or_insert(e);
-                    // if this happens the search is scrapped, just return `false` back.
-                    false
-                }
-            },
-        );
-
-    if let Some(e) = err {
-        return Err(e);
-    }
+    let idx = find_split_point(block_ids, true, &table_block_heights)?;

     Ok(if idx == block_ids.len() {
         BlockchainResponse::FindFirstUnknown(None)

@@ -576,6 +671,33 @@ fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseRes
     })
 }
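`find_split_point` itself is not shown in this diff, but the inline code it replaces pins down the contract: a binary search via `partition_point`, splitting the hashes on whether each block exists locally. A std-only sketch of that contract, where a boolean slice stands in for the `block_exists` lookups:

fn main() {
    // Stand-in for `block_exists` lookups: `true` = block already in our DB.
    // The caller's ordering invariant guarantees known blocks form a prefix.
    let exists = [true, true, true, false, false];

    // `partition_point` binary-searches for the first `false`,
    // i.e. the first unknown block.
    let idx = exists.partition_point(|&known| known);
    assert_eq!(idx, 3);

    // `idx == exists.len()` would mean every block is already known.
    assert!(idx < exists.len());
}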
+/// [`BlockchainReadRequest::TxsInBlock`]
+fn txs_in_block(env: &ConcreteEnv, block_hash: [u8; 32], missing_txs: Vec<u64>) -> ResponseResult {
+    // Single-threaded, no `ThreadLocal` required.
+    let env_inner = env.env_inner();
+    let tx_ro = env_inner.tx_ro()?;
+    let tables = env_inner.open_tables(&tx_ro)?;
+
+    let block_height = tables.block_heights().get(&block_hash)?;
+
+    let (block, miner_tx_index, numb_txs) = get_block_blob_with_tx_indexes(&block_height, &tables)?;
+    let first_tx_index = miner_tx_index + 1;
+
+    if numb_txs < missing_txs.len() {
+        return Ok(BlockchainResponse::TxsInBlock(None));
+    }
+
+    let txs = missing_txs
+        .into_iter()
+        .map(|index_offset| Ok(tables.tx_blobs().get(&(first_tx_index + index_offset))?.0))
+        .collect::<DbResult<_>>()?;
+
+    Ok(BlockchainResponse::TxsInBlock(Some(TxsInBlock {
+        block,
+        txs,
+    })))
+}
+
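In the added `txs_in_block`, peer-requested offsets are relative to the block's first ordinary transaction, which sits right after the miner transaction in the database. The arithmetic in isolation:

fn main() {
    // Suppose the block's miner tx sits at DB index 1000 and the block
    // holds ordinary transactions at indexes 1001 and up.
    let miner_tx_index: u64 = 1000;
    let first_tx_index = miner_tx_index + 1;

    // Offsets requested by a peer are relative to the first ordinary tx.
    let missing_txs = [0u64, 2];
    let db_indexes: Vec<u64> = missing_txs.iter().map(|o| first_tx_index + o).collect();
    assert_eq!(db_indexes, vec![1001, 1003]);
}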
 /// [`BlockchainReadRequest::AltBlocksInChain`]
 fn alt_blocks_in_chain(env: &ConcreteEnv, chain_id: ChainId) -> ResponseResult {
     // Prepare tx/tables in `ThreadLocal`.

@@ -613,7 +735,7 @@ fn alt_blocks_in_chain(env: &ConcreteEnv, chain_id: ChainId) -> ResponseResult {
                 )
             })
         })
-        .collect::<Result<_, _>>()?;
+        .collect::<DbResult<_>>()?;

     Ok(BlockchainResponse::AltBlocksInChain(blocks))
 }
@@ -7,7 +7,6 @@
 //---------------------------------------------------------------------------------------------------- Use
 use std::{
-    borrow::Cow,
     collections::{HashMap, HashSet},
     sync::Arc,
 };

@@ -46,7 +45,7 @@ fn init_service() -> (
 ) {
     let tempdir = tempfile::tempdir().unwrap();
     let config = ConfigBuilder::new()
-        .db_directory(Cow::Owned(tempdir.path().into()))
+        .data_directory(tempdir.path().into())
         .low_power()
         .build();
     let (reader, writer, env) = init(config).unwrap();
@@ -1,7 +1,7 @@
 //! Database service type aliases.

 //---------------------------------------------------------------------------------------------------- Use
-use cuprate_database::RuntimeError;
+use cuprate_database::DbResult;
 use cuprate_database_service::{DatabaseReadService, DatabaseWriteHandle};
 use cuprate_types::blockchain::{
     BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest,

@@ -11,7 +11,7 @@ use cuprate_types::blockchain::{
 /// The actual type of the response.
 ///
 /// Either our [`BlockchainResponse`], or a database error occurred.
-pub(super) type ResponseResult = Result<BlockchainResponse, RuntimeError>;
+pub(super) type ResponseResult = DbResult<BlockchainResponse>;

 /// The blockchain database write service.
 pub type BlockchainWriteHandle = DatabaseWriteHandle<BlockchainWriteRequest, BlockchainResponse>;
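After this change `ResponseResult` is an alias of an alias; nothing about the expanded type changes. A sketch of the chain with stand-in types:

// Stand-ins; only the alias chain matters here.
#[derive(Debug)]
struct BlockchainResponse;
#[derive(Debug)]
#[allow(dead_code)]
struct RuntimeError;

// `cuprate_database::DbResult`, as introduced by this diff:
type DbResult<T> = Result<T, RuntimeError>;

// The service alias composes on top, so `ResponseResult` still expands
// to `Result<BlockchainResponse, RuntimeError>`.
type ResponseResult = DbResult<BlockchainResponse>;

fn respond() -> ResponseResult {
    Ok(BlockchainResponse)
}

fn main() {
    println!("{:?}", respond());
}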
@@ -2,7 +2,7 @@
 //---------------------------------------------------------------------------------------------------- Import
 use std::sync::Arc;

-use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError, TxRw};
+use cuprate_database::{ConcreteEnv, DatabaseRo, DbResult, Env, EnvInner, TxRw};
 use cuprate_database_service::DatabaseWriteHandle;
 use cuprate_types::{
     blockchain::{BlockchainResponse, BlockchainWriteRequest},

@@ -36,7 +36,7 @@ pub fn init_write_service(env: Arc<ConcreteEnv>) -> BlockchainWriteHandle {
 fn handle_blockchain_request(
     env: &ConcreteEnv,
     req: &BlockchainWriteRequest,
-) -> Result<BlockchainResponse, RuntimeError> {
+) -> DbResult<BlockchainResponse> {
     match req {
         BlockchainWriteRequest::WriteBlock(block) => write_block(env, block),
         BlockchainWriteRequest::WriteAltBlock(alt_block) => write_alt_block(env, alt_block),
@@ -5,7 +5,7 @@
 //! - only used internally

 //---------------------------------------------------------------------------------------------------- Import
-use std::{borrow::Cow, fmt::Debug};
+use std::fmt::Debug;

 use pretty_assertions::assert_eq;

@@ -74,7 +74,7 @@ impl AssertTableLen {
 pub(crate) fn tmp_concrete_env() -> (impl Env, tempfile::TempDir) {
     let tempdir = tempfile::tempdir().unwrap();
     let config = ConfigBuilder::new()
-        .db_directory(Cow::Owned(tempdir.path().into()))
+        .data_directory(tempdir.path().into())
         .low_power()
         .build();
     let env = crate::open(config).unwrap();
@@ -6,7 +6,7 @@ use std::cell::RefCell;
 use crate::{
     backend::heed::types::HeedDb,
     database::{DatabaseIter, DatabaseRo, DatabaseRw},
-    error::RuntimeError,
+    error::{DbResult, RuntimeError},
     table::Table,
 };
@@ -54,16 +54,13 @@ fn get<T: Table>(
     db: &HeedDb<T::Key, T::Value>,
     tx_ro: &heed::RoTxn<'_>,
     key: &T::Key,
-) -> Result<T::Value, RuntimeError> {
+) -> DbResult<T::Value> {
     db.get(tx_ro, key)?.ok_or(RuntimeError::KeyNotFound)
 }

 /// Shared [`DatabaseRo::len()`].
 #[inline]
-fn len<T: Table>(
-    db: &HeedDb<T::Key, T::Value>,
-    tx_ro: &heed::RoTxn<'_>,
-) -> Result<u64, RuntimeError> {
+fn len<T: Table>(db: &HeedDb<T::Key, T::Value>, tx_ro: &heed::RoTxn<'_>) -> DbResult<u64> {
     Ok(db.len(tx_ro)?)
 }

@@ -72,7 +69,7 @@ fn len<T: Table>(
 fn first<T: Table>(
     db: &HeedDb<T::Key, T::Value>,
     tx_ro: &heed::RoTxn<'_>,
-) -> Result<(T::Key, T::Value), RuntimeError> {
+) -> DbResult<(T::Key, T::Value)> {
     db.first(tx_ro)?.ok_or(RuntimeError::KeyNotFound)
 }

@@ -81,16 +78,13 @@ fn first<T: Table>(
 fn last<T: Table>(
     db: &HeedDb<T::Key, T::Value>,
     tx_ro: &heed::RoTxn<'_>,
-) -> Result<(T::Key, T::Value), RuntimeError> {
+) -> DbResult<(T::Key, T::Value)> {
     db.last(tx_ro)?.ok_or(RuntimeError::KeyNotFound)
 }

 /// Shared [`DatabaseRo::is_empty()`].
 #[inline]
-fn is_empty<T: Table>(
-    db: &HeedDb<T::Key, T::Value>,
-    tx_ro: &heed::RoTxn<'_>,
-) -> Result<bool, RuntimeError> {
+fn is_empty<T: Table>(db: &HeedDb<T::Key, T::Value>, tx_ro: &heed::RoTxn<'_>) -> DbResult<bool> {
     Ok(db.is_empty(tx_ro)?)
 }
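Both heed table wrappers (read-only and read-write) delegate to these shared free functions, so a policy like mapping a miss to `KeyNotFound` lives in exactly one place. A simplified, runnable sketch of that pattern with a `HashMap` standing in for the LMDB table:

use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum RuntimeError {
    KeyNotFound,
}
type DbResult<T> = Result<T, RuntimeError>;

// One shared helper, like the heed backend's free `get` above:
// a missing key becomes `KeyNotFound` in exactly one place.
fn get(db: &HashMap<u64, String>, key: u64) -> DbResult<String> {
    db.get(&key).cloned().ok_or(RuntimeError::KeyNotFound)
}

struct TableRo<'a>(&'a HashMap<u64, String>);
struct TableRw<'a>(&'a mut HashMap<u64, String>);

// Both wrappers delegate to the same helper.
impl TableRo<'_> {
    fn get(&self, key: u64) -> DbResult<String> {
        get(self.0, key)
    }
}
impl TableRw<'_> {
    fn get(&self, key: u64) -> DbResult<String> {
        get(&*self.0, key)
    }
}

fn main() {
    let mut map = HashMap::new();
    map.insert(1, "tx".to_string());
    assert_eq!(TableRo(&map).get(2), Err(RuntimeError::KeyNotFound));
    assert_eq!(TableRw(&mut map).get(1), Ok("tx".to_string()));
}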
@@ -101,7 +95,7 @@ impl<T: Table> DatabaseIter<T> for HeedTableRo<'_, T> {
     fn get_range<'a, Range>(
         &'a self,
         range: Range,
-    ) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + 'a, RuntimeError>
+    ) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + 'a>
     where
         Range: RangeBounds<T::Key> + 'a,
     {

@@ -111,24 +105,17 @@ impl<T: Table> DatabaseIter<T> for HeedTableRo<'_, T> {
     */

     #[inline]
-    fn iter(
-        &self,
-    ) -> Result<impl Iterator<Item = Result<(T::Key, T::Value), RuntimeError>> + '_, RuntimeError>
-    {
+    fn iter(&self) -> DbResult<impl Iterator<Item = DbResult<(T::Key, T::Value)>> + '_> {
         Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?)))
     }

     #[inline]
-    fn keys(
-        &self,
-    ) -> Result<impl Iterator<Item = Result<T::Key, RuntimeError>> + '_, RuntimeError> {
+    fn keys(&self) -> DbResult<impl Iterator<Item = DbResult<T::Key>> + '_> {
         Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?.0)))
     }

     #[inline]
-    fn values(
-        &self,
-    ) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + '_, RuntimeError> {
+    fn values(&self) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + '_> {
         Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?.1)))
     }
 }
@@ -137,27 +124,27 @@ impl<T: Table> DatabaseIter<T> for HeedTableRo<'_, T> {
 // SAFETY: `HeedTableRo: !Send` as it holds a reference to `heed::RoTxn: Send + !Sync`.
 unsafe impl<T: Table> DatabaseRo<T> for HeedTableRo<'_, T> {
     #[inline]
-    fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> {
+    fn get(&self, key: &T::Key) -> DbResult<T::Value> {
         get::<T>(&self.db, self.tx_ro, key)
     }

     #[inline]
-    fn len(&self) -> Result<u64, RuntimeError> {
+    fn len(&self) -> DbResult<u64> {
         len::<T>(&self.db, self.tx_ro)
     }

     #[inline]
-    fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn first(&self) -> DbResult<(T::Key, T::Value)> {
         first::<T>(&self.db, self.tx_ro)
     }

     #[inline]
-    fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn last(&self) -> DbResult<(T::Key, T::Value)> {
         last::<T>(&self.db, self.tx_ro)
     }

     #[inline]
-    fn is_empty(&self) -> Result<bool, RuntimeError> {
+    fn is_empty(&self) -> DbResult<bool> {
         is_empty::<T>(&self.db, self.tx_ro)
     }
 }
@@ -167,45 +154,45 @@ unsafe impl<T: Table> DatabaseRo<T> for HeedTableRo<'_, T> {
 // `HeedTableRw`'s write transaction is `!Send`.
 unsafe impl<T: Table> DatabaseRo<T> for HeedTableRw<'_, '_, T> {
     #[inline]
-    fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> {
+    fn get(&self, key: &T::Key) -> DbResult<T::Value> {
         get::<T>(&self.db, &self.tx_rw.borrow(), key)
     }

     #[inline]
-    fn len(&self) -> Result<u64, RuntimeError> {
+    fn len(&self) -> DbResult<u64> {
         len::<T>(&self.db, &self.tx_rw.borrow())
     }

     #[inline]
-    fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn first(&self) -> DbResult<(T::Key, T::Value)> {
         first::<T>(&self.db, &self.tx_rw.borrow())
     }

     #[inline]
-    fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn last(&self) -> DbResult<(T::Key, T::Value)> {
         last::<T>(&self.db, &self.tx_rw.borrow())
     }

     #[inline]
-    fn is_empty(&self) -> Result<bool, RuntimeError> {
+    fn is_empty(&self) -> DbResult<bool> {
         is_empty::<T>(&self.db, &self.tx_rw.borrow())
     }
 }

 impl<T: Table> DatabaseRw<T> for HeedTableRw<'_, '_, T> {
     #[inline]
-    fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError> {
+    fn put(&mut self, key: &T::Key, value: &T::Value) -> DbResult<()> {
         Ok(self.db.put(&mut self.tx_rw.borrow_mut(), key, value)?)
     }

     #[inline]
-    fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError> {
+    fn delete(&mut self, key: &T::Key) -> DbResult<()> {
         self.db.delete(&mut self.tx_rw.borrow_mut(), key)?;
         Ok(())
     }

     #[inline]
-    fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError> {
+    fn take(&mut self, key: &T::Key) -> DbResult<T::Value> {
         // LMDB/heed does not return the value on deletion.
         // So, fetch it first - then delete.
         let value = get::<T>(&self.db, &self.tx_rw.borrow(), key)?;

@@ -219,7 +206,7 @@ impl<T: Table> DatabaseRw<T> for HeedTableRw<'_, '_, T> {
     }

     #[inline]
-    fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn pop_first(&mut self) -> DbResult<(T::Key, T::Value)> {
         let tx_rw = &mut self.tx_rw.borrow_mut();

         // Get the value first...

@@ -238,7 +225,7 @@ impl<T: Table> DatabaseRw<T> for HeedTableRw<'_, '_, T> {
     }

     #[inline]
-    fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn pop_last(&mut self) -> DbResult<(T::Key, T::Value)> {
         let tx_rw = &mut self.tx_rw.borrow_mut();

         // Get the value first...
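As the comment in `take` says, LMDB/heed does not return the value on deletion, so `take` must fetch first and delete second. The same two-step shape with a `HashMap` stand-in:

use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum RuntimeError {
    KeyNotFound,
}
type DbResult<T> = Result<T, RuntimeError>;

// Fetch first, then delete, mirroring `HeedTableRw::take` above.
fn take(db: &mut HashMap<u64, String>, key: u64) -> DbResult<String> {
    let value = db.get(&key).cloned().ok_or(RuntimeError::KeyNotFound)?;
    db.remove(&key);
    Ok(value)
}

fn main() {
    let mut db = HashMap::new();
    db.insert(7, "blob".to_string());
    assert_eq!(take(&mut db, 7), Ok("blob".to_string()));
    assert_eq!(take(&mut db, 7), Err(RuntimeError::KeyNotFound));
}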
@@ -18,7 +18,7 @@ use crate::{
     config::{Config, SyncMode},
     database::{DatabaseIter, DatabaseRo, DatabaseRw},
     env::{Env, EnvInner},
-    error::{InitError, RuntimeError},
+    error::{DbResult, InitError, RuntimeError},
     key::{Key, KeyCompare},
     resize::ResizeAlgorithm,
     table::Table,

@@ -203,7 +203,7 @@ impl Env for ConcreteEnv {
         &self.config
     }

-    fn sync(&self) -> Result<(), RuntimeError> {
+    fn sync(&self) -> DbResult<()> {
         Ok(self.env.read().unwrap().force_sync()?)
     }

@@ -253,12 +253,12 @@ where
     type Rw<'a> = RefCell<heed::RwTxn<'a>>;

     #[inline]
-    fn tx_ro(&self) -> Result<Self::Ro<'_>, RuntimeError> {
+    fn tx_ro(&self) -> DbResult<Self::Ro<'_>> {
         Ok(self.read_txn()?)
     }

     #[inline]
-    fn tx_rw(&self) -> Result<Self::Rw<'_>, RuntimeError> {
+    fn tx_rw(&self) -> DbResult<Self::Rw<'_>> {
         Ok(RefCell::new(self.write_txn()?))
     }

@@ -266,7 +266,7 @@ where
     fn open_db_ro<T: Table>(
         &self,
         tx_ro: &Self::Ro<'_>,
-    ) -> Result<impl DatabaseRo<T> + DatabaseIter<T>, RuntimeError> {
+    ) -> DbResult<impl DatabaseRo<T> + DatabaseIter<T>> {
         // Open up a read-only database using our table's const metadata.
         //
         // INVARIANT: LMDB caches the ordering / comparison function from [`EnvInner::create_db`],

@@ -281,10 +281,7 @@ where
     }

     #[inline]
-    fn open_db_rw<T: Table>(
-        &self,
-        tx_rw: &Self::Rw<'_>,
-    ) -> Result<impl DatabaseRw<T>, RuntimeError> {
+    fn open_db_rw<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> DbResult<impl DatabaseRw<T>> {
         // Open up a read/write database using our table's const metadata.
         //
         // INVARIANT: LMDB caches the ordering / comparison function from [`EnvInner::create_db`],

@@ -296,7 +293,7 @@ where
         })
     }

-    fn create_db<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> Result<(), RuntimeError> {
+    fn create_db<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> DbResult<()> {
         // Create a database using our:
         // - [`Table`]'s const metadata.
         // - (potentially) our [`Key`] comparison function

@@ -328,7 +325,7 @@ where
     }

     #[inline]
-    fn clear_db<T: Table>(&self, tx_rw: &mut Self::Rw<'_>) -> Result<(), RuntimeError> {
+    fn clear_db<T: Table>(&self, tx_rw: &mut Self::Rw<'_>) -> DbResult<()> {
         let tx_rw = tx_rw.get_mut();

         // Open the table. We don't care about flags or key
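A design note worth calling out here: heed's read/write transaction is stored as `RefCell<heed::RwTxn>` so that several open table handles can share one transaction and still get `&mut` access at call time (`self.tx_rw.borrow_mut()` above). A minimal sketch of that interior-mutability arrangement; the types are stand-ins, and the `&mut self` receiver mirrors `DatabaseRw::put`:

use std::cell::RefCell;

// Stand-in for a write transaction that methods need `&mut` access to.
struct RwTxn {
    ops: u32,
}

// Tables hold a shared `&RefCell<RwTxn>` (as `HeedTableRw` does) and
// borrow it mutably only for the duration of each call.
struct Table<'a> {
    tx_rw: &'a RefCell<RwTxn>,
}

impl Table<'_> {
    fn put(&mut self) {
        self.tx_rw.borrow_mut().ops += 1;
    }
}

fn main() {
    let tx = RefCell::new(RwTxn { ops: 0 });
    let mut a = Table { tx_rw: &tx };
    let mut b = Table { tx_rw: &tx };
    a.put();
    b.put();
    assert_eq!(tx.into_inner().ops, 2);
}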
@@ -4,31 +4,31 @@ use std::cell::RefCell;

 //---------------------------------------------------------------------------------------------------- Import
 use crate::{
-    error::RuntimeError,
+    error::DbResult,
     transaction::{TxRo, TxRw},
 };

 //---------------------------------------------------------------------------------------------------- TxRo
 impl TxRo<'_> for heed::RoTxn<'_> {
-    fn commit(self) -> Result<(), RuntimeError> {
+    fn commit(self) -> DbResult<()> {
         Ok(heed::RoTxn::commit(self)?)
     }
 }

 //---------------------------------------------------------------------------------------------------- TxRw
 impl TxRo<'_> for RefCell<heed::RwTxn<'_>> {
-    fn commit(self) -> Result<(), RuntimeError> {
+    fn commit(self) -> DbResult<()> {
         TxRw::commit(self)
     }
 }

 impl TxRw<'_> for RefCell<heed::RwTxn<'_>> {
-    fn commit(self) -> Result<(), RuntimeError> {
+    fn commit(self) -> DbResult<()> {
         Ok(heed::RwTxn::commit(self.into_inner())?)
     }

     /// This function is infallible.
-    fn abort(self) -> Result<(), RuntimeError> {
+    fn abort(self) -> DbResult<()> {
         heed::RwTxn::abort(self.into_inner());
         Ok(())
     }
@@ -11,7 +11,7 @@ use crate::{
         types::{RedbTableRo, RedbTableRw},
     },
     database::{DatabaseIter, DatabaseRo, DatabaseRw},
-    error::RuntimeError,
+    error::{DbResult, RuntimeError},
     table::Table,
 };

@@ -25,7 +25,7 @@ use crate::{
 fn get<T: Table + 'static>(
     db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
     key: &T::Key,
-) -> Result<T::Value, RuntimeError> {
+) -> DbResult<T::Value> {
     Ok(db.get(key)?.ok_or(RuntimeError::KeyNotFound)?.value())
 }

@@ -33,7 +33,7 @@ fn get<T: Table + 'static>(
 #[inline]
 fn len<T: Table>(
     db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
-) -> Result<u64, RuntimeError> {
+) -> DbResult<u64> {
     Ok(db.len()?)
 }

@@ -41,7 +41,7 @@ fn len<T: Table>(
 #[inline]
 fn first<T: Table>(
     db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
-) -> Result<(T::Key, T::Value), RuntimeError> {
+) -> DbResult<(T::Key, T::Value)> {
     let (key, value) = db.first()?.ok_or(RuntimeError::KeyNotFound)?;
     Ok((key.value(), value.value()))
 }

@@ -50,7 +50,7 @@ fn first<T: Table>(
 #[inline]
 fn last<T: Table>(
     db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
-) -> Result<(T::Key, T::Value), RuntimeError> {
+) -> DbResult<(T::Key, T::Value)> {
     let (key, value) = db.last()?.ok_or(RuntimeError::KeyNotFound)?;
     Ok((key.value(), value.value()))
 }

@@ -59,7 +59,7 @@ fn last<T: Table>(
 #[inline]
 fn is_empty<T: Table>(
     db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
-) -> Result<bool, RuntimeError> {
+) -> DbResult<bool> {
     Ok(db.is_empty()?)
 }
@@ -70,7 +70,7 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
     fn get_range<'a, Range>(
         &'a self,
         range: Range,
-    ) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + 'a, RuntimeError>
+    ) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + 'a>
     where
         Range: RangeBounds<T::Key> + 'a,
     {

@@ -83,10 +83,7 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
     */

     #[inline]
-    fn iter(
-        &self,
-    ) -> Result<impl Iterator<Item = Result<(T::Key, T::Value), RuntimeError>> + '_, RuntimeError>
-    {
+    fn iter(&self) -> DbResult<impl Iterator<Item = DbResult<(T::Key, T::Value)>> + '_> {
         Ok(ReadableTable::iter(self)?.map(|result| {
             let (key, value) = result?;
             Ok((key.value(), value.value()))

@@ -94,9 +91,7 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
     }

     #[inline]
-    fn keys(
-        &self,
-    ) -> Result<impl Iterator<Item = Result<T::Key, RuntimeError>> + '_, RuntimeError> {
+    fn keys(&self) -> DbResult<impl Iterator<Item = DbResult<T::Key>> + '_> {
         Ok(ReadableTable::iter(self)?.map(|result| {
             let (key, _value) = result?;
             Ok(key.value())

@@ -104,9 +99,7 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
     }

     #[inline]
-    fn values(
-        &self,
-    ) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + '_, RuntimeError> {
+    fn values(&self) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + '_> {
         Ok(ReadableTable::iter(self)?.map(|result| {
             let (_key, value) = result?;
             Ok(value.value())
@@ -118,27 +111,27 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
 // SAFETY: Both `redb`'s transaction and table types are `Send + Sync`.
 unsafe impl<T: Table + 'static> DatabaseRo<T> for RedbTableRo<T::Key, T::Value> {
     #[inline]
-    fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> {
+    fn get(&self, key: &T::Key) -> DbResult<T::Value> {
         get::<T>(self, key)
     }

     #[inline]
-    fn len(&self) -> Result<u64, RuntimeError> {
+    fn len(&self) -> DbResult<u64> {
         len::<T>(self)
     }

     #[inline]
-    fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn first(&self) -> DbResult<(T::Key, T::Value)> {
         first::<T>(self)
     }

     #[inline]
-    fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn last(&self) -> DbResult<(T::Key, T::Value)> {
         last::<T>(self)
     }

     #[inline]
-    fn is_empty(&self) -> Result<bool, RuntimeError> {
+    fn is_empty(&self) -> DbResult<bool> {
         is_empty::<T>(self)
     }
 }

@@ -147,27 +140,27 @@ unsafe impl<T: Table + 'static> DatabaseRo<T> for RedbTableRo<T::Key, T::Value>
 // SAFETY: Both `redb`'s transaction and table types are `Send + Sync`.
 unsafe impl<T: Table + 'static> DatabaseRo<T> for RedbTableRw<'_, T::Key, T::Value> {
     #[inline]
-    fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> {
+    fn get(&self, key: &T::Key) -> DbResult<T::Value> {
         get::<T>(self, key)
     }

     #[inline]
-    fn len(&self) -> Result<u64, RuntimeError> {
+    fn len(&self) -> DbResult<u64> {
         len::<T>(self)
     }

     #[inline]
-    fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn first(&self) -> DbResult<(T::Key, T::Value)> {
         first::<T>(self)
     }

     #[inline]
-    fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn last(&self) -> DbResult<(T::Key, T::Value)> {
         last::<T>(self)
     }

     #[inline]
-    fn is_empty(&self) -> Result<bool, RuntimeError> {
+    fn is_empty(&self) -> DbResult<bool> {
         is_empty::<T>(self)
     }
 }
@@ -176,19 +169,19 @@ impl<T: Table + 'static> DatabaseRw<T> for RedbTableRw<'_, T::Key, T::Value> {
     // `redb` returns the value after function calls so we end with Ok(()) instead.

     #[inline]
-    fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError> {
+    fn put(&mut self, key: &T::Key, value: &T::Value) -> DbResult<()> {
         redb::Table::insert(self, key, value)?;
         Ok(())
     }

     #[inline]
-    fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError> {
+    fn delete(&mut self, key: &T::Key) -> DbResult<()> {
         redb::Table::remove(self, key)?;
         Ok(())
     }

     #[inline]
-    fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError> {
+    fn take(&mut self, key: &T::Key) -> DbResult<T::Value> {
         if let Some(value) = redb::Table::remove(self, key)? {
             Ok(value.value())
         } else {

@@ -197,13 +190,13 @@ impl<T: Table + 'static> DatabaseRw<T> for RedbTableRw<'_, T::Key, T::Value> {
     }

     #[inline]
-    fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn pop_first(&mut self) -> DbResult<(T::Key, T::Value)> {
         let (key, value) = redb::Table::pop_first(self)?.ok_or(RuntimeError::KeyNotFound)?;
         Ok((key.value(), value.value()))
     }

     #[inline]
-    fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn pop_last(&mut self) -> DbResult<(T::Key, T::Value)> {
         let (key, value) = redb::Table::pop_last(self)?.ok_or(RuntimeError::KeyNotFound)?;
         Ok((key.value(), value.value()))
     }
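redb's removal and pop APIs hand back `Option`s; the backend normalizes `None` into the crate's `RuntimeError::KeyNotFound` so both backends surface the same error type. The conversion in isolation:

#[derive(Debug, PartialEq)]
enum RuntimeError {
    KeyNotFound,
}
type DbResult<T> = Result<T, RuntimeError>;

// Mirrors `pop_first`/`take` above: an absent entry becomes a typed error.
fn normalize<T>(maybe: Option<T>) -> DbResult<T> {
    maybe.ok_or(RuntimeError::KeyNotFound)
}

fn main() {
    assert_eq!(normalize(Some(5)), Ok(5));
    assert_eq!(normalize::<u8>(None), Err(RuntimeError::KeyNotFound));
}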
@@ -6,7 +6,7 @@ use crate::{
     config::{Config, SyncMode},
     database::{DatabaseIter, DatabaseRo, DatabaseRw},
     env::{Env, EnvInner},
-    error::{InitError, RuntimeError},
+    error::{DbResult, InitError, RuntimeError},
     table::Table,
     TxRw,
 };

@@ -105,7 +105,7 @@ impl Env for ConcreteEnv {
         &self.config
     }

-    fn sync(&self) -> Result<(), RuntimeError> {
+    fn sync(&self) -> DbResult<()> {
         // `redb`'s syncs are tied with write transactions,
         // so just create one, don't do anything and commit.
         let mut tx_rw = self.env.begin_write()?;

@@ -127,12 +127,12 @@ where
     type Rw<'a> = redb::WriteTransaction;

     #[inline]
-    fn tx_ro(&self) -> Result<redb::ReadTransaction, RuntimeError> {
+    fn tx_ro(&self) -> DbResult<redb::ReadTransaction> {
         Ok(self.0.begin_read()?)
     }

     #[inline]
-    fn tx_rw(&self) -> Result<redb::WriteTransaction, RuntimeError> {
+    fn tx_rw(&self) -> DbResult<redb::WriteTransaction> {
         // `redb` has sync modes on the TX level, unlike heed,
         // which sets it at the Environment level.
         //

@@ -146,7 +146,7 @@ where
     fn open_db_ro<T: Table>(
         &self,
         tx_ro: &Self::Ro<'_>,
-    ) -> Result<impl DatabaseRo<T> + DatabaseIter<T>, RuntimeError> {
+    ) -> DbResult<impl DatabaseRo<T> + DatabaseIter<T>> {
         // Open up a read-only database using our `T: Table`'s const metadata.
         let table: redb::TableDefinition<'static, StorableRedb<T::Key>, StorableRedb<T::Value>> =
             redb::TableDefinition::new(T::NAME);

@@ -155,10 +155,7 @@ where
     }

     #[inline]
-    fn open_db_rw<T: Table>(
-        &self,
-        tx_rw: &Self::Rw<'_>,
-    ) -> Result<impl DatabaseRw<T>, RuntimeError> {
+    fn open_db_rw<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> DbResult<impl DatabaseRw<T>> {
         // Open up a read/write database using our `T: Table`'s const metadata.
         let table: redb::TableDefinition<'static, StorableRedb<T::Key>, StorableRedb<T::Value>> =
             redb::TableDefinition::new(T::NAME);

@@ -168,14 +165,14 @@ where
         Ok(tx_rw.open_table(table)?)
     }

-    fn create_db<T: Table>(&self, tx_rw: &redb::WriteTransaction) -> Result<(), RuntimeError> {
+    fn create_db<T: Table>(&self, tx_rw: &redb::WriteTransaction) -> DbResult<()> {
         // INVARIANT: `redb` creates tables if they don't exist.
         self.open_db_rw::<T>(tx_rw)?;
         Ok(())
     }

     #[inline]
-    fn clear_db<T: Table>(&self, tx_rw: &mut redb::WriteTransaction) -> Result<(), RuntimeError> {
+    fn clear_db<T: Table>(&self, tx_rw: &mut redb::WriteTransaction) -> DbResult<()> {
         let table: redb::TableDefinition<
             'static,
             StorableRedb<<T as Table>::Key>,
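redb attaches durability to write transactions rather than exposing a standalone sync call, so `sync` above just opens a write transaction, does nothing, and commits it (the comment in the diff says exactly this; the real code calls `redb::Database::begin_write`). A shape-only sketch with stand-in types:

// Stand-in environment demonstrating the "empty write transaction" trick.
struct Env {
    synced: bool,
}
struct WriteTx<'a> {
    env: &'a mut Env,
}

impl Env {
    fn begin_write(&mut self) -> WriteTx<'_> {
        WriteTx { env: self }
    }
}

impl WriteTx<'_> {
    // Committing (even with no writes) is what forces data to disk here.
    fn commit(self) {
        self.env.synced = true;
    }
}

fn sync(env: &mut Env) {
    // Create a write transaction, do nothing, commit.
    env.begin_write().commit();
}

fn main() {
    let mut env = Env { synced: false };
    sync(&mut env);
    assert!(env.synced);
}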
@@ -2,14 +2,14 @@

 //---------------------------------------------------------------------------------------------------- Import
 use crate::{
-    error::RuntimeError,
+    error::DbResult,
     transaction::{TxRo, TxRw},
 };

 //---------------------------------------------------------------------------------------------------- TxRo
 impl TxRo<'_> for redb::ReadTransaction {
     /// This function is infallible.
-    fn commit(self) -> Result<(), RuntimeError> {
+    fn commit(self) -> DbResult<()> {
         // `redb`'s read transactions cleanup automatically when all references are dropped.
         //
         // There is `close()`:

@@ -22,11 +22,11 @@ impl TxRo<'_> for redb::ReadTransaction {

 //---------------------------------------------------------------------------------------------------- TxRw
 impl TxRw<'_> for redb::WriteTransaction {
-    fn commit(self) -> Result<(), RuntimeError> {
+    fn commit(self) -> DbResult<()> {
         Ok(self.commit()?)
     }

-    fn abort(self) -> Result<(), RuntimeError> {
+    fn abort(self) -> DbResult<()> {
         Ok(self.abort()?)
     }
 }
@@ -9,7 +9,6 @@
 //! based on these values.

 //---------------------------------------------------------------------------------------------------- Import
-
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
@@ -1,7 +1,10 @@
 //! Abstracted database table operations; `trait DatabaseRo` & `trait DatabaseRw`.

 //---------------------------------------------------------------------------------------------------- Import
-use crate::{error::RuntimeError, table::Table};
+use crate::{
+    error::{DbResult, RuntimeError},
+    table::Table,
+};

 //---------------------------------------------------------------------------------------------------- DatabaseIter
 /// Generic post-fix documentation for `DatabaseIter` methods.

@@ -49,7 +52,7 @@ pub trait DatabaseIter<T: Table> {
     fn get_range<'a, Range>(
         &'a self,
         range: Range,
-    ) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + 'a, RuntimeError>
+    ) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + 'a>
     where
         Range: RangeBounds<T::Key> + 'a;

@@ -58,20 +61,15 @@ pub trait DatabaseIter<T: Table> {
     /// Get an [`Iterator`] that returns the `(key, value)` types for this database.
     #[doc = doc_iter!()]
     #[expect(clippy::iter_not_returning_iterator)]
-    fn iter(
-        &self,
-    ) -> Result<impl Iterator<Item = Result<(T::Key, T::Value), RuntimeError>> + '_, RuntimeError>;
+    fn iter(&self) -> DbResult<impl Iterator<Item = DbResult<(T::Key, T::Value)>> + '_>;

     /// Get an [`Iterator`] that returns _only_ the `key` type for this database.
     #[doc = doc_iter!()]
-    fn keys(&self)
-        -> Result<impl Iterator<Item = Result<T::Key, RuntimeError>> + '_, RuntimeError>;
+    fn keys(&self) -> DbResult<impl Iterator<Item = DbResult<T::Key>> + '_>;

     /// Get an [`Iterator`] that returns _only_ the `value` type for this database.
     #[doc = doc_iter!()]
-    fn values(
-        &self,
-    ) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + '_, RuntimeError>;
+    fn values(&self) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + '_>;
 }

 //---------------------------------------------------------------------------------------------------- DatabaseRo
@@ -79,7 +77,7 @@ pub trait DatabaseIter<T: Table> {
 macro_rules! doc_database {
     () => {
         r"# Errors
-This will return [`RuntimeError::KeyNotFound`] if:
+This will return [`crate::RuntimeError::KeyNotFound`] if:
 - Input does not exist OR
 - Database is empty"
     };
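`doc_database!` works because a `macro_rules!` macro can expand to a string literal, and `#[doc = ...]` accepts one, so the same Errors section is stamped onto every method. A minimal reproduction:

// A macro that expands to a string literal can feed `#[doc = ...]`,
// exactly like `doc_database!` above.
macro_rules! doc_error {
    () => {
        r"# Errors
This will return an error if the key is missing."
    };
}

#[doc = doc_error!()]
pub fn documented() {}

fn main() {
    documented();
}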
@@ -114,7 +112,7 @@ This will return [`RuntimeError::KeyNotFound`] if:
 pub unsafe trait DatabaseRo<T: Table> {
     /// Get the value corresponding to a key.
     #[doc = doc_database!()]
-    fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError>;
+    fn get(&self, key: &T::Key) -> DbResult<T::Value>;

     /// Returns `true` if the database contains a value for the specified key.
     ///

@@ -123,7 +121,7 @@ pub unsafe trait DatabaseRo<T: Table> {
     /// as in that case, `Ok(false)` will be returned.
     ///
     /// Other errors may still occur.
-    fn contains(&self, key: &T::Key) -> Result<bool, RuntimeError> {
+    fn contains(&self, key: &T::Key) -> DbResult<bool> {
         match self.get(key) {
             Ok(_) => Ok(true),
             Err(RuntimeError::KeyNotFound) => Ok(false),

@@ -135,21 +133,21 @@ pub unsafe trait DatabaseRo<T: Table> {
     ///
     /// # Errors
     /// This will never return [`RuntimeError::KeyNotFound`].
-    fn len(&self) -> Result<u64, RuntimeError>;
+    fn len(&self) -> DbResult<u64>;

     /// Returns the first `(key, value)` pair in the database.
     #[doc = doc_database!()]
-    fn first(&self) -> Result<(T::Key, T::Value), RuntimeError>;
+    fn first(&self) -> DbResult<(T::Key, T::Value)>;

     /// Returns the last `(key, value)` pair in the database.
     #[doc = doc_database!()]
-    fn last(&self) -> Result<(T::Key, T::Value), RuntimeError>;
+    fn last(&self) -> DbResult<(T::Key, T::Value)>;

     /// Returns `true` if the database contains no `(key, value)` pairs.
     ///
     /// # Errors
     /// This can only return [`RuntimeError::Io`] on errors.
-    fn is_empty(&self) -> Result<bool, RuntimeError>;
+    fn is_empty(&self) -> DbResult<bool>;
 }

 //---------------------------------------------------------------------------------------------------- DatabaseRw
@@ -164,7 +162,7 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
     #[doc = doc_database!()]
     ///
     /// This will never [`RuntimeError::KeyExists`].
-    fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError>;
+    fn put(&mut self, key: &T::Key, value: &T::Value) -> DbResult<()>;

     /// Delete a key-value pair in the database.
     ///

@@ -173,7 +171,7 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
     #[doc = doc_database!()]
     ///
     /// This will never [`RuntimeError::KeyExists`].
-    fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError>;
+    fn delete(&mut self, key: &T::Key) -> DbResult<()>;

     /// Delete and return a key-value pair in the database.
     ///

@@ -181,7 +179,7 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
     /// it will serialize the `T::Value` and return it.
     ///
     #[doc = doc_database!()]
-    fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError>;
+    fn take(&mut self, key: &T::Key) -> DbResult<T::Value>;

     /// Fetch the value, and apply a function to it - or delete the entry.
     ///

@@ -195,7 +193,7 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
     /// - If `f` returns `None`, the entry will be [`DatabaseRw::delete`]d
     ///
     #[doc = doc_database!()]
-    fn update<F>(&mut self, key: &T::Key, mut f: F) -> Result<(), RuntimeError>
+    fn update<F>(&mut self, key: &T::Key, mut f: F) -> DbResult<()>
     where
         F: FnMut(T::Value) -> Option<T::Value>,
     {

@@ -210,10 +208,10 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
     /// Removes and returns the first `(key, value)` pair in the database.
     ///
     #[doc = doc_database!()]
-    fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError>;
+    fn pop_first(&mut self) -> DbResult<(T::Key, T::Value)>;

     /// Removes and returns the last `(key, value)` pair in the database.
     ///
     #[doc = doc_database!()]
-    fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError>;
+    fn pop_last(&mut self) -> DbResult<(T::Key, T::Value)>;
 }
@@ -6,7 +6,7 @@ use std::num::NonZeroUsize;
 use crate::{
     config::Config,
     database::{DatabaseIter, DatabaseRo, DatabaseRw},
-    error::{InitError, RuntimeError},
+    error::{DbResult, InitError},
     resize::ResizeAlgorithm,
     table::Table,
     transaction::{TxRo, TxRw},

@@ -39,7 +39,7 @@ pub trait Env: Sized {
     ///
     /// # Invariant
     /// If this is `false`, that means this [`Env`]
-    /// must _never_ return a [`RuntimeError::ResizeNeeded`].
+    /// must _never_ return a [`crate::RuntimeError::ResizeNeeded`].
     ///
     /// If this is `true`, [`Env::resize_map`] & [`Env::current_map_size`]
     /// _must_ be re-implemented, as it just panics by default.

@@ -88,7 +88,7 @@ pub trait Env: Sized {
     /// This will error if the database file could not be opened.
     ///
     /// This is the only [`Env`] function that will return
-    /// an [`InitError`] instead of a [`RuntimeError`].
+    /// an [`InitError`] instead of a [`crate::RuntimeError`].
     fn open(config: Config) -> Result<Self, InitError>;

     /// Return the [`Config`] that this database was [`Env::open`]ed with.

@@ -107,7 +107,7 @@ pub trait Env: Sized {
     ///
     /// # Errors
     /// If there is a synchronization error, this should return an error.
-    fn sync(&self) -> Result<(), RuntimeError>;
+    fn sync(&self) -> DbResult<()>;

     /// Resize the database's memory map to a
     /// new (bigger) size using a [`ResizeAlgorithm`].

@@ -218,14 +218,14 @@ pub trait EnvInner<'env> {
     /// Create a read-only transaction.
     ///
     /// # Errors
-    /// This will only return [`RuntimeError::Io`] if it errors.
-    fn tx_ro(&self) -> Result<Self::Ro<'_>, RuntimeError>;
+    /// This will only return [`crate::RuntimeError::Io`] if it errors.
+    fn tx_ro(&self) -> DbResult<Self::Ro<'_>>;

     /// Create a read/write transaction.
     ///
     /// # Errors
-    /// This will only return [`RuntimeError::Io`] if it errors.
-    fn tx_rw(&self) -> Result<Self::Rw<'_>, RuntimeError>;
+    /// This will only return [`crate::RuntimeError::Io`] if it errors.
+    fn tx_rw(&self) -> DbResult<Self::Rw<'_>>;

     /// Open a database in read-only mode.
     ///

@@ -269,17 +269,17 @@ pub trait EnvInner<'env> {
     /// ```
     ///
     /// # Errors
-    /// This will only return [`RuntimeError::Io`] on normal errors.
+    /// This will only return [`crate::RuntimeError::Io`] on normal errors.
     ///
     /// If the specified table is not created upon before this function is called,
-    /// this will return [`RuntimeError::TableNotFound`].
+    /// this will return [`crate::RuntimeError::TableNotFound`].
     ///
     /// # Invariant
     #[doc = doc_heed_create_db_invariant!()]
     fn open_db_ro<T: Table>(
         &self,
         tx_ro: &Self::Ro<'_>,
-    ) -> Result<impl DatabaseRo<T> + DatabaseIter<T>, RuntimeError>;
+    ) -> DbResult<impl DatabaseRo<T> + DatabaseIter<T>>;

     /// Open a database in read/write mode.
     ///

@@ -293,25 +293,22 @@ pub trait EnvInner<'env> {
     /// passed as a generic to this function.
     ///
     /// # Errors
-    /// This will only return [`RuntimeError::Io`] on errors.
+    /// This will only return [`crate::RuntimeError::Io`] on errors.
     ///
     /// # Invariant
     #[doc = doc_heed_create_db_invariant!()]
-    fn open_db_rw<T: Table>(
-        &self,
-        tx_rw: &Self::Rw<'_>,
-    ) -> Result<impl DatabaseRw<T>, RuntimeError>;
+    fn open_db_rw<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> DbResult<impl DatabaseRw<T>>;

     /// Create a database table.
     ///
     /// This will create the database [`Table`] passed as a generic to this function.
     ///
     /// # Errors
-    /// This will only return [`RuntimeError::Io`] on errors.
+    /// This will only return [`crate::RuntimeError::Io`] on errors.
     ///
     /// # Invariant
     #[doc = doc_heed_create_db_invariant!()]
-    fn create_db<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> Result<(), RuntimeError>;
+    fn create_db<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> DbResult<()>;

     /// Clear all `(key, value)`'s from a database table.
     ///

@@ -322,9 +319,9 @@ pub trait EnvInner<'env> {
     /// function's effects can be aborted using [`TxRw::abort`].
     ///
     /// # Errors
-    /// This will return [`RuntimeError::Io`] on normal errors.
+    /// This will return [`crate::RuntimeError::Io`] on normal errors.
     ///
     /// If the specified table is not created upon before this function is called,
-    /// this will return [`RuntimeError::TableNotFound`].
-    fn clear_db<T: Table>(&self, tx_rw: &mut Self::Rw<'_>) -> Result<(), RuntimeError>;
+    /// this will return [`crate::RuntimeError::TableNotFound`].
+    fn clear_db<T: Table>(&self, tx_rw: &mut Self::Rw<'_>) -> DbResult<()>;
 }
@@ -7,6 +7,9 @@ use std::fmt::Debug;
 /// Alias for a thread-safe boxed error.
 type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>;

+/// [`Result`] with [`RuntimeError`] as the error.
+pub type DbResult<T> = Result<T, RuntimeError>;
+
 //---------------------------------------------------------------------------------------------------- InitError
 /// Errors that occur during ([`Env::open`](crate::env::Env::open)).
 ///
@@ -50,7 +50,7 @@ pub use constants::{
 };
 pub use database::{DatabaseIter, DatabaseRo, DatabaseRw};
 pub use env::{Env, EnvInner};
-pub use error::{InitError, RuntimeError};
+pub use error::{DbResult, InitError, RuntimeError};
 pub use key::{Key, KeyCompare};
 pub use storable::{Storable, StorableBytes, StorableStr, StorableVec};
 pub use table::Table;
@@ -1,7 +1,6 @@
 //! Database table abstraction; `trait Table`.

 //---------------------------------------------------------------------------------------------------- Import
-
 use crate::{key::Key, storable::Storable};

 //---------------------------------------------------------------------------------------------------- Table
@@ -211,7 +211,7 @@ macro_rules! define_tables {
         ///
         /// # Errors
         /// This returns errors on regular database errors.
-        fn all_tables_empty(&self) -> Result<bool, $crate::RuntimeError>;
+        fn all_tables_empty(&self) -> $crate::DbResult<bool>;
     }

     /// Object containing all opened [`Table`](cuprate_database::Table)s in read + iter mode.

@@ -293,7 +293,7 @@ macro_rules! define_tables {
             }
         )*

-        fn all_tables_empty(&self) -> Result<bool, $crate::RuntimeError> {
+        fn all_tables_empty(&self) -> $crate::DbResult<bool> {
             $(
                 if !$crate::DatabaseRo::is_empty(&self.$index)? {
                     return Ok(false);

@@ -369,7 +369,7 @@ macro_rules! define_tables {
         ///
         /// # Errors
         /// This will only return [`cuprate_database::RuntimeError::Io`] if it errors.
-        fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result<impl TablesIter, $crate::RuntimeError>;
+        fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> $crate::DbResult<impl TablesIter>;

         /// Open all tables in read-write mode.
         ///

@@ -378,7 +378,7 @@ macro_rules! define_tables {
         ///
         /// # Errors
         /// This will only return [`cuprate_database::RuntimeError::Io`] on errors.
-        fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> Result<impl TablesMut, $crate::RuntimeError>;
+        fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult<impl TablesMut>;

         /// Create all database tables.
         ///

@@ -386,7 +386,7 @@ macro_rules! define_tables {
         ///
         /// # Errors
         /// This will only return [`cuprate_database::RuntimeError::Io`] on errors.
-        fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> Result<(), $crate::RuntimeError>;
+        fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult<()>;
     }

     impl<'env, Ei> OpenTables<'env> for Ei

@@ -396,19 +396,19 @@ macro_rules! define_tables {
         type Ro<'tx> = <Ei as $crate::EnvInner<'env>>::Ro<'tx>;
         type Rw<'tx> = <Ei as $crate::EnvInner<'env>>::Rw<'tx>;

-        fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result<impl TablesIter, $crate::RuntimeError> {
+        fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> $crate::DbResult<impl TablesIter> {
             Ok(($(
                 Self::open_db_ro::<[<$table:camel>]>(self, tx_ro)?,
             )*))
         }

-        fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> Result<impl TablesMut, $crate::RuntimeError> {
+        fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult<impl TablesMut> {
             Ok(($(
                 Self::open_db_rw::<[<$table:camel>]>(self, tx_rw)?,
             )*))
         }

-        fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> Result<(), $crate::RuntimeError> {
+        fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult<()> {
             let result = Ok(($(
                 Self::create_db::<[<$table:camel>]>(self, tx_rw),
             )*));
@@ -1,7 +1,7 @@
 //! Database transaction abstraction; `trait TxRo`, `trait TxRw`.

 //---------------------------------------------------------------------------------------------------- Import
-use crate::error::RuntimeError;
+use crate::error::DbResult;

 //---------------------------------------------------------------------------------------------------- TxRo
 /// Read-only database transaction.

@@ -16,7 +16,7 @@ pub trait TxRo<'tx> {
     ///
     /// # Errors
     /// This operation will always return `Ok(())` with the `redb` backend.
-    fn commit(self) -> Result<(), RuntimeError>;
+    fn commit(self) -> DbResult<()>;
 }

 //---------------------------------------------------------------------------------------------------- TxRw

@@ -32,12 +32,12 @@ pub trait TxRw<'tx> {
     /// This operation will always return `Ok(())` with the `redb` backend.
     ///
     /// If `Env::MANUAL_RESIZE == true`,
-    /// [`RuntimeError::ResizeNeeded`] may be returned.
-    fn commit(self) -> Result<(), RuntimeError>;
+    /// [`crate::RuntimeError::ResizeNeeded`] may be returned.
+    fn commit(self) -> DbResult<()>;

     /// Abort the transaction, erasing any writes that have occurred.
     ///
     /// # Errors
     /// This operation will always return `Ok(())` with the `heed` backend.
-    fn abort(self) -> Result<(), RuntimeError>;
+    fn abort(self) -> DbResult<()>;
 }
@@ -7,7 +7,7 @@ use futures::channel::oneshot;
 use rayon::ThreadPool;
 use tower::Service;

-use cuprate_database::{ConcreteEnv, RuntimeError};
+use cuprate_database::{ConcreteEnv, DbResult, RuntimeError};
 use cuprate_helper::asynch::InfallibleOneshotReceiver;

 /// The [`rayon::ThreadPool`] service.

@@ -24,7 +24,7 @@ pub struct DatabaseReadService<Req, Res> {
     pool: Arc<ThreadPool>,

     /// The function used to handle request.
-    inner_handler: Arc<dyn Fn(Req) -> Result<Res, RuntimeError> + Send + Sync + 'static>,
+    inner_handler: Arc<dyn Fn(Req) -> DbResult<Res> + Send + Sync + 'static>,
 }

 // Deriving [`Clone`] means `Req` & `Res` need to be `Clone`, even if they aren't.

@@ -51,7 +51,7 @@ where
     pub fn new(
         env: Arc<ConcreteEnv>,
         pool: Arc<ThreadPool>,
-        req_handler: impl Fn(&ConcreteEnv, Req) -> Result<Res, RuntimeError> + Send + Sync + 'static,
+        req_handler: impl Fn(&ConcreteEnv, Req) -> DbResult<Res> + Send + Sync + 'static,
     ) -> Self {
         let inner_handler = Arc::new(move |req| req_handler(&env, req));

@@ -69,9 +69,9 @@ where
 {
     type Response = Res;
     type Error = RuntimeError;
-    type Future = InfallibleOneshotReceiver<Result<Self::Response, Self::Error>>;
+    type Future = InfallibleOneshotReceiver<DbResult<Self::Response>>;

-    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<DbResult<()>> {
         Poll::Ready(Ok(()))
     }
|
@@ -6,7 +6,7 @@ use std::{

 use futures::channel::oneshot;

-use cuprate_database::{ConcreteEnv, Env, RuntimeError};
+use cuprate_database::{ConcreteEnv, DbResult, Env, RuntimeError};
 use cuprate_helper::asynch::InfallibleOneshotReceiver;

 //---------------------------------------------------------------------------------------------------- Constants
@@ -26,8 +26,7 @@ pub struct DatabaseWriteHandle<Req, Res> {
     /// Sender channel to the database write thread-pool.
     ///
     /// We provide the response channel for the thread-pool.
-    pub(super) sender:
-        crossbeam::channel::Sender<(Req, oneshot::Sender<Result<Res, RuntimeError>>)>,
+    pub(super) sender: crossbeam::channel::Sender<(Req, oneshot::Sender<DbResult<Res>>)>,
 }

 impl<Req, Res> Clone for DatabaseWriteHandle<Req, Res> {
@@ -48,7 +47,7 @@ where
     #[inline(never)] // Only called once.
     pub fn init(
         env: Arc<ConcreteEnv>,
-        inner_handler: impl Fn(&ConcreteEnv, &Req) -> Result<Res, RuntimeError> + Send + 'static,
+        inner_handler: impl Fn(&ConcreteEnv, &Req) -> DbResult<Res> + Send + 'static,
     ) -> Self {
         // Initialize `Request/Response` channels.
         let (sender, receiver) = crossbeam::channel::unbounded();
@@ -66,10 +65,10 @@ where
 impl<Req, Res> tower::Service<Req> for DatabaseWriteHandle<Req, Res> {
     type Response = Res;
     type Error = RuntimeError;
-    type Future = InfallibleOneshotReceiver<Result<Res, RuntimeError>>;
+    type Future = InfallibleOneshotReceiver<DbResult<Res>>;

     #[inline]
-    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<DbResult<()>> {
         Poll::Ready(Ok(()))
     }

@@ -89,8 +88,8 @@ impl<Req, Res> tower::Service<Req> for DatabaseWriteHandle<Req, Res> {
 /// The main function of the writer thread.
 fn database_writer<Req, Res>(
     env: &ConcreteEnv,
-    receiver: &crossbeam::channel::Receiver<(Req, oneshot::Sender<Result<Res, RuntimeError>>)>,
-    inner_handler: impl Fn(&ConcreteEnv, &Req) -> Result<Res, RuntimeError>,
+    receiver: &crossbeam::channel::Receiver<(Req, oneshot::Sender<DbResult<Res>>)>,
+    inner_handler: impl Fn(&ConcreteEnv, &Req) -> DbResult<Res>,
 ) where
     Req: Send + 'static,
     Res: Debug + Send + 'static,
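The body of `database_writer` falls outside these hunks; from the signature alone, the loop plausibly reduces to this sketch (an assumption, not the committed code):

// Drain requests until every `DatabaseWriteHandle` clone is dropped,
// run the handler, and send the `DbResult` back on the oneshot channel.
while let Ok((request, response_sender)) = receiver.recv() {
    let result = inner_handler(env, &request);
    drop(response_sender.send(result)); // the caller may have gone away; ignore
}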
@@ -15,7 +15,7 @@ default = ["heed"]
 heed = ["cuprate-database/heed"]
 redb = ["cuprate-database/redb"]
 redb-memory = ["cuprate-database/redb-memory"]
-serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"]
+serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde", "cuprate-helper/serde"]

 [dependencies]
 cuprate-database = { workspace = true, features = ["heed"] }
@@ -78,7 +78,7 @@ use cuprate_txpool::{
 let tmp_dir = tempfile::tempdir()?;
 let db_dir = tmp_dir.path().to_owned();
 let config = ConfigBuilder::new()
-    .db_directory(db_dir.into())
+    .data_directory(db_dir.into())
     .build();

 // Initialize the database environment.
@@ -1,15 +1,18 @@
 //! The transaction pool [`Config`].
-use std::{borrow::Cow, path::Path};
+use std::{borrow::Cow, path::PathBuf};
+
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Serialize};

 use cuprate_database::{
     config::{Config as DbConfig, SyncMode},
     resize::ResizeAlgorithm,
 };
 use cuprate_database_service::ReaderThreads;
-use cuprate_helper::fs::CUPRATE_TXPOOL_DIR;
-
-#[cfg(feature = "serde")]
-use serde::{Deserialize, Serialize};
+use cuprate_helper::{
+    fs::{txpool_path, CUPRATE_DATA_DIR},
+    network::Network,
+};

 /// The default transaction pool weight limit.
 const DEFAULT_TXPOOL_WEIGHT_LIMIT: usize = 600 * 1024 * 1024;
@@ -21,8 +24,9 @@ const DEFAULT_TXPOOL_WEIGHT_LIMIT: usize = 600 * 1024 * 1024;
 #[derive(Debug, Clone, PartialEq, PartialOrd)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub struct ConfigBuilder {
-    /// [`Config::db_directory`].
-    db_directory: Option<Cow<'static, Path>>,
+    network: Network,
+
+    data_dir: Option<PathBuf>,

     /// [`Config::cuprate_database_config`].
     db_config: cuprate_database::config::ConfigBuilder,
@@ -41,10 +45,12 @@ impl ConfigBuilder {
     /// after this function to use default values.
     pub fn new() -> Self {
         Self {
-            db_directory: None,
-            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(
-                &*CUPRATE_TXPOOL_DIR,
-            )),
+            network: Network::default(),
+            data_dir: None,
+            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(txpool_path(
+                &CUPRATE_DATA_DIR,
+                Network::Mainnet,
+            ))),
             reader_threads: None,
             max_txpool_weight: None,
         }
@@ -53,16 +59,16 @@ impl ConfigBuilder {
     /// Build into a [`Config`].
     ///
     /// # Default values
-    /// If [`ConfigBuilder::db_directory`] was not called,
-    /// the default [`CUPRATE_TXPOOL_DIR`] will be used.
+    /// If [`ConfigBuilder::data_directory`] was not called,
+    /// [`txpool_path`] with [`CUPRATE_DATA_DIR`] and [`Network::Mainnet`] will be used.
     ///
     /// For all other values, [`Default::default`] is used.
     pub fn build(self) -> Config {
         // INVARIANT: all PATH safety checks are done
         // in `helper::fs`. No need to do them here.
-        let db_directory = self
-            .db_directory
-            .unwrap_or_else(|| Cow::Borrowed(&*CUPRATE_TXPOOL_DIR));
+        let data_dir = self
+            .data_dir
+            .unwrap_or_else(|| CUPRATE_DATA_DIR.to_path_buf());

         let reader_threads = self.reader_threads.unwrap_or_default();
@@ -72,7 +78,7 @@ impl ConfigBuilder {

         let db_config = self
             .db_config
-            .db_directory(db_directory)
+            .db_directory(Cow::Owned(txpool_path(&data_dir, self.network)))
             .reader_threads(reader_threads.as_threads())
             .build();
@@ -83,6 +89,13 @@ impl ConfigBuilder {
         }
     }

+    /// Change the network this database is for.
+    #[must_use]
+    pub const fn network(mut self, network: Network) -> Self {
+        self.network = network;
+        self
+    }
+
     /// Sets a new maximum weight for the transaction pool.
     #[must_use]
     pub const fn max_txpool_weight(mut self, max_txpool_weight: usize) -> Self {
@@ -90,10 +103,10 @@ impl ConfigBuilder {
         self
     }

-    /// Set a custom database directory (and file) [`Path`].
+    /// Set a custom data directory [`PathBuf`].
     #[must_use]
-    pub fn db_directory(mut self, db_directory: Cow<'static, Path>) -> Self {
-        self.db_directory = Some(db_directory);
+    pub fn data_directory(mut self, db_directory: PathBuf) -> Self {
+        self.data_dir = Some(db_directory);
         self
     }
@@ -124,9 +137,7 @@ impl ConfigBuilder {
     /// Good default for testing, and resource-available machines.
     #[must_use]
     pub fn fast(mut self) -> Self {
-        self.db_config =
-            cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR))
-                .fast();
+        self.db_config = self.db_config.fast();

         self.reader_threads = Some(ReaderThreads::OnePerThread);
         self
@@ -138,9 +149,7 @@ impl ConfigBuilder {
     /// Good default for resource-limited machines, e.g. a cheap VPS.
     #[must_use]
     pub fn low_power(mut self) -> Self {
-        self.db_config =
-            cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR))
-                .low_power();
+        self.db_config = self.db_config.low_power();

         self.reader_threads = Some(ReaderThreads::One);
         self
@@ -149,10 +158,13 @@
 impl Default for ConfigBuilder {
     fn default() -> Self {
-        let db_directory = Cow::Borrowed(CUPRATE_TXPOOL_DIR.as_path());
         Self {
-            db_directory: Some(db_directory.clone()),
-            db_config: cuprate_database::config::ConfigBuilder::new(db_directory),
+            network: Network::default(),
+            data_dir: Some(CUPRATE_DATA_DIR.to_path_buf()),
+            db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(txpool_path(
+                &CUPRATE_DATA_DIR,
+                Network::Mainnet,
+            ))),
             reader_threads: Some(ReaderThreads::default()),
             max_txpool_weight: Some(DEFAULT_TXPOOL_WEIGHT_LIMIT),
         }
@@ -184,7 +196,7 @@ impl Config {
     /// Create a new [`Config`] with sane default settings.
     ///
     /// The [`DbConfig::db_directory`]
-    /// will be set to [`CUPRATE_TXPOOL_DIR`].
+    /// will be set to [`txpool_path`] with [`CUPRATE_DATA_DIR`] and [`Network::Mainnet`].
     ///
     /// All other values will be [`Default::default`].
     ///
@@ -197,25 +209,21 @@ impl Config {
     /// DATABASE_DATA_FILENAME,
     /// };
     /// use cuprate_database_service::ReaderThreads;
-    /// use cuprate_helper::fs::*;
+    /// use cuprate_helper::{fs::*, network::Network};
     ///
     /// use cuprate_txpool::Config;
     ///
     /// let config = Config::new();
     ///
-    /// assert_eq!(config.db_config.db_directory(), &*CUPRATE_TXPOOL_DIR);
-    /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_TXPOOL_DIR));
+    /// assert_eq!(config.db_config.db_directory(), txpool_path(&CUPRATE_DATA_DIR, Network::Mainnet).as_path());
+    /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_DATA_DIR));
     /// assert!(config.db_config.db_file().ends_with(DATABASE_DATA_FILENAME));
     /// assert_eq!(config.db_config.sync_mode, SyncMode::default());
     /// assert_eq!(config.db_config.resize_algorithm, ResizeAlgorithm::default());
     /// assert_eq!(config.reader_threads, ReaderThreads::default());
     /// ```
     pub fn new() -> Self {
-        Self {
-            db_config: DbConfig::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)),
-            reader_threads: ReaderThreads::default(),
-            max_txpool_weight: 0,
-        }
+        ConfigBuilder::new().build()
     }
 }
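Putting the new builder API together: the txpool path is now derived from a data directory plus a network instead of being passed in directly. A usage sketch (the path literal is a placeholder, and `ConfigBuilder` is assumed to be re-exported at the crate root the way `Config` is):

use cuprate_helper::network::Network;
use cuprate_txpool::ConfigBuilder;

let config = ConfigBuilder::new()
    .network(Network::Stagenet)                    // selects the per-network subdirectory
    .data_directory("/tmp/cuprate-example".into()) // placeholder path
    .build();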
@@ -51,7 +51,7 @@
 //! let tmp_dir = tempfile::tempdir()?;
 //! let db_dir = tmp_dir.path().to_owned();
 //! let config = ConfigBuilder::new()
-//!     .db_directory(db_dir.into())
+//!     .data_directory(db_dir.into())
 //!     .build();
 //!
 //! // Initialize the database environment.
@@ -1,7 +1,7 @@
 //! Tx-pool key image ops.
 use monero_serai::transaction::Input;

-use cuprate_database::{DatabaseRw, RuntimeError};
+use cuprate_database::{DatabaseRw, DbResult};

 use crate::{ops::TxPoolWriteError, tables::SpentKeyImages, types::TransactionHash};

@@ -34,7 +34,7 @@ pub(super) fn add_tx_key_images(
 pub(super) fn remove_tx_key_images(
     inputs: &[Input],
     kis_table: &mut impl DatabaseRw<SpentKeyImages>,
-) -> Result<(), RuntimeError> {
+) -> DbResult<()> {
     for ki in inputs.iter().map(ki_from_input) {
         kis_table.delete(&ki)?;
     }
@@ -5,7 +5,7 @@ use std::sync::Mutex;

 use monero_serai::transaction::Transaction;

-use cuprate_database::{DatabaseRo, RuntimeError};
+use cuprate_database::{DatabaseRo, DbResult};
 use cuprate_types::{TransactionVerificationData, TxVersion};

 use crate::{
@@ -17,7 +17,7 @@ use crate::{
 pub fn get_transaction_verification_data(
     tx_hash: &TransactionHash,
     tables: &impl Tables,
-) -> Result<TransactionVerificationData, RuntimeError> {
+) -> DbResult<TransactionVerificationData> {
     let tx_blob = tables.transaction_blobs().get(tx_hash)?.0;

     let tx_info = tables.transaction_infos().get(tx_hash)?;
@@ -45,7 +45,7 @@ pub fn get_transaction_verification_data(
 pub fn in_stem_pool(
     tx_hash: &TransactionHash,
     tx_infos: &impl DatabaseRo<TransactionInfos>,
-) -> Result<bool, RuntimeError> {
+) -> DbResult<bool> {
     Ok(tx_infos
         .get(tx_hash)?
         .flags
@@ -4,7 +4,7 @@
 use bytemuck::TransparentWrapper;
 use monero_serai::transaction::{NotPruned, Transaction};

-use cuprate_database::{DatabaseRw, RuntimeError, StorableVec};
+use cuprate_database::{DatabaseRw, DbResult, StorableVec};
 use cuprate_types::TransactionVerificationData;

 use crate::{
@@ -67,10 +67,7 @@ pub fn add_transaction(
 }

 /// Removes a transaction from the transaction pool.
-pub fn remove_transaction(
-    tx_hash: &TransactionHash,
-    tables: &mut impl TablesMut,
-) -> Result<(), RuntimeError> {
+pub fn remove_transaction(tx_hash: &TransactionHash, tables: &mut impl TablesMut) -> DbResult<()> {
     // Remove the tx blob from table 0.
     let tx_blob = tables.transaction_blobs_mut().take(tx_hash)?.0;
@@ -83,7 +83,7 @@
 //! let tmp_dir = tempfile::tempdir()?;
 //! let db_dir = tmp_dir.path().to_owned();
 //! let config = ConfigBuilder::new()
-//!     .db_directory(db_dir.into())
+//!     .data_directory(db_dir.into())
 //!     .build();
 //!
 //! // Initialize the database thread-pool.
@@ -11,7 +11,7 @@ use std::{

 use rayon::ThreadPool;

-use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError};
+use cuprate_database::{ConcreteEnv, DatabaseRo, DbResult, Env, EnvInner, RuntimeError};
 use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads};

 use crate::{
@@ -137,7 +137,7 @@ fn filter_known_tx_blob_hashes(

     // A closure that returns `true` if a tx with a certain blob hash is unknown.
     // This also fills in `stem_tx_hashes`.
-    let mut tx_unknown = |blob_hash| -> Result<bool, RuntimeError> {
+    let mut tx_unknown = |blob_hash| -> DbResult<bool> {
         match tx_blob_hashes.get(&blob_hash) {
             Ok(tx_hash) => {
                 if in_stem_pool(&tx_hash, &tx_infos)? {
@@ -2,7 +2,7 @@
 //!
 //! Only used internally for our [`tower::Service`] impls.

-use cuprate_database::RuntimeError;
+use cuprate_database::DbResult;
 use cuprate_database_service::{DatabaseReadService, DatabaseWriteHandle};

 use crate::service::interface::{
@@ -12,7 +12,7 @@ use crate::service::interface::{
 /// The actual type of the response.
 ///
 /// Either our [`TxpoolReadResponse`], or a database error occurred.
-pub(super) type ReadResponseResult = Result<TxpoolReadResponse, RuntimeError>;
+pub(super) type ReadResponseResult = DbResult<TxpoolReadResponse>;

 /// The transaction pool database write service.
 pub type TxpoolWriteHandle = DatabaseWriteHandle<TxpoolWriteRequest, TxpoolWriteResponse>;
@@ -1,6 +1,8 @@
 use std::sync::Arc;

-use cuprate_database::{ConcreteEnv, DatabaseRo, DatabaseRw, Env, EnvInner, RuntimeError, TxRw};
+use cuprate_database::{
+    ConcreteEnv, DatabaseRo, DatabaseRw, DbResult, Env, EnvInner, RuntimeError, TxRw,
+};
 use cuprate_database_service::DatabaseWriteHandle;
 use cuprate_types::TransactionVerificationData;

@@ -25,7 +27,7 @@ pub(super) fn init_write_service(env: Arc<ConcreteEnv>) -> TxpoolWriteHandle {
 fn handle_txpool_request(
     env: &ConcreteEnv,
     req: &TxpoolWriteRequest,
-) -> Result<TxpoolWriteResponse, RuntimeError> {
+) -> DbResult<TxpoolWriteResponse> {
     match req {
         TxpoolWriteRequest::AddTransaction { tx, state_stem } => {
             add_transaction(env, tx, *state_stem)
@@ -50,7 +52,7 @@ fn add_transaction(
     env: &ConcreteEnv,
     tx: &TransactionVerificationData,
     state_stem: bool,
-) -> Result<TxpoolWriteResponse, RuntimeError> {
+) -> DbResult<TxpoolWriteResponse> {
     let env_inner = env.env_inner();
     let tx_rw = env_inner.tx_rw()?;

@@ -83,7 +85,7 @@ fn add_transaction(
 fn remove_transaction(
     env: &ConcreteEnv,
     tx_hash: &TransactionHash,
-) -> Result<TxpoolWriteResponse, RuntimeError> {
+) -> DbResult<TxpoolWriteResponse> {
     let env_inner = env.env_inner();
     let tx_rw = env_inner.tx_rw()?;

@@ -105,10 +107,7 @@ fn remove_transaction(
 }

 /// [`TxpoolWriteRequest::Promote`]
-fn promote(
-    env: &ConcreteEnv,
-    tx_hash: &TransactionHash,
-) -> Result<TxpoolWriteResponse, RuntimeError> {
+fn promote(env: &ConcreteEnv, tx_hash: &TransactionHash) -> DbResult<TxpoolWriteResponse> {
     let env_inner = env.env_inner();
     let tx_rw = env_inner.tx_rw()?;

@@ -134,10 +133,7 @@ fn promote(
 }

 /// [`TxpoolWriteRequest::NewBlock`]
-fn new_block(
-    env: &ConcreteEnv,
-    spent_key_images: &[KeyImage],
-) -> Result<TxpoolWriteResponse, RuntimeError> {
+fn new_block(env: &ConcreteEnv, spent_key_images: &[KeyImage]) -> DbResult<TxpoolWriteResponse> {
     let env_inner = env.env_inner();
     let tx_rw = env_inner.tx_rw()?;
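For orientation, a hedged sketch of driving the write handle as a `tower::Service`; the exact type of `tx` inside `TxpoolWriteRequest::AddTransaction` is not visible in these hunks:

use tower::{Service, ServiceExt};

// `write_handle` is a `TxpoolWriteHandle`; `tx` is built elsewhere.
let response = write_handle
    .ready()
    .await?
    .call(TxpoolWriteRequest::AddTransaction { tx, state_stem: false })
    .await?;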
@@ -104,7 +104,7 @@ macro_rules! const_tx_blob {
         hash: $hash:literal, // Transaction hash
         data_path: $data_path:literal, // Path to the transaction blob
         version: $version:literal, // Transaction version
-        timelock: $timelock:expr, // Transaction's timelock (use the real type `Timelock`)
+        timelock: $timelock:expr_2021, // Transaction's timelock (use the real type `Timelock`)
         input_len: $input_len:literal, // Amount of inputs
         output_len: $output_len:literal, // Amount of outputs
     ) => {
@@ -25,11 +25,11 @@ macro_rules! define_request_and_response {

     // The request type (and any doc comments, derives, etc).
     $( #[$request_attr:meta] )*
-    Request = $request:expr;
+    Request = $request:expr_2021;

     // The response type (and any doc comments, derives, etc).
     $( #[$response_attr:meta] )*
-    Response = $response:expr;
+    Response = $response:expr_2021;
 ) => { paste::paste! {
     #[doc = $crate::rpc::data::macros::define_request_and_response_doc!(
         "response" => [<$name:upper _RESPONSE>],
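The `$x:expr` to `$x:expr_2021` swaps here and in `const_tx_blob!` above read like Rust 2024 edition preparation: in edition 2024, an `expr` fragment also matches `const { ... }` blocks (and `_`), while `expr_2021` keeps the pre-2024 grammar so existing call sites behave exactly as before. A tiny self-contained illustration:

// `expr_2021` accepts what `expr` accepted before edition 2024.
macro_rules! double {
    ($e:expr_2021) => {
        $e * 2
    };
}

fn main() {
    assert_eq!(double!(21), 42);
}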
@@ -11,9 +11,9 @@ use std::{
 use monero_serai::block::Block;

 use crate::{
-    types::{Chain, ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation},
-    AltBlockInformation, ChainId, ChainInfo, CoinbaseTxSum, OutputHistogramEntry,
-    OutputHistogramInput,
+    types::{Chain, ExtendedBlockHeader, OutputOnChain, TxsInBlock, VerifiedBlockInformation},
+    AltBlockInformation, BlockCompleteEntry, ChainId, ChainInfo, CoinbaseTxSum,
+    OutputHistogramEntry, OutputHistogramInput,
 };

 //---------------------------------------------------------------------------------------------------- ReadRequest
@@ -27,6 +27,11 @@ use crate::{
 /// See `Response` for the expected responses per `Request`.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum BlockchainReadRequest {
+    /// Request [`BlockCompleteEntry`]s.
+    ///
+    /// The input is the block hashes.
+    BlockCompleteEntries(Vec<[u8; 32]>),
+
     /// Request a block's extended header.
     ///
     /// The input is the block's height.
@@ -96,6 +101,16 @@ pub enum BlockchainReadRequest {
     /// A request for the compact chain history.
     CompactChainHistory,

+    /// A request for the next chain entry.
+    ///
+    /// Input is a list of block hashes and the number of block hashes to return in the next chain entry.
+    ///
+    /// # Invariant
+    /// The [`Vec`] containing the block IDs must be sorted in reverse chronological block
+    /// order, or else the returned response is unspecified and meaningless,
+    /// as this request performs a binary search.
+    NextChainEntry(Vec<[u8; 32]>, usize),
+
     /// A request to find the first unknown block ID in a list of block IDs.
     ///
     /// # Invariant
@@ -104,6 +119,16 @@ pub enum BlockchainReadRequest {
     /// as this request performs a binary search.
     FindFirstUnknown(Vec<[u8; 32]>),

+    /// A request for transactions from a specific block.
+    TxsInBlock {
+        /// The block to get transactions from.
+        block_hash: [u8; 32],
+        /// The indexes of the transactions within the block.
+        /// These are not the txs' global indexes; they are the local indexes as the txs appear in
+        /// the block.
+        tx_indexes: Vec<u64>,
+    },
+
     /// A request for all alt blocks in the chain with the given [`ChainId`].
     AltBlocksInChain(ChainId),

@@ -182,6 +207,16 @@ pub enum BlockchainWriteRequest {
 #[expect(clippy::large_enum_variant)]
 pub enum BlockchainResponse {
     //------------------------------------------------------ Reads
+    /// Response to [`BlockchainReadRequest::BlockCompleteEntries`].
+    BlockCompleteEntries {
+        /// The [`BlockCompleteEntry`]s that we had.
+        blocks: Vec<BlockCompleteEntry>,
+        /// The hashes of blocks that were requested, but we don't have.
+        missing_hashes: Vec<[u8; 32]>,
+        /// Our blockchain height.
+        blockchain_height: usize,
+    },
+
     /// Response to [`BlockchainReadRequest::BlockExtendedHeader`].
     ///
     /// Inner value is the extended header of the requested block.
@@ -248,6 +283,24 @@ pub enum BlockchainResponse {
         cumulative_difficulty: u128,
     },

+    /// Response to [`BlockchainReadRequest::NextChainEntry`].
+    ///
+    /// If all blocks were unknown, `start_height` will be [`None`] and the other fields will be meaningless.
+    NextChainEntry {
+        /// The start height of this entry, [`None`] if we could not find the split point.
+        start_height: Option<std::num::NonZero<usize>>,
+        /// The current chain height.
+        chain_height: usize,
+        /// The next block hashes in the entry.
+        block_ids: Vec<[u8; 32]>,
+        /// The block weights of the next blocks.
+        block_weights: Vec<usize>,
+        /// The current cumulative difficulty of our chain.
+        cumulative_difficulty: u128,
+        /// The block blob of the 2nd block in `block_ids`, if there is one.
+        first_block_blob: Option<Vec<u8>>,
+    },
+
     /// Response to [`BlockchainReadRequest::FindFirstUnknown`].
     ///
     /// Contains the index of the first unknown block and its expected height.
@@ -255,7 +308,12 @@ pub enum BlockchainResponse {
     /// This will be [`None`] if all blocks were known.
     FindFirstUnknown(Option<(usize, usize)>),

-    /// Response to [`BlockchainReadRequest::AltBlocksInChain`].
+    /// The response for [`BlockchainReadRequest::TxsInBlock`].
+    ///
+    /// Will be [`None`] if the request contained an index out of range.
+    TxsInBlock(Option<TxsInBlock>),
+
+    /// The response for [`BlockchainReadRequest::AltBlocksInChain`].
     ///
     /// Contains all the alt blocks in the alt-chain in chronological order.
     AltBlocksInChain(Vec<AltBlockInformation>),
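A hedged sketch of issuing the new `NextChainEntry` request; the hash variables are placeholders, and the documented invariant requires newest-to-oldest ordering:

// Placeholder hashes; a real caller sends its compact chain history,
// newest block first, ending at the genesis hash.
let block_ids: Vec<[u8; 32]> = vec![tip_hash, older_hash, genesis_hash];

// Ask for at most 10_000 block hashes in the returned entry; the reply
// arrives as `BlockchainResponse::NextChainEntry { .. }`.
let request = BlockchainReadRequest::NextChainEntry(block_ids, 10_000);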
@@ -26,8 +26,8 @@ pub use transaction_verification_data::{
 pub use types::{
     AddAuxPow, AltBlockInformation, AuxPow, Chain, ChainId, ChainInfo, CoinbaseTxSum,
     ExtendedBlockHeader, FeeEstimate, HardForkInfo, MinerData, MinerDataTxBacklogEntry,
-    OutputHistogramEntry, OutputHistogramInput, OutputOnChain, VerifiedBlockInformation,
-    VerifiedTransactionInformation,
+    OutputHistogramEntry, OutputHistogramInput, OutputOnChain, TxsInBlock,
+    VerifiedBlockInformation, VerifiedTransactionInformation,
 };

 //---------------------------------------------------------------------------------------------------- Feature-gated
@@ -259,6 +259,13 @@ pub struct AddAuxPow {
     pub aux_pow: Vec<AuxPow>,
 }

+/// The inner response for a request for txs in a block.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct TxsInBlock {
+    pub block: Vec<u8>,
+    pub txs: Vec<Vec<u8>>,
+}
+
 //---------------------------------------------------------------------------------------------------- Tests
 #[cfg(test)]
 mod test {
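A short sketch of consuming the matching response variant; `response` here stands in for whatever the blockchain read service returned:

match response {
    BlockchainResponse::TxsInBlock(Some(TxsInBlock { block, txs })) => {
        // `block` is the serialized block blob, `txs` the requested tx blobs.
    }
    // A requested index was out of range for this block.
    BlockchainResponse::TxsInBlock(None) => {}
    _ => unreachable!("response type mismatch"),
}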