rm -rf benches
parent 38541dbfda
commit a0ddc16a9a
25 changed files with 0 additions and 637 deletions
Cargo.toml (11 changed lines)
@@ -4,13 +4,6 @@ members = [
     # Binaries
     "binaries/cuprated",
 
-    # Benchmarks
-    "benches/benchmark/bin",
-    "benches/benchmark/lib",
-    "benches/benchmark/example",
-    "benches/criterion/example",
-    "benches/criterion/cuprate-json-rpc",
-
     # Consensus
     "consensus",
     "consensus/context",
@@ -74,8 +67,6 @@ opt-level = 3
 
 [workspace.dependencies]
 # Cuprate members
-cuprate-benchmark-lib = { path = "benches/benchmark/lib", default-features = false }
-cuprate-benchmark-example = { path = "benches/benchmark/example", default-features = false }
 cuprate-fast-sync = { path = "consensus/fast-sync", default-features = false }
 cuprate-consensus-rules = { path = "consensus/rules", default-features = false }
 cuprate-constants = { path = "constants", default-features = false }
@@ -148,8 +139,6 @@ tracing-subscriber = { version = "0.3", default-features = false }
 tracing = { version = "0.1", default-features = false }
 
 ## workspace.dev-dependencies
-criterion = { version = "0.5" }
-function_name = { version = "0.3" }
 monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" }
 monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" }
 tempfile = { version = "3" }
benches/README.md (deleted)
@@ -1,5 +0,0 @@
-# Benches
-This directory contains Cuprate's benchmarks and benchmarking utilities.
-
-See the [`Benchmarking` section in the Architecture book](https://architecture.cuprate.org/benchmarking/intro.html)
-for how to create and run these benchmarks.
benches/benchmark/bin/Cargo.toml (deleted)
@@ -1,43 +0,0 @@
-[package]
-name = "cuprate-benchmark"
-version = "0.0.0"
-edition = "2021"
-description = "Cuprate's benchmarking binary"
-license = "MIT"
-authors = ["hinto-janai"]
-repository = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin"
-keywords = ["cuprate", "benchmarking", "binary"]
-
-[features]
-# All new benchmarks should be added here!
-all = ["example"]
-
-# Non-benchmark features.
-default = []
-json = []
-trace = []
-debug = []
-warn = []
-info = []
-error = []
-
-# Benchmark features.
-# New benchmarks should be added here!
-example = [
-    "dep:cuprate-benchmark-example"
-]
-
-[dependencies]
-cuprate-benchmark-lib = { workspace = true }
-cuprate-benchmark-example = { workspace = true, optional = true }
-
-cfg-if = { workspace = true }
-serde = { workspace = true, features = ["derive"] }
-serde_json = { workspace = true, features = ["std"] }
-tracing = { workspace = true, features = ["std", "attributes"] }
-tracing-subscriber = { workspace = true, features = ["fmt", "std", "env-filter"] }
-
-[dev-dependencies]
-
-[lints]
-workspace = true
benches/benchmark/bin/README.md (deleted)
@@ -1,27 +0,0 @@
-## `cuprate-benchmark`
-This crate links all benchmarks together into a single binary that can be run as: `cuprate-benchmark`.
-
-`cuprate-benchmark` will run all enabled benchmarks sequentially and print data at the end.
-
-## Benchmarks
-Benchmarks are opt-in and enabled via features.
-
-| Feature  | Enables which benchmark crate? |
-|----------|--------------------------------|
-| example  | cuprate-benchmark-example      |
-| database | cuprate-benchmark-database     |
-
-## Features
-These features do not enable benchmarks; they configure other behavior.
-
-Since `cuprate-benchmark` is built right before it is run,
-these features almost act like command-line arguments.
-
-| Feature  | Does what                                       |
-|----------|-------------------------------------------------|
-| json     | Prints JSON timings instead of a markdown table |
-| trace    | Use the `trace` log-level                       |
-| debug    | Use the `debug` log-level                       |
-| warn     | Use the `warn` log-level                        |
-| info     | Use the `info` log-level (default)              |
-| error    | Use the `error` log-level                       |
benches/benchmark/bin/src/log.rs (deleted)
@@ -1,29 +0,0 @@
-use cfg_if::cfg_if;
-use tracing::{info, instrument, Level};
-use tracing_subscriber::FmtSubscriber;
-
-/// Initializes the `tracing` logger.
-#[instrument]
-pub(crate) fn init_logger() {
-    const LOG_LEVEL: Level = {
-        cfg_if! {
-            if #[cfg(feature = "trace")] {
-                Level::TRACE
-            } else if #[cfg(feature = "debug")] {
-                Level::DEBUG
-            } else if #[cfg(feature = "warn")] {
-                Level::WARN
-            } else if #[cfg(feature = "info")] {
-                Level::INFO
-            } else if #[cfg(feature = "error")] {
-                Level::ERROR
-            } else {
-                Level::INFO
-            }
-        }
-    };
-
-    FmtSubscriber::builder().with_max_level(LOG_LEVEL).init();
-
-    info!("Log level: {LOG_LEVEL}");
-}
benches/benchmark/bin/src/main.rs (deleted)
@@ -1,49 +0,0 @@
-#![doc = include_str!("../README.md")]
-#![allow(
-    unused_crate_dependencies,
-    reason = "this crate imports many potentially unused dependencies"
-)]
-
-mod log;
-mod print;
-mod run;
-mod timings;
-
-use cfg_if::cfg_if;
-
-/// What `main()` does:
-/// 1. Run all enabled benchmarks
-/// 2. Record benchmark timings
-/// 3. Print timing data
-///
-/// To add a new benchmark to be run here:
-/// 1. Copy + paste a `cfg_if` block
-/// 2. Change it to your benchmark's feature flag
-/// 3. Change it to your benchmark's type
-#[allow(
-    clippy::allow_attributes,
-    unused_variables,
-    unused_mut,
-    unreachable_code,
-    reason = "clippy does not account for all cfg()s"
-)]
-fn main() {
-    log::init_logger();
-
-    let mut timings = timings::Timings::new();
-
-    cfg_if! {
-        if #[cfg(not(any(feature = "example")))] {
-            println!("No feature specified. Use `--features $BENCHMARK_FEATURE` when building.");
-            return;
-        }
-    }
-
-    cfg_if! {
-        if #[cfg(feature = "example")] {
-            run::run_benchmark::<cuprate_benchmark_example::Example>(&mut timings);
-        }
-    }
-
-    print::print_timings(&timings);
-}
benches/benchmark/bin/src/print.rs (deleted)
@@ -1,38 +0,0 @@
-#![expect(dead_code, reason = "code hidden behind feature flags")]
-
-use cfg_if::cfg_if;
-
-use crate::timings::Timings;
-
-/// Print the final markdown table of benchmark timings.
-pub(crate) fn print_timings(timings: &Timings) {
-    println!("\nFinished all benchmarks, printing results:");
-
-    cfg_if! {
-        if #[cfg(feature = "json")] {
-            print_timings_json(timings);
-        } else {
-            print_timings_markdown(timings);
-        }
-    }
-}
-
-/// Default timing formatting.
-pub(crate) fn print_timings_markdown(timings: &Timings) {
-    let mut s = String::new();
-    s.push_str("| Benchmark | Time (seconds) |\n");
-    s.push_str("|------------------------------------|----------------|");
-
-    #[expect(clippy::iter_over_hash_type)]
-    for (k, v) in timings {
-        s += &format!("\n| {k:<34} | {v:<14} |");
-    }
-
-    println!("\n{s}");
-}
-
-/// Enabled via `json` feature.
-pub(crate) fn print_timings_json(timings: &Timings) {
-    let json = serde_json::to_string_pretty(timings).unwrap();
-    println!("\n{json}");
-}
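For a sense of what these two printers emit: a hypothetical run with only the `example` benchmark enabled would produce output shaped like the following. The timing value here is invented purely for illustration; the benchmark name is what `std::any::type_name` returns for the example type.

```text
| Benchmark                          | Time (seconds) |
|------------------------------------|----------------|
| cuprate_benchmark_example::Example | 1.2345678      |
```

With the `json` feature, the same data is printed as a pretty-printed JSON object keyed by benchmark name:

```text
{
  "cuprate_benchmark_example::Example": 1.2345678
}
```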
benches/benchmark/bin/src/run.rs (deleted)
@@ -1,36 +0,0 @@
-use tracing::{info, instrument, trace};
-
-use cuprate_benchmark_lib::Benchmark;
-
-use crate::timings::Timings;
-
-/// Run a [`Benchmark`] and record its timing.
-#[instrument(skip_all)]
-pub(crate) fn run_benchmark<B: Benchmark>(timings: &mut Timings) {
-    // Get the benchmark name.
-    let name = B::name();
-    trace!("Running benchmark: {name}");
-
-    // Setup the benchmark input.
-    let input = B::SETUP();
-
-    // Sleep before running the benchmark.
-    trace!("Pre-benchmark, sleeping for: {:?}", B::PRE_SLEEP_DURATION);
-    std::thread::sleep(B::PRE_SLEEP_DURATION);
-
-    // Run/time the benchmark.
-    let now = std::time::Instant::now();
-    B::MAIN(input);
-    let time = now.elapsed().as_secs_f32();
-
-    // Print the benchmark timings.
-    info!("{name:>34} ... {time}");
-    assert!(
-        timings.insert(name, time).is_none(),
-        "Two benchmarks had the same name - they would collide in the final output: {name}",
-    );
-
-    // Sleep for a cooldown period after the benchmark run.
-    trace!("Post-benchmark, sleeping for: {:?}", B::POST_SLEEP_DURATION);
-    std::thread::sleep(B::POST_SLEEP_DURATION);
-}
benches/benchmark/bin/src/timings.rs (deleted)
@@ -1,5 +0,0 @@
-/// Benchmark timing data.
-///
-/// - Key = benchmark name
-/// - Value = benchmark time in seconds
-pub(crate) type Timings = std::collections::HashMap<&'static str, f32>;
benches/benchmark/example/Cargo.toml (deleted)
@@ -1,17 +0,0 @@
-[package]
-name = "cuprate-benchmark-example"
-version = "0.0.0"
-edition = "2021"
-description = "Example showcasing Cuprate's benchmarking harness"
-license = "MIT"
-authors = ["hinto-janai"]
-repository = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/example"
-keywords = ["cuprate", "benchmarking", "example"]
-
-[dependencies]
-cuprate-benchmark-lib = { path = "../lib" }
-
-[dev-dependencies]
-
-[lints]
-workspace = true
benches/benchmark/example/README.md (deleted)
@@ -1,3 +0,0 @@
-## `cuprate-benchmark-example`
-This crate contains a short example benchmark that shows how to implement and use
-`cuprate-benchmark-lib` so that it can be run by `cuprate-benchmark`.
benches/benchmark/example/src/lib.rs (deleted)
@@ -1,42 +0,0 @@
-#![doc = include_str!("../README.md")]
-
-use std::hint::black_box;
-
-use cuprate_benchmark_lib::Benchmark;
-
-/// Marker struct that implements [`Benchmark`]
-pub struct Example;
-
-/// The input to our benchmark function.
-pub type ExampleBenchmarkInput = u64;
-
-/// The setup function that creates the input.
-pub const fn example_benchmark_setup() -> ExampleBenchmarkInput {
-    1
-}
-
-/// The main benchmarking function.
-#[expect(clippy::unit_arg)]
-pub fn example_benchmark_main(input: ExampleBenchmarkInput) {
-    // In this case, we're simply benchmarking the
-    // performance of simple arithmetic on the input data.
-
-    fn math(input: ExampleBenchmarkInput, number: u64) {
-        let x = input;
-        let x = black_box(x * number);
-        let x = black_box(x / number);
-        let x = black_box(x + number);
-        let _ = black_box(x - number);
-    }
-
-    for number in 1..100_000_000 {
-        black_box(math(input, number));
-    }
-}
-
-// This implementation will be run by `cuprate-benchmark`.
-impl Benchmark for Example {
-    type Input = ExampleBenchmarkInput;
-    const SETUP: fn() -> Self::Input = example_benchmark_setup;
-    const MAIN: fn(Self::Input) = example_benchmark_main;
-}
benches/benchmark/lib/Cargo.toml (deleted)
@@ -1,18 +0,0 @@
-[package]
-name = "cuprate-benchmark-lib"
-version = "0.0.0"
-edition = "2021"
-description = "Cuprate's benchmarking library"
-license = "MIT"
-authors = ["hinto-janai"]
-repository = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib"
-keywords = ["cuprate", "benchmarking", "library"]
-
-[features]
-
-[dependencies]
-
-[dev-dependencies]
-
-[lints]
-workspace = true
benches/benchmark/lib/README.md (deleted)
@@ -1,15 +0,0 @@
-## `cuprate-benchmark-lib`
-This crate is the glue between
-[`cuprate-benchmark`](https://github.com/Cuprate/cuprate/tree/benches/benches/benchmark/bin)
-and all the benchmark crates.
-
-It defines the [`crate::Benchmark`] trait, which defines the behavior of all benchmarks.
-
-See the [`cuprate-benchmark-example`](https://github.com/Cuprate/cuprate/tree/benches/benches/benchmark/example)
-crate to see an example implementation of this trait.
-
-After implementing this trait, a few more steps are
-needed before the `cuprate-benchmark` binary
-can actually run your benchmark crate; see the
-[`Benchmarking` section in the Architecture book](https://architecture.cuprate.org/benchmarking/intro.html)
-to see how to do this.
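Concretely, those steps amount to wiring the new crate into the `cuprate-benchmark` binary's `Cargo.toml` and `main.rs`, as that crate's own comments describe. A rough sketch, with hypothetical `my-benchmark` names standing in for a real benchmark crate:

```rust
// benches/benchmark/bin/Cargo.toml (sketch):
//
//   [features]
//   my-benchmark = ["dep:cuprate-benchmark-my-benchmark"]
//
//   [dependencies]
//   cuprate-benchmark-my-benchmark = { workspace = true, optional = true }

// benches/benchmark/bin/src/main.rs: copy one of the existing `cfg_if`
// blocks inside `main()` and point it at the new benchmark's type.
cfg_if! {
    if #[cfg(feature = "my-benchmark")] {
        run::run_benchmark::<cuprate_benchmark_my_benchmark::MyBenchmark>(&mut timings);
    }
}
```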
benches/benchmark/lib/src/benchmark.rs (deleted)
@@ -1,45 +0,0 @@
-//! Benchmarking trait.
-
-use std::time::Duration;
-
-/// A benchmarking function and its inputs.
-pub trait Benchmark {
-    /// The benchmark's name.
-    ///
-    /// This is automatically implemented
-    /// as the name of the [`Self`] type.
-    //
-    // FIXME: use `const` instead of `fn` when stable
-    // <https://github.com/rust-lang/rust/issues/63084>
-    fn name() -> &'static str {
-        std::any::type_name::<Self>()
-    }
-
-    /// Input to the main benchmarking function.
-    ///
-    /// This is passed to [`Self::MAIN`].
-    type Input;
-
-    /// Setup function to generate the input.
-    ///
-    /// This function is not timed.
-    const SETUP: fn() -> Self::Input;
-
-    /// The main function to benchmark.
-    ///
-    /// The timer starts right before this
-    /// function is called and stops after the
-    /// function returns.
-    const MAIN: fn(Self::Input);
-
-    /// `cuprate-benchmark` will sleep for this [`Duration`] after
-    /// creating the [`Self::Input`], but before starting [`Self::MAIN`].
-    ///
-    /// 1 second by default.
-    const PRE_SLEEP_DURATION: Duration = Duration::from_secs(1);
-
-    /// `cuprate-benchmark` will sleep for this [`Duration`] after [`Self::MAIN`].
-    ///
-    /// 1 second by default.
-    const POST_SLEEP_DURATION: Duration = Duration::from_secs(1);
-}
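To make the trait's contract concrete, here is a minimal sketch of an implementation written against the trait as defined above. The `Sleep` benchmark and its durations are hypothetical, not part of the removed code:

```rust
use std::time::Duration;

use cuprate_benchmark_lib::Benchmark;

/// Hypothetical benchmark: measure how long a fixed sleep takes.
pub struct Sleep;

impl Benchmark for Sleep {
    // `SETUP` builds the input outside the timed region.
    type Input = Duration;
    const SETUP: fn() -> Self::Input = || Duration::from_millis(250);

    // Only `MAIN` is timed by `run_benchmark`.
    const MAIN: fn(Self::Input) = |duration| std::thread::sleep(duration);

    // Override the default 1-second warmup/cooldown sleeps.
    const PRE_SLEEP_DURATION: Duration = Duration::ZERO;
    const POST_SLEEP_DURATION: Duration = Duration::ZERO;
}
```

With this in a crate of its own, registering it in the binary (feature flag plus `cfg_if` block, as sketched earlier) is all that remains.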
benches/benchmark/lib/src/lib.rs (deleted)
@@ -1,5 +0,0 @@
-#![doc = include_str!("../README.md")]
-
-mod benchmark;
-
-pub use benchmark::Benchmark;
benches/criterion/cuprate-json-rpc/Cargo.toml (deleted)
@@ -1,23 +0,0 @@
-[package]
-name = "cuprate-criterion-json-rpc"
-version = "0.0.0"
-edition = "2021"
-description = "Criterion benchmarking for cuprate-json-rpc"
-license = "MIT"
-authors = ["hinto-janai"]
-repository = "https://github.com/Cuprate/cuprate/tree/main/benches/criterion/cuprate-json-rpc"
-keywords = ["cuprate", "json-rpc", "criterion", "benchmark"]
-
-[dependencies]
-cuprate-json-rpc = { workspace = true }
-
-criterion = { workspace = true }
-function_name = { workspace = true }
-serde_json = { workspace = true, features = ["default"] }
-
-[[bench]]
-name = "main"
-harness = false
-
-[lints]
-workspace = true
benches/criterion/cuprate-json-rpc/benches/main.rs (deleted)
@@ -1,8 +0,0 @@
-//! Benchmarks for `cuprate-json-rpc`.
-#![allow(unused_crate_dependencies)]
-
-mod response;
-
-criterion::criterion_main! {
-    response::serde,
-}
benches/criterion/cuprate-json-rpc/benches/response.rs (deleted)
@@ -1,110 +0,0 @@
-//! Benchmarks for [`Response`].
-#![allow(unused_attributes, unused_crate_dependencies)]
-
-use criterion::{black_box, criterion_group, criterion_main, Criterion};
-use function_name::named;
-use serde_json::{from_str, to_string_pretty};
-
-use cuprate_json_rpc::{Id, Response};
-
-// `serde` benchmarks on `Response`.
-//
-// These are benchmarked as `Response` has a custom serde implementation.
-criterion_group! {
-    name = serde;
-    config = Criterion::default();
-    targets =
-        response_from_str_u8,
-        response_from_str_u64,
-        response_from_str_string_5_len,
-        response_from_str_string_10_len,
-        response_from_str_string_100_len,
-        response_from_str_string_500_len,
-        response_to_string_pretty_u8,
-        response_to_string_pretty_u64,
-        response_to_string_pretty_string_5_len,
-        response_to_string_pretty_string_10_len,
-        response_to_string_pretty_string_100_len,
-        response_to_string_pretty_string_500_len,
-        response_from_str_bad_field_1,
-        response_from_str_bad_field_5,
-        response_from_str_bad_field_10,
-        response_from_str_bad_field_100,
-        response_from_str_missing_field,
-}
-criterion_main!(serde);
-
-/// Generate `from_str` deserialization benchmark functions for [`Response`].
-macro_rules! impl_from_str_benchmark {
-    (
-        $(
-            $fn_name:ident => $request_type:ty => $request_string:literal,
-        )*
-    ) => {
-        $(
-            #[named]
-            fn $fn_name(c: &mut Criterion) {
-                let request_string = $request_string;
-
-                c.bench_function(function_name!(), |b| {
-                    b.iter(|| {
-                        let _r = from_str::<Response<$request_type>>(
-                            black_box(request_string)
-                        );
-                    });
-                });
-            }
-        )*
-    };
-}
-
-impl_from_str_benchmark! {
-    response_from_str_u8 => u8 => r#"{"jsonrpc":"2.0","id":123,"result":0}"#,
-    response_from_str_u64 => u64 => r#"{"jsonrpc":"2.0","id":123,"result":0}"#,
-    response_from_str_string_5_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"hello"}"#,
-    response_from_str_string_10_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"hellohello"}"#,
-    response_from_str_string_100_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld"}"#,
-    response_from_str_string_500_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld"}"#,
-
-    // The custom serde currently looks at all fields.
-    // These test the performance when the serde
-    // has to parse through a bunch of unrelated fields.
-    response_from_str_bad_field_1 => u8 => r#"{"bad_field":0,"jsonrpc":"2.0","id":123,"result":0}"#,
-    response_from_str_bad_field_5 => u8 => r#"{"bad_field_1":0,"bad_field_2":0,"bad_field_3":0,"bad_field_4":0,"bad_field_5":0,"jsonrpc":"2.0","id":123,"result":0}"#,
-    response_from_str_bad_field_10 => u8 => r#"{"bad_field_1":0,"bad_field_2":0,"bad_field_3":0,"bad_field_4":0,"bad_field_5":0,"bad_field_6":0,"bad_field_7":0,"bad_field_8":0,"bad_field_9":0,"bad_field_10":0,"jsonrpc":"2.0","id":123,"result":0}"#,
-    response_from_str_bad_field_100 => u8 => r#"{"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0,"38":0,"39":0,"40":0,"41":0,"42":0,"43":0,"44":0,"45":0,"46":0,"47":0,"48":0,"49":0,"50":0,"51":0,"52":0,"53":0,"54":0,"55":0,"56":0,"57":0,"58":0,"59":0,"60":0,"61":0,"62":0,"63":0,"64":0,"65":0,"66":0,"67":0,"68":0,"69":0,"70":0,"71":0,"72":0,"73":0,"74":0,"75":0,"76":0,"77":0,"78":0,"79":0,"80":0,"81":0,"82":0,"83":0,"84":0,"85":0,"86":0,"87":0,"88":0,"89":0,"90":0,"91":0,"92":0,"93":0,"94":0,"95":0,"96":0,"97":0,"98":0,"99":0,"100":0,"jsonrpc":"2.0","id":123,"result":0}"#,
-
-    // This one is missing the `jsonrpc` field.
-    response_from_str_missing_field => u8 => r#"{"id":123,"result":0}"#,
-}
-
-/// Generate `to_string_pretty` serialization benchmark functions for [`Response`].
-macro_rules! impl_to_string_pretty_benchmark {
-    (
-        $(
-            $fn_name:ident => $request_constructor:expr_2021,
-        )*
-    ) => {
-        $(
-            #[named]
-            fn $fn_name(c: &mut Criterion) {
-                let request = $request_constructor;
-
-                c.bench_function(function_name!(), |b| {
-                    b.iter(|| {
-                        let _s = to_string_pretty(black_box(&request)).unwrap();
-                    });
-                });
-            }
-        )*
-    };
-}
-
-impl_to_string_pretty_benchmark! {
-    response_to_string_pretty_u8 => Response::<u8>::ok(Id::Null, 0),
-    response_to_string_pretty_u64 => Response::<u64>::ok(Id::Null, 0),
-    response_to_string_pretty_string_5_len => Response::ok(Id::Null, String::from("hello")),
-    response_to_string_pretty_string_10_len => Response::ok(Id::Null, String::from("hellohello")),
-    response_to_string_pretty_string_100_len => Response::ok(Id::Null, String::from("helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld")),
-    response_to_string_pretty_string_500_len => Response::ok(Id::Null, String::from("helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld")),
-}
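For readers less familiar with `macro_rules!`, each `name => type => json` row handed to `impl_from_str_benchmark!` above expands to roughly the following function. This is a hand-expanded sketch of the first row, not extra code from the commit:

```rust
#[named]
fn response_from_str_u8(c: &mut Criterion) {
    let request_string = r#"{"jsonrpc":"2.0","id":123,"result":0}"#;

    c.bench_function(function_name!(), |b| {
        b.iter(|| {
            // Deserialize; the parsed `Response<u8>` itself is discarded.
            let _r = from_str::<Response<u8>>(black_box(request_string));
        });
    });
}
```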
benches/criterion/cuprate-json-rpc/src/lib.rs (deleted)
@@ -1,2 +0,0 @@
-//! Benchmark lib for `cuprate-json-rpc`.
-#![allow(unused_crate_dependencies, reason = "used in benchmarks")]
benches/criterion/example/Cargo.toml (deleted)
@@ -1,21 +0,0 @@
-[package]
-name = "cuprate-criterion-example"
-version = "0.0.0"
-edition = "2021"
-description = "Criterion benchmarking example for Cuprate"
-license = "MIT"
-authors = ["hinto-janai"]
-repository = "https://github.com/Cuprate/cuprate/tree/main/benches/criterion/example"
-keywords = ["cuprate", "criterion", "benchmark", "example"]
-
-[dependencies]
-criterion = { workspace = true }
-function_name = { workspace = true }
-serde_json = { workspace = true, features = ["default"] }
-
-[[bench]]
-name = "main"
-harness = false
-
-[lints]
-workspace = true
benches/criterion/example/README.md (deleted)
@@ -1,14 +0,0 @@
-## `cuprate-criterion-example`
-An example of using Criterion for benchmarking Cuprate crates.
-
-Consider copy+pasting this crate to use as a base when creating new Criterion benchmark crates.
-
-## `src/`
-Benchmark crates have a `benches/` run by `cargo bench`, but they are also crates themselves,
-as in, they have a `src` folder that `benches/` can pull code from.
-
-The `src` directories in these benchmarking crates are usually filled with
-helper functions, types, etc., that are used repeatedly in the benchmarks.
-
-## `benches/`
-These are the actual benchmarks run by `cargo bench`.
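Putting the two sections together, the crate layout looks like this (reconstructed from the files removed in this commit):

```text
benches/criterion/example/
├── Cargo.toml       # declares `[[bench]] name = "main", harness = false`
├── README.md
├── src/
│   └── lib.rs       # shared helpers, e.g. `SomeHardToCreateObject`
└── benches/
    ├── main.rs      # registers all Criterion benchmark groups
    └── example.rs   # the actual benchmarks
```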
benches/criterion/example/benches/example.rs (deleted)
@@ -1,48 +0,0 @@
-//! Benchmarks.
-#![allow(unused_attributes, unused_crate_dependencies)]
-
-use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
-use function_name::named;
-
-use cuprate_criterion_example::SomeHardToCreateObject;
-
-// This is how you register criterion benchmarks.
-criterion_group! {
-    name = benches;
-    config = Criterion::default();
-    targets = benchmark_1, benchmark_range,
-}
-criterion_main!(benches);
-
-/// Benchmark a single input.
-///
-/// <https://bheisler.github.io/criterion.rs/book/user_guide/benchmarking_with_inputs.html#benchmarking-with-one-input>
-#[named]
-fn benchmark_1(c: &mut Criterion) {
-    // It is recommended to use `function_name!()` as a benchmark
-    // identifier instead of manually re-typing the function name.
-    c.bench_function(function_name!(), |b| {
-        b.iter(|| {
-            black_box(SomeHardToCreateObject::from(1));
-        });
-    });
-}
-
-/// Benchmark a range of inputs.
-///
-/// <https://bheisler.github.io/criterion.rs/book/user_guide/benchmarking_with_inputs.html#benchmarking-with-a-range-of-values>
-#[named]
-fn benchmark_range(c: &mut Criterion) {
-    let mut group = c.benchmark_group(function_name!());
-
-    for i in 0..4 {
-        group.throughput(Throughput::Elements(i));
-        group.bench_with_input(BenchmarkId::from_parameter(i), &i, |b, &i| {
-            b.iter(|| {
-                black_box(SomeHardToCreateObject::from(i));
-            });
-        });
-    }
-
-    group.finish();
-}
benches/criterion/example/benches/main.rs (deleted)
@@ -1,10 +0,0 @@
-//! Benchmark examples.
-#![allow(unused_crate_dependencies)]
-
-// All modules within `benches/` are `mod`ed here.
-mod example;
-
-// And all the Criterion benchmarks are registered like so:
-criterion::criterion_main! {
-    example::benches,
-}
benches/criterion/example/src/lib.rs (deleted)
@@ -1,13 +0,0 @@
-#![doc = include_str!("../README.md")] // See the README for crate documentation.
-#![allow(unused_crate_dependencies, reason = "used in benchmarks")]
-
-/// Shared type that all benchmarks can use.
-#[expect(dead_code)]
-pub struct SomeHardToCreateObject(u64);
-
-impl From<u64> for SomeHardToCreateObject {
-    /// Shared function that all benchmarks can use.
-    fn from(value: u64) -> Self {
-        Self(value)
-    }
-}