Compare commits

...

3 commits

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| hinto.janai | `828cbbac27` | docs | 2024-10-08 20:49:13 -04:00 |
| hinto.janai | `8c9d159124` | fix clippy | 2024-10-08 20:35:00 -04:00 |
| hinto.janai | `9a0b6138b5` | docs, tracing | 2024-10-08 20:19:22 -04:00 |
18 changed files with 194 additions and 44 deletions

Cargo.lock (generated)
View file

@@ -641,6 +641,8 @@ dependencies = [
  "cuprate-benchmark-lib",
  "serde",
  "serde_json",
+ "tracing",
+ "tracing-subscriber",
 ]
 
 [[package]]
@@ -1862,6 +1864,15 @@ version = "0.4.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
 
+[[package]]
+name = "matchers"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
+dependencies = [
+ "regex-automata 0.1.10",
+]
+
 [[package]]
 name = "matchit"
 version = "0.7.3"
@@ -2362,7 +2373,7 @@ dependencies = [
  "rand",
  "rand_chacha",
  "rand_xorshift",
- "regex-syntax",
+ "regex-syntax 0.8.4",
  "rusty-fork",
  "tempfile",
  "unarray",
@@ -2536,8 +2547,17 @@ checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f"
 dependencies = [
  "aho-corasick",
  "memchr",
- "regex-automata",
- "regex-syntax",
+ "regex-automata 0.4.7",
+ "regex-syntax 0.8.4",
 ]
 
+[[package]]
+name = "regex-automata"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
+dependencies = [
+ "regex-syntax 0.6.29",
+]
+
 [[package]]
@@ -2548,9 +2568,15 @@ checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
 dependencies = [
  "aho-corasick",
  "memchr",
- "regex-syntax",
+ "regex-syntax 0.8.4",
 ]
 
+[[package]]
+name = "regex-syntax"
+version = "0.6.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
+
 [[package]]
 name = "regex-syntax"
 version = "0.8.4"
@@ -3257,10 +3283,14 @@ version = "0.3.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b"
 dependencies = [
+ "matchers",
  "nu-ansi-term",
+ "once_cell",
+ "regex",
  "sharded-slab",
  "smallvec",
  "thread_local",
+ "tracing",
  "tracing-core",
  "tracing-log",
 ]

View file

@@ -106,7 +106,7 @@ tempfile = { version = "3.13.0" }
 monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" }
 monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" }
 pretty_assertions = { version = "1.4.1" }
-proptest = { version = "1" }
+proptest = { version = "1.5.0" }
 proptest-derive = { version = "0.4.0" }
 tokio-test = { version = "0.4.4" }

View file

@@ -12,17 +12,30 @@ keywords = ["cuprate", "benchmarking", "binary"]
 # All new benchmarks should be added here!
 all = ["example"]
 
-default = []
-json = []
-example = ["dep:cuprate-benchmark-example"]
+# Non-benchmark features.
+default = []
+json = []
+trace = []
+debug = []
+warn = []
+info = []
+error = []
+
+# Benchmark features.
+# New benchmarks should be added here!
+example = [
+    "dep:cuprate-benchmark-example"
+]
 
 [dependencies]
 cuprate-benchmark-lib = { path = "../lib" }
 cuprate-benchmark-example = { path = "../example", optional = true }
 cfg-if = { workspace = true }
 serde = { workspace = true, features = ["derive"] }
 serde_json = { workspace = true, features = ["std"] }
+tracing = { workspace = true, features = ["std", "attributes"] }
+tracing-subscriber = { workspace = true, features = ["fmt", "std", "env-filter"] }
 
 [dev-dependencies]

View file

@@ -14,6 +14,14 @@ Benchmarks are opt-in and enabled via features.
 ## Features
 These are features that aren't for enabling benchmarks, but rather for other things.
 
+Since `cuprate-benchmark` is built right before it is run,
+these features almost act like command line arguments.
+
 | Features | Does what |
 |----------|-----------|
 | json  | Prints JSON timings instead of a markdown table |
+| trace | Use the `trace` log-level |
+| debug | Use the `debug` log-level |
+| warn  | Use the `warn` log-level |
+| info  | Use the `info` log-level (default) |
+| error | Use the `error` log-level |
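
For instance, combining a benchmark feature with these flag-like features might look like the following (a hypothetical invocation, composed from the feature table above and the command shown on the `Running` page):

```bash
# Hypothetical: run the `example` benchmark, print JSON timings, use the `trace` log-level.
cargo run --release --package cuprate-benchmark --features example,json,trace
```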

View file

@@ -0,0 +1,29 @@
+use cfg_if::cfg_if;
+use tracing::{info, instrument, Level};
+use tracing_subscriber::FmtSubscriber;
+
+/// Initializes the `tracing` logger.
+#[instrument]
+pub(crate) fn init_logger() {
+    const LOG_LEVEL: Level = {
+        cfg_if! {
+            if #[cfg(feature = "trace")] {
+                Level::TRACE
+            } else if #[cfg(feature = "debug")] {
+                Level::DEBUG
+            } else if #[cfg(feature = "warn")] {
+                Level::WARN
+            } else if #[cfg(feature = "info")] {
+                Level::INFO
+            } else if #[cfg(feature = "error")] {
+                Level::ERROR
+            } else {
+                Level::INFO
+            }
+        }
+    };
+
+    FmtSubscriber::builder().with_max_level(LOG_LEVEL).init();
+
+    info!("Log level: {LOG_LEVEL}");
+}

View file

@@ -4,6 +4,7 @@
     reason = "this crate imports many potentially unused dependencies"
 )]
 
+mod log;
 mod print;
 mod run;
 mod timings;
@@ -20,11 +21,13 @@ use cfg_if::cfg_if;
 /// 2. Change it to your benchmark's feature flag
 /// 3. Change it to your benchmark's type
 fn main() {
+    log::init_logger();
+
     let mut timings = timings::Timings::new();
 
     cfg_if! {
         if #[cfg(not(any(feature = "example")))] {
-            compile_error!("[cuprate_benchmark]: no feature specified. Use `--features $BENCHMARK_FEATURE` when building.");
+            compile_error!("No feature specified. Use `--features $BENCHMARK_FEATURE` when building.");
         }
     }

View file

@@ -1,9 +1,13 @@
+#![expect(dead_code, reason = "code hidden behind feature flags")]
+
 use cfg_if::cfg_if;
 
 use crate::timings::Timings;
 
 /// Print the final markdown table of benchmark timings.
 pub(crate) fn print_timings(timings: &Timings) {
+    println!("\nFinished all benchmarks, printing results:");
+
     cfg_if! {
         if #[cfg(feature = "json")] {
             print_timings_json(timings);
@@ -28,7 +32,6 @@ pub(crate) fn print_timings_markdown(timings: &Timings) {
 }
 
 /// Enabled via `json` feature.
-#[expect(dead_code)]
 pub(crate) fn print_timings_json(timings: &Timings) {
     let json = serde_json::to_string_pretty(timings).unwrap();
     println!("\n{json}");

View file

@@ -1,20 +1,21 @@
-use std::io::Write;
+use tracing::{info, instrument, trace};
 
 use cuprate_benchmark_lib::Benchmark;
 
 use crate::timings::Timings;
 
 /// Run a [`Benchmark`] and record its timing.
+#[instrument(skip_all)]
 pub(crate) fn run_benchmark<B: Benchmark>(timings: &mut Timings) {
-    // Print the benchmark name.
-    let name = std::any::type_name::<B>();
-    print!("{name:>34} ... ");
-    std::io::stdout().flush().unwrap();
+    // Get the benchmark name.
+    let name = B::name();
+    trace!("Running benchmark: {name}");
 
     // Setup the benchmark input.
     let input = B::SETUP();
 
     // Sleep before running the benchmark.
+    trace!("Pre-benchmark, sleeping for: {:?}", B::PRE_SLEEP_DURATION);
     std::thread::sleep(B::PRE_SLEEP_DURATION);
 
     // Run/time the benchmark.
@@ -23,12 +24,13 @@ pub(crate) fn run_benchmark<B: Benchmark>(timings: &mut Timings) {
     let time = now.elapsed().as_secs_f32();
 
     // Print the benchmark timings.
-    println!("{time}");
+    info!("{name:>34} ... {time}");
 
     assert!(
         timings.insert(name, time).is_none(),
-        "[cuprate_benchmark]: there were 2 benchmarks with the same name - this collides the final output: {name}",
+        "There were 2 benchmarks with the same name - this collides the final output: {name}",
     );
 
     // Sleep for a cooldown period after the benchmark run.
+    trace!("Post-benchmark, sleeping for: {:?}", B::POST_SLEEP_DURATION);
     std::thread::sleep(B::POST_SLEEP_DURATION);
 }

View file

@@ -4,6 +4,14 @@ use std::time::Duration;
 
 /// A benchmarking function and its inputs.
 pub trait Benchmark {
+    /// The benchmark's name.
+    ///
+    /// This is automatically implemented
+    /// as the name of the [`Self`] type.
+    fn name() -> &'static str {
+        std::any::type_name::<Self>()
+    }
+
     /// Input to the main benchmarking function.
     ///
     /// This is passed to [`Self::MAIN`].
@@ -22,12 +30,12 @@ pub trait Benchmark {
     const MAIN: fn(Self::Input);
 
     /// `cuprate-benchmark` will sleep for this [`Duration`] after
-    /// creating the [`Self::Input`], but before starting [`Self::Main`].
+    /// creating the [`Self::Input`], but before starting [`Self::MAIN`].
     ///
     /// 1 second by default.
     const PRE_SLEEP_DURATION: Duration = Duration::from_secs(1);
 
-    /// `cuprate-benchmark` will sleep for this [`Duration`] after [`Self::Main`].
+    /// `cuprate-benchmark` will sleep for this [`Duration`] after [`Self::MAIN`].
     ///
     /// 1 second by default.
     const POST_SLEEP_DURATION: Duration = Duration::from_secs(1);
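
As a rough sketch (not part of this diff), implementing the trait for a toy benchmark might look like the following; the type and values are hypothetical, and the exact `SETUP` declaration is inferred from its `B::SETUP()` call site in `run.rs` and the `const MAIN: fn(Self::Input)` declaration above:

```rust
use std::time::Duration;

use cuprate_benchmark_lib::Benchmark;

/// A hypothetical benchmark that times summing a large `Vec`.
pub struct SumVec;

impl Benchmark for SumVec {
    /// The input to the timed function.
    type Input = Vec<u64>;

    /// Build the input outside of the timed section.
    const SETUP: fn() -> Self::Input = || (0..10_000_000u64).collect();

    /// The function that is actually timed.
    const MAIN: fn(Self::Input) = |input| {
        let sum: u64 = input.iter().sum();
        assert!(sum > 0);
    };

    // `name()` and the 1-second pre/post sleep defaults are left as-is.
}
```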

View file

@@ -1,8 +1,10 @@
 //! Benchmarks for `cuprate-json-rpc`.
+//!
+//! TODO: this crate is not finished.
 
 #![allow(unused_crate_dependencies)]
 
 mod response;
 
 criterion::criterion_main! {
-    response::benches,
+    response::serde,
 }

View file

@@ -7,8 +7,11 @@ use serde_json::{from_str, to_string_pretty};
 
 use cuprate_json_rpc::{Id, Response};
 
+// `serde` benchmarks on `Response`.
 criterion_group! {
-    benches,
+    name = serde;
+    config = Criterion::default();
+    targets =
     response_from_str_u8,
     response_from_str_u64,
     response_from_str_string_5_len,
@@ -22,7 +25,7 @@ criterion_group! {
     response_to_string_pretty_string_100_len,
     response_to_string_pretty_string_500_len,
 }
-criterion_main!(benches);
+criterion_main!(serde);
/// Generate `from_str` deserialization benchmark functions for [`Response`]. /// Generate `from_str` deserialization benchmark functions for [`Response`].
macro_rules! impl_from_str_benchmark { macro_rules! impl_from_str_benchmark {

View file

@@ -1,2 +1,2 @@
-//! TODO
+//! TODO: this crate is not finished.
 #![allow(unused_crate_dependencies, reason = "used in benchmarks")]

View file

@@ -6,5 +6,16 @@ although, it requires knowledge of how to use Criterion first:
 2. Copy [`benches/criterion/example`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/example) as base
 3. Get started
 
+## Naming
+New benchmark crates using Criterion should:
+- Be in [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/)
+- Be named using the `cuprate-criterion-$CRATE_NAME` format
+
 For a real example, see:
 [`cuprate-criterion-json-rpc`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/cuprate-json-rpc).
+
+## Workspace
+Finally, make sure to add the benchmark crate to the workspace
+[`Cargo.toml`](https://github.com/Cuprate/cuprate/blob/main/Cargo.toml) file.
+
+Your benchmark is now ready to be run.
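
A minimal sketch of what such a crate's benchmark source might contain (illustrative names only; in practice you would copy the `example` crate above):

```rust
use criterion::{black_box, criterion_group, criterion_main, Criterion};

/// Time a single function with Criterion.
fn sum_range(c: &mut Criterion) {
    c.bench_function("sum_range_1000", |b| {
        b.iter(|| black_box((0..1_000u64).sum::<u64>()));
    });
}

criterion_group!(benches, sum_range);
criterion_main!(benches);
```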

View file

@ -1,6 +1,4 @@
# Criterion # Criterion
Each sub-directory in [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) is a crate that uses [Criterion](https://bheisler.github.io/criterion.rs/book) for timing single functions and/or groups of functions. Each sub-directory in [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) is a crate that uses [Criterion](https://bheisler.github.io/criterion.rs/book) for timing single functions and/or groups of functions.
They are generally be small in scope. They are generally be small in scope.
See [`benches/criterion/cuprate-json-rpc`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/cuprate-json-rpc) for an example.

View file

@@ -4,8 +4,7 @@ New benchmarks are plugged into `cuprate-benchmark` by:
 1. Registering the benchmark in the `cuprate_benchmark` binary
 
 See [`benches/benchmark/example`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/example)
-for an example. For a real example, see:
-[`cuprate-benchmark-database`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/cuprate-database).
+for an example.
 
 ## Creating the benchmark crate
 Before plugging into `cuprate-benchmark`, your actual benchmark crate must be created:
@@ -15,6 +14,13 @@ Before plugging into `cuprate-benchmark`, your actual benchmark crate must be created:
 1. Create a benchmark
 1. Implement `cuprate_benchmark_lib::Benchmark`
 
+New benchmark crates using `cuprate-benchmark` should:
+- Be in [`benches/benchmark/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/)
+- Be named using the `cuprate-benchmark-$CRATE_NAME` format
+
+For a real example, see:
+[`cuprate-benchmark-database`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/cuprate-database).
+
 ## `cuprate_benchmark_lib::Benchmark`
 This is the trait that standardizes all benchmarks run under `cuprate-benchmark`.
@@ -28,6 +34,9 @@ in the binary that is actually run: `cuprate-benchmark`.
 If your benchmark is new, add a new crate feature to [`cuprate-benchmark`'s Cargo.toml file](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin/Cargo.toml) with an optional dependency to your benchmarking crate.
 
+Please remember to edit the feature table in the
+[`README.md`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin/README.md) as well!
+
 ## Adding to `cuprate-benchmark`'s `main()`
 After adding your crate's feature, add a conditional line that runs the benchmark
 if the feature is enabled to the `main()` function:
@@ -39,4 +48,10 @@ cfg_if! {
         run::run_benchmark::<cuprate_benchmark_egg::Benchmark>(&mut timings);
     }
 }
 ```
+
+## Workspace
+Finally, make sure to add the benchmark crate to the workspace
+[`Cargo.toml`](https://github.com/Cuprate/cuprate/blob/main/Cargo.toml) file.
+
+Your benchmark is now ready to be run.

View file

@@ -1,12 +1,37 @@
 # cuprate-benchmark
-Cuprate has 2 custom crates for macro benchmarking:
+Cuprate has 2 custom crates for general benchmarking:
 - `cuprate-benchmark`; the actual binary crate that is run
 - `cuprate-benchmark-lib`; the library that other crates hook into
 
-The purpose of `cuprate-benchmark` is very simple:
+The abstract purpose of `cuprate-benchmark` is very simple:
 1. Set-up the benchmark
 1. Start timer
 1. Run benchmark
 1. Output data
 
 `cuprate-benchmark` runs the benchmarks found in [`benches/benchmark/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark).
+
+`cuprate-benchmark-lib` defines the `Benchmark` trait that all
+benchmark crates implement to "plug-in" to the benchmarking harness.
+
+## Diagram
+A diagram displaying the relation between `cuprate-benchmark` and related crates.
+
+```
+                    ┌─────────────────────┐
+                    │  cuprate_benchmark  │
+                    │ (actual binary run) │
+                    └──────────┬──────────┘
+            ┌──────────────────┴───────────────────┐
+            │         cuprate_benchmark_lib        │
+            │ ┌───────────────────────────────────┐│
+            │ │          trait Benchmark          ││
+            │ └───────────────────────────────────┘│
+            └──────────────────┬───────────────────┘
+┌───────────────────────────┐  │   ┌───────────────────────────┐
+│ cuprate_benchmark_example ├──┼───┤    cuprate_benchmark_*    │
+└───────────────────────────┘  │   └───────────────────────────┘
+┌───────────────────────────┐  │   ┌───────────────────────────┐
+│    cuprate_benchmark_*    ├──┴───┤    cuprate_benchmark_*    │
+└───────────────────────────┘      └───────────────────────────┘
+```

View file

@@ -1,7 +1,7 @@
 # Running
 `cuprate-benchmark` benchmarks are run with this command:
 ```bash
-cargo run --release --package cuprate-benchmark --features $YOUR_BENCHMARK_CRATE_FEATURE
+cargo run --release --package cuprate-benchmark --features $BENCHMARK_CRATE_FEATURE
 ```
 
 For example, to run the example benchmark:

View file

@@ -1,11 +1,11 @@
 # Benchmarking
 Cuprate has 2 types of benchmarks:
-- Criterion benchmarks
+- [Criterion](https://bheisler.github.io/criterion.rs/book/user_guide/advanced_configuration.html) benchmarks
 - `cuprate-benchmark` benchmarks
 
-[Criterion](https://bheisler.github.io/criterion.rs/book/user_guide/advanced_configuration.html) is used for micro benchmarks; they time single functions, groups of functions, and are generally small in scope.
-`cuprate-benchmark` and `cuprate-benchmark-lib` are custom in-house crates Cuprate uses for macro benchmarks; these test sub-systems, sections of a sub-system, or otherwise larger or more complicated code that isn't suited for micro benchmarks.
+Criterion is used for micro benchmarks; they time single functions, groups of functions, and are generally small in scope.
+`cuprate-benchmark` and [`cuprate-benchmark-lib`](https://doc.cuprate.org/cuprate_benchmark_lib) are custom in-house crates Cuprate uses for macro benchmarks; these test sub-systems, sections of a sub-system, or otherwise larger or more complicated code that isn't well-suited for micro benchmarks.
 
 ## File layout and purpose
 All benchmarking-related files are in the [`benches/`](https://github.com/Cuprate/cuprate/tree/main/benches) folder.
@@ -14,9 +14,9 @@ This directory is organized as such:
 | Directory | Purpose |
 |-------------------------------|---------|
-| `benches/criterion/`          | Criterion (micro) benchmarks |
+| [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) | Criterion (micro) benchmarks |
 | `benches/criterion/cuprate-*` | Criterion benchmarks for the crate with the same name |
-| `benches/benchmark/`          | Cuprate's custom benchmarking files |
-| `benches/benchmark/bin`       | The `cuprate-benchmark` crate; the actual binary run that links all benchmarks |
-| `benches/benchmark/lib`       | The `cuprate-benchmark-lib` crate; the benchmarking framework all benchmarks plug into |
+| [`benches/benchmark/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark) | Cuprate's custom benchmarking files |
+| [`benches/benchmark/bin`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin) | The `cuprate-benchmark` crate; the actual binary run that links all benchmarks |
+| [`benches/benchmark/lib`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib) | The `cuprate-benchmark-lib` crate; the benchmarking framework all benchmarks plug into |
 | `benches/benchmark/cuprate-*` | `cuprate-benchmark` benchmarks for the crate with the same name |