Compare commits


No commits in common. "828cbbac27e55ae3e2976534a204fbf1ae83dd6e" and "185c2ee25a79414f7df6c841591d2ed9d84ab42e" have entirely different histories.

18 changed files with 44 additions and 194 deletions

Cargo.lock (generated)

@@ -641,8 +641,6 @@ dependencies = [
"cuprate-benchmark-lib",
"serde",
"serde_json",
"tracing",
"tracing-subscriber",
]
[[package]]
@@ -1864,15 +1862,6 @@ version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "matchers"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
dependencies = [
"regex-automata 0.1.10",
]
[[package]]
name = "matchit"
version = "0.7.3"
@@ -2373,7 +2362,7 @@ dependencies = [
"rand",
"rand_chacha",
"rand_xorshift",
"regex-syntax 0.8.4",
"regex-syntax",
"rusty-fork",
"tempfile",
"unarray",
@@ -2547,17 +2536,8 @@ checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata 0.4.7",
"regex-syntax 0.8.4",
]
[[package]]
name = "regex-automata"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
dependencies = [
"regex-syntax 0.6.29",
"regex-automata",
"regex-syntax",
]
[[package]]
@@ -2568,15 +2548,9 @@ checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax 0.8.4",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.6.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
name = "regex-syntax"
version = "0.8.4"
@@ -3283,14 +3257,10 @@ version = "0.3.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b"
dependencies = [
"matchers",
"nu-ansi-term",
"once_cell",
"regex",
"sharded-slab",
"smallvec",
"thread_local",
"tracing",
"tracing-core",
"tracing-log",
]


@@ -106,7 +106,7 @@ tempfile = { version = "3.13.0" }
monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" }
monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" }
pretty_assertions = { version = "1.4.1" }
proptest = { version = "1.5.0" }
proptest = { version = "1" }
proptest-derive = { version = "0.4.0" }
tokio-test = { version = "0.4.4" }


@@ -12,20 +12,9 @@ keywords = ["cuprate", "benchmarking", "binary"]
# All new benchmarks should be added here!
all = ["example"]
# Non-benchmark features.
default = []
json = []
trace = []
debug = []
warn = []
info = []
error = []
# Benchmark features.
# New benchmarks should be added here!
example = [
"dep:cuprate-benchmark-example"
]
example = ["dep:cuprate-benchmark-example"]
[dependencies]
cuprate-benchmark-lib = { path = "../lib" }
@@ -34,8 +23,6 @@ cuprate-benchmark-example = { path = "../example", optional = true }
cfg-if = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true, features = ["std"] }
tracing = { workspace = true, features = ["std", "attributes"] }
tracing-subscriber = { workspace = true, features = ["fmt", "std", "env-filter"] }
[dev-dependencies]


@@ -14,14 +14,6 @@ Benchmarks are opt-in and enabled via features.
## Features
These are features that aren't for enabling benchmarks, but rather for other things.
Since `cuprate-benchmark` is built right before it is run,
these features almost act like command line arguments.
| Features | Does what |
|----------|-----------|
| json | Prints JSON timings instead of a markdown table
| trace | Use the `trace` log-level
| debug | Use the `debug` log-level
| warn | Use the `warn` log-level
| info | Use the `info` log-level (default)
| error | Use the `error` log-level
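These log-level features are removed by this change. For reference, a hedged example of how the old feature set acted like command-line arguments at build time (assuming the `example` benchmark feature from `benches/benchmark/bin/Cargo.toml`):

```bash
# Run with JSON output and `debug`-level logs, under the old feature set.
cargo run --release --package cuprate-benchmark --features example,json,debug
```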


@@ -1,29 +0,0 @@
use cfg_if::cfg_if;
use tracing::{info, instrument, Level};
use tracing_subscriber::FmtSubscriber;
/// Initializes the `tracing` logger.
#[instrument]
pub(crate) fn init_logger() {
const LOG_LEVEL: Level = {
cfg_if! {
if #[cfg(feature = "trace")] {
Level::TRACE
} else if #[cfg(feature = "debug")] {
Level::DEBUG
} else if #[cfg(feature = "warn")] {
Level::WARN
} else if #[cfg(feature = "info")] {
Level::INFO
} else if #[cfg(feature = "error")] {
Level::ERROR
} else {
Level::INFO
}
}
};
FmtSubscriber::builder().with_max_level(LOG_LEVEL).init();
info!("Log level: {LOG_LEVEL}");
}


@@ -4,7 +4,6 @@
reason = "this crate imports many potentially unused dependencies"
)]
mod log;
mod print;
mod run;
mod timings;
@@ -21,13 +20,11 @@ use cfg_if::cfg_if;
/// 2. Change it to your benchmark's feature flag
/// 3. Change it to your benchmark's type
fn main() {
log::init_logger();
let mut timings = timings::Timings::new();
cfg_if! {
if #[cfg(not(any(feature = "example")))] {
compile_error!("No feature specified. Use `--features $BENCHMARK_FEATURE` when building.");
compile_error!("[cuprate_benchmark]: no feature specified. Use `--features $BENCHMARK_FEATURE` when building.");
}
}


@@ -1,13 +1,9 @@
#![expect(dead_code, reason = "code hidden behind feature flags")]
use cfg_if::cfg_if;
use crate::timings::Timings;
/// Print the final markdown table of benchmark timings.
pub(crate) fn print_timings(timings: &Timings) {
println!("\nFinished all benchmarks, printing results:");
cfg_if! {
if #[cfg(feature = "json")] {
print_timings_json(timings);
@@ -32,6 +28,7 @@ pub(crate) fn print_timings_markdown(timings: &Timings) {
}
/// Enabled via `json` feature.
#[expect(dead_code)]
pub(crate) fn print_timings_json(timings: &Timings) {
let json = serde_json::to_string_pretty(timings).unwrap();
println!("\n{json}");


@@ -1,21 +1,20 @@
use tracing::{info, instrument, trace};
use std::io::Write;
use cuprate_benchmark_lib::Benchmark;
use crate::timings::Timings;
/// Run a [`Benchmark`] and record its timing.
#[instrument(skip_all)]
pub(crate) fn run_benchmark<B: Benchmark>(timings: &mut Timings) {
// Get the benchmark name.
let name = B::name();
trace!("Running benchmark: {name}");
// Print the benchmark name.
let name = std::any::type_name::<B>();
print!("{name:>34} ... ");
std::io::stdout().flush().unwrap();
// Setup the benchmark input.
let input = B::SETUP();
// Sleep before running the benchmark.
trace!("Pre-benchmark, sleeping for: {:?}", B::POST_SLEEP_DURATION);
std::thread::sleep(B::PRE_SLEEP_DURATION);
// Run/time the benchmark.
@@ -24,13 +23,12 @@ pub(crate) fn run_benchmark<B: Benchmark>(timings: &mut Timings) {
let time = now.elapsed().as_secs_f32();
// Print the benchmark timings.
info!("{name:>34} ... {time}");
println!("{time}");
assert!(
timings.insert(name, time).is_none(),
"There were 2 benchmarks with the same name - this collides the final output: {name}",
"[cuprate_benchmark]: there were 2 benchmarks with the same name - this collides the final output: {name}",
);
// Sleep for a cooldown period after the benchmark run.
trace!("Post-benchmark, sleeping for: {:?}", B::POST_SLEEP_DURATION);
std::thread::sleep(B::POST_SLEEP_DURATION);
}
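For context, a sketch of a hypothetical call site in `main()`, following the `cfg_if!` pattern shown in this diff; `cuprate_benchmark_example::Example` is an assumed type name, not confirmed here:

```rust
use cfg_if::cfg_if;

fn main() {
    let mut timings = timings::Timings::new();

    cfg_if! {
        if #[cfg(feature = "example")] {
            // `Example` is a hypothetical type implementing
            // `cuprate_benchmark_lib::Benchmark`.
            run::run_benchmark::<cuprate_benchmark_example::Example>(&mut timings);
        }
    }

    print::print_timings(&timings);
}
```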


@@ -4,14 +4,6 @@ use std::time::Duration;
/// A benchmarking function and its inputs.
pub trait Benchmark {
/// The benchmark's name.
///
/// This is automatically implemented
/// as the name of the [`Self`] type.
fn name() -> &'static str {
std::any::type_name::<Self>()
}
/// Input to the main benchmarking function.
///
/// This is passed to [`Self::MAIN`].
@@ -30,12 +22,12 @@ pub trait Benchmark {
const MAIN: fn(Self::Input);
/// `cuprate-benchmark` will sleep for this [`Duration`] after
/// creating the [`Self::Input`], but before starting [`Self::MAIN`].
/// creating the [`Self::Input`], but before starting [`Self::Main`].
///
/// 1 second by default.
const PRE_SLEEP_DURATION: Duration = Duration::from_secs(1);
/// `cuprate-benchmark` will sleep for this [`Duration`] after [`Self::MAIN`].
/// `cuprate-benchmark` will sleep for this [`Duration`] after [`Self::Main`].
///
/// 1 second by default.
const POST_SLEEP_DURATION: Duration = Duration::from_secs(1);
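A minimal sketch of implementing this trait, assuming, from `B::SETUP()` in `run.rs` and the `Self::Input` docs above, a `type Input` associated type and a `const SETUP: fn() -> Self::Input`; `Sleep` is a hypothetical benchmark, not part of this diff:

```rust
use std::time::Duration;

use cuprate_benchmark_lib::Benchmark;

/// A hypothetical benchmark that times a short sleep.
pub struct Sleep;

impl Benchmark for Sleep {
    type Input = Duration;

    // Assumed signature, inferred from `let input = B::SETUP();` in `run.rs`.
    const SETUP: fn() -> Self::Input = || Duration::from_millis(10);

    // `std::thread::sleep` already has the required `fn(Duration)` signature.
    const MAIN: fn(Self::Input) = std::thread::sleep;

    // `PRE_SLEEP_DURATION`/`POST_SLEEP_DURATION` keep their 1-second defaults.
}
```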


@@ -1,10 +1,8 @@
//! Benchmarks for `cuprate-json-rpc`.
//!
//! TODO: this crate is not finished.
#![allow(unused_crate_dependencies)]
mod response;
criterion::criterion_main! {
response::serde,
response::benches,
}


@@ -7,11 +7,8 @@ use serde_json::{from_str, to_string_pretty};
use cuprate_json_rpc::{Id, Response};
// `serde` benchmarks on `Response`.
criterion_group! {
name = serde;
config = Criterion::default();
targets =
benches,
response_from_str_u8,
response_from_str_u64,
response_from_str_string_5_len,
@@ -25,7 +22,7 @@ criterion_group! {
response_to_string_pretty_string_100_len,
response_to_string_pretty_string_500_len,
}
criterion_main!(serde);
criterion_main!(benches);
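For reference, a minimal self-contained sketch of the `criterion_group!`/`criterion_main!` pattern used above; the function body is a placeholder, not the crate's real deserialization benchmark:

```rust
use criterion::{black_box, criterion_group, criterion_main, Criterion};

fn response_from_str_u8(c: &mut Criterion) {
    c.bench_function("response_from_str_u8", |b| {
        // Placeholder work; the real benchmark calls `from_str` on a `Response`.
        b.iter(|| black_box(1u8).wrapping_add(1));
    });
}

criterion_group!(benches, response_from_str_u8);
criterion_main!(benches);
```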
/// Generate `from_str` deserialization benchmark functions for [`Response`].
macro_rules! impl_from_str_benchmark {


@@ -1,2 +1,2 @@
//! TODO: this crate is not finished.
//! TODO
#![allow(unused_crate_dependencies, reason = "used in benchmarks")]


@@ -6,16 +6,5 @@ although, it requires knowledge of how to use Criterion first:
2. Copy [`benches/criterion/example`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/example) as base
3. Get started
## Naming
New benchmark crates using Criterion should:
- Be in [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/)
- Be in the `cuprate-criterion-$CRATE_NAME` format
For a real example, see:
[`cuprate-criterion-json-rpc`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/cuprate-json-rpc).
## Workspace
Finally, make sure to add the benchmark crate to the workspace
[`Cargo.toml`](https://github.com/Cuprate/cuprate/blob/main/Cargo.toml) file.
Your benchmark is now ready to be run.


@@ -2,3 +2,5 @@
Each sub-directory in [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) is a crate that uses [Criterion](https://bheisler.github.io/criterion.rs/book) for timing single functions and/or groups of functions.
They are generally small in scope.
See [`benches/criterion/cuprate-json-rpc`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/cuprate-json-rpc) for an example.


@@ -4,7 +4,8 @@ New benchmarks are plugged into `cuprate-benchmark` by:
1. Registering the benchmark in the `cuprate_benchmark` binary
See [`benches/benchmark/example`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/example)
for an example.
for an example. For a real example, see:
[`cuprate-benchmark-database`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/cuprate-database).
## Creating the benchmark crate
Before plugging into `cuprate-benchmark`, your actual benchmark crate must be created:
@@ -14,13 +15,6 @@ Before plugging into `cuprate-benchmark`, your actual benchmark crate must be cr
1. Create a benchmark
1. Implement `cuprate_benchmark_lib::Benchmark`
New benchmark crates using `cuprate-database` should:
- Be in [`benches/benchmark/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/)
- Be in the `cuprate-benchmark-$CRATE_NAME` format
For a real example, see:
[`cuprate-benchmark-database`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/cuprate-database).
## `cuprate_benchmark_lib::Benchmark`
This is the trait that standardizes all benchmarks run under `cuprate-benchmark`.
@@ -34,9 +28,6 @@ in the binary that is actually ran: `cuprate-benchmark`.
If your benchmark is new, add a new crate feature to [`cuprate-benchmark`'s Cargo.toml file](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin/Cargo.toml) with an optional dependency to your benchmarking crate.
Please remember to edit the feature table in the
[`README.md`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin/README.md) as well!
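A sketch of what that edit could look like, using a hypothetical `cuprate-benchmark-mycrate` crate; the name and path are illustrative, following the `example` entries shown earlier in this diff:

```toml
# Hypothetical additions to benches/benchmark/bin/Cargo.toml:
[features]
mycrate = ["dep:cuprate-benchmark-mycrate"]

[dependencies]
cuprate-benchmark-mycrate = { path = "../mycrate", optional = true }
```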
## Adding to `cuprate-benchmark`'s `main()`
After adding your crate's feature, add a conditional line to the `main()` function
that runs the benchmark if the feature is enabled:
@@ -49,9 +40,3 @@ cfg_if! {
}
}
```
## Workspace
Finally, make sure to add the benchmark crate to the workspace
[`Cargo.toml`](https://github.com/Cuprate/cuprate/blob/main/Cargo.toml) file.
Your benchmark is now ready to be run.


@@ -1,37 +1,12 @@
# cuprate-benchmark
Cuprate has 2 custom crates for general benchmarking:
Cuprate has 2 custom crates for macro benchmarking:
- `cuprate-benchmark`; the actual binary crate that is run
- `cuprate-benchmark-lib`; the library that other crates hook into
The abstract purpose of `cuprate-benchmark` is very simple:
The purpose of `cuprate-benchmark` is very simple:
1. Set-up the benchmark
1. Start timer
1. Run benchmark
1. Output data
`cuprate-benchmark` runs the benchmarks found in [`benches/benchmark/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark).
`cuprate-benchmark-lib` defines the `Benchmark` trait that all
benchmark crates implement to "plug-in" to the benchmarking harness.
## Diagram
A diagram displaying the relation between `cuprate-benchmark` and related crates.
```
┌─────────────────────┐
│ cuprate_benchmark │
│ (actual binary ran) │
└──────────┬──────────┘
┌──────────────────┴───────────────────┐
│ cuprate_benchmark_lib │
│ ┌───────────────────────────────────┐│
│ │ trait Benchmark ││
│ └───────────────────────────────────┘│
└──────────────────┬───────────────────┘
┌───────────────────────────┐ │ ┌───────────────────────────┐
│ cuprate_benchmark_example ├──┼───┤ cuprate_benchmark_* │
└───────────────────────────┘ │ └───────────────────────────┘
┌───────────────────────────┐ │ ┌───────────────────────────┐
│ cuprate_benchmark_* ├──┴───┤ cuprate_benchmark_* │
└───────────────────────────┘ └───────────────────────────┘
```


@@ -1,7 +1,7 @@
# Running
`cuprate-benchmark` benchmarks are run with this command:
```bash
cargo run --release --package cuprate-benchmark --features $BENCHMARK_CRATE_FEATURE
cargo run --release --package cuprate-benchmark --features $YOUR_BENCHMARK_CRATE_FEATURE
```
For example, to run the example benchmark:
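The code block that followed is cut off in this view; given the `example` feature defined in `benches/benchmark/bin/Cargo.toml`, it would presumably be:

```bash
cargo run --release --package cuprate-benchmark --features example
```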


@@ -1,11 +1,11 @@
# Benchmarking
Cuprate has 2 types of benchmarks:
- [Criterion](https://bheisler.github.io/criterion.rs/book/user_guide/advanced_configuration.html) benchmarks
- Criterion benchmarks
- `cuprate-benchmark` benchmarks
Criterion is used for micro benchmarks; they time single functions, groups of functions, and generally are small in scope.
[Criterion](https://bheisler.github.io/criterion.rs/book/user_guide/advanced_configuration.html) is used for micro benchmarks; they time single functions, groups of functions, and generally are small in scope.
`cuprate-benchmark` and [`cuprate-benchmark-lib`](https://doc.cuprate.org/cuprate_benchmark_lib) are custom in-house crates Cuprate uses for macro benchmarks; these test sub-systems, sections of a sub-system, or otherwise larger or more complicated code that isn't well-suited for micro benchmarks.
`cuprate-benchmark` and `cuprate-benchmark-lib` are custom in-house crates Cuprate uses for macro benchmarks; these test sub-systems, sections of a sub-system, or otherwise larger or more complicated code that isn't suited for micro benchmarks.
## File layout and purpose
All benchmarking related files are in the [`benches/`](https://github.com/Cuprate/cuprate/tree/main/benches) folder.
@@ -14,9 +14,9 @@ This directory is organized like such:
| Directory | Purpose |
|-------------------------------|---------|
| [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) | Criterion (micro) benchmarks
| `benches/criterion/` | Criterion (micro) benchmarks
| `benches/criterion/cuprate-*` | Criterion benchmarks for the crate with the same name
| [`benches/benchmark/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark) | Cuprate's custom benchmarking files
| [`benches/benchmark/bin`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin) | The `cuprate-benchmark` crate; the actual binary run that links all benchmarks
| [`benches/benchmark/lib`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib) | The `cuprate-benchmark-lib` crate; the benchmarking framework all benchmarks plug into
| `benches/benchmark/` | Cuprate's custom benchmarking files
| `benches/benchmark/bin` | The `cuprate-benchmark` crate; the actual binary run that links all benchmarks
| `benches/benchmark/lib` | The `cuprate-benchmark-lib` crate; the benchmarking framework all benchmarks plug into
| `benches/benchmark/cuprate-*` | `cuprate-benchmark` benchmarks for the crate with the same name