Merge branch 'main' into init

Boog900 2024-12-04 00:02:56 +00:00
commit 3c751a6406
113 changed files with 2533 additions and 759 deletions

Cargo.lock (generated)

@@ -29,6 +29,15 @@ dependencies = [
"zerocopy",
]
[[package]]
name = "aho-corasick"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
"memchr",
]
[[package]]
name = "android-tzdata"
version = "0.1.1"

@@ -44,6 +53,12 @@ dependencies = [
"libc",
]
[[package]]
name = "anes"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
[[package]]
name = "anstyle"
version = "1.0.10"

@@ -68,6 +83,16 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "assert-json-diff"
version = "2.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12"
dependencies = [
"serde",
"serde_json",
]
[[package]]
name = "async-stream"
version = "0.3.6"

@@ -337,6 +362,12 @@ dependencies = [
"serde",
]
[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cc"
version = "1.1.31"

@@ -370,6 +401,33 @@ dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "ciborium"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
dependencies = [
"ciborium-io",
"ciborium-ll",
"serde",
]
[[package]]
name = "ciborium-io"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
[[package]]
name = "ciborium-ll"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
dependencies = [
"ciborium-io",
"half",
]
[[package]]
name = "clap"
version = "4.5.20"

@@ -469,6 +527,42 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "criterion"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f"
dependencies = [
"anes",
"cast",
"ciborium",
"clap",
"criterion-plot",
"is-terminal",
"itertools",
"num-traits",
"once_cell",
"oorandom",
"plotters",
"rayon",
"regex",
"serde",
"serde_derive",
"serde_json",
"tinytemplate",
"walkdir",
]
[[package]]
name = "criterion-plot"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
dependencies = [
"cast",
"itertools",
]
[[package]]
name = "crossbeam"
version = "0.8.4"

@@ -525,6 +619,12 @@ version = "0.8.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80"
[[package]]
name = "crunchy"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
[[package]]
name = "crypto-bigint"
version = "0.5.5"

@@ -574,6 +674,30 @@ dependencies = [
"tokio",
]
[[package]]
name = "cuprate-benchmark"
version = "0.0.0"
dependencies = [
"cfg-if",
"cuprate-benchmark-example",
"cuprate-benchmark-lib",
"serde",
"serde_json",
"tracing",
"tracing-subscriber",
]
[[package]]
name = "cuprate-benchmark-example"
version = "0.0.0"
dependencies = [
"cuprate-benchmark-lib",
]
[[package]]
name = "cuprate-benchmark-lib"
version = "0.0.0"
[[package]]
name = "cuprate-blockchain"
version = "0.0.0"

@@ -677,6 +801,25 @@ dependencies = [
name = "cuprate-constants"
version = "0.1.0"
[[package]]
name = "cuprate-criterion-example"
version = "0.0.0"
dependencies = [
"criterion",
"function_name",
"serde_json",
]
[[package]]
name = "cuprate-criterion-json-rpc"
version = "0.0.0"
dependencies = [
"criterion",
"cuprate-json-rpc",
"function_name",
"serde_json",
]
[[package]]
name = "cuprate-cryptonight"
version = "0.1.0"

@@ -1015,6 +1158,17 @@ dependencies = [
"thiserror",
]
[[package]]
name = "cuprate-zmq-types"
version = "0.1.0"
dependencies = [
"assert-json-diff",
"cuprate-types",
"hex",
"serde",
"serde_json",
]
[[package]]
name = "cuprated"
version = "0.0.1"

@@ -1296,6 +1450,21 @@ dependencies = [
"percent-encoding",
]
[[package]]
name = "function_name"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1ab577a896d09940b5fe12ec5ae71f9d8211fff62c919c03a3750a9901e98a7"
dependencies = [
"function_name-proc-macro",
]
[[package]]
name = "function_name-proc-macro"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "673464e1e314dd67a0fd9544abc99e8eb28d0c7e3b69b033bcff9b2d00b87333"
[[package]]
name = "funty"
version = "2.0.0"

@@ -1445,6 +1614,16 @@ dependencies = [
"tracing",
]
[[package]]
name = "half"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888"
dependencies = [
"cfg-if",
"crunchy",
]
[[package]]
name = "hashbrown"
version = "0.14.5"

@@ -1672,6 +1851,26 @@ dependencies = [
"hashbrown 0.15.0",
]
[[package]]
name = "is-terminal"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b"
dependencies = [
"hermit-abi",
"libc",
"windows-sys 0.52.0",
]
[[package]]
name = "itertools"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "1.0.11"

@@ -1768,6 +1967,15 @@ version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "matchers"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
dependencies = [
"regex-automata 0.1.10",
]
[[package]]
name = "matchit"
version = "0.7.3"

@@ -2027,6 +2235,12 @@ version = "1.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"
[[package]]
name = "oorandom"
version = "11.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
[[package]]
name = "openssl-probe"
version = "0.1.5"

@@ -2164,6 +2378,34 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "plotters"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3"
dependencies = [
"num-traits",
"plotters-backend",
"plotters-svg",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "plotters-backend"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7"
[[package]]
name = "plotters-svg"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705"
dependencies = [
"plotters-backend",
]
[[package]]
name = "powerfmt"
version = "0.2.0"

@@ -2244,7 +2486,7 @@ dependencies = [
"rand",
"rand_chacha",
"rand_xorshift",
-"regex-syntax",
+"regex-syntax 0.8.5",
"rusty-fork",
"tempfile",
"unarray",
@@ -2410,6 +2652,44 @@ dependencies = [
"syn",
]
[[package]]
name = "regex"
version = "1.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata 0.4.7",
"regex-syntax 0.8.5",
]
[[package]]
name = "regex-automata"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
dependencies = [
"regex-syntax 0.6.29",
]
[[package]]
name = "regex-automata"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax 0.8.5",
]
[[package]]
name = "regex-syntax"
version = "0.6.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
name = "regex-syntax"
version = "0.8.5"

@@ -2537,6 +2817,15 @@ version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "schannel"
version = "0.1.26"

@@ -2912,9 +3201,9 @@ checksum = "a693d0c8cf16973fac5a93fbe47b8c6452e7097d4fcac49f3d7a18e39c76e62e"
[[package]]
name = "time"
-version = "0.3.36"
+version = "0.3.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"
+checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21"
dependencies = [
"deranged",
"itoa",
@@ -2933,14 +3222,24 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
[[package]]
name = "time-macros"
-version = "0.2.18"
+version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"
+checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de"
dependencies = [
"num-conv",
"time-core",
]
[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
dependencies = [
"serde",
"serde_json",
]
[[package]]
name = "tinyvec"
version = "1.8.0"

@@ -3188,10 +3487,14 @@ version = "0.3.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b"
dependencies = [
+"matchers",
"nu-ansi-term",
+"once_cell",
+"regex",
"sharded-slab",
"smallvec",
"thread_local",
+"tracing",
"tracing-core",
"tracing-log",
]
@@ -3297,6 +3600,16 @@ dependencies = [
"libc",
]
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "want"
version = "0.3.1"

@@ -3367,6 +3680,16 @@ version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d"
[[package]]
name = "web-sys"
version = "0.3.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "webpki-roots"
version = "0.26.6"

@@ -3392,6 +3715,15 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b"
dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"

@@ -1,35 +1,57 @@
[workspace]
resolver = "2"
members = [
+# Binaries
"binaries/cuprated",
-"constants",
+# Benchmarks
+"benches/benchmark/bin",
+"benches/benchmark/lib",
+"benches/benchmark/example",
+"benches/criterion/example",
+"benches/criterion/cuprate-json-rpc",
+# Consensus
"consensus",
"consensus/context",
"consensus/fast-sync",
"consensus/rules",
-"cryptonight",
-"helper",
+# Net
"net/epee-encoding",
"net/fixed-bytes",
"net/levin",
"net/wire",
+# P2P
"p2p/p2p",
"p2p/p2p-core",
"p2p/bucket",
"p2p/dandelion-tower",
"p2p/async-buffer",
"p2p/address-book",
+# Storage
"storage/blockchain",
"storage/service",
"storage/txpool",
"storage/database",
-"pruning",
-"test-utils",
-"types",
+# RPC
"rpc/json-rpc",
"rpc/types",
"rpc/interface",
+# ZMQ
+"zmq/types",
+# Misc
+"constants",
+"cryptonight",
+"helper",
+"pruning",
+"test-utils",
+"types",
]

[profile.release]
@@ -54,6 +76,8 @@ opt-level = 3
[workspace.dependencies]
# Cuprate members
cuprate-benchmark-lib = { path = "benches/benchmark/lib", default-features = false }
cuprate-benchmark-example = { path = "benches/benchmark/example", default-features = false }
cuprate-fast-sync = { path = "consensus/fast-sync", default-features = false }
cuprate-consensus-rules = { path = "consensus/rules", default-features = false }
cuprate-constants = { path = "constants", default-features = false }
@@ -81,6 +105,7 @@ cuprate-types = { path = "types", default-features =
cuprate-json-rpc = { path = "rpc/json-rpc", default-features = false }
cuprate-rpc-types = { path = "rpc/types", default-features = false }
cuprate-rpc-interface = { path = "rpc/interface", default-features = false }
cuprate-zmq-types = { path = "zmq/types", default-features = false }
# External dependencies
anyhow = { version = "1", default-features = false }
@@ -127,6 +152,8 @@ tracing-subscriber = { version = "0.3", default-features = false }
tracing = { version = "0.1", default-features = false }
## workspace.dev-dependencies
criterion = { version = "0.5" }
function_name = { version = "0.3" }
monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "e6fdef6" }
monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "e6fdef6" }
tempfile = { version = "3" }
@@ -256,6 +283,9 @@ rest_pat_in_fully_bound_structs = "deny"
redundant_type_annotations = "deny"
infinite_loop = "deny"
zero_repeat_side_effects = "deny"
non_zero_suggestions = "deny"
manual_is_power_of_two = "deny"
used_underscore_items = "deny"
# Warm
cast_possible_truncation = "deny"

@@ -1 +1,5 @@
-# TODO
+# Benches
This directory contains Cuprate's benchmarks and benchmarking utilities.
See the [`Benchmarking` section in the Architecture book](https://architecture.cuprate.org/benchmarking/intro.html)
to see how to create and run these benchmarks.

@@ -0,0 +1,43 @@
[package]
name = "cuprate-benchmark"
version = "0.0.0"
edition = "2021"
description = "Cuprate's benchmarking binary"
license = "MIT"
authors = ["hinto-janai"]
repository = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin"
keywords = ["cuprate", "benchmarking", "binary"]
[features]
# All new benchmarks should be added here!
all = ["example"]
# Non-benchmark features.
default = []
json = []
trace = []
debug = []
warn = []
info = []
error = []
# Benchmark features.
# New benchmarks should be added here!
example = [
"dep:cuprate-benchmark-example"
]
[dependencies]
cuprate-benchmark-lib = { workspace = true }
cuprate-benchmark-example = { workspace = true, optional = true }
cfg-if = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true, features = ["std"] }
tracing = { workspace = true, features = ["std", "attributes"] }
tracing-subscriber = { workspace = true, features = ["fmt", "std", "env-filter"] }
[dev-dependencies]
[lints]
workspace = true

@@ -0,0 +1,27 @@
## `cuprate-benchmark`
This crate links all benchmarks together into a single binary that can be run as: `cuprate-benchmark`.
`cuprate-benchmark` will run all enabled benchmarks sequentially and print data at the end.
## Benchmarks
Benchmarks are opt-in and enabled via features.
| Feature | Enables which benchmark crate? |
|----------|--------------------------------|
| example | cuprate-benchmark-example |
| database | cuprate-benchmark-database |
## Features
These are features that aren't for enabling benchmarks, but rather for other things.
Since `cuprate-benchmark` is built right before it is run,
these features almost act like command line arguments.
| Features | Does what |
|----------|-----------|
| json | Prints JSON timings instead of a markdown table
| trace | Use the `trace` log-level
| debug | Use the `debug` log-level
| warn | Use the `warn` log-level
| info | Use the `info` log-level (default)
| error | Use the `error` log-level

@@ -0,0 +1,29 @@
use cfg_if::cfg_if;
use tracing::{info, instrument, Level};
use tracing_subscriber::FmtSubscriber;
/// Initializes the `tracing` logger.
#[instrument]
pub(crate) fn init_logger() {
const LOG_LEVEL: Level = {
cfg_if! {
if #[cfg(feature = "trace")] {
Level::TRACE
} else if #[cfg(feature = "debug")] {
Level::DEBUG
} else if #[cfg(feature = "warn")] {
Level::WARN
} else if #[cfg(feature = "info")] {
Level::INFO
} else if #[cfg(feature = "error")] {
Level::ERROR
} else {
Level::INFO
}
}
};
FmtSubscriber::builder().with_max_level(LOG_LEVEL).init();
info!("Log level: {LOG_LEVEL}");
}

@@ -0,0 +1,49 @@
#![doc = include_str!("../README.md")]
#![allow(
unused_crate_dependencies,
reason = "this crate imports many potentially unused dependencies"
)]
mod log;
mod print;
mod run;
mod timings;
use cfg_if::cfg_if;
/// What `main()` does:
/// 1. Run all enabled benchmarks
/// 2. Record benchmark timings
/// 3. Print timing data
///
/// To add a new benchmark to be run here:
/// 1. Copy + paste a `cfg_if` block
/// 2. Change it to your benchmark's feature flag
/// 3. Change it to your benchmark's type
#[allow(
clippy::allow_attributes,
unused_variables,
unused_mut,
unreachable_code,
reason = "clippy does not account for all cfg()s"
)]
fn main() {
log::init_logger();
let mut timings = timings::Timings::new();
cfg_if! {
if #[cfg(not(any(feature = "example")))] {
println!("No feature specified. Use `--features $BENCHMARK_FEATURE` when building.");
return;
}
}
cfg_if! {
if #[cfg(feature = "example")] {
run::run_benchmark::<cuprate_benchmark_example::Example>(&mut timings);
}
}
print::print_timings(&timings);
}

@@ -0,0 +1,38 @@
#![expect(dead_code, reason = "code hidden behind feature flags")]
use cfg_if::cfg_if;
use crate::timings::Timings;
/// Print the final markdown table of benchmark timings.
pub(crate) fn print_timings(timings: &Timings) {
println!("\nFinished all benchmarks, printing results:");
cfg_if! {
if #[cfg(feature = "json")] {
print_timings_json(timings);
} else {
print_timings_markdown(timings);
}
}
}
/// Default timing formatting.
pub(crate) fn print_timings_markdown(timings: &Timings) {
let mut s = String::new();
s.push_str("| Benchmark | Time (seconds) |\n");
s.push_str("|------------------------------------|----------------|");
#[expect(clippy::iter_over_hash_type)]
for (k, v) in timings {
s += &format!("\n| {k:<34} | {v:<14} |");
}
println!("\n{s}");
}
/// Enabled via `json` feature.
pub(crate) fn print_timings_json(timings: &Timings) {
let json = serde_json::to_string_pretty(timings).unwrap();
println!("\n{json}");
}

@@ -0,0 +1,36 @@
use tracing::{info, instrument, trace};
use cuprate_benchmark_lib::Benchmark;
use crate::timings::Timings;
/// Run a [`Benchmark`] and record its timing.
#[instrument(skip_all)]
pub(crate) fn run_benchmark<B: Benchmark>(timings: &mut Timings) {
// Get the benchmark name.
let name = B::name();
trace!("Running benchmark: {name}");
// Setup the benchmark input.
let input = B::SETUP();
// Sleep before running the benchmark.
trace!("Pre-benchmark, sleeping for: {:?}", B::POST_SLEEP_DURATION);
std::thread::sleep(B::PRE_SLEEP_DURATION);
// Run/time the benchmark.
let now = std::time::Instant::now();
B::MAIN(input);
let time = now.elapsed().as_secs_f32();
// Print the benchmark timings.
info!("{name:>34} ... {time}");
assert!(
timings.insert(name, time).is_none(),
"There were 2 benchmarks with the same name - this collides the final output: {name}",
);
// Sleep for a cooldown period after the benchmark run.
trace!("Post-benchmark, sleeping for: {:?}", B::POST_SLEEP_DURATION);
std::thread::sleep(B::POST_SLEEP_DURATION);
}

@@ -0,0 +1,5 @@
/// Benchmark timing data.
///
/// - Key = benchmark name
/// - Value = benchmark time in seconds
pub(crate) type Timings = std::collections::HashMap<&'static str, f32>;

@@ -0,0 +1,17 @@
[package]
name = "cuprate-benchmark-example"
version = "0.0.0"
edition = "2021"
description = "Example showcasing Cuprate's benchmarking harness"
license = "MIT"
authors = ["hinto-janai"]
repository = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/example"
keywords = ["cuprate", "benchmarking", "example"]
[dependencies]
cuprate-benchmark-lib = { path = "../lib" }
[dev-dependencies]
[lints]
workspace = true

@@ -0,0 +1,3 @@
## `cuprate-benchmark-example`
This crate contains a short example benchmark that shows how to implement and use
`cuprate-benchmark-lib` so that it can be run by `cuprate-benchmark`.

@@ -0,0 +1,42 @@
#![doc = include_str!("../README.md")]
use std::hint::black_box;
use cuprate_benchmark_lib::Benchmark;
/// Marker struct that implements [`Benchmark`]
pub struct Example;
/// The input to our benchmark function.
pub type ExampleBenchmarkInput = u64;
/// The setup function that creates the input.
pub const fn example_benchmark_setup() -> ExampleBenchmarkInput {
1
}
/// The main benchmarking function.
#[expect(clippy::unit_arg)]
pub fn example_benchmark_main(input: ExampleBenchmarkInput) {
// In this case, we're simply benchmarking the
// performance of simple arithmetic on the input data.
fn math(input: ExampleBenchmarkInput, number: u64) {
let x = input;
let x = black_box(x * number);
let x = black_box(x / number);
let x = black_box(x + number);
let _ = black_box(x - number);
}
for number in 1..100_000_000 {
black_box(math(input, number));
}
}
// This implementation will be run by `cuprate-benchmark`.
impl Benchmark for Example {
type Input = ExampleBenchmarkInput;
const SETUP: fn() -> Self::Input = example_benchmark_setup;
const MAIN: fn(Self::Input) = example_benchmark_main;
}

@@ -0,0 +1,18 @@
[package]
name = "cuprate-benchmark-lib"
version = "0.0.0"
edition = "2021"
description = "Cuprate's benchmarking library"
license = "MIT"
authors = ["hinto-janai"]
repository = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib"
keywords = ["cuprate", "benchmarking", "library"]
[features]
[dependencies]
[dev-dependencies]
[lints]
workspace = true

@@ -0,0 +1,15 @@
## `cuprate-benchmark-lib`
This crate is the glue between
[`cuprate-benchmark`](https://github.com/Cuprate/cuprate/tree/benches/benches/benchmark/bin)
and all the benchmark crates.
It defines the [`crate::Benchmark`] trait, which describes the behavior of all benchmarks.
See the [`cuprate-benchmark-example`](https://github.com/Cuprate/cuprate/tree/benches/benches/benchmark/example)
crate to see an example implementation of this trait.
After implementing this trait, a few more steps are needed
before the `cuprate-benchmark` binary
can actually run your benchmark crate; see the
[`Benchmarking` section in the Architecture book](https://architecture.cuprate.org/benchmarking/intro.html)
to see how to do this.
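
For orientation, a minimal sketch of such an implementation might look like the following; the `MyBenchmark` type, its input, and the measured work are purely illustrative and not an existing crate:

```rust
use std::time::Duration;

use cuprate_benchmark_lib::Benchmark;

/// Marker struct for this hypothetical benchmark.
pub struct MyBenchmark;

/// Un-timed setup that builds the input.
fn my_benchmark_setup() -> Vec<u64> {
    (0..1_024).collect()
}

/// The function whose runtime is measured.
fn my_benchmark_main(input: Vec<u64>) {
    std::hint::black_box(input.iter().sum::<u64>());
}

impl Benchmark for MyBenchmark {
    // The value passed from `SETUP` to `MAIN`.
    type Input = Vec<u64>;

    const SETUP: fn() -> Self::Input = my_benchmark_setup;
    const MAIN: fn(Self::Input) = my_benchmark_main;

    // Optionally override the default 1 second sleeps.
    const PRE_SLEEP_DURATION: Duration = Duration::from_millis(100);
    const POST_SLEEP_DURATION: Duration = Duration::from_millis(100);
}
```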

@@ -0,0 +1,45 @@
//! Benchmarking trait.
use std::time::Duration;
/// A benchmarking function and its inputs.
pub trait Benchmark {
/// The benchmark's name.
///
/// This is automatically implemented
/// as the name of the [`Self`] type.
//
// FIXME: use `const` instead of `fn` when stable
// <https://github.com/rust-lang/rust/issues/63084>
fn name() -> &'static str {
std::any::type_name::<Self>()
}
/// Input to the main benchmarking function.
///
/// This is passed to [`Self::MAIN`].
type Input;
/// Setup function to generate the input.
///
/// This function is not timed.
const SETUP: fn() -> Self::Input;
/// The main function to benchmark.
///
/// The start of the timer begins right before
/// this function is called and ends after the
/// function returns.
const MAIN: fn(Self::Input);
/// `cuprate-benchmark` will sleep for this [`Duration`] after
/// creating the [`Self::Input`], but before starting [`Self::MAIN`].
///
/// 1 second by default.
const PRE_SLEEP_DURATION: Duration = Duration::from_secs(1);
/// `cuprate-benchmark` will sleep for this [`Duration`] after [`Self::MAIN`].
///
/// 1 second by default.
const POST_SLEEP_DURATION: Duration = Duration::from_secs(1);
}

@@ -0,0 +1,5 @@
#![doc = include_str!("../README.md")]
mod benchmark;
pub use benchmark::Benchmark;

@@ -0,0 +1,23 @@
[package]
name = "cuprate-criterion-json-rpc"
version = "0.0.0"
edition = "2021"
description = "Criterion benchmarking for cuprate-json-rpc"
license = "MIT"
authors = ["hinto-janai"]
repository = "https://github.com/Cuprate/cuprate/tree/main/benches/criterion/cuprate-json-rpc"
keywords = ["cuprate", "json-rpc", "criterion", "benchmark"]
[dependencies]
cuprate-json-rpc = { workspace = true }
criterion = { workspace = true }
function_name = { workspace = true }
serde_json = { workspace = true, features = ["default"] }
[[bench]]
name = "main"
harness = false
[lints]
workspace = true

@@ -0,0 +1,8 @@
//! Benchmarks for `cuprate-json-rpc`.
#![allow(unused_crate_dependencies)]
mod response;
criterion::criterion_main! {
response::serde,
}

@@ -0,0 +1,110 @@
//! Benchmarks for [`Response`].
#![allow(unused_attributes, unused_crate_dependencies)]
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use function_name::named;
use serde_json::{from_str, to_string_pretty};
use cuprate_json_rpc::{Id, Response};
// `serde` benchmarks on `Response`.
//
// These are benchmarked as `Response` has a custom serde implementation.
criterion_group! {
name = serde;
config = Criterion::default();
targets =
response_from_str_u8,
response_from_str_u64,
response_from_str_string_5_len,
response_from_str_string_10_len,
response_from_str_string_100_len,
response_from_str_string_500_len,
response_to_string_pretty_u8,
response_to_string_pretty_u64,
response_to_string_pretty_string_5_len,
response_to_string_pretty_string_10_len,
response_to_string_pretty_string_100_len,
response_to_string_pretty_string_500_len,
response_from_str_bad_field_1,
response_from_str_bad_field_5,
response_from_str_bad_field_10,
response_from_str_bad_field_100,
response_from_str_missing_field,
}
criterion_main!(serde);
/// Generate `from_str` deserialization benchmark functions for [`Response`].
macro_rules! impl_from_str_benchmark {
(
$(
$fn_name:ident => $request_type:ty => $request_string:literal,
)*
) => {
$(
#[named]
fn $fn_name(c: &mut Criterion) {
let request_string = $request_string;
c.bench_function(function_name!(), |b| {
b.iter(|| {
let _r = from_str::<Response<$request_type>>(
black_box(request_string)
);
});
});
}
)*
};
}
impl_from_str_benchmark! {
response_from_str_u8 => u8 => r#"{"jsonrpc":"2.0","id":123,"result":0}"#,
response_from_str_u64 => u64 => r#"{"jsonrpc":"2.0","id":123,"result":0}"#,
response_from_str_string_5_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"hello"}"#,
response_from_str_string_10_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"hellohello"}"#,
response_from_str_string_100_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld"}"#,
response_from_str_string_500_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld"}"#,
// The custom serde currently looks at all fields.
// These are for testing the performance if the serde
// has to parse through a bunch of unrelated fields.
response_from_str_bad_field_1 => u8 => r#"{"bad_field":0,"jsonrpc":"2.0","id":123,"result":0}"#,
response_from_str_bad_field_5 => u8 => r#"{"bad_field_1":0,"bad_field_2":0,"bad_field_3":0,"bad_field_4":0,"bad_field_5":0,"jsonrpc":"2.0","id":123,"result":0}"#,
response_from_str_bad_field_10 => u8 => r#"{"bad_field_1":0,"bad_field_2":0,"bad_field_3":0,"bad_field_4":0,"bad_field_5":0,"bad_field_6":0,"bad_field_7":0,"bad_field_8":0,"bad_field_9":0,"bad_field_10":0,"jsonrpc":"2.0","id":123,"result":0}"#,
response_from_str_bad_field_100 => u8 => r#"{"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0,"38":0,"39":0,"40":0,"41":0,"42":0,"43":0,"44":0,"45":0,"46":0,"47":0,"48":0,"49":0,"50":0,"51":0,"52":0,"53":0,"54":0,"55":0,"56":0,"57":0,"58":0,"59":0,"60":0,"61":0,"62":0,"63":0,"64":0,"65":0,"66":0,"67":0,"68":0,"69":0,"70":0,"71":0,"72":0,"73":0,"74":0,"75":0,"76":0,"77":0,"78":0,"79":0,"80":0,"81":0,"82":0,"83":0,"84":0,"85":0,"86":0,"87":0,"88":0,"89":0,"90":0,"91":0,"92":0,"93":0,"94":0,"95":0,"96":0,"97":0,"98":0,"99":0,"100":0,"jsonrpc":"2.0","id":123,"result":0}"#,
// These are missing the `jsonrpc` field.
response_from_str_missing_field => u8 => r#"{"id":123,"result":0}"#,
}
/// Generate `to_string_pretty` serialization benchmark functions for [`Response`].
macro_rules! impl_to_string_pretty_benchmark {
(
$(
$fn_name:ident => $request_constructor:expr_2021,
)*
) => {
$(
#[named]
fn $fn_name(c: &mut Criterion) {
let request = $request_constructor;
c.bench_function(function_name!(), |b| {
b.iter(|| {
let _s = to_string_pretty(black_box(&request)).unwrap();
});
});
}
)*
};
}
impl_to_string_pretty_benchmark! {
response_to_string_pretty_u8 => Response::<u8>::ok(Id::Null, 0),
response_to_string_pretty_u64 => Response::<u64>::ok(Id::Null, 0),
response_to_string_pretty_string_5_len => Response::ok(Id::Null, String::from("hello")),
response_to_string_pretty_string_10_len => Response::ok(Id::Null, String::from("hellohello")),
response_to_string_pretty_string_100_len => Response::ok(Id::Null, String::from("helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld")),
response_to_string_pretty_string_500_len => Response::ok(Id::Null, String::from("helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld")),
}

@@ -0,0 +1,2 @@
//! Benchmark lib for `cuprate-json-rpc`.
#![allow(unused_crate_dependencies, reason = "used in benchmarks")]

@@ -0,0 +1,21 @@
[package]
name = "cuprate-criterion-example"
version = "0.0.0"
edition = "2021"
description = "Criterion benchmarking example for Cuprate"
license = "MIT"
authors = ["hinto-janai"]
repository = "https://github.com/Cuprate/cuprate/tree/main/benches/criterion/example"
keywords = ["cuprate", "criterion", "benchmark", "example"]
[dependencies]
criterion = { workspace = true }
function_name = { workspace = true }
serde_json = { workspace = true, features = ["default"] }
[[bench]]
name = "main"
harness = false
[lints]
workspace = true

@@ -0,0 +1,14 @@
## `cuprate-criterion-example`
An example of using Criterion for benchmarking Cuprate crates.
Consider copy+pasting this crate to use as a base when creating new Criterion benchmark crates.
## `src/`
Benchmark crates have a `benches/` directory run by `cargo bench`, but they are also crates themselves,
that is, they have a `src` folder that `benches/` can pull code from.
The `src` directories in these benchmarking crates are usually filled with
helper functions, types, etc, that are used repeatedly in the benchmarks.
## `benches/`
These are the actual benchmarks run by `cargo bench`.

@@ -0,0 +1,48 @@
//! Benchmarks.
#![allow(unused_attributes, unused_crate_dependencies)]
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use function_name::named;
use cuprate_criterion_example::SomeHardToCreateObject;
// This is how you register criterion benchmarks.
criterion_group! {
name = benches;
config = Criterion::default();
targets = benchmark_1, benchmark_range,
}
criterion_main!(benches);
/// Benchmark a single input.
///
/// <https://bheisler.github.io/criterion.rs/book/user_guide/benchmarking_with_inputs.html#benchmarking-with-one-input>
#[named]
fn benchmark_1(c: &mut Criterion) {
// It is recommended to use `function_name!()` as a benchmark
// identifier instead of manually re-typing the function name.
c.bench_function(function_name!(), |b| {
b.iter(|| {
black_box(SomeHardToCreateObject::from(1));
});
});
}
/// Benchmark a range of inputs.
///
/// <https://bheisler.github.io/criterion.rs/book/user_guide/benchmarking_with_inputs.html#benchmarking-with-a-range-of-values>
#[named]
fn benchmark_range(c: &mut Criterion) {
let mut group = c.benchmark_group(function_name!());
for i in 0..4 {
group.throughput(Throughput::Elements(i));
group.bench_with_input(BenchmarkId::from_parameter(i), &i, |b, &i| {
b.iter(|| {
black_box(SomeHardToCreateObject::from(i));
});
});
}
group.finish();
}

@@ -0,0 +1,10 @@
//! Benchmarks examples.
#![allow(unused_crate_dependencies)]
// All modules within `benches/` are `mod`ed here.
mod example;
// And all the Criterion benchmarks are registered like so:
criterion::criterion_main! {
example::benches,
}

@@ -0,0 +1,13 @@
#![doc = include_str!("../README.md")] // See the README for crate documentation.
#![allow(unused_crate_dependencies, reason = "used in benchmarks")]
/// Shared type that all benchmarks can use.
#[expect(dead_code)]
pub struct SomeHardToCreateObject(u64);
impl From<u64> for SomeHardToCreateObject {
/// Shared function that all benchmarks can use.
fn from(value: u64) -> Self {
Self(value)
}
}

@@ -0,0 +1,67 @@
# ____ _
# / ___| _ _ __ _ __ __ _| |_ ___
# | | | | | | '_ \| '__/ _` | __/ _ \
# | |__| |_| | |_) | | | (_| | || __/
# \____\__,_| .__/|_| \__,_|\__\___|
# |_|
#
## The network to run on, valid values: "Mainnet", "Testnet", "Stagenet".
network = "Mainnet"
## Tracing config.
[tracing]
## The minimum level for log events to be displayed.
level = "info"
## Clear-net config.
[p2p.clear_net]
## The number of outbound connections we should make and maintain.
outbound_connections = 64
## The number of extra connections we should make under load from the rest of Cuprate, i.e. when syncing.
extra_outbound_connections = 8
## The maximum number of inbound connections we should allow.
max_inbound_connections = 128
## The percent of outbound connections that should be to nodes we have not connected to before.
gray_peers_percent = 0.7
## The port to accept connections on, if left `0` no connections will be accepted.
p2p_port = 0
## The IP address to listen to connections on.
listen_on = "0.0.0.0"
## The Clear-net addressbook config.
[p2p.clear_net.address_book_config]
## The size of the white peer list, which contains peers we have made a connection to before.
max_white_list_length = 1_000
## The size of the gray peer list, which contains peers we have not made a connection to before.
max_gray_list_length = 5_000
## The amount of time between address book saves.
peer_save_period = { secs = 90, nanos = 0 }
## The block downloader config.
[p2p.block_downloader]
## The size of the buffer of sequential blocks waiting to be verified and added to the chain (bytes).
buffer_bytes = 50_000_000
## The size of the queue of blocks which are waiting for a parent block to be downloaded (bytes).
in_progress_queue_bytes = 50_000_000
## The target size of a batch of blocks (bytes), must not exceed 100MB.
target_batch_bytes = 5_000_000
## The amount of time between checking the pool of connected peers for free peers to download blocks.
check_client_pool_interval = { secs = 30, nanos = 0 }
## Storage config
[storage]
## The amount of reader threads to spawn.
reader_threads = "OnePerThread"
## Txpool storage config.
[storage.txpool]
## The database sync mode for the txpool.
sync_mode = "Async"
## The maximum size of all the txs in the pool (bytes).
max_txpool_byte_size = 100_000_000
## Blockchain storage config.
[storage.blockchain]
## The database sync mode for the blockchain.
sync_mode = "Async"

@@ -18,7 +18,6 @@ use cuprate_p2p::block_downloader::BlockDownloaderConfig;
use cuprate_p2p_core::{ClearNet, ClearNetServerCfg};
mod args;
-mod default;
mod fs;
mod p2p;
mod storage;
@@ -32,13 +31,14 @@ use tracing_config::TracingConfig;
/// Reads the args & config file, returning a [`Config`].
pub fn read_config_and_args() -> Config {
let args = args::Args::parse();
+args.do_quick_requests();
let config: Config = if let Some(config_file) = &args.config_file {
// If a config file was set in the args try to read it and exit if we can't.
match Config::read_from_path(config_file) {
Ok(config) => config,
Err(e) => {
-tracing::error!("Failed to read config from file: {e}");
+eprintln!("Failed to read config from file: {e}");
std::process::exit(1);
}
}
@@ -56,7 +56,7 @@ pub fn read_config_and_args() -> Config {
})
.inspect_err(|e| {
tracing::debug!("Failed to read config from config dir: {e}");
-println!("Failed to find/read config file, using default config.");
+eprintln!("Failed to find/read config file, using default config.");
})
.unwrap_or_default()
};
@@ -93,10 +93,10 @@ impl Config {
let file_text = read_to_string(file.as_ref())?;
Ok(toml::from_str(&file_text)
-.inspect(|_| println!("Using config at: {}", file.as_ref().to_string_lossy()))
+.inspect(|_| eprintln!("Using config at: {}", file.as_ref().to_string_lossy()))
.inspect_err(|e| {
-println!("{e}");
-println!(
+eprintln!("{e}");
+eprintln!(
"Failed to parse config file at: {}",
file.as_ref().to_string_lossy()
);

@@ -1,10 +1,10 @@
-use std::{io::Write, path::PathBuf};
+use std::{io::Write, path::PathBuf, process::exit};
use clap::builder::TypedValueParser;
use cuprate_helper::network::Network;
-use crate::config::{default::create_default_config_file, Config};
+use crate::{config::Config, constants::EXAMPLE_CONFIG};
/// Cuprate Args.
#[derive(clap::Parser, Debug)]

@@ -24,21 +24,26 @@ pub struct Args {
/// The PATH of the `cuprated` config file.
#[arg(long)]
pub config_file: Option<PathBuf>,
-/// Generate a config file and place it in the given PATH.
+/// Generate a config file and print it to stdout.
#[arg(long)]
-pub generate_config: Option<PathBuf>,
+pub generate_config: bool,
}
impl Args {
+/// Complete any quick requests asked for in [`Args`].
+///
+/// May cause the process to [`exit`].
+pub fn do_quick_requests(&self) {
+if self.generate_config {
+println!("{EXAMPLE_CONFIG}");
+exit(0);
+}
+}
/// Apply the [`Args`] to the given [`Config`].
///
/// This may exit the program if a config value was set that requires an early exit.
-pub fn apply_args(&self, mut config: Config) -> Config {
+pub const fn apply_args(&self, mut config: Config) -> Config {
-if let Some(config_folder) = self.generate_config.as_ref() {
-// This will create the config file and exit.
-create_default_config_file(config_folder)
-};
config.network = self.network;
if let Some(outbound_connections) = self.outbound_connections {

@@ -6,7 +6,7 @@ use std::{
use serde::{Deserialize, Serialize};
-use cuprate_helper::{fs::addressbook_path, network::Network};
+use cuprate_helper::{fs::address_book_path, network::Network};
/// P2P config.
#[derive(Default, Deserialize, Serialize)]

@@ -23,22 +23,22 @@ pub struct P2PConfig {
pub struct BlockDownloaderConfig {
/// The size in bytes of the buffer between the block downloader and the place which
/// is consuming the downloaded blocks.
-pub buffer_size: usize,
+pub buffer_bytes: usize,
/// The size of the in progress queue (in bytes) at which we stop requesting more blocks.
-pub in_progress_queue_size: usize,
+pub in_progress_queue_bytes: usize,
/// The [`Duration`] between checking the client pool for free peers.
pub check_client_pool_interval: Duration,
/// The target size of a single batch of blocks (in bytes).
-pub target_batch_size: usize,
+pub target_batch_bytes: usize,
}
impl From<BlockDownloaderConfig> for cuprate_p2p::block_downloader::BlockDownloaderConfig {
fn from(value: BlockDownloaderConfig) -> Self {
Self {
-buffer_size: value.buffer_size,
+buffer_bytes: value.buffer_bytes,
-in_progress_queue_size: value.in_progress_queue_size,
+in_progress_queue_bytes: value.in_progress_queue_bytes,
check_client_pool_interval: value.check_client_pool_interval,
-target_batch_size: value.target_batch_size,
+target_batch_bytes: value.target_batch_bytes,
initial_batch_len: 1,
}
}

@@ -47,10 +47,10 @@ impl From<BlockDownloaderConfig> for cuprate_p2p::block_downloader::BlockDownloa
impl Default for BlockDownloaderConfig {
fn default() -> Self {
Self {
-buffer_size: 50_000_000,
+buffer_bytes: 50_000_000,
-in_progress_queue_size: 50_000_000,
+in_progress_queue_bytes: 50_000_000,
check_client_pool_interval: Duration::from_secs(30),
-target_batch_size: 5_000_000,
+target_batch_bytes: 5_000_000,
}
}
}

@@ -102,7 +102,7 @@ impl SharedNetConfig {
cuprate_address_book::AddressBookConfig {
max_white_list_length: self.address_book_config.max_white_list_length,
max_gray_list_length: self.address_book_config.max_gray_list_length,
-peer_store_directory: addressbook_path(cache_dir, network),
+peer_store_directory: address_book_path(cache_dir, network),
peer_save_period: self.address_book_config.peer_save_period,
}
}

@@ -18,11 +18,12 @@ pub const VERSION_BUILD: &str = if cfg!(debug_assertions) {
pub const PANIC_CRITICAL_SERVICE_ERROR: &str =
"A service critical to Cuprate's function returned an unexpected error.";
-pub const EXAMPLE_CONFIG: &str = include_str!("../../../Cuprated.toml");
+pub const EXAMPLE_CONFIG: &str = include_str!("../Cuprated.toml");
#[cfg(test)]
mod test {
use super::*;
+use crate::config::Config;
#[test]
fn version() {

@@ -37,4 +38,9 @@ mod test {
assert_eq!(VERSION_BUILD, "0.0.1-release");
}
}
+#[test]
+fn generate_config_text_is_valid() {
+let config: Config = toml::from_str(EXAMPLE_CONFIG).unwrap();
+}
}

@@ -38,7 +38,7 @@ use cuprate_p2p_core::{
use cuprate_txpool::service::TxpoolReadHandle;
use cuprate_types::{
blockchain::{BlockchainReadRequest, BlockchainResponse},
-BlockCompleteEntry, MissingTxsInBlock, TransactionBlobs,
+BlockCompleteEntry, TransactionBlobs, TxsInBlock,
};
use cuprate_wire::protocol::{
ChainRequest, ChainResponse, FluffyMissingTransactionsRequest, GetObjectsRequest,

@@ -56,9 +56,7 @@ use crate::{
#[derive(Clone)]
pub struct P2pProtocolRequestHandlerMaker {
pub blockchain_read_handle: BlockchainReadHandle,
pub blockchain_context_service: BlockChainContextService,
pub txpool_read_handle: TxpoolReadHandle,
/// The [`IncomingTxHandler`], wrapped in an [`Option`] as there is a cyclic reference between [`P2pProtocolRequestHandlerMaker`]

@@ -115,13 +113,9 @@ where
#[derive(Clone)]
pub struct P2pProtocolRequestHandler<N: NetZoneAddress> {
peer_information: PeerInformation<N>,
blockchain_read_handle: BlockchainReadHandle,
blockchain_context_service: BlockChainContextService,
txpool_read_handle: TxpoolReadHandle,
incoming_tx_handler: IncomingTxHandler,
}

@@ -196,7 +190,7 @@ async fn get_objects(
.call(BlockchainReadRequest::BlockCompleteEntries(block_hashes))
.await?
else {
-panic!("blockchain returned wrong response!");
+unreachable!();
};
Ok(ProtocolResponse::GetObjects(GetObjectsResponse {

@@ -233,18 +227,18 @@ async fn get_chain(
.call(BlockchainReadRequest::NextChainEntry(block_hashes, 10_000))
.await?
else {
-panic!("blockchain returned wrong response!");
+unreachable!();
};
-if start_height == 0 {
+let Some(start_height) = start_height else {
anyhow::bail!("The peers chain has a different genesis block than ours.");
-}
+};
let (cumulative_difficulty_low64, cumulative_difficulty_top64) =
split_u128_into_low_high_bits(cumulative_difficulty);
Ok(ProtocolResponse::GetChain(ChainResponse {
-start_height: usize_to_u64(start_height),
+start_height: usize_to_u64(std::num::NonZero::get(start_height)),
total_height: usize_to_u64(chain_height),
cumulative_difficulty_low64,
cumulative_difficulty_top64,

@@ -271,19 +265,19 @@ async fn fluffy_missing_txs(
// deallocate the backing `Bytes`.
drop(request);
-let BlockchainResponse::MissingTxsInBlock(res) = blockchain_read_handle
+let BlockchainResponse::TxsInBlock(res) = blockchain_read_handle
.ready()
.await?
-.call(BlockchainReadRequest::MissingTxsInBlock {
+.call(BlockchainReadRequest::TxsInBlock {
block_hash,
tx_indexes,
})
.await?
else {
-panic!("blockchain returned wrong response!");
+unreachable!();
};
-let Some(MissingTxsInBlock { block, txs }) = res else {
+let Some(TxsInBlock { block, txs }) = res else {
anyhow::bail!("The peer requested txs out of range.");
};

@@ -412,11 +406,7 @@ where
};
// Drop all the data except the stuff we still need.
-let NewTransactions {
-txs,
-dandelionpp_fluff: _,
-padding: _,
-} = request;
+let NewTransactions { txs, .. } = request;
let res = incoming_tx_handler
.ready()

@@ -13,7 +13,7 @@ use std::{
macro_rules! define_init_lazylock_statics {
($(
$( #[$attr:meta] )*
-$name:ident: $t:ty = $init_fn:expr;
+$name:ident: $t:ty = $init_fn:expr_2021;
)*) => {
/// Initialize global static `LazyLock` data.
pub fn init_lazylock_statics() {

@@ -143,9 +143,16 @@
---
-- [⚪️ Benchmarking](benchmarking/intro.md)
-- [⚪️ Criterion](benchmarking/criterion.md)
-- [⚪️ Harness](benchmarking/harness.md)
+- [🟢 Benchmarking](benchmarking/intro.md)
+- [🟢 Criterion](benchmarking/criterion/intro.md)
+- [🟢 Creating](benchmarking/criterion/creating.md)
+- [🟢 Running](benchmarking/criterion/running.md)
+- [🟢 `cuprate-benchmark`](benchmarking/cuprate/intro.md)
+- [🟢 Creating](benchmarking/cuprate/creating.md)
+- [🟢 Running](benchmarking/cuprate/running.md)
+---
- [⚪️ Testing](testing/intro.md)
- [⚪️ Monero data](testing/monero-data.md)
- [⚪️ RPC client](testing/rpc-client.md)

@@ -54,6 +54,11 @@ cargo doc --open --package cuprate-blockchain
| [`cuprate-rpc-interface`](https://doc.cuprate.org/cuprate_rpc_interface) | [`rpc/interface/`](https://github.com/Cuprate/cuprate/tree/main/rpc/interface) | RPC interface & routing
| [`cuprate-rpc-handler`](https://doc.cuprate.org/cuprate_rpc_handler) | [`rpc/handler/`](https://github.com/Cuprate/cuprate/tree/main/rpc/handler) | RPC inner handlers
## ZMQ
| Crate | In-tree path | Purpose |
|-------|--------------|---------|
| [`cuprate-zmq-types`](https://doc.cuprate.org/cuprate_zmq_types) | [`zmq/types/`](https://github.com/Cuprate/cuprate/tree/main/zmq/types) | Message types for ZMQ Pub/Sub interface
## 1-off crates ## 1-off crates
| Crate | In-tree path | Purpose | | Crate | In-tree path | Purpose |
|-------|--------------|---------| |-------|--------------|---------|
@ -63,3 +68,11 @@ cargo doc --open --package cuprate-blockchain
| [`cuprate-helper`](https://doc.cuprate.org/cuprate_helper) | [`helper/`](https://github.com/Cuprate/cuprate/tree/main/helper) | Kitchen-sink helper crate for Cuprate | [`cuprate-helper`](https://doc.cuprate.org/cuprate_helper) | [`helper/`](https://github.com/Cuprate/cuprate/tree/main/helper) | Kitchen-sink helper crate for Cuprate
| [`cuprate-test-utils`](https://doc.cuprate.org/cuprate_test_utils) | [`test-utils/`](https://github.com/Cuprate/cuprate/tree/main/test-utils) | Testing utilities for Cuprate | [`cuprate-test-utils`](https://doc.cuprate.org/cuprate_test_utils) | [`test-utils/`](https://github.com/Cuprate/cuprate/tree/main/test-utils) | Testing utilities for Cuprate
| [`cuprate-types`](https://doc.cuprate.org/cuprate_types) | [`types/`](https://github.com/Cuprate/cuprate/tree/main/types) | Shared types across Cuprate | [`cuprate-types`](https://doc.cuprate.org/cuprate_types) | [`types/`](https://github.com/Cuprate/cuprate/tree/main/types) | Shared types across Cuprate
## Benchmarks
| Crate | In-tree path | Purpose |
|-------|--------------|---------|
| [`cuprate-benchmark`](https://doc.cuprate.org/cuprate_benchmark) | [`benches/benchmark/bin/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin) | Cuprate benchmarking binary
| [`cuprate-benchmark-lib`](https://doc.cuprate.org/cuprate_benchmark_lib) | [`benches/benchmark/lib/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib) | Cuprate benchmarking library
| `cuprate-benchmark-*` | [`benches/benchmark/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/) | Benchmark for a Cuprate crate that uses `cuprate-benchmark`
| `cuprate-criterion-*` | [`benches/criterion/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) | Benchmark for a Cuprate crate that uses [Criterion](https://bheisler.github.io/criterion.rs/book)

View file

@ -1 +0,0 @@
# ⚪️ Criterion

View file

@ -0,0 +1,21 @@
# Creating
Creating a new Criterion-based benchmarking crate for one of Cuprate's crates is relatively simple,
although it requires some knowledge of how to use Criterion first:
1. Read the `Getting Started` section of <https://bheisler.github.io/criterion.rs/book>
2. Copy [`benches/criterion/example`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/example) as a base
3. Get started (a minimal sketch is shown below)
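For orientation, a Criterion benchmark file looks roughly like the sketch below. The `fibonacci` function is a hypothetical stand-in for whatever item of the Cuprate crate you are actually measuring, and the file is assumed to be registered as a `[[bench]]` target with `harness = false` (which the copied `example` crate should already set up):
```rust
use criterion::{criterion_group, criterion_main, Criterion};

// Hypothetical function under test; a real benchmark would import
// an item from the Cuprate crate being measured instead.
fn fibonacci(n: u64) -> u64 {
    match n {
        0 | 1 => 1,
        n => fibonacci(n - 1) + fibonacci(n - 2),
    }
}

// Times `fibonacci(20)`; `black_box` stops the compiler from
// optimizing the call away.
fn bench_fibonacci(c: &mut Criterion) {
    c.bench_function("fibonacci 20", |b| {
        b.iter(|| fibonacci(std::hint::black_box(20)));
    });
}

criterion_group!(benches, bench_fibonacci);
criterion_main!(benches);
```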
## Naming
New benchmark crates using Criterion should:
- Be in [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/)
- Be named using the `cuprate-criterion-$CRATE_NAME` format
For a real example, see:
[`cuprate-criterion-json-rpc`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/cuprate-json-rpc).
## Workspace
Finally, make sure to add the benchmark crate to the workspace
[`Cargo.toml`](https://github.com/Cuprate/cuprate/blob/main/Cargo.toml) file.
Your benchmark is now ready to be run.

View file

@ -0,0 +1,4 @@
# Criterion
Each sub-directory in [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) is a crate that uses [Criterion](https://bheisler.github.io/criterion.rs/book) for timing single functions and/or groups of functions.
They are generally small in scope.

View file

@ -0,0 +1,15 @@
# Running
To run all Criterion benchmarks, run this from the repository root:
```bash
cargo bench
```
To run specific package(s), use:
```bash
cargo bench --package $CRITERION_BENCHMARK_CRATE_NAME
```
For example:
```bash
cargo bench --package cuprate-criterion-json-rpc
```

View file

@ -0,0 +1,57 @@
# Creating
New benchmarks are plugged into `cuprate-benchmark` by:
1. Implementing `cuprate_benchmark_lib::Benchmark`
1. Registering the benchmark in the `cuprate_benchmark` binary
See [`benches/benchmark/example`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/example)
for an example.
## Creating the benchmark crate
Before plugging into `cuprate-benchmark`, your actual benchmark crate must be created:
1. Create a new crate inside `benches/benchmark` (consider copying `benches/benchmark/example` as a base)
1. Pull in `cuprate_benchmark_lib` as a dependency
1. Create a benchmark
1. Implement `cuprate_benchmark_lib::Benchmark`
New benchmark crates using `cuprate-benchmark` should:
- Be in [`benches/benchmark/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/)
- Be named using the `cuprate-benchmark-$CRATE_NAME` format
For a real example, see:
[`cuprate-benchmark-database`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/cuprate-database).
## `cuprate_benchmark_lib::Benchmark`
This is the trait that standardizes all benchmarks run under `cuprate-benchmark`.
It must be implemented by your benchmarking crate.
See the `cuprate-benchmark-lib` crate documentation for a user guide: <https://doc.cuprate.org/cuprate_benchmark_lib>.
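To give a feel for the plug-in pattern, below is a self-contained toy sketch. The trait shown here is only an illustrative stand-in with assumed method names; it is not the actual `cuprate_benchmark_lib::Benchmark` definition, whose exact shape is documented at the link above:
```rust
use std::time::Instant;

// Illustrative stand-in only: the associated type and method names are
// assumptions, not the real `cuprate_benchmark_lib::Benchmark` definition.
trait Benchmark {
    /// Data prepared before timing starts.
    type Input;

    /// Untimed setup.
    fn setup() -> Self::Input;

    /// The code that the harness actually times.
    fn main(input: Self::Input);
}

/// A toy benchmark that sorts a large vector.
struct SortBenchmark;

impl Benchmark for SortBenchmark {
    type Input = Vec<u64>;

    fn setup() -> Self::Input {
        (0..1_000_000).rev().collect()
    }

    fn main(mut input: Self::Input) {
        input.sort_unstable();
    }
}

fn main() {
    // A harness like `cuprate-benchmark` performs the set-up, timing,
    // and output steps for every registered benchmark.
    let input = SortBenchmark::setup();
    let start = Instant::now();
    SortBenchmark::main(input);
    println!("SortBenchmark took {:?}", start.elapsed());
}
```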
## Adding a feature to `cuprate-benchmark`
After your benchmark's behavior is defined, it must be registered
in the binary that is actually run: `cuprate-benchmark`.
If your benchmark is new, add a new crate feature to [`cuprate-benchmark`'s Cargo.toml file](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin/Cargo.toml) with an optional dependency to your benchmarking crate.
Please remember to edit the feature table in the
[`README.md`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin/README.md) as well!
## Adding to `cuprate-benchmark`'s `main()`
After adding your crate's feature, add a conditional line to the `main()` function
that runs the benchmark if the feature is enabled.
For example, if your crate's name is `egg`:
```rust
cfg_if! {
if #[cfg(feature = "egg")] {
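        // Only compiled and run when `cuprate-benchmark` is built with the `egg` feature enabled.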
run::run_benchmark::<cuprate_benchmark_egg::Benchmark>(&mut timings);
}
}
```
## Workspace
Finally, make sure to add the benchmark crate to the workspace
[`Cargo.toml`](https://github.com/Cuprate/cuprate/blob/main/Cargo.toml) file.
Your benchmark is now ready to be run.

View file

@ -0,0 +1,37 @@
# cuprate-benchmark
Cuprate has 2 custom crates for general benchmarking:
- `cuprate-benchmark`; the binary crate that is actually run
- `cuprate-benchmark-lib`; the library that other crates hook into
The abstract purpose of `cuprate-benchmark` is very simple:
1. Set up the benchmark
1. Start timer
1. Run benchmark
1. Output data
`cuprate-benchmark` runs the benchmarks found in [`benches/benchmark/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark).
`cuprate-benchmark-lib` defines the `Benchmark` trait that all
benchmark crates implement to "plug-in" to the benchmarking harness.
## Diagram
A diagram displaying the relation between `cuprate-benchmark` and related crates.
```
┌─────────────────────┐
│ cuprate_benchmark │
│ (actual binary ran) │
└──────────┬──────────┘
┌──────────────────┴───────────────────┐
│ cuprate_benchmark_lib │
│ ┌───────────────────────────────────┐│
│ │ trait Benchmark ││
│ └───────────────────────────────────┘│
└──────────────────┬───────────────────┘
┌───────────────────────────┐ │ ┌───────────────────────────┐
│ cuprate_benchmark_example ├──┼───┤ cuprate_benchmark_* │
└───────────────────────────┘ │ └───────────────────────────┘
┌───────────────────────────┐ │ ┌───────────────────────────┐
│ cuprate_benchmark_* ├──┴───┤ cuprate_benchmark_* │
└───────────────────────────┘ └───────────────────────────┘
```

View file

@ -0,0 +1,16 @@
# Running
`cuprate-benchmark` benchmarks are run with this command:
```bash
cargo run --release --package cuprate-benchmark --features $BENCHMARK_CRATE_FEATURE
```
For example, to run the example benchmark:
```bash
cargo run --release --package cuprate-benchmark --features example
```
Use the `all` feature to run all benchmarks:
```bash
# Run all benchmarks
cargo run --release --package cuprate-benchmark --features all
```

View file

@ -1 +0,0 @@
# ⚪️ Harness

View file

@ -1 +1,22 @@
# ⚪️ Benchmarking # Benchmarking
Cuprate has 2 types of benchmarks:
- [Criterion](https://bheisler.github.io/criterion.rs/book/user_guide/advanced_configuration.html) benchmarks
- `cuprate-benchmark` benchmarks
Criterion is used for micro benchmarks; they time single functions or groups of functions and are generally small in scope.
`cuprate-benchmark` and [`cuprate-benchmark-lib`](https://doc.cuprate.org/cuprate_benchmark_lib) are custom in-house crates Cuprate uses for macro benchmarks; these test sub-systems, sections of a sub-system, or otherwise larger or more complicated code that isn't well-suited for micro benchmarks.
## File layout and purpose
All benchmarking-related files are in the [`benches/`](https://github.com/Cuprate/cuprate/tree/main/benches) folder.
This directory is organized as follows:
| Directory | Purpose |
|-------------------------------|---------|
| [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) | Criterion (micro) benchmarks
| `benches/criterion/cuprate-*` | Criterion benchmarks for the crate with the same name
| [`benches/benchmark/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark) | Cuprate's custom benchmarking files
| [`benches/benchmark/bin`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin) | The `cuprate-benchmark` crate; the actual binary run that links all benchmarks
| [`benches/benchmark/lib`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib) | The `cuprate-benchmark-lib` crate; the benchmarking framework all benchmarks plug into
| `benches/benchmark/cuprate-*` | `cuprate-benchmark` benchmarks for the crate with the same name

View file

@ -328,8 +328,8 @@ fn next_difficulty(
time_span = 1; time_span = 1;
} }
// TODO: do checked operations here and unwrap so we don't silently overflow? // TODO: do `checked_mul` here and unwrap so we don't silently overflow?
(windowed_work * u128::from(hf.block_time().as_secs()) + time_span - 1) / time_span (windowed_work * u128::from(hf.block_time().as_secs())).div_ceil(time_span)
} }
/// Get the start and end of the window to calculate difficulty. /// Get the start and end of the window to calculate difficulty.

View file

@ -9,7 +9,7 @@ use clap::Parser;
use tower::{Service, ServiceExt}; use tower::{Service, ServiceExt};
use cuprate_blockchain::{ use cuprate_blockchain::{
config::ConfigBuilder, cuprate_database::RuntimeError, service::BlockchainReadHandle, config::ConfigBuilder, cuprate_database::DbResult, service::BlockchainReadHandle,
}; };
use cuprate_types::{ use cuprate_types::{
blockchain::{BlockchainReadRequest, BlockchainResponse}, blockchain::{BlockchainReadRequest, BlockchainResponse},
@ -23,7 +23,7 @@ const BATCH_SIZE: usize = 512;
async fn read_batch( async fn read_batch(
handle: &mut BlockchainReadHandle, handle: &mut BlockchainReadHandle,
height_from: usize, height_from: usize,
) -> Result<Vec<BlockId>, RuntimeError> { ) -> DbResult<Vec<BlockId>> {
let mut block_ids = Vec::<BlockId>::with_capacity(BATCH_SIZE); let mut block_ids = Vec::<BlockId>::with_capacity(BATCH_SIZE);
for height in height_from..(height_from + BATCH_SIZE) { for height in height_from..(height_from + BATCH_SIZE) {

View file

@ -49,7 +49,7 @@ pub(crate) fn subarray_copy<T: AsRef<[U]> + ?Sized, U: Copy, const LEN: usize>(
/// A mutable reference to a fixed-size subarray of type `[U; LEN]`. /// A mutable reference to a fixed-size subarray of type `[U; LEN]`.
/// ///
/// # Panics /// # Panics
/// Panics if `start + LEN > array.as_ref().len()`. /// Panics if `start + LEN > array.as_mut().len()`.
#[inline] #[inline]
pub(crate) fn subarray_mut<T: AsMut<[U]> + ?Sized, U, const LEN: usize>( pub(crate) fn subarray_mut<T: AsMut<[U]> + ?Sized, U, const LEN: usize>(
array: &mut T, array: &mut T,

View file

@ -220,13 +220,13 @@ pub fn logs_path(data_dir: &Path, network: Network) -> PathBuf {
/// This is the PATH used for any Cuprate address-book files. /// This is the PATH used for any Cuprate address-book files.
/// ///
/// ```rust /// ```rust
/// use cuprate_helper::{network::Network, fs::{CUPRATE_CACHE_DIR, addressbook_path}}; /// use cuprate_helper::{network::Network, fs::{CUPRATE_CACHE_DIR, address_book_path}};
/// ///
/// assert_eq!(addressbook_path(&**CUPRATE_CACHE_DIR, Network::Mainnet).as_path(), CUPRATE_CACHE_DIR.join("addressbook")); /// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Mainnet).as_path(), CUPRATE_CACHE_DIR.join("addressbook"));
/// assert_eq!(addressbook_path(&**CUPRATE_CACHE_DIR, Network::Stagenet).as_path(), CUPRATE_CACHE_DIR.join(Network::Stagenet.to_string()).join("addressbook")); /// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Stagenet).as_path(), CUPRATE_CACHE_DIR.join(Network::Stagenet.to_string()).join("addressbook"));
/// assert_eq!(addressbook_path(&**CUPRATE_CACHE_DIR, Network::Testnet).as_path(), CUPRATE_CACHE_DIR.join(Network::Testnet.to_string()).join("addressbook")); /// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Testnet).as_path(), CUPRATE_CACHE_DIR.join(Network::Testnet.to_string()).join("addressbook"));
/// ``` /// ```
pub fn addressbook_path(cache_dir: &Path, network: Network) -> PathBuf { pub fn address_book_path(cache_dir: &Path, network: Network) -> PathBuf {
path_with_network(cache_dir, network).join("addressbook") path_with_network(cache_dir, network).join("addressbook")
} }

View file

@ -76,14 +76,14 @@ macro_rules! epee_object {
// All this does is return the second (right) arg if present otherwise the left is returned. // All this does is return the second (right) arg if present otherwise the left is returned.
( (
@internal_try_right_then_left @internal_try_right_then_left
$a:expr, $b:expr $a:expr_2021, $b:expr_2021
) => { ) => {
$b $b
}; };
( (
@internal_try_right_then_left @internal_try_right_then_left
$a:expr, $a:expr_2021,
) => { ) => {
$a $a
}; };
@ -122,7 +122,7 @@ macro_rules! epee_object {
// ------------------------------------------------------------------------ Entry Point // ------------------------------------------------------------------------ Entry Point
( (
$obj:ident, $obj:ident,
$($field: ident $(($alt_name: literal))?: $ty:ty $(as $ty_as:ty )? $(= $default:expr)? $(=> $read_fn:expr, $write_fn:expr, $should_write_fn:expr)?, )* $($field: ident $(($alt_name: literal))?: $ty:ty $(as $ty_as:ty )? $(= $default:expr_2021)? $(=> $read_fn:expr_2021, $write_fn:expr_2021, $should_write_fn:expr_2021)?, )*
$(!flatten: $flat_field: ident: $flat_ty:ty ,)* $(!flatten: $flat_field: ident: $flat_ty:ty ,)*
) => { ) => {

View file

@ -42,11 +42,7 @@ pub(crate) fn save_peers_to_disk<Z: BorshNetworkZone>(
let file = cfg let file = cfg
.peer_store_directory .peer_store_directory
.join(format!("{}_p2p_state", Z::NAME)); .join(format!("{}_p2p_state", Z::NAME));
spawn_blocking(move || fs::write(&file, &data))
spawn_blocking(move || {
fs::create_dir_all(file.parent().unwrap())?;
fs::write(&file, &data)
})
} }
pub(crate) async fn read_peers_from_disk<Z: BorshNetworkZone>( pub(crate) async fn read_peers_from_disk<Z: BorshNetworkZone>(

View file

@ -157,7 +157,7 @@ pub struct BufferSinkSend<'a, T> {
item: Option<T>, item: Option<T>,
} }
impl<'a, T> Future for BufferSinkSend<'a, T> { impl<T> Future for BufferSinkSend<'_, T> {
type Output = Result<(), BufferError>; type Output = Result<(), BufferError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
@ -183,7 +183,7 @@ pub struct BufferSinkReady<'a, T> {
size_needed: usize, size_needed: usize,
} }
impl<'a, T> Future for BufferSinkReady<'a, T> { impl<T> Future for BufferSinkReady<'_, T> {
type Output = (); type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {

View file

@ -12,6 +12,7 @@ use crate::{
OutboundPeer, State, OutboundPeer, State,
}; };
#[expect(clippy::type_complexity)]
pub(crate) fn mock_discover_svc<Req: Send + 'static>() -> ( pub(crate) fn mock_discover_svc<Req: Send + 'static>() -> (
impl Stream< impl Stream<
Item = Result< Item = Result<

View file

@ -121,7 +121,6 @@ pub trait NetZoneAddress:
/// ///
/// - TODO: IP zone banning? /// - TODO: IP zone banning?
/// - TODO: rename this to Host. /// - TODO: rename this to Host.
type BanID: Debug + Hash + Eq + Clone + Copy + Send + 'static; type BanID: Debug + Hash + Eq + Clone + Copy + Send + 'static;
/// Changes the port of this address to `port`. /// Changes the port of this address to `port`.

View file

@ -62,13 +62,13 @@ pub struct BlockBatch {
pub struct BlockDownloaderConfig { pub struct BlockDownloaderConfig {
/// The size in bytes of the buffer between the block downloader and the place which /// The size in bytes of the buffer between the block downloader and the place which
/// is consuming the downloaded blocks. /// is consuming the downloaded blocks.
pub buffer_size: usize, pub buffer_bytes: usize,
/// The size of the in progress queue (in bytes) at which we stop requesting more blocks. /// The size of the in progress queue (in bytes) at which we stop requesting more blocks.
pub in_progress_queue_size: usize, pub in_progress_queue_bytes: usize,
/// The [`Duration`] between checking the client pool for free peers. /// The [`Duration`] between checking the client pool for free peers.
pub check_client_pool_interval: Duration, pub check_client_pool_interval: Duration,
/// The target size of a single batch of blocks (in bytes). /// The target size of a single batch of blocks (in bytes).
pub target_batch_size: usize, pub target_batch_bytes: usize,
/// The initial amount of blocks to request (in number of blocks) /// The initial amount of blocks to request (in number of blocks)
pub initial_batch_len: usize, pub initial_batch_len: usize,
} }
@ -145,7 +145,7 @@ where
+ 'static, + 'static,
C::Future: Send + 'static, C::Future: Send + 'static,
{ {
let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_size); let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_bytes);
let block_downloader = BlockDownloader::new(peer_set, our_chain_svc, buffer_appender, config); let block_downloader = BlockDownloader::new(peer_set, our_chain_svc, buffer_appender, config);
@ -381,7 +381,7 @@ where
} }
// If our ready queue is too large send duplicate requests for the blocks we are waiting on. // If our ready queue is too large send duplicate requests for the blocks we are waiting on.
if self.block_queue.size() >= self.config.in_progress_queue_size { if self.block_queue.size() >= self.config.in_progress_queue_bytes {
return self.request_inflight_batch_again(client); return self.request_inflight_batch_again(client);
} }
@ -565,7 +565,7 @@ where
self.amount_of_blocks_to_request = calculate_next_block_batch_size( self.amount_of_blocks_to_request = calculate_next_block_batch_size(
block_batch.size, block_batch.size,
block_batch.blocks.len(), block_batch.blocks.len(),
self.config.target_batch_size, self.config.target_batch_bytes,
); );
tracing::debug!( tracing::debug!(

View file

@ -146,9 +146,9 @@ fn deserialize_batch(
// Check the height lines up as expected. // Check the height lines up as expected.
// This must happen after the hash check. // This must happen after the hash check.
if !block if block
.number() .number()
.is_some_and(|height| height == expected_height) .is_none_or(|height| height != expected_height)
{ {
tracing::warn!( tracing::warn!(
"Invalid chain, expected height: {expected_height}, got height: {:?}", "Invalid chain, expected height: {expected_height}, got height: {:?}",

View file

@ -66,10 +66,10 @@ proptest! {
genesis: *blockchain.blocks.first().unwrap().0 genesis: *blockchain.blocks.first().unwrap().0
}, },
BlockDownloaderConfig { BlockDownloaderConfig {
buffer_size: 1_000, buffer_bytes: 1_000,
in_progress_queue_size: 10_000, in_progress_queue_bytes: 10_000,
check_client_pool_interval: Duration::from_secs(5), check_client_pool_interval: Duration::from_secs(5),
target_batch_size: 5_000, target_batch_bytes: 5_000,
initial_batch_len: 1, initial_batch_len: 1,
}); });

View file

@ -57,6 +57,7 @@ impl Default for BroadcastConfig {
/// - The [`BroadcastSvc`] /// - The [`BroadcastSvc`]
/// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **outbound** peers. /// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **outbound** peers.
/// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **inbound** peers. /// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **inbound** peers.
#[expect(clippy::type_complexity)]
pub(crate) fn init_broadcast_channels<N: NetworkZone>( pub(crate) fn init_broadcast_channels<N: NetworkZone>(
config: BroadcastConfig, config: BroadcastConfig,
) -> ( ) -> (

View file

@ -327,7 +327,7 @@ impl DecompressedPruningSeed {
/// ///
/// This function will also error if `block_height` > `blockchain_height` /// This function will also error if `block_height` > `blockchain_height`
/// ///
pub fn get_next_unpruned_block( pub const fn get_next_unpruned_block(
&self, &self,
block_height: usize, block_height: usize,
blockchain_height: usize, blockchain_height: usize,

View file

@ -68,7 +68,7 @@ macro_rules! generate_endpoints_with_no_input {
/// - [`generate_endpoints_with_input`] /// - [`generate_endpoints_with_input`]
/// - [`generate_endpoints_with_no_input`] /// - [`generate_endpoints_with_no_input`]
macro_rules! generate_endpoints_inner { macro_rules! generate_endpoints_inner {
($variant:ident, $handler:ident, $request:expr) => { ($variant:ident, $handler:ident, $request:expr_2021) => {
paste::paste! { paste::paste! {
{ {
// Check if restricted. // Check if restricted.

View file

@ -71,7 +71,7 @@ macro_rules! generate_endpoints_with_no_input {
/// - [`generate_endpoints_with_input`] /// - [`generate_endpoints_with_input`]
/// - [`generate_endpoints_with_no_input`] /// - [`generate_endpoints_with_no_input`]
macro_rules! generate_endpoints_inner { macro_rules! generate_endpoints_inner {
($variant:ident, $handler:ident, $request:expr) => { ($variant:ident, $handler:ident, $request:expr_2021) => {
paste::paste! { paste::paste! {
{ {
// Check if restricted. // Check if restricted.

View file

@ -9,26 +9,19 @@ use cuprate_fixed_bytes::ByteArrayVec;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
#[cfg(feature = "epee")] #[cfg(feature = "epee")]
use cuprate_epee_encoding::{ use cuprate_epee_encoding::container_as_blob::ContainerAsBlob;
container_as_blob::ContainerAsBlob,
epee_object, error,
macros::bytes::{Buf, BufMut},
read_epee_value, write_field, EpeeObject, EpeeObjectBuilder,
};
use cuprate_types::BlockCompleteEntry; use cuprate_types::BlockCompleteEntry;
use crate::{ use crate::{
base::AccessResponseBase, base::AccessResponseBase,
macros::{define_request, define_request_and_response, define_request_and_response_doc}, macros::define_request_and_response,
misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolTxInfo, Status}, misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolInfo},
rpc_call::RpcCallValue, rpc_call::RpcCallValue,
}; };
#[cfg(any(feature = "epee", feature = "serde"))] #[cfg(any(feature = "epee", feature = "serde"))]
use crate::defaults::{default_false, default_zero}; use crate::defaults::{default_false, default_zero};
#[cfg(feature = "epee")]
use crate::misc::PoolInfoExtent;
//---------------------------------------------------------------------------------------------------- Definitions //---------------------------------------------------------------------------------------------------- Definitions
define_request_and_response! { define_request_and_response! {
@ -115,15 +108,14 @@ define_request_and_response! {
} }
} }
//---------------------------------------------------------------------------------------------------- GetBlocks define_request_and_response! {
define_request! {
#[doc = define_request_and_response_doc!(
"response" => GetBlocksResponse,
get_blocksbin, get_blocksbin,
cc73fe71162d564ffda8e549b79a350bca53c454, cc73fe71162d564ffda8e549b79a350bca53c454 =>
core_rpc_server_commands_defs, h, 162, 262, core_rpc_server_commands_defs.h => 162..=262,
)]
GetBlocksRequest { GetBlocks,
Request {
requested_info: u8 = default_zero::<u8>(), "default_zero", requested_info: u8 = default_zero::<u8>(), "default_zero",
// FIXME: This is a `std::list` in `monerod` because...? // FIXME: This is a `std::list` in `monerod` because...?
block_ids: ByteArrayVec<32>, block_ids: ByteArrayVec<32>,
@ -131,259 +123,17 @@ define_request! {
prune: bool, prune: bool,
no_miner_tx: bool = default_false(), "default_false", no_miner_tx: bool = default_false(), "default_false",
pool_info_since: u64 = default_zero::<u64>(), "default_zero", pool_info_since: u64 = default_zero::<u64>(), "default_zero",
} },
}
#[doc = define_request_and_response_doc!( // TODO: add `top_block_hash` field
"request" => GetBlocksRequest, // <https://github.com/monero-project/monero/blame/893916ad091a92e765ce3241b94e706ad012b62a/src/rpc/core_rpc_server_commands_defs.h#L263>
get_blocksbin, AccessResponseBase {
cc73fe71162d564ffda8e549b79a350bca53c454,
core_rpc_server_commands_defs, h, 162, 262,
)]
///
/// This response's variant depends upon [`PoolInfoExtent`].
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum GetBlocksResponse {
/// Will always serialize a [`PoolInfoExtent::None`] field.
PoolInfoNone(GetBlocksResponsePoolInfoNone),
/// Will always serialize a [`PoolInfoExtent::Incremental`] field.
PoolInfoIncremental(GetBlocksResponsePoolInfoIncremental),
/// Will always serialize a [`PoolInfoExtent::Full`] field.
PoolInfoFull(GetBlocksResponsePoolInfoFull),
}
impl Default for GetBlocksResponse {
fn default() -> Self {
Self::PoolInfoNone(GetBlocksResponsePoolInfoNone::default())
}
}
/// Data within [`GetBlocksResponse::PoolInfoNone`].
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct GetBlocksResponsePoolInfoNone {
pub status: Status,
pub untrusted: bool,
pub blocks: Vec<BlockCompleteEntry>,
pub start_height: u64,
pub current_height: u64,
pub output_indices: Vec<BlockOutputIndices>,
pub daemon_time: u64,
}
#[cfg(feature = "epee")]
epee_object! {
GetBlocksResponsePoolInfoNone,
status: Status,
untrusted: bool,
blocks: Vec<BlockCompleteEntry>, blocks: Vec<BlockCompleteEntry>,
start_height: u64, start_height: u64,
current_height: u64, current_height: u64,
output_indices: Vec<BlockOutputIndices>, output_indices: Vec<BlockOutputIndices>,
daemon_time: u64, daemon_time: u64,
} pool_info: PoolInfo,
/// Data within [`GetBlocksResponse::PoolInfoIncremental`].
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct GetBlocksResponsePoolInfoIncremental {
pub status: Status,
pub untrusted: bool,
pub blocks: Vec<BlockCompleteEntry>,
pub start_height: u64,
pub current_height: u64,
pub output_indices: Vec<BlockOutputIndices>,
pub daemon_time: u64,
pub added_pool_txs: Vec<PoolTxInfo>,
pub remaining_added_pool_txids: ByteArrayVec<32>,
pub removed_pool_txids: ByteArrayVec<32>,
}
#[cfg(feature = "epee")]
epee_object! {
GetBlocksResponsePoolInfoIncremental,
status: Status,
untrusted: bool,
blocks: Vec<BlockCompleteEntry>,
start_height: u64,
current_height: u64,
output_indices: Vec<BlockOutputIndices>,
daemon_time: u64,
added_pool_txs: Vec<PoolTxInfo>,
remaining_added_pool_txids: ByteArrayVec<32>,
removed_pool_txids: ByteArrayVec<32>,
}
/// Data within [`GetBlocksResponse::PoolInfoFull`].
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct GetBlocksResponsePoolInfoFull {
pub status: Status,
pub untrusted: bool,
pub blocks: Vec<BlockCompleteEntry>,
pub start_height: u64,
pub current_height: u64,
pub output_indices: Vec<BlockOutputIndices>,
pub daemon_time: u64,
pub added_pool_txs: Vec<PoolTxInfo>,
pub remaining_added_pool_txids: ByteArrayVec<32>,
}
#[cfg(feature = "epee")]
epee_object! {
GetBlocksResponsePoolInfoFull,
status: Status,
untrusted: bool,
blocks: Vec<BlockCompleteEntry>,
start_height: u64,
current_height: u64,
output_indices: Vec<BlockOutputIndices>,
daemon_time: u64,
added_pool_txs: Vec<PoolTxInfo>,
remaining_added_pool_txids: ByteArrayVec<32>,
}
#[cfg(feature = "epee")]
/// [`EpeeObjectBuilder`] for [`GetBlocksResponse`].
///
/// Not for public usage.
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct __GetBlocksResponseEpeeBuilder {
pub status: Option<Status>,
pub untrusted: Option<bool>,
pub blocks: Option<Vec<BlockCompleteEntry>>,
pub start_height: Option<u64>,
pub current_height: Option<u64>,
pub output_indices: Option<Vec<BlockOutputIndices>>,
pub daemon_time: Option<u64>,
pub pool_info_extent: Option<PoolInfoExtent>,
pub added_pool_txs: Option<Vec<PoolTxInfo>>,
pub remaining_added_pool_txids: Option<ByteArrayVec<32>>,
pub removed_pool_txids: Option<ByteArrayVec<32>>,
}
#[cfg(feature = "epee")]
impl EpeeObjectBuilder<GetBlocksResponse> for __GetBlocksResponseEpeeBuilder {
fn add_field<B: Buf>(&mut self, name: &str, r: &mut B) -> error::Result<bool> {
macro_rules! read_epee_field {
($($field:ident),*) => {
match name {
$(
stringify!($field) => { self.$field = Some(read_epee_value(r)?); },
)*
_ => return Ok(false),
}
};
}
read_epee_field! {
status,
untrusted,
blocks,
start_height,
current_height,
output_indices,
daemon_time,
pool_info_extent,
added_pool_txs,
remaining_added_pool_txids,
removed_pool_txids
}
Ok(true)
}
fn finish(self) -> error::Result<GetBlocksResponse> {
const ELSE: error::Error = error::Error::Format("Required field was not found!");
let status = self.status.ok_or(ELSE)?;
let untrusted = self.untrusted.ok_or(ELSE)?;
let blocks = self.blocks.ok_or(ELSE)?;
let start_height = self.start_height.ok_or(ELSE)?;
let current_height = self.current_height.ok_or(ELSE)?;
let output_indices = self.output_indices.ok_or(ELSE)?;
let daemon_time = self.daemon_time.ok_or(ELSE)?;
let pool_info_extent = self.pool_info_extent.ok_or(ELSE)?;
let this = match pool_info_extent {
PoolInfoExtent::None => {
GetBlocksResponse::PoolInfoNone(GetBlocksResponsePoolInfoNone {
status,
untrusted,
blocks,
start_height,
current_height,
output_indices,
daemon_time,
})
}
PoolInfoExtent::Incremental => {
GetBlocksResponse::PoolInfoIncremental(GetBlocksResponsePoolInfoIncremental {
status,
untrusted,
blocks,
start_height,
current_height,
output_indices,
daemon_time,
added_pool_txs: self.added_pool_txs.ok_or(ELSE)?,
remaining_added_pool_txids: self.remaining_added_pool_txids.ok_or(ELSE)?,
removed_pool_txids: self.removed_pool_txids.ok_or(ELSE)?,
})
}
PoolInfoExtent::Full => {
GetBlocksResponse::PoolInfoFull(GetBlocksResponsePoolInfoFull {
status,
untrusted,
blocks,
start_height,
current_height,
output_indices,
daemon_time,
added_pool_txs: self.added_pool_txs.ok_or(ELSE)?,
remaining_added_pool_txids: self.remaining_added_pool_txids.ok_or(ELSE)?,
})
}
};
Ok(this)
}
}
#[cfg(feature = "epee")]
impl EpeeObject for GetBlocksResponse {
type Builder = __GetBlocksResponseEpeeBuilder;
fn number_of_fields(&self) -> u64 {
// [`PoolInfoExtent`] + inner struct fields.
let inner_fields = match self {
Self::PoolInfoNone(s) => s.number_of_fields(),
Self::PoolInfoIncremental(s) => s.number_of_fields(),
Self::PoolInfoFull(s) => s.number_of_fields(),
};
1 + inner_fields
}
fn write_fields<B: BufMut>(self, w: &mut B) -> error::Result<()> {
match self {
Self::PoolInfoNone(s) => {
s.write_fields(w)?;
write_field(PoolInfoExtent::None.to_u8(), "pool_info_extent", w)?;
}
Self::PoolInfoIncremental(s) => {
s.write_fields(w)?;
write_field(PoolInfoExtent::Incremental.to_u8(), "pool_info_extent", w)?;
}
Self::PoolInfoFull(s) => {
s.write_fields(w)?;
write_field(PoolInfoExtent::Full.to_u8(), "pool_info_extent", w)?;
}
}
Ok(())
} }
} }

View file

@ -37,7 +37,7 @@ macro_rules! serde_doc_test {
( (
// `const` string from `cuprate_test_utils::rpc::data` // `const` string from `cuprate_test_utils::rpc::data`
// v // v
$cuprate_test_utils_rpc_const:ident => $expected:expr $cuprate_test_utils_rpc_const:ident => $expected:expr_2021
// ^ // ^
// Expected value as an expression // Expected value as an expression
) => { ) => {

View file

@ -77,7 +77,7 @@ macro_rules! define_request_and_response {
$( #[$request_field_attr:meta] )* // Field attribute. $( #[$request_field_attr:meta] )* // Field attribute.
$request_field:ident: $request_field_type:ty // field_name: field type $request_field:ident: $request_field_type:ty // field_name: field type
$(as $request_field_type_as:ty)? // (optional) alternative type (de)serialization $(as $request_field_type_as:ty)? // (optional) alternative type (de)serialization
$(= $request_field_type_default:expr, $request_field_type_default_string:literal)?, // (optional) default value $(= $request_field_type_default:expr_2021, $request_field_type_default_string:literal)?, // (optional) default value
)* )*
}, },
@ -89,7 +89,7 @@ macro_rules! define_request_and_response {
$( #[$response_field_attr:meta] )* $( #[$response_field_attr:meta] )*
$response_field:ident: $response_field_type:ty $response_field:ident: $response_field_type:ty
$(as $response_field_type_as:ty)? $(as $response_field_type_as:ty)?
$(= $response_field_type_default:expr, $response_field_type_default_string:literal)?, $(= $response_field_type_default:expr_2021, $response_field_type_default_string:literal)?,
)* )*
} }
) => { paste::paste! { ) => { paste::paste! {
@ -229,7 +229,7 @@ macro_rules! define_request {
// field_name: FieldType // field_name: FieldType
$field:ident: $field_type:ty $field:ident: $field_type:ty
$(as $field_as:ty)? $(as $field_as:ty)?
$(= $field_default:expr, $field_default_string:literal)?, $(= $field_default:expr_2021, $field_default_string:literal)?,
// The $field_default is an optional extra token that represents // The $field_default is an optional extra token that represents
// a default value to pass to [`cuprate_epee_encoding::epee_object`], // a default value to pass to [`cuprate_epee_encoding::epee_object`],
// see it for usage. // see it for usage.
@ -286,7 +286,7 @@ macro_rules! define_response {
$( #[$field_attr:meta] )* $( #[$field_attr:meta] )*
$field:ident: $field_type:ty $field:ident: $field_type:ty
$(as $field_as:ty)? $(as $field_as:ty)?
$(= $field_default:expr, $field_default_string:literal)?, $(= $field_default:expr_2021, $field_default_string:literal)?,
)* )*
} }
) => { ) => {
@ -323,7 +323,7 @@ macro_rules! define_response {
$( #[$field_attr:meta] )* $( #[$field_attr:meta] )*
$field:ident: $field_type:ty $field:ident: $field_type:ty
$(as $field_as:ty)? $(as $field_as:ty)?
$(= $field_default:expr, $field_default_string:literal)?, $(= $field_default:expr_2021, $field_default_string:literal)?,
)* )*
} }
) => { ) => {

View file

@ -11,11 +11,11 @@ use serde::{Deserialize, Serialize};
#[cfg(feature = "epee")] #[cfg(feature = "epee")]
use cuprate_epee_encoding::epee_object; use cuprate_epee_encoding::epee_object;
use crate::macros::monero_definition_link;
#[cfg(any(feature = "epee", feature = "serde"))] #[cfg(any(feature = "epee", feature = "serde"))]
use crate::defaults::default_zero; use crate::defaults::default_zero;
use crate::macros::monero_definition_link;
//---------------------------------------------------------------------------------------------------- Macros //---------------------------------------------------------------------------------------------------- Macros
/// This macro (local to this file) defines all the misc types. /// This macro (local to this file) defines all the misc types.
/// ///
@ -37,7 +37,7 @@ macro_rules! define_struct_and_impl_epee {
$( $(
$( #[$field_attr:meta] )* // Field attributes $( #[$field_attr:meta] )* // Field attributes
// Field name => the type => optional `epee_object` default value. // Field name => the type => optional `epee_object` default value.
$field_name:ident: $field_type:ty $(= $field_default:expr)?, $field_name:ident: $field_type:ty $(= $field_default:expr_2021)?,
)* )*
} }
) => { ) => {

View file

@ -17,6 +17,7 @@ mod distribution;
mod key_image_spent_status; mod key_image_spent_status;
#[expect(clippy::module_inception)] #[expect(clippy::module_inception)]
mod misc; mod misc;
mod pool_info;
mod pool_info_extent; mod pool_info_extent;
mod status; mod status;
mod tx_entry; mod tx_entry;
@ -30,6 +31,7 @@ pub use misc::{
OutputDistributionData, Peer, PoolTxInfo, PublicNode, SetBan, Span, SpentKeyImageInfo, OutputDistributionData, Peer, PoolTxInfo, PublicNode, SetBan, Span, SpentKeyImageInfo,
SyncInfoPeer, TxBacklogEntry, TxInfo, TxOutputIndices, TxpoolHisto, TxpoolStats, SyncInfoPeer, TxBacklogEntry, TxInfo, TxOutputIndices, TxpoolHisto, TxpoolStats,
}; };
pub use pool_info::PoolInfo;
pub use pool_info_extent::PoolInfoExtent; pub use pool_info_extent::PoolInfoExtent;
pub use status::Status; pub use status::Status;
pub use tx_entry::TxEntry; pub use tx_entry::TxEntry;

View file

@ -0,0 +1,171 @@
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "epee")]
use crate::misc::PoolInfoExtent;
#[cfg(feature = "epee")]
use cuprate_epee_encoding::{
epee_object, error,
macros::bytes::{Buf, BufMut},
read_epee_value, write_field, EpeeObject, EpeeObjectBuilder,
};
use cuprate_fixed_bytes::ByteArrayVec;
use crate::misc::PoolTxInfo;
//---------------------------------------------------------------------------------------------------- PoolInfo
#[doc = crate::macros::monero_definition_link!(
cc73fe71162d564ffda8e549b79a350bca53c454,
"rpc/core_rpc_server_commands_defs.h",
223..=228
)]
/// Used in [`crate::bin::GetBlocksResponse`].
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(u8)]
pub enum PoolInfo {
#[default]
None,
Incremental(PoolInfoIncremental),
Full(PoolInfoFull),
}
//---------------------------------------------------------------------------------------------------- Internal data
/// Data within [`PoolInfo::Incremental`].
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PoolInfoIncremental {
pub added_pool_txs: Vec<PoolTxInfo>,
pub remaining_added_pool_txids: ByteArrayVec<32>,
pub removed_pool_txids: ByteArrayVec<32>,
}
#[cfg(feature = "epee")]
epee_object! {
PoolInfoIncremental,
added_pool_txs: Vec<PoolTxInfo>,
remaining_added_pool_txids: ByteArrayVec<32>,
removed_pool_txids: ByteArrayVec<32>,
}
/// Data within [`PoolInfo::Full`].
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PoolInfoFull {
pub added_pool_txs: Vec<PoolTxInfo>,
pub remaining_added_pool_txids: ByteArrayVec<32>,
}
#[cfg(feature = "epee")]
epee_object! {
PoolInfoFull,
added_pool_txs: Vec<PoolTxInfo>,
remaining_added_pool_txids: ByteArrayVec<32>,
}
//---------------------------------------------------------------------------------------------------- PoolInfo epee impl
#[cfg(feature = "epee")]
/// [`EpeeObjectBuilder`] for [`PoolInfo`].
///
/// Not for public usage.
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct __PoolInfoEpeeBuilder {
/// This is a distinct field in `monerod`,
/// which is represented in this library by [`PoolInfo`]'s `u8` tag.
pub pool_info_extent: Option<PoolInfoExtent>,
pub added_pool_txs: Option<Vec<PoolTxInfo>>,
pub remaining_added_pool_txids: Option<ByteArrayVec<32>>,
pub removed_pool_txids: Option<ByteArrayVec<32>>,
}
// Custom epee implementation.
//
// HACK/INVARIANT:
// If any data within [`PoolInfo`] changes, the below code should be changed as well.
#[cfg(feature = "epee")]
impl EpeeObjectBuilder<PoolInfo> for __PoolInfoEpeeBuilder {
fn add_field<B: Buf>(&mut self, name: &str, r: &mut B) -> error::Result<bool> {
macro_rules! read_epee_field {
($($field:ident),*) => {
match name {
$(
stringify!($field) => { self.$field = Some(read_epee_value(r)?); },
)*
_ => return Ok(false),
}
};
}
read_epee_field! {
pool_info_extent,
added_pool_txs,
remaining_added_pool_txids,
removed_pool_txids
}
Ok(true)
}
fn finish(self) -> error::Result<PoolInfo> {
// INVARIANT:
// `monerod` omits serializing the field itself when a container is empty,
// `unwrap_or_default()` is used over `error()` in these cases.
// Some of the uses are when values have default fallbacks: `pool_info_extent`.
let pool_info_extent = self.pool_info_extent.unwrap_or_default();
let this = match pool_info_extent {
PoolInfoExtent::None => PoolInfo::None,
PoolInfoExtent::Incremental => PoolInfo::Incremental(PoolInfoIncremental {
added_pool_txs: self.added_pool_txs.unwrap_or_default(),
remaining_added_pool_txids: self.remaining_added_pool_txids.unwrap_or_default(),
removed_pool_txids: self.removed_pool_txids.unwrap_or_default(),
}),
PoolInfoExtent::Full => PoolInfo::Full(PoolInfoFull {
added_pool_txs: self.added_pool_txs.unwrap_or_default(),
remaining_added_pool_txids: self.remaining_added_pool_txids.unwrap_or_default(),
}),
};
Ok(this)
}
}
#[cfg(feature = "epee")]
impl EpeeObject for PoolInfo {
type Builder = __PoolInfoEpeeBuilder;
fn number_of_fields(&self) -> u64 {
// Inner struct fields.
let inner_fields = match self {
Self::None => 0,
Self::Incremental(s) => s.number_of_fields(),
Self::Full(s) => s.number_of_fields(),
};
// [`PoolInfoExtent`] + inner struct fields
1 + inner_fields
}
fn write_fields<B: BufMut>(self, w: &mut B) -> error::Result<()> {
const FIELD: &str = "pool_info_extent";
match self {
Self::None => {
write_field(PoolInfoExtent::None.to_u8(), FIELD, w)?;
}
Self::Incremental(s) => {
s.write_fields(w)?;
write_field(PoolInfoExtent::Incremental.to_u8(), FIELD, w)?;
}
Self::Full(s) => {
s.write_fields(w)?;
write_field(PoolInfoExtent::Full.to_u8(), FIELD, w)?;
}
}
Ok(())
}
}

View file

@ -2,8 +2,6 @@
//---------------------------------------------------------------------------------------------------- Use //---------------------------------------------------------------------------------------------------- Use
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
use crate::serde::{serde_false, serde_true};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
#[cfg(feature = "epee")] #[cfg(feature = "epee")]
@ -13,6 +11,9 @@ use cuprate_epee_encoding::{
EpeeObject, EpeeObjectBuilder, EpeeObject, EpeeObjectBuilder,
}; };
#[cfg(feature = "serde")]
use crate::serde::{serde_false, serde_true};
//---------------------------------------------------------------------------------------------------- TxEntry //---------------------------------------------------------------------------------------------------- TxEntry
#[doc = crate::macros::monero_definition_link!( #[doc = crate::macros::monero_definition_link!(
cc73fe71162d564ffda8e549b79a350bca53c454, cc73fe71162d564ffda8e549b79a350bca53c454,

View file

@ -65,7 +65,7 @@ macro_rules! serde_doc_test {
( (
// `const` string from `cuprate_test_utils::rpc::data` // `const` string from `cuprate_test_utils::rpc::data`
// v // v
$cuprate_test_utils_rpc_const:ident => $expected:expr $cuprate_test_utils_rpc_const:ident => $expected:expr_2021
// ^ // ^
// Expected value as an expression // Expected value as an expression
) => { ) => {

View file

@ -34,7 +34,7 @@ serde = { workspace = true, optional = true }
tower = { workspace = true } tower = { workspace = true }
thread_local = { workspace = true } thread_local = { workspace = true }
rayon = { workspace = true } rayon = { workspace = true }
bytes = "1.7.2" bytes = { workspace = true }
[dev-dependencies] [dev-dependencies]
cuprate-constants = { workspace = true } cuprate-constants = { workspace = true }

View file

@ -1,7 +1,7 @@
use bytemuck::TransparentWrapper; use bytemuck::TransparentWrapper;
use monero_serai::block::{Block, BlockHeader}; use monero_serai::block::{Block, BlockHeader};
use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec}; use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, StorableVec};
use cuprate_helper::map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits}; use cuprate_helper::map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits};
use cuprate_types::{AltBlockInformation, Chain, ChainId, ExtendedBlockHeader, HardFork}; use cuprate_types::{AltBlockInformation, Chain, ChainId, ExtendedBlockHeader, HardFork};
@ -21,7 +21,7 @@ use crate::{
pub fn flush_alt_blocks<'a, E: cuprate_database::EnvInner<'a>>( pub fn flush_alt_blocks<'a, E: cuprate_database::EnvInner<'a>>(
env_inner: &E, env_inner: &E,
tx_rw: &mut E::Rw<'_>, tx_rw: &mut E::Rw<'_>,
) -> Result<(), RuntimeError> { ) -> DbResult<()> {
use crate::tables::{ use crate::tables::{
AltBlockBlobs, AltBlockHeights, AltBlocksInfo, AltChainInfos, AltTransactionBlobs, AltBlockBlobs, AltBlockHeights, AltBlocksInfo, AltChainInfos, AltTransactionBlobs,
AltTransactionInfos, AltTransactionInfos,
@ -47,10 +47,7 @@ pub fn flush_alt_blocks<'a, E: cuprate_database::EnvInner<'a>>(
/// - `alt_block.height` is == `0` /// - `alt_block.height` is == `0`
/// - `alt_block.txs.len()` != `alt_block.block.transactions.len()` /// - `alt_block.txs.len()` != `alt_block.block.transactions.len()`
/// ///
pub fn add_alt_block( pub fn add_alt_block(alt_block: &AltBlockInformation, tables: &mut impl TablesMut) -> DbResult<()> {
alt_block: &AltBlockInformation,
tables: &mut impl TablesMut,
) -> Result<(), RuntimeError> {
let alt_block_height = AltBlockHeight { let alt_block_height = AltBlockHeight {
chain_id: alt_block.chain_id.into(), chain_id: alt_block.chain_id.into(),
height: alt_block.height, height: alt_block.height,
@ -100,7 +97,7 @@ pub fn add_alt_block(
pub fn get_alt_block( pub fn get_alt_block(
alt_block_height: &AltBlockHeight, alt_block_height: &AltBlockHeight,
tables: &impl Tables, tables: &impl Tables,
) -> Result<AltBlockInformation, RuntimeError> { ) -> DbResult<AltBlockInformation> {
let block_info = tables.alt_blocks_info().get(alt_block_height)?; let block_info = tables.alt_blocks_info().get(alt_block_height)?;
let block_blob = tables.alt_block_blobs().get(alt_block_height)?.0; let block_blob = tables.alt_block_blobs().get(alt_block_height)?.0;
@ -111,7 +108,7 @@ pub fn get_alt_block(
.transactions .transactions
.iter() .iter()
.map(|tx_hash| get_alt_transaction(tx_hash, tables)) .map(|tx_hash| get_alt_transaction(tx_hash, tables))
.collect::<Result<_, RuntimeError>>()?; .collect::<DbResult<_>>()?;
Ok(AltBlockInformation { Ok(AltBlockInformation {
block, block,
@ -141,7 +138,7 @@ pub fn get_alt_block_hash(
block_height: &BlockHeight, block_height: &BlockHeight,
alt_chain: ChainId, alt_chain: ChainId,
tables: &impl Tables, tables: &impl Tables,
) -> Result<BlockHash, RuntimeError> { ) -> DbResult<BlockHash> {
let alt_chains = tables.alt_chain_infos(); let alt_chains = tables.alt_chain_infos();
// First find what [`ChainId`] this block would be stored under. // First find what [`ChainId`] this block would be stored under.
@ -188,7 +185,7 @@ pub fn get_alt_block_hash(
pub fn get_alt_block_extended_header_from_height( pub fn get_alt_block_extended_header_from_height(
height: &AltBlockHeight, height: &AltBlockHeight,
table: &impl Tables, table: &impl Tables,
) -> Result<ExtendedBlockHeader, RuntimeError> { ) -> DbResult<ExtendedBlockHeader> {
let block_info = table.alt_blocks_info().get(height)?; let block_info = table.alt_blocks_info().get(height)?;
let block_blob = table.alt_block_blobs().get(height)?.0; let block_blob = table.alt_block_blobs().get(height)?.0;

View file

@ -1,6 +1,6 @@
use std::cmp::{max, min}; use std::cmp::{max, min};
use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError}; use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, RuntimeError};
use cuprate_types::{Chain, ChainId}; use cuprate_types::{Chain, ChainId};
use crate::{ use crate::{
@ -21,7 +21,7 @@ pub fn update_alt_chain_info(
alt_block_height: &AltBlockHeight, alt_block_height: &AltBlockHeight,
prev_hash: &BlockHash, prev_hash: &BlockHash,
tables: &mut impl TablesMut, tables: &mut impl TablesMut,
) -> Result<(), RuntimeError> { ) -> DbResult<()> {
let parent_chain = match tables.alt_block_heights().get(prev_hash) { let parent_chain = match tables.alt_block_heights().get(prev_hash) {
Ok(alt_parent_height) => Chain::Alt(alt_parent_height.chain_id.into()), Ok(alt_parent_height) => Chain::Alt(alt_parent_height.chain_id.into()),
Err(RuntimeError::KeyNotFound) => Chain::Main, Err(RuntimeError::KeyNotFound) => Chain::Main,
@ -74,7 +74,7 @@ pub fn get_alt_chain_history_ranges(
range: std::ops::Range<BlockHeight>, range: std::ops::Range<BlockHeight>,
alt_chain: ChainId, alt_chain: ChainId,
alt_chain_infos: &impl DatabaseRo<AltChainInfos>, alt_chain_infos: &impl DatabaseRo<AltChainInfos>,
) -> Result<Vec<(Chain, std::ops::Range<BlockHeight>)>, RuntimeError> { ) -> DbResult<Vec<(Chain, std::ops::Range<BlockHeight>)>> {
let mut ranges = Vec::with_capacity(5); let mut ranges = Vec::with_capacity(5);
let mut i = range.end; let mut i = range.end;

View file

@ -1,7 +1,7 @@
use bytemuck::TransparentWrapper; use bytemuck::TransparentWrapper;
use monero_serai::transaction::Transaction; use monero_serai::transaction::Transaction;
use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec}; use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, RuntimeError, StorableVec};
use cuprate_types::VerifiedTransactionInformation; use cuprate_types::VerifiedTransactionInformation;
use crate::{ use crate::{
@ -22,7 +22,7 @@ use crate::{
pub fn add_alt_transaction_blob( pub fn add_alt_transaction_blob(
tx: &VerifiedTransactionInformation, tx: &VerifiedTransactionInformation,
tables: &mut impl TablesMut, tables: &mut impl TablesMut,
) -> Result<(), RuntimeError> { ) -> DbResult<()> {
tables.alt_transaction_infos_mut().put( tables.alt_transaction_infos_mut().put(
&tx.tx_hash, &tx.tx_hash,
&AltTransactionInfo { &AltTransactionInfo {
@ -51,7 +51,7 @@ pub fn add_alt_transaction_blob(
pub fn get_alt_transaction( pub fn get_alt_transaction(
tx_hash: &TxHash, tx_hash: &TxHash,
tables: &impl Tables, tables: &impl Tables,
) -> Result<VerifiedTransactionInformation, RuntimeError> { ) -> DbResult<VerifiedTransactionInformation> {
let tx_info = tables.alt_transaction_infos().get(tx_hash)?; let tx_info = tables.alt_transaction_infos().get(tx_hash)?;
let tx_blob = match tables.alt_transaction_blobs().get(tx_hash) { let tx_blob = match tables.alt_transaction_blobs().get(tx_hash) {

View file

@ -9,7 +9,7 @@ use monero_serai::{
}; };
use cuprate_database::{ use cuprate_database::{
RuntimeError, StorableVec, {DatabaseIter, DatabaseRo, DatabaseRw}, DbResult, RuntimeError, StorableVec, {DatabaseIter, DatabaseRo, DatabaseRw},
}; };
use cuprate_helper::cast::usize_to_u64; use cuprate_helper::cast::usize_to_u64;
use cuprate_helper::{ use cuprate_helper::{
@ -44,12 +44,9 @@ use crate::{
/// # Panics /// # Panics
/// This function will panic if: /// This function will panic if:
/// - `block.height > u32::MAX` (not normally possible) /// - `block.height > u32::MAX` (not normally possible)
/// - `block.height` is not != [`chain_height`] /// - `block.height` is != [`chain_height`]
// no inline, too big. // no inline, too big.
pub fn add_block( pub fn add_block(block: &VerifiedBlockInformation, tables: &mut impl TablesMut) -> DbResult<()> {
block: &VerifiedBlockInformation,
tables: &mut impl TablesMut,
) -> Result<(), RuntimeError> {
//------------------------------------------------------ Check preconditions first //------------------------------------------------------ Check preconditions first
// Cast height to `u32` for storage (handled at top of function). // Cast height to `u32` for storage (handled at top of function).
@ -155,7 +152,7 @@ pub fn add_block(
pub fn pop_block( pub fn pop_block(
move_to_alt_chain: Option<ChainId>, move_to_alt_chain: Option<ChainId>,
tables: &mut impl TablesMut, tables: &mut impl TablesMut,
) -> Result<(BlockHeight, BlockHash, Block), RuntimeError> { ) -> DbResult<(BlockHeight, BlockHash, Block)> {
//------------------------------------------------------ Block Info //------------------------------------------------------ Block Info
// Remove block data from tables. // Remove block data from tables.
let (block_height, block_info) = tables.block_infos_mut().pop_last()?; let (block_height, block_info) = tables.block_infos_mut().pop_last()?;
@ -197,7 +194,7 @@ pub fn pop_block(
tx, tx,
}) })
}) })
.collect::<Result<Vec<VerifiedTransactionInformation>, RuntimeError>>()?; .collect::<DbResult<Vec<VerifiedTransactionInformation>>>()?;
alt_block::add_alt_block( alt_block::add_alt_block(
&AltBlockInformation { &AltBlockInformation {
@ -226,6 +223,7 @@ pub fn pop_block(
Ok((block_height, block_info.block_hash, block)) Ok((block_height, block_info.block_hash, block))
} }
//---------------------------------------------------------------------------------------------------- `get_block_blob_with_tx_indexes` //---------------------------------------------------------------------------------------------------- `get_block_blob_with_tx_indexes`
/// Retrieve a block's raw bytes, the index of the miner transaction and the number of non miner-txs in the block. /// Retrieve a block's raw bytes, the index of the miner transaction and the number of non miner-txs in the block.
/// ///
@ -234,11 +232,8 @@ pub fn get_block_blob_with_tx_indexes(
block_height: &BlockHeight, block_height: &BlockHeight,
tables: &impl Tables, tables: &impl Tables,
) -> Result<(Vec<u8>, u64, usize), RuntimeError> { ) -> Result<(Vec<u8>, u64, usize), RuntimeError> {
use monero_serai::io::write_varint; let miner_tx_idx = tables.block_infos().get(block_height)?.mining_tx_index;
let block_info = tables.block_infos().get(block_height)?;
let miner_tx_idx = block_info.mining_tx_index;
let block_txs = tables.block_txs_hashes().get(block_height)?.0; let block_txs = tables.block_txs_hashes().get(block_height)?.0;
let numb_txs = block_txs.len(); let numb_txs = block_txs.len();
@ -250,10 +245,10 @@ pub fn get_block_blob_with_tx_indexes(
block.append(&mut miner_tx_blob); block.append(&mut miner_tx_blob);
// Add the blocks tx hashes. // Add the blocks tx hashes.
write_varint(&block_txs.len(), &mut block) monero_serai::io::write_varint(&block_txs.len(), &mut block)
.expect("The number of txs per block will not exceed u64::MAX"); .expect("The number of txs per block will not exceed u64::MAX");
let block_txs_bytes = bytemuck::cast_slice(&block_txs); let block_txs_bytes = bytemuck::must_cast_slice(&block_txs);
block.extend_from_slice(block_txs_bytes); block.extend_from_slice(block_txs_bytes);
Ok((block, miner_tx_idx, numb_txs)) Ok((block, miner_tx_idx, numb_txs))
@ -275,7 +270,7 @@ pub fn get_block_complete_entry(
let tx_blobs = tables let tx_blobs = tables
.tx_blobs_iter() .tx_blobs_iter()
.get_range(first_tx_idx..=usize_to_u64(numb_non_miner_txs))? .get_range(first_tx_idx..(usize_to_u64(numb_non_miner_txs) + first_tx_idx))?
.map(|tx_blob| Ok(Bytes::from(tx_blob?.0))) .map(|tx_blob| Ok(Bytes::from(tx_blob?.0)))
.collect::<Result<_, RuntimeError>>()?; .collect::<Result<_, RuntimeError>>()?;
@ -301,7 +296,7 @@ pub fn get_block_complete_entry(
pub fn get_block_extended_header( pub fn get_block_extended_header(
block_hash: &BlockHash, block_hash: &BlockHash,
tables: &impl Tables, tables: &impl Tables,
) -> Result<ExtendedBlockHeader, RuntimeError> { ) -> DbResult<ExtendedBlockHeader> {
get_block_extended_header_from_height(&tables.block_heights().get(block_hash)?, tables) get_block_extended_header_from_height(&tables.block_heights().get(block_hash)?, tables)
} }
@ -315,7 +310,7 @@ pub fn get_block_extended_header(
pub fn get_block_extended_header_from_height( pub fn get_block_extended_header_from_height(
block_height: &BlockHeight, block_height: &BlockHeight,
tables: &impl Tables, tables: &impl Tables,
) -> Result<ExtendedBlockHeader, RuntimeError> { ) -> DbResult<ExtendedBlockHeader> {
let block_info = tables.block_infos().get(block_height)?; let block_info = tables.block_infos().get(block_height)?;
let block_header_blob = tables.block_header_blobs().get(block_height)?.0; let block_header_blob = tables.block_header_blobs().get(block_height)?.0;
let block_header = BlockHeader::read(&mut block_header_blob.as_slice())?; let block_header = BlockHeader::read(&mut block_header_blob.as_slice())?;
@ -341,7 +336,7 @@ pub fn get_block_extended_header_from_height(
#[inline] #[inline]
pub fn get_block_extended_header_top( pub fn get_block_extended_header_top(
tables: &impl Tables, tables: &impl Tables,
) -> Result<(ExtendedBlockHeader, BlockHeight), RuntimeError> { ) -> DbResult<(ExtendedBlockHeader, BlockHeight)> {
let height = chain_height(tables.block_heights())?.saturating_sub(1); let height = chain_height(tables.block_heights())?.saturating_sub(1);
let header = get_block_extended_header_from_height(&height, tables)?; let header = get_block_extended_header_from_height(&height, tables)?;
Ok((header, height)) Ok((header, height))
@ -354,7 +349,7 @@ pub fn get_block_extended_header_top(
pub fn get_block_info( pub fn get_block_info(
block_height: &BlockHeight, block_height: &BlockHeight,
table_block_infos: &impl DatabaseRo<BlockInfos>, table_block_infos: &impl DatabaseRo<BlockInfos>,
) -> Result<BlockInfo, RuntimeError> { ) -> DbResult<BlockInfo> {
table_block_infos.get(block_height) table_block_infos.get(block_height)
} }
@ -364,7 +359,7 @@ pub fn get_block_info(
pub fn get_block_height( pub fn get_block_height(
block_hash: &BlockHash, block_hash: &BlockHash,
table_block_heights: &impl DatabaseRo<BlockHeights>, table_block_heights: &impl DatabaseRo<BlockHeights>,
) -> Result<BlockHeight, RuntimeError> { ) -> DbResult<BlockHeight> {
table_block_heights.get(block_hash) table_block_heights.get(block_hash)
} }
@ -379,7 +374,7 @@ pub fn get_block_height(
pub fn block_exists( pub fn block_exists(
block_hash: &BlockHash, block_hash: &BlockHash,
table_block_heights: &impl DatabaseRo<BlockHeights>, table_block_heights: &impl DatabaseRo<BlockHeights>,
) -> Result<bool, RuntimeError> { ) -> DbResult<bool> {
table_block_heights.contains(block_hash) table_block_heights.contains(block_hash)
} }
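The change threaded through every hunk in this section is mechanical: `Result<T, RuntimeError>` becomes the shorter `DbResult<T>` alias exported by `cuprate_database`. A minimal self-contained sketch of the pattern (the error enum and lookup below are stand-ins, not the real `RuntimeError` or tables):

```rust
use std::collections::HashMap;

/// Stand-in for the database error type.
#[derive(Debug)]
enum RuntimeError {
    KeyNotFound,
}

/// The alias: one name for the ubiquitous `Result<T, RuntimeError>`.
type DbResult<T> = Result<T, RuntimeError>;

/// Signatures shrink from `Result<usize, RuntimeError>` to `DbResult<usize>`.
fn get_block_height(heights: &HashMap<[u8; 32], usize>, hash: &[u8; 32]) -> DbResult<usize> {
    heights.get(hash).copied().ok_or(RuntimeError::KeyNotFound)
}

fn main() {
    let heights = HashMap::from([([0u8; 32], 0usize)]);
    assert!(get_block_height(&heights, &[0u8; 32]).is_ok());
    assert!(matches!(
        get_block_height(&heights, &[1u8; 32]),
        Err(RuntimeError::KeyNotFound)
    ));
}
```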

@ -1,7 +1,7 @@
//! Blockchain functions - chain height, generated coins, etc. //! Blockchain functions - chain height, generated coins, etc.
//---------------------------------------------------------------------------------------------------- Import //---------------------------------------------------------------------------------------------------- Import
use cuprate_database::{DatabaseRo, RuntimeError}; use cuprate_database::{DatabaseRo, DbResult, RuntimeError};
use crate::{ use crate::{
ops::{block::block_exists, macros::doc_error}, ops::{block::block_exists, macros::doc_error},
@ -22,9 +22,7 @@ use crate::{
/// So the height of a new block would be `chain_height()`. /// So the height of a new block would be `chain_height()`.
#[doc = doc_error!()] #[doc = doc_error!()]
#[inline] #[inline]
pub fn chain_height( pub fn chain_height(table_block_heights: &impl DatabaseRo<BlockHeights>) -> DbResult<BlockHeight> {
table_block_heights: &impl DatabaseRo<BlockHeights>,
) -> Result<BlockHeight, RuntimeError> {
#[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")] #[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")]
table_block_heights.len().map(|height| height as usize) table_block_heights.len().map(|height| height as usize)
} }
@ -45,7 +43,7 @@ pub fn chain_height(
#[inline] #[inline]
pub fn top_block_height( pub fn top_block_height(
table_block_heights: &impl DatabaseRo<BlockHeights>, table_block_heights: &impl DatabaseRo<BlockHeights>,
) -> Result<BlockHeight, RuntimeError> { ) -> DbResult<BlockHeight> {
match table_block_heights.len()? { match table_block_heights.len()? {
0 => Err(RuntimeError::KeyNotFound), 0 => Err(RuntimeError::KeyNotFound),
#[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")] #[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")]
@ -70,7 +68,7 @@ pub fn top_block_height(
pub fn cumulative_generated_coins( pub fn cumulative_generated_coins(
block_height: &BlockHeight, block_height: &BlockHeight,
table_block_infos: &impl DatabaseRo<BlockInfos>, table_block_infos: &impl DatabaseRo<BlockInfos>,
) -> Result<u64, RuntimeError> { ) -> DbResult<u64> {
match table_block_infos.get(block_height) { match table_block_infos.get(block_height) {
Ok(block_info) => Ok(block_info.cumulative_generated_coins), Ok(block_info) => Ok(block_info.cumulative_generated_coins),
Err(RuntimeError::KeyNotFound) if block_height == &0 => Ok(0), Err(RuntimeError::KeyNotFound) if block_height == &0 => Ok(0),
@ -84,7 +82,7 @@ pub fn cumulative_generated_coins(
/// if the wrong order is specified the return value is meaningless. /// if the wrong order is specified the return value is meaningless.
/// ///
/// For chronologically ordered chains this will return the index of the first unknown, for reverse /// For chronologically ordered chains this will return the index of the first unknown, for reverse
/// chronologically ordered chains this will return the index of the fist known. /// chronologically ordered chains this will return the index of the first known.
/// ///
/// If all blocks are known for chronologically ordered chains or unknown for reverse chronologically /// If all blocks are known for chronologically ordered chains or unknown for reverse chronologically
/// ordered chains then the length of the chain will be returned. /// ordered chains then the length of the chain will be returned.
@ -98,17 +96,16 @@ pub fn find_split_point(
let mut err = None; let mut err = None;
// Do a binary search to find the first unknown/known block in the batch. // Do a binary search to find the first unknown/known block in the batch.
let idx = let idx = block_ids.partition_point(|block_id| {
block_ids.partition_point( match block_exists(block_id, table_block_heights) {
|block_id| match block_exists(block_id, table_block_heights) { Ok(exists) => exists == chronological_order,
Ok(exists) => exists & chronological_order,
Err(e) => { Err(e) => {
err.get_or_insert(e); err.get_or_insert(e);
// if this happens the search is scrapped, just return `false` back. // if this happens the search is scrapped, just return `false` back.
false false
} }
}, }
); });
if let Some(e) = err { if let Some(e) = err {
return Err(e); return Err(e);
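The rewritten closure leans on a small trick worth spelling out: `partition_point`'s predicate cannot return a `Result`, so the first database error is stashed in a local and the search is abandoned by answering `false`. A standalone sketch of that pattern, with a stand-in `exists` callback and `String` error instead of the real table and `RuntimeError`:

```rust
/// Binary-search for the first item where `exists` flips to `false`, smuggling the first
/// error out through a local because the predicate itself cannot return `Result`.
fn first_missing<T>(
    items: &[T],
    exists: impl Fn(&T) -> Result<bool, String>,
) -> Result<usize, String> {
    let mut err = None;
    let idx = items.partition_point(|item| match exists(item) {
        Ok(found) => found,
        Err(e) => {
            // Record the error and answer `false`; the search result will be discarded anyway.
            err.get_or_insert(e);
            false
        }
    });
    match err {
        Some(e) => Err(e),
        None => Ok(idx),
    }
}

fn main() {
    // `partition_point` requires the predicate to be true for a prefix, then false.
    let heights = [1, 2, 3, 7, 9];
    let idx = first_missing(&heights, |&h| Ok::<_, String>(h < 5)).unwrap();
    assert_eq!(idx, 3);
}
```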

@ -1,7 +1,7 @@
//! Key image functions. //! Key image functions.
//---------------------------------------------------------------------------------------------------- Import //---------------------------------------------------------------------------------------------------- Import
use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError}; use cuprate_database::{DatabaseRo, DatabaseRw, DbResult};
use crate::{ use crate::{
ops::macros::{doc_add_block_inner_invariant, doc_error}, ops::macros::{doc_add_block_inner_invariant, doc_error},
@ -17,7 +17,7 @@ use crate::{
pub fn add_key_image( pub fn add_key_image(
key_image: &KeyImage, key_image: &KeyImage,
table_key_images: &mut impl DatabaseRw<KeyImages>, table_key_images: &mut impl DatabaseRw<KeyImages>,
) -> Result<(), RuntimeError> { ) -> DbResult<()> {
table_key_images.put(key_image, &()) table_key_images.put(key_image, &())
} }
@ -28,7 +28,7 @@ pub fn add_key_image(
pub fn remove_key_image( pub fn remove_key_image(
key_image: &KeyImage, key_image: &KeyImage,
table_key_images: &mut impl DatabaseRw<KeyImages>, table_key_images: &mut impl DatabaseRw<KeyImages>,
) -> Result<(), RuntimeError> { ) -> DbResult<()> {
table_key_images.delete(key_image) table_key_images.delete(key_image)
} }
@ -38,7 +38,7 @@ pub fn remove_key_image(
pub fn key_image_exists( pub fn key_image_exists(
key_image: &KeyImage, key_image: &KeyImage,
table_key_images: &impl DatabaseRo<KeyImages>, table_key_images: &impl DatabaseRo<KeyImages>,
) -> Result<bool, RuntimeError> { ) -> DbResult<bool> {
table_key_images.contains(key_image) table_key_images.contains(key_image)
} }

@ -8,7 +8,7 @@
macro_rules! doc_error { macro_rules! doc_error {
() => { () => {
r#"# Errors r#"# Errors
This function returns [`RuntimeError::KeyNotFound`] if the input (if applicable) doesn't exist or other `RuntimeError`'s on database errors."# This function returns [`cuprate_database::RuntimeError::KeyNotFound`] if the input (if applicable) doesn't exist or other `RuntimeError`'s on database errors."#
}; };
} }
pub(super) use doc_error; pub(super) use doc_error;
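The `#[doc = doc_error!()]` call sites elsewhere in this diff work because a macro that expands to a string literal may be spliced into a doc attribute. A tiny self-contained sketch of the same trick (the blurb wording is illustrative only):

```rust
use std::collections::BTreeMap;

/// A macro expanding to a string literal can be spliced into `#[doc = ...]`,
/// so many functions can share one error blurb.
macro_rules! doc_error {
    () => {
        "# Errors\nThis function returns an error if the key is missing."
    };
}

#[doc = doc_error!()]
fn lookup(map: &BTreeMap<u32, u32>, key: u32) -> Option<u32> {
    map.get(&key).copied()
}

fn main() {
    let map = BTreeMap::from([(1, 10)]);
    assert_eq!(lookup(&map, 1), Some(10));
}
```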

@ -5,7 +5,7 @@ use curve25519_dalek::edwards::CompressedEdwardsY;
use monero_serai::transaction::Timelock; use monero_serai::transaction::Timelock;
use cuprate_database::{ use cuprate_database::{
RuntimeError, {DatabaseRo, DatabaseRw}, DbResult, RuntimeError, {DatabaseRo, DatabaseRw},
}; };
use cuprate_helper::crypto::compute_zero_commitment; use cuprate_helper::crypto::compute_zero_commitment;
use cuprate_helper::map::u64_to_timelock; use cuprate_helper::map::u64_to_timelock;
@ -30,7 +30,7 @@ pub fn add_output(
amount: Amount, amount: Amount,
output: &Output, output: &Output,
tables: &mut impl TablesMut, tables: &mut impl TablesMut,
) -> Result<PreRctOutputId, RuntimeError> { ) -> DbResult<PreRctOutputId> {
// FIXME: this would be much better expressed with a // FIXME: this would be much better expressed with a
// `btree_map::Entry`-like API, fix `trait DatabaseRw`. // `btree_map::Entry`-like API, fix `trait DatabaseRw`.
let num_outputs = match tables.num_outputs().get(&amount) { let num_outputs = match tables.num_outputs().get(&amount) {
@ -61,7 +61,7 @@ pub fn add_output(
pub fn remove_output( pub fn remove_output(
pre_rct_output_id: &PreRctOutputId, pre_rct_output_id: &PreRctOutputId,
tables: &mut impl TablesMut, tables: &mut impl TablesMut,
) -> Result<(), RuntimeError> { ) -> DbResult<()> {
// Decrement the amount index by 1, or delete the entry out-right. // Decrement the amount index by 1, or delete the entry out-right.
// FIXME: this would be much better expressed with a // FIXME: this would be much better expressed with a
// `btree_map::Entry`-like API, fix `trait DatabaseRw`. // `btree_map::Entry`-like API, fix `trait DatabaseRw`.
@ -86,7 +86,7 @@ pub fn remove_output(
pub fn get_output( pub fn get_output(
pre_rct_output_id: &PreRctOutputId, pre_rct_output_id: &PreRctOutputId,
table_outputs: &impl DatabaseRo<Outputs>, table_outputs: &impl DatabaseRo<Outputs>,
) -> Result<Output, RuntimeError> { ) -> DbResult<Output> {
table_outputs.get(pre_rct_output_id) table_outputs.get(pre_rct_output_id)
} }
@ -95,7 +95,7 @@ pub fn get_output(
/// This returns the amount of pre-RCT outputs currently stored. /// This returns the amount of pre-RCT outputs currently stored.
#[doc = doc_error!()] #[doc = doc_error!()]
#[inline] #[inline]
pub fn get_num_outputs(table_outputs: &impl DatabaseRo<Outputs>) -> Result<u64, RuntimeError> { pub fn get_num_outputs(table_outputs: &impl DatabaseRo<Outputs>) -> DbResult<u64> {
table_outputs.len() table_outputs.len()
} }
@ -110,7 +110,7 @@ pub fn get_num_outputs(table_outputs: &impl DatabaseRo<Outputs>) -> Result<u64,
pub fn add_rct_output( pub fn add_rct_output(
rct_output: &RctOutput, rct_output: &RctOutput,
table_rct_outputs: &mut impl DatabaseRw<RctOutputs>, table_rct_outputs: &mut impl DatabaseRw<RctOutputs>,
) -> Result<AmountIndex, RuntimeError> { ) -> DbResult<AmountIndex> {
let amount_index = get_rct_num_outputs(table_rct_outputs)?; let amount_index = get_rct_num_outputs(table_rct_outputs)?;
table_rct_outputs.put(&amount_index, rct_output)?; table_rct_outputs.put(&amount_index, rct_output)?;
Ok(amount_index) Ok(amount_index)
@ -123,7 +123,7 @@ pub fn add_rct_output(
pub fn remove_rct_output( pub fn remove_rct_output(
amount_index: &AmountIndex, amount_index: &AmountIndex,
table_rct_outputs: &mut impl DatabaseRw<RctOutputs>, table_rct_outputs: &mut impl DatabaseRw<RctOutputs>,
) -> Result<(), RuntimeError> { ) -> DbResult<()> {
table_rct_outputs.delete(amount_index) table_rct_outputs.delete(amount_index)
} }
@ -133,7 +133,7 @@ pub fn remove_rct_output(
pub fn get_rct_output( pub fn get_rct_output(
amount_index: &AmountIndex, amount_index: &AmountIndex,
table_rct_outputs: &impl DatabaseRo<RctOutputs>, table_rct_outputs: &impl DatabaseRo<RctOutputs>,
) -> Result<RctOutput, RuntimeError> { ) -> DbResult<RctOutput> {
table_rct_outputs.get(amount_index) table_rct_outputs.get(amount_index)
} }
@ -142,9 +142,7 @@ pub fn get_rct_output(
/// This returns the amount of RCT outputs currently stored. /// This returns the amount of RCT outputs currently stored.
#[doc = doc_error!()] #[doc = doc_error!()]
#[inline] #[inline]
pub fn get_rct_num_outputs( pub fn get_rct_num_outputs(table_rct_outputs: &impl DatabaseRo<RctOutputs>) -> DbResult<u64> {
table_rct_outputs: &impl DatabaseRo<RctOutputs>,
) -> Result<u64, RuntimeError> {
table_rct_outputs.len() table_rct_outputs.len()
} }
@ -155,7 +153,7 @@ pub fn output_to_output_on_chain(
output: &Output, output: &Output,
amount: Amount, amount: Amount,
table_tx_unlock_time: &impl DatabaseRo<TxUnlockTime>, table_tx_unlock_time: &impl DatabaseRo<TxUnlockTime>,
) -> Result<OutputOnChain, RuntimeError> { ) -> DbResult<OutputOnChain> {
let commitment = compute_zero_commitment(amount); let commitment = compute_zero_commitment(amount);
let time_lock = if output let time_lock = if output
@ -191,7 +189,7 @@ pub fn output_to_output_on_chain(
pub fn rct_output_to_output_on_chain( pub fn rct_output_to_output_on_chain(
rct_output: &RctOutput, rct_output: &RctOutput,
table_tx_unlock_time: &impl DatabaseRo<TxUnlockTime>, table_tx_unlock_time: &impl DatabaseRo<TxUnlockTime>,
) -> Result<OutputOnChain, RuntimeError> { ) -> DbResult<OutputOnChain> {
// INVARIANT: Commitments stored are valid when stored by the database. // INVARIANT: Commitments stored are valid when stored by the database.
let commitment = CompressedEdwardsY::from_slice(&rct_output.commitment) let commitment = CompressedEdwardsY::from_slice(&rct_output.commitment)
.unwrap() .unwrap()
@ -223,10 +221,7 @@ pub fn rct_output_to_output_on_chain(
/// ///
/// Note that this still support RCT outputs, in that case, [`PreRctOutputId::amount`] should be `0`. /// Note that this still support RCT outputs, in that case, [`PreRctOutputId::amount`] should be `0`.
#[doc = doc_error!()] #[doc = doc_error!()]
pub fn id_to_output_on_chain( pub fn id_to_output_on_chain(id: &PreRctOutputId, tables: &impl Tables) -> DbResult<OutputOnChain> {
id: &PreRctOutputId,
tables: &impl Tables,
) -> Result<OutputOnChain, RuntimeError> {
// v2 transactions. // v2 transactions.
if id.amount == 0 { if id.amount == 0 {
let rct_output = get_rct_output(&id.amount_index, tables.rct_outputs())?; let rct_output = get_rct_output(&id.amount_index, tables.rct_outputs())?;
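The `id.amount == 0` branch encodes the convention the doc comment describes: RingCT outputs reuse `PreRctOutputId` with a zero amount and a global `amount_index`. A toy sketch of that dispatch with stand-in types instead of the real tables:

```rust
/// Stand-in for the real id type.
struct PreRctOutputId {
    amount: u64,
    amount_index: u64,
}

#[derive(Debug, PartialEq)]
enum OutputKind {
    /// RingCT outputs are indexed globally; the id's `amount` is `0` by convention.
    Rct { global_index: u64 },
    /// Pre-RingCT outputs are indexed per amount.
    PreRct { amount: u64, index: u64 },
}

fn classify(id: &PreRctOutputId) -> OutputKind {
    if id.amount == 0 {
        OutputKind::Rct {
            global_index: id.amount_index,
        }
    } else {
        OutputKind::PreRct {
            amount: id.amount,
            index: id.amount_index,
        }
    }
}

fn main() {
    let rct = PreRctOutputId { amount: 0, amount_index: 42 };
    assert_eq!(classify(&rct), OutputKind::Rct { global_index: 42 });
}
```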

@ -3,10 +3,9 @@
//! SOMEDAY: the database `properties` table is not yet implemented. //! SOMEDAY: the database `properties` table is not yet implemented.
//---------------------------------------------------------------------------------------------------- Import //---------------------------------------------------------------------------------------------------- Import
use cuprate_database::DbResult;
use cuprate_pruning::PruningSeed; use cuprate_pruning::PruningSeed;
use cuprate_database::RuntimeError;
use crate::ops::macros::doc_error; use crate::ops::macros::doc_error;
//---------------------------------------------------------------------------------------------------- Free Functions //---------------------------------------------------------------------------------------------------- Free Functions
@ -20,7 +19,7 @@ use crate::ops::macros::doc_error;
/// // SOMEDAY /// // SOMEDAY
/// ``` /// ```
#[inline] #[inline]
pub const fn get_blockchain_pruning_seed() -> Result<PruningSeed, RuntimeError> { pub const fn get_blockchain_pruning_seed() -> DbResult<PruningSeed> {
// SOMEDAY: impl pruning. // SOMEDAY: impl pruning.
// We need a DB properties table. // We need a DB properties table.
Ok(PruningSeed::NotPruned) Ok(PruningSeed::NotPruned)
@ -36,7 +35,7 @@ pub const fn get_blockchain_pruning_seed() -> Result<PruningSeed, RuntimeError>
/// // SOMEDAY /// // SOMEDAY
/// ``` /// ```
#[inline] #[inline]
pub const fn db_version() -> Result<u64, RuntimeError> { pub const fn db_version() -> DbResult<u64> {
// SOMEDAY: We need a DB properties table. // SOMEDAY: We need a DB properties table.
Ok(crate::constants::DATABASE_VERSION) Ok(crate::constants::DATABASE_VERSION)
} }

@ -4,7 +4,7 @@
use bytemuck::TransparentWrapper; use bytemuck::TransparentWrapper;
use monero_serai::transaction::{Input, Timelock, Transaction}; use monero_serai::transaction::{Input, Timelock, Transaction};
use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec}; use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, RuntimeError, StorableVec};
use cuprate_helper::crypto::compute_zero_commitment; use cuprate_helper::crypto::compute_zero_commitment;
use crate::{ use crate::{
@ -52,7 +52,7 @@ pub fn add_tx(
tx_hash: &TxHash, tx_hash: &TxHash,
block_height: &BlockHeight, block_height: &BlockHeight,
tables: &mut impl TablesMut, tables: &mut impl TablesMut,
) -> Result<TxId, RuntimeError> { ) -> DbResult<TxId> {
let tx_id = get_num_tx(tables.tx_ids_mut())?; let tx_id = get_num_tx(tables.tx_ids_mut())?;
//------------------------------------------------------ Transaction data //------------------------------------------------------ Transaction data
@ -129,7 +129,7 @@ pub fn add_tx(
)? )?
.amount_index) .amount_index)
}) })
.collect::<Result<Vec<_>, RuntimeError>>()?, .collect::<DbResult<Vec<_>>>()?,
Transaction::V2 { prefix, proofs } => prefix Transaction::V2 { prefix, proofs } => prefix
.outputs .outputs
.iter() .iter()
@ -186,10 +186,7 @@ pub fn add_tx(
/// ///
#[doc = doc_error!()] #[doc = doc_error!()]
#[inline] #[inline]
pub fn remove_tx( pub fn remove_tx(tx_hash: &TxHash, tables: &mut impl TablesMut) -> DbResult<(TxId, Transaction)> {
tx_hash: &TxHash,
tables: &mut impl TablesMut,
) -> Result<(TxId, Transaction), RuntimeError> {
//------------------------------------------------------ Transaction data //------------------------------------------------------ Transaction data
let tx_id = tables.tx_ids_mut().take(tx_hash)?; let tx_id = tables.tx_ids_mut().take(tx_hash)?;
let tx_blob = tables.tx_blobs_mut().take(&tx_id)?; let tx_blob = tables.tx_blobs_mut().take(&tx_id)?;
@ -267,7 +264,7 @@ pub fn get_tx(
tx_hash: &TxHash, tx_hash: &TxHash,
table_tx_ids: &impl DatabaseRo<TxIds>, table_tx_ids: &impl DatabaseRo<TxIds>,
table_tx_blobs: &impl DatabaseRo<TxBlobs>, table_tx_blobs: &impl DatabaseRo<TxBlobs>,
) -> Result<Transaction, RuntimeError> { ) -> DbResult<Transaction> {
get_tx_from_id(&table_tx_ids.get(tx_hash)?, table_tx_blobs) get_tx_from_id(&table_tx_ids.get(tx_hash)?, table_tx_blobs)
} }
@ -277,7 +274,7 @@ pub fn get_tx(
pub fn get_tx_from_id( pub fn get_tx_from_id(
tx_id: &TxId, tx_id: &TxId,
table_tx_blobs: &impl DatabaseRo<TxBlobs>, table_tx_blobs: &impl DatabaseRo<TxBlobs>,
) -> Result<Transaction, RuntimeError> { ) -> DbResult<Transaction> {
let tx_blob = table_tx_blobs.get(tx_id)?.0; let tx_blob = table_tx_blobs.get(tx_id)?.0;
Ok(Transaction::read(&mut tx_blob.as_slice())?) Ok(Transaction::read(&mut tx_blob.as_slice())?)
} }
@ -294,7 +291,7 @@ pub fn get_tx_from_id(
/// - etc /// - etc
#[doc = doc_error!()] #[doc = doc_error!()]
#[inline] #[inline]
pub fn get_num_tx(table_tx_ids: &impl DatabaseRo<TxIds>) -> Result<u64, RuntimeError> { pub fn get_num_tx(table_tx_ids: &impl DatabaseRo<TxIds>) -> DbResult<u64> {
table_tx_ids.len() table_tx_ids.len()
} }
@ -304,10 +301,7 @@ pub fn get_num_tx(table_tx_ids: &impl DatabaseRo<TxIds>) -> Result<u64, RuntimeE
/// Returns `true` if it does, else `false`. /// Returns `true` if it does, else `false`.
#[doc = doc_error!()] #[doc = doc_error!()]
#[inline] #[inline]
pub fn tx_exists( pub fn tx_exists(tx_hash: &TxHash, table_tx_ids: &impl DatabaseRo<TxIds>) -> DbResult<bool> {
tx_hash: &TxHash,
table_tx_ids: &impl DatabaseRo<TxIds>,
) -> Result<bool, RuntimeError> {
table_tx_ids.contains(tx_hash) table_tx_ids.contains(tx_hash)
} }

@ -22,12 +22,14 @@ use rayon::{
}; };
use thread_local::ThreadLocal; use thread_local::ThreadLocal;
use cuprate_database::{ConcreteEnv, DatabaseIter, DatabaseRo, Env, EnvInner, RuntimeError}; use cuprate_database::{
ConcreteEnv, DatabaseIter, DatabaseRo, DbResult, Env, EnvInner, RuntimeError,
};
use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads}; use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads};
use cuprate_helper::map::combine_low_high_bits_to_u128; use cuprate_helper::map::combine_low_high_bits_to_u128;
use cuprate_types::{ use cuprate_types::{
blockchain::{BlockchainReadRequest, BlockchainResponse}, blockchain::{BlockchainReadRequest, BlockchainResponse},
Chain, ChainId, ExtendedBlockHeader, MissingTxsInBlock, OutputHistogramInput, OutputOnChain, Chain, ChainId, ExtendedBlockHeader, OutputHistogramInput, OutputOnChain, TxsInBlock,
}; };
use crate::{ use crate::{
@ -118,10 +120,10 @@ fn map_request(
R::CompactChainHistory => compact_chain_history(env), R::CompactChainHistory => compact_chain_history(env),
R::NextChainEntry(block_hashes, amount) => next_chain_entry(env, &block_hashes, amount), R::NextChainEntry(block_hashes, amount) => next_chain_entry(env, &block_hashes, amount),
R::FindFirstUnknown(block_ids) => find_first_unknown(env, &block_ids), R::FindFirstUnknown(block_ids) => find_first_unknown(env, &block_ids),
R::MissingTxsInBlock { R::TxsInBlock {
block_hash, block_hash,
tx_indexes, tx_indexes,
} => missing_txs_in_block(env, block_hash, tx_indexes), } => txs_in_block(env, block_hash, tx_indexes),
R::AltBlocksInChain(chain_id) => alt_blocks_in_chain(env, chain_id), R::AltBlocksInChain(chain_id) => alt_blocks_in_chain(env, chain_id),
R::Block { height } => block(env, height), R::Block { height } => block(env, height),
R::BlockByHash(hash) => block_by_hash(env, hash), R::BlockByHash(hash) => block_by_hash(env, hash),
@ -224,7 +226,7 @@ fn block_complete_entries(env: &ConcreteEnv, block_hashes: Vec<BlockHash>) -> Re
res => res.map(Either::Right), res => res.map(Either::Right),
} }
}) })
.collect::<Result<_, _>>()?; .collect::<DbResult<_>>()?;
let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
@ -345,7 +347,7 @@ fn block_extended_header_in_range(
let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
get_block_extended_header_from_height(&block_height, tables) get_block_extended_header_from_height(&block_height, tables)
}) })
.collect::<Result<Vec<ExtendedBlockHeader>, RuntimeError>>()?, .collect::<DbResult<Vec<ExtendedBlockHeader>>>()?,
Chain::Alt(chain_id) => { Chain::Alt(chain_id) => {
let ranges = { let ranges = {
let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
@ -375,7 +377,7 @@ fn block_extended_header_in_range(
} }
}) })
}) })
.collect::<Result<Vec<_>, _>>()? .collect::<DbResult<Vec<_>>>()?
} }
}; };
@ -421,7 +423,7 @@ fn outputs(env: &ConcreteEnv, outputs: HashMap<Amount, HashSet<AmountIndex>>) ->
// The 2nd mapping function. // The 2nd mapping function.
// This is pulled out from the below `map()` for readability. // This is pulled out from the below `map()` for readability.
let inner_map = |amount, amount_index| -> Result<(AmountIndex, OutputOnChain), RuntimeError> { let inner_map = |amount, amount_index| -> DbResult<(AmountIndex, OutputOnChain)> {
let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
@ -444,10 +446,10 @@ fn outputs(env: &ConcreteEnv, outputs: HashMap<Amount, HashSet<AmountIndex>>) ->
amount_index_set amount_index_set
.into_par_iter() .into_par_iter()
.map(|amount_index| inner_map(amount, amount_index)) .map(|amount_index| inner_map(amount, amount_index))
.collect::<Result<HashMap<AmountIndex, OutputOnChain>, RuntimeError>>()?, .collect::<DbResult<HashMap<AmountIndex, OutputOnChain>>>()?,
)) ))
}) })
.collect::<Result<HashMap<Amount, HashMap<AmountIndex, OutputOnChain>>, RuntimeError>>()?; .collect::<DbResult<HashMap<Amount, HashMap<AmountIndex, OutputOnChain>>>>()?;
Ok(BlockchainResponse::Outputs(map)) Ok(BlockchainResponse::Outputs(map))
} }
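The nested `collect::<DbResult<...>>()` calls are what let the first database error abort the whole request. A plain-iterator sketch of the same shape (the `lookup` function and `String` error are stand-ins, and the real code runs the outer layer on a rayon thread pool):

```rust
use std::collections::{HashMap, HashSet};

type Amount = u64;
type AmountIndex = u64;

/// Stand-in for the per-output database lookup.
fn lookup(amount: Amount, index: AmountIndex) -> Result<String, String> {
    if index < 10 {
        Ok(format!("output {amount}:{index}"))
    } else {
        Err(format!("missing {amount}:{index}"))
    }
}

/// Both layers collect straight into `Result<HashMap<..>>`, so the first error bubbles up.
fn outputs(
    request: HashMap<Amount, HashSet<AmountIndex>>,
) -> Result<HashMap<Amount, HashMap<AmountIndex, String>>, String> {
    request
        .into_iter()
        .map(|(amount, indexes)| {
            let inner = indexes
                .into_iter()
                .map(|index| Ok((index, lookup(amount, index)?)))
                .collect::<Result<HashMap<_, _>, String>>()?;
            Ok((amount, inner))
        })
        .collect()
}

fn main() {
    let ok = outputs(HashMap::from([(0, HashSet::from([1, 2]))]));
    let err = outputs(HashMap::from([(0, HashSet::from([99]))]));
    assert!(ok.is_ok());
    assert!(err.is_err());
}
```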
@ -496,7 +498,7 @@ fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec<Amount>) -> Respon
} }
} }
}) })
.collect::<Result<HashMap<Amount, usize>, RuntimeError>>()?; .collect::<DbResult<HashMap<Amount, usize>>>()?;
Ok(BlockchainResponse::NumberOutputsWithAmount(map)) Ok(BlockchainResponse::NumberOutputsWithAmount(map))
} }
@ -562,7 +564,7 @@ fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult {
.map(compact_history_index_to_height_offset::<INITIAL_BLOCKS>) .map(compact_history_index_to_height_offset::<INITIAL_BLOCKS>)
.map_while(|i| top_block_height.checked_sub(i)) .map_while(|i| top_block_height.checked_sub(i))
.map(|height| Ok(get_block_info(&height, &table_block_infos)?.block_hash)) .map(|height| Ok(get_block_info(&height, &table_block_infos)?.block_hash))
.collect::<Result<Vec<_>, RuntimeError>>()?; .collect::<DbResult<Vec<_>>>()?;
if compact_history_genesis_not_included::<INITIAL_BLOCKS>(top_block_height) { if compact_history_genesis_not_included::<INITIAL_BLOCKS>(top_block_height) {
block_ids.push(get_block_info(&0, &table_block_infos)?.block_hash); block_ids.push(get_block_info(&0, &table_block_infos)?.block_hash);
@ -598,7 +600,7 @@ fn next_chain_entry(
// This will happen if we have a different genesis block. // This will happen if we have a different genesis block.
if idx == block_ids.len() { if idx == block_ids.len() {
return Ok(BlockchainResponse::NextChainEntry { return Ok(BlockchainResponse::NextChainEntry {
start_height: 0, start_height: None,
chain_height: 0, chain_height: 0,
block_ids: vec![], block_ids: vec![],
block_weights: vec![], block_weights: vec![],
@ -621,7 +623,7 @@ fn next_chain_entry(
Ok((block_info.block_hash, block_info.weight)) Ok((block_info.block_hash, block_info.weight))
}) })
.collect::<Result<(Vec<_>, Vec<_>), RuntimeError>>()?; .collect::<DbResult<(Vec<_>, Vec<_>)>>()?;
let top_block_info = table_block_infos.get(&(chain_height - 1))?; let top_block_info = table_block_infos.get(&(chain_height - 1))?;
@ -632,7 +634,7 @@ fn next_chain_entry(
}; };
Ok(BlockchainResponse::NextChainEntry { Ok(BlockchainResponse::NextChainEntry {
start_height: first_known_height, start_height: std::num::NonZero::new(first_known_height),
chain_height, chain_height,
block_ids, block_ids,
block_weights, block_weights,
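`start_height` moves from a plain integer sentinel (`0` meaning no known block) to an `Option` of a non-zero integer, which makes the sentinel explicit in the type. A two-assert illustration of why `NonZero::new` maps the old sentinel to `None`:

```rust
use std::num::NonZero;

fn main() {
    let not_found: Option<NonZero<usize>> = NonZero::new(0); // old sentinel `0`
    let found: Option<NonZero<usize>> = NonZero::new(5);

    assert!(not_found.is_none());
    assert_eq!(found.map(|h| h.get()), Some(5));
}
```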
@ -669,12 +671,8 @@ fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseRes
}) })
} }
/// [`BlockchainReadRequest::MissingTxsInBlock`] /// [`BlockchainReadRequest::TxsInBlock`]
fn missing_txs_in_block( fn txs_in_block(env: &ConcreteEnv, block_hash: [u8; 32], missing_txs: Vec<u64>) -> ResponseResult {
env: &ConcreteEnv,
block_hash: [u8; 32],
missing_txs: Vec<u64>,
) -> ResponseResult {
// Single-threaded, no `ThreadLocal` required. // Single-threaded, no `ThreadLocal` required.
let env_inner = env.env_inner(); let env_inner = env.env_inner();
let tx_ro = env_inner.tx_ro()?; let tx_ro = env_inner.tx_ro()?;
@ -686,17 +684,18 @@ fn missing_txs_in_block(
let first_tx_index = miner_tx_index + 1; let first_tx_index = miner_tx_index + 1;
if numb_txs < missing_txs.len() { if numb_txs < missing_txs.len() {
return Ok(BlockchainResponse::MissingTxsInBlock(None)); return Ok(BlockchainResponse::TxsInBlock(None));
} }
let txs = missing_txs let txs = missing_txs
.into_iter() .into_iter()
.map(|index_offset| Ok(tables.tx_blobs().get(&(first_tx_index + index_offset))?.0)) .map(|index_offset| Ok(tables.tx_blobs().get(&(first_tx_index + index_offset))?.0))
.collect::<Result<_, RuntimeError>>()?; .collect::<DbResult<_>>()?;
Ok(BlockchainResponse::MissingTxsInBlock(Some( Ok(BlockchainResponse::TxsInBlock(Some(TxsInBlock {
MissingTxsInBlock { block, txs }, block,
))) txs,
})))
} }
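The index arithmetic assumes the storage layout used when blocks are added: the miner tx blob sits at `mining_tx_index` and the block's other txs follow it, so requested offsets are taken relative to `miner_tx_index + 1`. A toy sketch of that selection logic with in-memory blobs:

```rust
/// Pick tx blobs out of an in-memory "table" given offsets relative to the first non-miner tx.
fn select_txs(
    all_tx_blobs: &[Vec<u8>],
    miner_tx_index: usize,
    offsets: &[usize],
) -> Option<Vec<Vec<u8>>> {
    let first_tx_index = miner_tx_index + 1;
    let numb_txs = all_tx_blobs.len().saturating_sub(first_tx_index);

    // Mirror the early return above: asking for more txs than the block has yields `None`.
    if numb_txs < offsets.len() {
        return None;
    }

    offsets
        .iter()
        .map(|offset| all_tx_blobs.get(first_tx_index + offset).cloned())
        .collect()
}

fn main() {
    // blobs[0] plays the role of the miner tx.
    let blobs = vec![vec![0], vec![1], vec![2], vec![3]];
    assert_eq!(select_txs(&blobs, 0, &[0, 2]), Some(vec![vec![1], vec![3]]));
}
```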
/// [`BlockchainReadRequest::AltBlocksInChain`] /// [`BlockchainReadRequest::AltBlocksInChain`]
@ -736,7 +735,7 @@ fn alt_blocks_in_chain(env: &ConcreteEnv, chain_id: ChainId) -> ResponseResult {
) )
}) })
}) })
.collect::<Result<_, _>>()?; .collect::<DbResult<_>>()?;
Ok(BlockchainResponse::AltBlocksInChain(blocks)) Ok(BlockchainResponse::AltBlocksInChain(blocks))
} }

@ -1,7 +1,7 @@
//! Database service type aliases. //! Database service type aliases.
//---------------------------------------------------------------------------------------------------- Use //---------------------------------------------------------------------------------------------------- Use
use cuprate_database::RuntimeError; use cuprate_database::DbResult;
use cuprate_database_service::{DatabaseReadService, DatabaseWriteHandle}; use cuprate_database_service::{DatabaseReadService, DatabaseWriteHandle};
use cuprate_types::blockchain::{ use cuprate_types::blockchain::{
BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest, BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest,
@ -11,7 +11,7 @@ use cuprate_types::blockchain::{
/// The actual type of the response. /// The actual type of the response.
/// ///
/// Either our [`BlockchainResponse`], or a database error occurred. /// Either our [`BlockchainResponse`], or a database error occurred.
pub(super) type ResponseResult = Result<BlockchainResponse, RuntimeError>; pub(super) type ResponseResult = DbResult<BlockchainResponse>;
/// The blockchain database write service. /// The blockchain database write service.
pub type BlockchainWriteHandle = DatabaseWriteHandle<BlockchainWriteRequest, BlockchainResponse>; pub type BlockchainWriteHandle = DatabaseWriteHandle<BlockchainWriteRequest, BlockchainResponse>;

@ -2,7 +2,7 @@
//---------------------------------------------------------------------------------------------------- Import //---------------------------------------------------------------------------------------------------- Import
use std::sync::Arc; use std::sync::Arc;
use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError, TxRw}; use cuprate_database::{ConcreteEnv, DatabaseRo, DbResult, Env, EnvInner, TxRw};
use cuprate_database_service::DatabaseWriteHandle; use cuprate_database_service::DatabaseWriteHandle;
use cuprate_types::{ use cuprate_types::{
blockchain::{BlockchainResponse, BlockchainWriteRequest}, blockchain::{BlockchainResponse, BlockchainWriteRequest},
@ -36,7 +36,7 @@ pub fn init_write_service(env: Arc<ConcreteEnv>) -> BlockchainWriteHandle {
fn handle_blockchain_request( fn handle_blockchain_request(
env: &ConcreteEnv, env: &ConcreteEnv,
req: &BlockchainWriteRequest, req: &BlockchainWriteRequest,
) -> Result<BlockchainResponse, RuntimeError> { ) -> DbResult<BlockchainResponse> {
match req { match req {
BlockchainWriteRequest::WriteBlock(block) => write_block(env, block), BlockchainWriteRequest::WriteBlock(block) => write_block(env, block),
BlockchainWriteRequest::WriteAltBlock(alt_block) => write_alt_block(env, alt_block), BlockchainWriteRequest::WriteAltBlock(alt_block) => write_alt_block(env, alt_block),

@ -6,7 +6,7 @@ use std::{cell::RefCell, ops::RangeBounds};
use crate::{ use crate::{
backend::heed::types::HeedDb, backend::heed::types::HeedDb,
database::{DatabaseIter, DatabaseRo, DatabaseRw}, database::{DatabaseIter, DatabaseRo, DatabaseRw},
error::RuntimeError, error::{DbResult, RuntimeError},
table::Table, table::Table,
}; };
@ -54,16 +54,13 @@ fn get<T: Table>(
db: &HeedDb<T::Key, T::Value>, db: &HeedDb<T::Key, T::Value>,
tx_ro: &heed::RoTxn<'_>, tx_ro: &heed::RoTxn<'_>,
key: &T::Key, key: &T::Key,
) -> Result<T::Value, RuntimeError> { ) -> DbResult<T::Value> {
db.get(tx_ro, key)?.ok_or(RuntimeError::KeyNotFound) db.get(tx_ro, key)?.ok_or(RuntimeError::KeyNotFound)
} }
/// Shared [`DatabaseRo::len()`]. /// Shared [`DatabaseRo::len()`].
#[inline] #[inline]
fn len<T: Table>( fn len<T: Table>(db: &HeedDb<T::Key, T::Value>, tx_ro: &heed::RoTxn<'_>) -> DbResult<u64> {
db: &HeedDb<T::Key, T::Value>,
tx_ro: &heed::RoTxn<'_>,
) -> Result<u64, RuntimeError> {
Ok(db.len(tx_ro)?) Ok(db.len(tx_ro)?)
} }
@ -72,7 +69,7 @@ fn len<T: Table>(
fn first<T: Table>( fn first<T: Table>(
db: &HeedDb<T::Key, T::Value>, db: &HeedDb<T::Key, T::Value>,
tx_ro: &heed::RoTxn<'_>, tx_ro: &heed::RoTxn<'_>,
) -> Result<(T::Key, T::Value), RuntimeError> { ) -> DbResult<(T::Key, T::Value)> {
db.first(tx_ro)?.ok_or(RuntimeError::KeyNotFound) db.first(tx_ro)?.ok_or(RuntimeError::KeyNotFound)
} }
@ -81,16 +78,13 @@ fn first<T: Table>(
fn last<T: Table>( fn last<T: Table>(
db: &HeedDb<T::Key, T::Value>, db: &HeedDb<T::Key, T::Value>,
tx_ro: &heed::RoTxn<'_>, tx_ro: &heed::RoTxn<'_>,
) -> Result<(T::Key, T::Value), RuntimeError> { ) -> DbResult<(T::Key, T::Value)> {
db.last(tx_ro)?.ok_or(RuntimeError::KeyNotFound) db.last(tx_ro)?.ok_or(RuntimeError::KeyNotFound)
} }
/// Shared [`DatabaseRo::is_empty()`]. /// Shared [`DatabaseRo::is_empty()`].
#[inline] #[inline]
fn is_empty<T: Table>( fn is_empty<T: Table>(db: &HeedDb<T::Key, T::Value>, tx_ro: &heed::RoTxn<'_>) -> DbResult<bool> {
db: &HeedDb<T::Key, T::Value>,
tx_ro: &heed::RoTxn<'_>,
) -> Result<bool, RuntimeError> {
Ok(db.is_empty(tx_ro)?) Ok(db.is_empty(tx_ro)?)
} }
@ -100,7 +94,7 @@ impl<T: Table> DatabaseIter<T> for HeedTableRo<'_, T> {
fn get_range<'a, Range>( fn get_range<'a, Range>(
&'a self, &'a self,
range: Range, range: Range,
) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + 'a, RuntimeError> ) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + 'a>
where where
Range: RangeBounds<T::Key> + 'a, Range: RangeBounds<T::Key> + 'a,
{ {
@ -108,24 +102,17 @@ impl<T: Table> DatabaseIter<T> for HeedTableRo<'_, T> {
} }
#[inline] #[inline]
fn iter( fn iter(&self) -> DbResult<impl Iterator<Item = DbResult<(T::Key, T::Value)>> + '_> {
&self,
) -> Result<impl Iterator<Item = Result<(T::Key, T::Value), RuntimeError>> + '_, RuntimeError>
{
Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?))) Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?)))
} }
#[inline] #[inline]
fn keys( fn keys(&self) -> DbResult<impl Iterator<Item = DbResult<T::Key>> + '_> {
&self,
) -> Result<impl Iterator<Item = Result<T::Key, RuntimeError>> + '_, RuntimeError> {
Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?.0))) Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?.0)))
} }
#[inline] #[inline]
fn values( fn values(&self) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + '_> {
&self,
) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + '_, RuntimeError> {
Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?.1))) Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?.1)))
} }
} }
@ -134,27 +121,27 @@ impl<T: Table> DatabaseIter<T> for HeedTableRo<'_, T> {
// SAFETY: `HeedTableRo: !Send` as it holds a reference to `heed::RoTxn: Send + !Sync`. // SAFETY: `HeedTableRo: !Send` as it holds a reference to `heed::RoTxn: Send + !Sync`.
unsafe impl<T: Table> DatabaseRo<T> for HeedTableRo<'_, T> { unsafe impl<T: Table> DatabaseRo<T> for HeedTableRo<'_, T> {
#[inline] #[inline]
fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> { fn get(&self, key: &T::Key) -> DbResult<T::Value> {
get::<T>(&self.db, self.tx_ro, key) get::<T>(&self.db, self.tx_ro, key)
} }
#[inline] #[inline]
fn len(&self) -> Result<u64, RuntimeError> { fn len(&self) -> DbResult<u64> {
len::<T>(&self.db, self.tx_ro) len::<T>(&self.db, self.tx_ro)
} }
#[inline] #[inline]
fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> { fn first(&self) -> DbResult<(T::Key, T::Value)> {
first::<T>(&self.db, self.tx_ro) first::<T>(&self.db, self.tx_ro)
} }
#[inline] #[inline]
fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> { fn last(&self) -> DbResult<(T::Key, T::Value)> {
last::<T>(&self.db, self.tx_ro) last::<T>(&self.db, self.tx_ro)
} }
#[inline] #[inline]
fn is_empty(&self) -> Result<bool, RuntimeError> { fn is_empty(&self) -> DbResult<bool> {
is_empty::<T>(&self.db, self.tx_ro) is_empty::<T>(&self.db, self.tx_ro)
} }
} }
@ -164,45 +151,45 @@ unsafe impl<T: Table> DatabaseRo<T> for HeedTableRo<'_, T> {
// `HeedTableRw`'s write transaction is `!Send`. // `HeedTableRw`'s write transaction is `!Send`.
unsafe impl<T: Table> DatabaseRo<T> for HeedTableRw<'_, '_, T> { unsafe impl<T: Table> DatabaseRo<T> for HeedTableRw<'_, '_, T> {
#[inline] #[inline]
fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> { fn get(&self, key: &T::Key) -> DbResult<T::Value> {
get::<T>(&self.db, &self.tx_rw.borrow(), key) get::<T>(&self.db, &self.tx_rw.borrow(), key)
} }
#[inline] #[inline]
fn len(&self) -> Result<u64, RuntimeError> { fn len(&self) -> DbResult<u64> {
len::<T>(&self.db, &self.tx_rw.borrow()) len::<T>(&self.db, &self.tx_rw.borrow())
} }
#[inline] #[inline]
fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> { fn first(&self) -> DbResult<(T::Key, T::Value)> {
first::<T>(&self.db, &self.tx_rw.borrow()) first::<T>(&self.db, &self.tx_rw.borrow())
} }
#[inline] #[inline]
fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> { fn last(&self) -> DbResult<(T::Key, T::Value)> {
last::<T>(&self.db, &self.tx_rw.borrow()) last::<T>(&self.db, &self.tx_rw.borrow())
} }
#[inline] #[inline]
fn is_empty(&self) -> Result<bool, RuntimeError> { fn is_empty(&self) -> DbResult<bool> {
is_empty::<T>(&self.db, &self.tx_rw.borrow()) is_empty::<T>(&self.db, &self.tx_rw.borrow())
} }
} }
impl<T: Table> DatabaseRw<T> for HeedTableRw<'_, '_, T> { impl<T: Table> DatabaseRw<T> for HeedTableRw<'_, '_, T> {
#[inline] #[inline]
fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError> { fn put(&mut self, key: &T::Key, value: &T::Value) -> DbResult<()> {
Ok(self.db.put(&mut self.tx_rw.borrow_mut(), key, value)?) Ok(self.db.put(&mut self.tx_rw.borrow_mut(), key, value)?)
} }
#[inline] #[inline]
fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError> { fn delete(&mut self, key: &T::Key) -> DbResult<()> {
self.db.delete(&mut self.tx_rw.borrow_mut(), key)?; self.db.delete(&mut self.tx_rw.borrow_mut(), key)?;
Ok(()) Ok(())
} }
#[inline] #[inline]
fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError> { fn take(&mut self, key: &T::Key) -> DbResult<T::Value> {
// LMDB/heed does not return the value on deletion. // LMDB/heed does not return the value on deletion.
// So, fetch it first - then delete. // So, fetch it first - then delete.
let value = get::<T>(&self.db, &self.tx_rw.borrow(), key)?; let value = get::<T>(&self.db, &self.tx_rw.borrow(), key)?;
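As the comment says, LMDB/heed's delete does not hand back the removed value, so `take` must read first and delete second. A standalone sketch of that shape on a `BTreeMap` (purely a stand-in; `BTreeMap::remove` would of course return the value directly):

```rust
use std::collections::BTreeMap;

#[derive(Debug, PartialEq)]
enum Error {
    KeyNotFound,
}

/// "Fetch, then delete": read a copy of the value first, then remove the entry.
fn take(map: &mut BTreeMap<u32, String>, key: u32) -> Result<String, Error> {
    // 1. Read the value (errors if the key is absent)...
    let value = map.get(&key).cloned().ok_or(Error::KeyNotFound)?;
    // 2. ...then delete the entry and return the copy.
    map.remove(&key);
    Ok(value)
}

fn main() {
    let mut map = BTreeMap::from([(1, "one".to_string())]);
    assert_eq!(take(&mut map, 1), Ok("one".to_string()));
    assert_eq!(take(&mut map, 1), Err(Error::KeyNotFound));
}
```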
@ -216,7 +203,7 @@ impl<T: Table> DatabaseRw<T> for HeedTableRw<'_, '_, T> {
} }
#[inline] #[inline]
fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError> { fn pop_first(&mut self) -> DbResult<(T::Key, T::Value)> {
let tx_rw = &mut self.tx_rw.borrow_mut(); let tx_rw = &mut self.tx_rw.borrow_mut();
// Get the value first... // Get the value first...
@ -235,7 +222,7 @@ impl<T: Table> DatabaseRw<T> for HeedTableRw<'_, '_, T> {
} }
#[inline] #[inline]
fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError> { fn pop_last(&mut self) -> DbResult<(T::Key, T::Value)> {
let tx_rw = &mut self.tx_rw.borrow_mut(); let tx_rw = &mut self.tx_rw.borrow_mut();
// Get the value first... // Get the value first...

@ -18,7 +18,7 @@ use crate::{
config::{Config, SyncMode}, config::{Config, SyncMode},
database::{DatabaseIter, DatabaseRo, DatabaseRw}, database::{DatabaseIter, DatabaseRo, DatabaseRw},
env::{Env, EnvInner}, env::{Env, EnvInner},
error::{InitError, RuntimeError}, error::{DbResult, InitError, RuntimeError},
key::{Key, KeyCompare}, key::{Key, KeyCompare},
resize::ResizeAlgorithm, resize::ResizeAlgorithm,
table::Table, table::Table,
@ -203,7 +203,7 @@ impl Env for ConcreteEnv {
&self.config &self.config
} }
fn sync(&self) -> Result<(), RuntimeError> { fn sync(&self) -> DbResult<()> {
Ok(self.env.read().unwrap().force_sync()?) Ok(self.env.read().unwrap().force_sync()?)
} }
@ -253,12 +253,12 @@ where
type Rw<'a> = RefCell<heed::RwTxn<'a>>; type Rw<'a> = RefCell<heed::RwTxn<'a>>;
#[inline] #[inline]
fn tx_ro(&self) -> Result<Self::Ro<'_>, RuntimeError> { fn tx_ro(&self) -> DbResult<Self::Ro<'_>> {
Ok(self.read_txn()?) Ok(self.read_txn()?)
} }
#[inline] #[inline]
fn tx_rw(&self) -> Result<Self::Rw<'_>, RuntimeError> { fn tx_rw(&self) -> DbResult<Self::Rw<'_>> {
Ok(RefCell::new(self.write_txn()?)) Ok(RefCell::new(self.write_txn()?))
} }
@ -266,7 +266,7 @@ where
fn open_db_ro<T: Table>( fn open_db_ro<T: Table>(
&self, &self,
tx_ro: &Self::Ro<'_>, tx_ro: &Self::Ro<'_>,
) -> Result<impl DatabaseRo<T> + DatabaseIter<T>, RuntimeError> { ) -> DbResult<impl DatabaseRo<T> + DatabaseIter<T>> {
// Open up a read-only database using our table's const metadata. // Open up a read-only database using our table's const metadata.
// //
// INVARIANT: LMDB caches the ordering / comparison function from [`EnvInner::create_db`], // INVARIANT: LMDB caches the ordering / comparison function from [`EnvInner::create_db`],
@ -281,10 +281,7 @@ where
} }
#[inline] #[inline]
fn open_db_rw<T: Table>( fn open_db_rw<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> DbResult<impl DatabaseRw<T>> {
&self,
tx_rw: &Self::Rw<'_>,
) -> Result<impl DatabaseRw<T>, RuntimeError> {
// Open up a read/write database using our table's const metadata. // Open up a read/write database using our table's const metadata.
// //
// INVARIANT: LMDB caches the ordering / comparison function from [`EnvInner::create_db`], // INVARIANT: LMDB caches the ordering / comparison function from [`EnvInner::create_db`],
@ -296,7 +293,7 @@ where
}) })
} }
fn create_db<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> Result<(), RuntimeError> { fn create_db<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> DbResult<()> {
// Create a database using our: // Create a database using our:
// - [`Table`]'s const metadata. // - [`Table`]'s const metadata.
// - (potentially) our [`Key`] comparison function // - (potentially) our [`Key`] comparison function
@ -328,7 +325,7 @@ where
} }
#[inline] #[inline]
fn clear_db<T: Table>(&self, tx_rw: &mut Self::Rw<'_>) -> Result<(), RuntimeError> { fn clear_db<T: Table>(&self, tx_rw: &mut Self::Rw<'_>) -> DbResult<()> {
let tx_rw = tx_rw.get_mut(); let tx_rw = tx_rw.get_mut();
// Open the table. We don't care about flags or key // Open the table. We don't care about flags or key

@ -4,31 +4,31 @@ use std::cell::RefCell;
//---------------------------------------------------------------------------------------------------- Import //---------------------------------------------------------------------------------------------------- Import
use crate::{ use crate::{
error::RuntimeError, error::DbResult,
transaction::{TxRo, TxRw}, transaction::{TxRo, TxRw},
}; };
//---------------------------------------------------------------------------------------------------- TxRo //---------------------------------------------------------------------------------------------------- TxRo
impl TxRo<'_> for heed::RoTxn<'_> { impl TxRo<'_> for heed::RoTxn<'_> {
fn commit(self) -> Result<(), RuntimeError> { fn commit(self) -> DbResult<()> {
Ok(heed::RoTxn::commit(self)?) Ok(heed::RoTxn::commit(self)?)
} }
} }
//---------------------------------------------------------------------------------------------------- TxRw //---------------------------------------------------------------------------------------------------- TxRw
impl TxRo<'_> for RefCell<heed::RwTxn<'_>> { impl TxRo<'_> for RefCell<heed::RwTxn<'_>> {
fn commit(self) -> Result<(), RuntimeError> { fn commit(self) -> DbResult<()> {
TxRw::commit(self) TxRw::commit(self)
} }
} }
impl TxRw<'_> for RefCell<heed::RwTxn<'_>> { impl TxRw<'_> for RefCell<heed::RwTxn<'_>> {
fn commit(self) -> Result<(), RuntimeError> { fn commit(self) -> DbResult<()> {
Ok(heed::RwTxn::commit(self.into_inner())?) Ok(heed::RwTxn::commit(self.into_inner())?)
} }
/// This function is infallible. /// This function is infallible.
fn abort(self) -> Result<(), RuntimeError> { fn abort(self) -> DbResult<()> {
heed::RwTxn::abort(self.into_inner()); heed::RwTxn::abort(self.into_inner());
Ok(()) Ok(())
} }

@ -11,7 +11,7 @@ use crate::{
types::{RedbTableRo, RedbTableRw}, types::{RedbTableRo, RedbTableRw},
}, },
database::{DatabaseIter, DatabaseRo, DatabaseRw}, database::{DatabaseIter, DatabaseRo, DatabaseRw},
error::RuntimeError, error::{DbResult, RuntimeError},
table::Table, table::Table,
}; };
@ -25,7 +25,7 @@ use crate::{
fn get<T: Table + 'static>( fn get<T: Table + 'static>(
db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>, db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
key: &T::Key, key: &T::Key,
) -> Result<T::Value, RuntimeError> { ) -> DbResult<T::Value> {
Ok(db.get(key)?.ok_or(RuntimeError::KeyNotFound)?.value()) Ok(db.get(key)?.ok_or(RuntimeError::KeyNotFound)?.value())
} }
@ -33,7 +33,7 @@ fn get<T: Table + 'static>(
#[inline] #[inline]
fn len<T: Table>( fn len<T: Table>(
db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>, db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
) -> Result<u64, RuntimeError> { ) -> DbResult<u64> {
Ok(db.len()?) Ok(db.len()?)
} }
@ -41,7 +41,7 @@ fn len<T: Table>(
#[inline] #[inline]
fn first<T: Table>( fn first<T: Table>(
db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>, db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
) -> Result<(T::Key, T::Value), RuntimeError> { ) -> DbResult<(T::Key, T::Value)> {
let (key, value) = db.first()?.ok_or(RuntimeError::KeyNotFound)?; let (key, value) = db.first()?.ok_or(RuntimeError::KeyNotFound)?;
Ok((key.value(), value.value())) Ok((key.value(), value.value()))
} }
@ -50,7 +50,7 @@ fn first<T: Table>(
#[inline] #[inline]
fn last<T: Table>( fn last<T: Table>(
db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>, db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
) -> Result<(T::Key, T::Value), RuntimeError> { ) -> DbResult<(T::Key, T::Value)> {
let (key, value) = db.last()?.ok_or(RuntimeError::KeyNotFound)?; let (key, value) = db.last()?.ok_or(RuntimeError::KeyNotFound)?;
Ok((key.value(), value.value())) Ok((key.value(), value.value()))
} }
@ -59,7 +59,7 @@ fn last<T: Table>(
#[inline] #[inline]
fn is_empty<T: Table>( fn is_empty<T: Table>(
db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>, db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
) -> Result<bool, RuntimeError> { ) -> DbResult<bool> {
Ok(db.is_empty()?) Ok(db.is_empty()?)
} }
@ -69,7 +69,7 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
fn get_range<'a, Range>( fn get_range<'a, Range>(
&'a self, &'a self,
range: Range, range: Range,
) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + 'a, RuntimeError> ) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + 'a>
where where
Range: RangeBounds<T::Key> + 'a, Range: RangeBounds<T::Key> + 'a,
{ {
@ -80,10 +80,7 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
} }
#[inline] #[inline]
fn iter( fn iter(&self) -> DbResult<impl Iterator<Item = DbResult<(T::Key, T::Value)>> + '_> {
&self,
) -> Result<impl Iterator<Item = Result<(T::Key, T::Value), RuntimeError>> + '_, RuntimeError>
{
Ok(ReadableTable::iter(self)?.map(|result| { Ok(ReadableTable::iter(self)?.map(|result| {
let (key, value) = result?; let (key, value) = result?;
Ok((key.value(), value.value())) Ok((key.value(), value.value()))
@ -91,9 +88,7 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
} }
#[inline] #[inline]
fn keys( fn keys(&self) -> DbResult<impl Iterator<Item = DbResult<T::Key>> + '_> {
&self,
) -> Result<impl Iterator<Item = Result<T::Key, RuntimeError>> + '_, RuntimeError> {
Ok(ReadableTable::iter(self)?.map(|result| { Ok(ReadableTable::iter(self)?.map(|result| {
let (key, _value) = result?; let (key, _value) = result?;
Ok(key.value()) Ok(key.value())
@ -101,9 +96,7 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
} }
#[inline] #[inline]
fn values( fn values(&self) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + '_> {
&self,
) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + '_, RuntimeError> {
Ok(ReadableTable::iter(self)?.map(|result| { Ok(ReadableTable::iter(self)?.map(|result| {
let (_key, value) = result?; let (_key, value) = result?;
Ok(value.value()) Ok(value.value())
@ -115,27 +108,27 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
// SAFETY: Both `redb`'s transaction and table types are `Send + Sync`. // SAFETY: Both `redb`'s transaction and table types are `Send + Sync`.
unsafe impl<T: Table + 'static> DatabaseRo<T> for RedbTableRo<T::Key, T::Value> { unsafe impl<T: Table + 'static> DatabaseRo<T> for RedbTableRo<T::Key, T::Value> {
#[inline] #[inline]
fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> { fn get(&self, key: &T::Key) -> DbResult<T::Value> {
get::<T>(self, key) get::<T>(self, key)
} }
#[inline] #[inline]
fn len(&self) -> Result<u64, RuntimeError> { fn len(&self) -> DbResult<u64> {
len::<T>(self) len::<T>(self)
} }
#[inline] #[inline]
fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> { fn first(&self) -> DbResult<(T::Key, T::Value)> {
first::<T>(self) first::<T>(self)
} }
#[inline] #[inline]
fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> { fn last(&self) -> DbResult<(T::Key, T::Value)> {
last::<T>(self) last::<T>(self)
} }
#[inline] #[inline]
fn is_empty(&self) -> Result<bool, RuntimeError> { fn is_empty(&self) -> DbResult<bool> {
is_empty::<T>(self) is_empty::<T>(self)
} }
} }
@ -144,27 +137,27 @@ unsafe impl<T: Table + 'static> DatabaseRo<T> for RedbTableRo<T::Key, T::Value>
// SAFETY: Both `redb`'s transaction and table types are `Send + Sync`. // SAFETY: Both `redb`'s transaction and table types are `Send + Sync`.
unsafe impl<T: Table + 'static> DatabaseRo<T> for RedbTableRw<'_, T::Key, T::Value> { unsafe impl<T: Table + 'static> DatabaseRo<T> for RedbTableRw<'_, T::Key, T::Value> {
#[inline] #[inline]
fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> { fn get(&self, key: &T::Key) -> DbResult<T::Value> {
get::<T>(self, key) get::<T>(self, key)
} }
#[inline] #[inline]
fn len(&self) -> Result<u64, RuntimeError> { fn len(&self) -> DbResult<u64> {
len::<T>(self) len::<T>(self)
} }
#[inline] #[inline]
fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> { fn first(&self) -> DbResult<(T::Key, T::Value)> {
first::<T>(self) first::<T>(self)
} }
#[inline] #[inline]
fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> { fn last(&self) -> DbResult<(T::Key, T::Value)> {
last::<T>(self) last::<T>(self)
} }
#[inline] #[inline]
fn is_empty(&self) -> Result<bool, RuntimeError> { fn is_empty(&self) -> DbResult<bool> {
is_empty::<T>(self) is_empty::<T>(self)
} }
} }
@ -173,19 +166,19 @@ impl<T: Table + 'static> DatabaseRw<T> for RedbTableRw<'_, T::Key, T::Value> {
// `redb` returns the value after function calls so we end with Ok(()) instead. // `redb` returns the value after function calls so we end with Ok(()) instead.
#[inline] #[inline]
fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError> { fn put(&mut self, key: &T::Key, value: &T::Value) -> DbResult<()> {
redb::Table::insert(self, key, value)?; redb::Table::insert(self, key, value)?;
Ok(()) Ok(())
} }
#[inline] #[inline]
fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError> { fn delete(&mut self, key: &T::Key) -> DbResult<()> {
redb::Table::remove(self, key)?; redb::Table::remove(self, key)?;
Ok(()) Ok(())
} }
#[inline] #[inline]
fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError> { fn take(&mut self, key: &T::Key) -> DbResult<T::Value> {
if let Some(value) = redb::Table::remove(self, key)? { if let Some(value) = redb::Table::remove(self, key)? {
Ok(value.value()) Ok(value.value())
} else { } else {
@ -194,13 +187,13 @@ impl<T: Table + 'static> DatabaseRw<T> for RedbTableRw<'_, T::Key, T::Value> {
} }
#[inline] #[inline]
fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError> { fn pop_first(&mut self) -> DbResult<(T::Key, T::Value)> {
let (key, value) = redb::Table::pop_first(self)?.ok_or(RuntimeError::KeyNotFound)?; let (key, value) = redb::Table::pop_first(self)?.ok_or(RuntimeError::KeyNotFound)?;
Ok((key.value(), value.value())) Ok((key.value(), value.value()))
} }
#[inline] #[inline]
fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError> { fn pop_last(&mut self) -> DbResult<(T::Key, T::Value)> {
let (key, value) = redb::Table::pop_last(self)?.ok_or(RuntimeError::KeyNotFound)?; let (key, value) = redb::Table::pop_last(self)?.ok_or(RuntimeError::KeyNotFound)?;
Ok((key.value(), value.value())) Ok((key.value(), value.value()))
} }

View file

@ -6,7 +6,7 @@ use crate::{
config::{Config, SyncMode}, config::{Config, SyncMode},
database::{DatabaseIter, DatabaseRo, DatabaseRw}, database::{DatabaseIter, DatabaseRo, DatabaseRw},
env::{Env, EnvInner}, env::{Env, EnvInner},
error::{InitError, RuntimeError}, error::{DbResult, InitError, RuntimeError},
table::Table, table::Table,
TxRw, TxRw,
}; };
@ -105,7 +105,7 @@ impl Env for ConcreteEnv {
&self.config &self.config
} }
fn sync(&self) -> Result<(), RuntimeError> { fn sync(&self) -> DbResult<()> {
// `redb`'s syncs are tied with write transactions, // `redb`'s syncs are tied with write transactions,
// so just create one, don't do anything and commit. // so just create one, don't do anything and commit.
let mut tx_rw = self.env.begin_write()?; let mut tx_rw = self.env.begin_write()?;
@ -127,12 +127,12 @@ where
type Rw<'a> = redb::WriteTransaction; type Rw<'a> = redb::WriteTransaction;
#[inline] #[inline]
fn tx_ro(&self) -> Result<redb::ReadTransaction, RuntimeError> { fn tx_ro(&self) -> DbResult<redb::ReadTransaction> {
Ok(self.0.begin_read()?) Ok(self.0.begin_read()?)
} }
#[inline] #[inline]
fn tx_rw(&self) -> Result<redb::WriteTransaction, RuntimeError> { fn tx_rw(&self) -> DbResult<redb::WriteTransaction> {
// `redb` has sync modes on the TX level, unlike heed, // `redb` has sync modes on the TX level, unlike heed,
// which sets it at the Environment level. // which sets it at the Environment level.
// //
@ -146,7 +146,7 @@ where
fn open_db_ro<T: Table>( fn open_db_ro<T: Table>(
&self, &self,
tx_ro: &Self::Ro<'_>, tx_ro: &Self::Ro<'_>,
) -> Result<impl DatabaseRo<T> + DatabaseIter<T>, RuntimeError> { ) -> DbResult<impl DatabaseRo<T> + DatabaseIter<T>> {
// Open up a read-only database using our `T: Table`'s const metadata. // Open up a read-only database using our `T: Table`'s const metadata.
let table: redb::TableDefinition<'static, StorableRedb<T::Key>, StorableRedb<T::Value>> = let table: redb::TableDefinition<'static, StorableRedb<T::Key>, StorableRedb<T::Value>> =
redb::TableDefinition::new(T::NAME); redb::TableDefinition::new(T::NAME);
@ -155,10 +155,7 @@ where
} }
#[inline] #[inline]
fn open_db_rw<T: Table>( fn open_db_rw<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> DbResult<impl DatabaseRw<T>> {
&self,
tx_rw: &Self::Rw<'_>,
) -> Result<impl DatabaseRw<T>, RuntimeError> {
// Open up a read/write database using our `T: Table`'s const metadata. // Open up a read/write database using our `T: Table`'s const metadata.
let table: redb::TableDefinition<'static, StorableRedb<T::Key>, StorableRedb<T::Value>> = let table: redb::TableDefinition<'static, StorableRedb<T::Key>, StorableRedb<T::Value>> =
redb::TableDefinition::new(T::NAME); redb::TableDefinition::new(T::NAME);
@ -168,14 +165,14 @@ where
Ok(tx_rw.open_table(table)?) Ok(tx_rw.open_table(table)?)
} }
fn create_db<T: Table>(&self, tx_rw: &redb::WriteTransaction) -> Result<(), RuntimeError> { fn create_db<T: Table>(&self, tx_rw: &redb::WriteTransaction) -> DbResult<()> {
// INVARIANT: `redb` creates tables if they don't exist. // INVARIANT: `redb` creates tables if they don't exist.
self.open_db_rw::<T>(tx_rw)?; self.open_db_rw::<T>(tx_rw)?;
Ok(()) Ok(())
} }
#[inline] #[inline]
fn clear_db<T: Table>(&self, tx_rw: &mut redb::WriteTransaction) -> Result<(), RuntimeError> { fn clear_db<T: Table>(&self, tx_rw: &mut redb::WriteTransaction) -> DbResult<()> {
let table: redb::TableDefinition< let table: redb::TableDefinition<
'static, 'static,
StorableRedb<<T as Table>::Key>, StorableRedb<<T as Table>::Key>,

View file

@ -34,8 +34,14 @@ impl<T> redb::Value for StorableRedb<T>
where where
T: Storable + 'static, T: Storable + 'static,
{ {
type SelfType<'a> = T where Self: 'a; type SelfType<'a>
type AsBytes<'a> = &'a [u8] where Self: 'a; = T
where
Self: 'a;
type AsBytes<'a>
= &'a [u8]
where
Self: 'a;
#[inline] #[inline]
fn fixed_width() -> Option<usize> { fn fixed_width() -> Option<usize> {
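
The hunk above is purely a formatting change: newer rustfmt puts the `where Self: 'a` bound of a generic associated type on its own lines after the `=`. Both spellings are accepted; a stand-in example (hypothetical trait, not redb's `Value`):

// Trait with a lifetime-generic associated type and a `Self: 'a` bound.
trait BytesView {
    type View<'a>
    where
        Self: 'a;

    fn view(&self) -> Self::View<'_>;
}

struct Blob(Vec<u8>);

impl BytesView for Blob {
    // The `where` clause now sits after the assigned type, as in the hunk above.
    type View<'a>
        = &'a [u8]
    where
        Self: 'a;

    fn view(&self) -> Self::View<'_> {
        &self.0
    }
}

fn main() {
    let b = Blob(vec![1, 2, 3]);
    assert_eq!(b.view(), &[1u8, 2, 3][..]);
}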

View file

@ -2,14 +2,14 @@
//---------------------------------------------------------------------------------------------------- Import //---------------------------------------------------------------------------------------------------- Import
use crate::{ use crate::{
error::RuntimeError, error::DbResult,
transaction::{TxRo, TxRw}, transaction::{TxRo, TxRw},
}; };
//---------------------------------------------------------------------------------------------------- TxRo //---------------------------------------------------------------------------------------------------- TxRo
impl TxRo<'_> for redb::ReadTransaction { impl TxRo<'_> for redb::ReadTransaction {
/// This function is infallible. /// This function is infallible.
fn commit(self) -> Result<(), RuntimeError> { fn commit(self) -> DbResult<()> {
// `redb`'s read transactions cleanup automatically when all references are dropped. // `redb`'s read transactions cleanup automatically when all references are dropped.
// //
// There is `close()`: // There is `close()`:
@ -22,11 +22,11 @@ impl TxRo<'_> for redb::ReadTransaction {
//---------------------------------------------------------------------------------------------------- TxRw //---------------------------------------------------------------------------------------------------- TxRw
impl TxRw<'_> for redb::WriteTransaction { impl TxRw<'_> for redb::WriteTransaction {
fn commit(self) -> Result<(), RuntimeError> { fn commit(self) -> DbResult<()> {
Ok(self.commit()?) Ok(self.commit()?)
} }
fn abort(self) -> Result<(), RuntimeError> { fn abort(self) -> DbResult<()> {
Ok(self.abort()?) Ok(self.abort()?)
} }
} }

View file

@ -9,7 +9,6 @@
//! based on these values. //! based on these values.
//---------------------------------------------------------------------------------------------------- Import //---------------------------------------------------------------------------------------------------- Import
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};

View file

@ -3,7 +3,10 @@
//---------------------------------------------------------------------------------------------------- Import //---------------------------------------------------------------------------------------------------- Import
use std::ops::RangeBounds; use std::ops::RangeBounds;
use crate::{error::RuntimeError, table::Table}; use crate::{
error::{DbResult, RuntimeError},
table::Table,
};
//---------------------------------------------------------------------------------------------------- DatabaseIter //---------------------------------------------------------------------------------------------------- DatabaseIter
/// Generic post-fix documentation for `DatabaseIter` methods. /// Generic post-fix documentation for `DatabaseIter` methods.
@ -48,27 +51,22 @@ pub trait DatabaseIter<T: Table> {
fn get_range<'a, Range>( fn get_range<'a, Range>(
&'a self, &'a self,
range: Range, range: Range,
) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + 'a, RuntimeError> ) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + 'a>
where where
Range: RangeBounds<T::Key> + 'a; Range: RangeBounds<T::Key> + 'a;
/// Get an [`Iterator`] that returns the `(key, value)` types for this database. /// Get an [`Iterator`] that returns the `(key, value)` types for this database.
#[doc = doc_iter!()] #[doc = doc_iter!()]
#[expect(clippy::iter_not_returning_iterator)] #[expect(clippy::iter_not_returning_iterator)]
fn iter( fn iter(&self) -> DbResult<impl Iterator<Item = DbResult<(T::Key, T::Value)>> + '_>;
&self,
) -> Result<impl Iterator<Item = Result<(T::Key, T::Value), RuntimeError>> + '_, RuntimeError>;
/// Get an [`Iterator`] that returns _only_ the `key` type for this database. /// Get an [`Iterator`] that returns _only_ the `key` type for this database.
#[doc = doc_iter!()] #[doc = doc_iter!()]
fn keys(&self) fn keys(&self) -> DbResult<impl Iterator<Item = DbResult<T::Key>> + '_>;
-> Result<impl Iterator<Item = Result<T::Key, RuntimeError>> + '_, RuntimeError>;
/// Get an [`Iterator`] that returns _only_ the `value` type for this database. /// Get an [`Iterator`] that returns _only_ the `value` type for this database.
#[doc = doc_iter!()] #[doc = doc_iter!()]
fn values( fn values(&self) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + '_>;
&self,
) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + '_, RuntimeError>;
} }
//---------------------------------------------------------------------------------------------------- DatabaseRo //---------------------------------------------------------------------------------------------------- DatabaseRo
@ -76,7 +74,7 @@ pub trait DatabaseIter<T: Table> {
macro_rules! doc_database { macro_rules! doc_database {
() => { () => {
r"# Errors r"# Errors
This will return [`RuntimeError::KeyNotFound`] if: This will return [`crate::RuntimeError::KeyNotFound`] if:
- Input does not exist OR - Input does not exist OR
- Database is empty" - Database is empty"
}; };
@ -111,7 +109,7 @@ This will return [`RuntimeError::KeyNotFound`] if:
pub unsafe trait DatabaseRo<T: Table> { pub unsafe trait DatabaseRo<T: Table> {
/// Get the value corresponding to a key. /// Get the value corresponding to a key.
#[doc = doc_database!()] #[doc = doc_database!()]
fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError>; fn get(&self, key: &T::Key) -> DbResult<T::Value>;
/// Returns `true` if the database contains a value for the specified key. /// Returns `true` if the database contains a value for the specified key.
/// ///
@ -120,7 +118,7 @@ pub unsafe trait DatabaseRo<T: Table> {
/// as in that case, `Ok(false)` will be returned. /// as in that case, `Ok(false)` will be returned.
/// ///
/// Other errors may still occur. /// Other errors may still occur.
fn contains(&self, key: &T::Key) -> Result<bool, RuntimeError> { fn contains(&self, key: &T::Key) -> DbResult<bool> {
match self.get(key) { match self.get(key) {
Ok(_) => Ok(true), Ok(_) => Ok(true),
Err(RuntimeError::KeyNotFound) => Ok(false), Err(RuntimeError::KeyNotFound) => Ok(false),
@ -132,21 +130,21 @@ pub unsafe trait DatabaseRo<T: Table> {
/// ///
/// # Errors /// # Errors
/// This will never return [`RuntimeError::KeyNotFound`]. /// This will never return [`RuntimeError::KeyNotFound`].
fn len(&self) -> Result<u64, RuntimeError>; fn len(&self) -> DbResult<u64>;
/// Returns the first `(key, value)` pair in the database. /// Returns the first `(key, value)` pair in the database.
#[doc = doc_database!()] #[doc = doc_database!()]
fn first(&self) -> Result<(T::Key, T::Value), RuntimeError>; fn first(&self) -> DbResult<(T::Key, T::Value)>;
/// Returns the last `(key, value)` pair in the database. /// Returns the last `(key, value)` pair in the database.
#[doc = doc_database!()] #[doc = doc_database!()]
fn last(&self) -> Result<(T::Key, T::Value), RuntimeError>; fn last(&self) -> DbResult<(T::Key, T::Value)>;
/// Returns `true` if the database contains no `(key, value)` pairs. /// Returns `true` if the database contains no `(key, value)` pairs.
/// ///
/// # Errors /// # Errors
/// This can only return [`RuntimeError::Io`] on errors. /// This can only return [`RuntimeError::Io`] on errors.
fn is_empty(&self) -> Result<bool, RuntimeError>; fn is_empty(&self) -> DbResult<bool>;
} }
//---------------------------------------------------------------------------------------------------- DatabaseRw //---------------------------------------------------------------------------------------------------- DatabaseRw
@ -161,7 +159,7 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
#[doc = doc_database!()] #[doc = doc_database!()]
/// ///
    /// This will never return [`RuntimeError::KeyExists`]. /// This will never return [`RuntimeError::KeyExists`].
fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError>; fn put(&mut self, key: &T::Key, value: &T::Value) -> DbResult<()>;
/// Delete a key-value pair in the database. /// Delete a key-value pair in the database.
/// ///
@ -170,7 +168,7 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
#[doc = doc_database!()] #[doc = doc_database!()]
/// ///
    /// This will never return [`RuntimeError::KeyExists`]. /// This will never return [`RuntimeError::KeyExists`].
fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError>; fn delete(&mut self, key: &T::Key) -> DbResult<()>;
/// Delete and return a key-value pair in the database. /// Delete and return a key-value pair in the database.
/// ///
@ -178,7 +176,7 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
/// it will serialize the `T::Value` and return it. /// it will serialize the `T::Value` and return it.
/// ///
#[doc = doc_database!()] #[doc = doc_database!()]
fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError>; fn take(&mut self, key: &T::Key) -> DbResult<T::Value>;
/// Fetch the value, and apply a function to it - or delete the entry. /// Fetch the value, and apply a function to it - or delete the entry.
/// ///
@ -192,7 +190,7 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
/// - If `f` returns `None`, the entry will be [`DatabaseRw::delete`]d /// - If `f` returns `None`, the entry will be [`DatabaseRw::delete`]d
/// ///
#[doc = doc_database!()] #[doc = doc_database!()]
fn update<F>(&mut self, key: &T::Key, mut f: F) -> Result<(), RuntimeError> fn update<F>(&mut self, key: &T::Key, mut f: F) -> DbResult<()>
where where
F: FnMut(T::Value) -> Option<T::Value>, F: FnMut(T::Value) -> Option<T::Value>,
{ {
@ -207,10 +205,10 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
/// Removes and returns the first `(key, value)` pair in the database. /// Removes and returns the first `(key, value)` pair in the database.
/// ///
#[doc = doc_database!()] #[doc = doc_database!()]
fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError>; fn pop_first(&mut self) -> DbResult<(T::Key, T::Value)>;
/// Removes and returns the last `(key, value)` pair in the database. /// Removes and returns the last `(key, value)` pair in the database.
/// ///
#[doc = doc_database!()] #[doc = doc_database!()]
fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError>; fn pop_last(&mut self) -> DbResult<(T::Key, T::Value)>;
} }

View file

@ -6,7 +6,7 @@ use std::num::NonZeroUsize;
use crate::{ use crate::{
config::Config, config::Config,
database::{DatabaseIter, DatabaseRo, DatabaseRw}, database::{DatabaseIter, DatabaseRo, DatabaseRw},
error::{InitError, RuntimeError}, error::{DbResult, InitError},
resize::ResizeAlgorithm, resize::ResizeAlgorithm,
table::Table, table::Table,
transaction::{TxRo, TxRw}, transaction::{TxRo, TxRw},
@ -39,7 +39,7 @@ pub trait Env: Sized {
/// ///
/// # Invariant /// # Invariant
/// If this is `false`, that means this [`Env`] /// If this is `false`, that means this [`Env`]
/// must _never_ return a [`RuntimeError::ResizeNeeded`]. /// must _never_ return a [`crate::RuntimeError::ResizeNeeded`].
/// ///
/// If this is `true`, [`Env::resize_map`] & [`Env::current_map_size`] /// If this is `true`, [`Env::resize_map`] & [`Env::current_map_size`]
/// _must_ be re-implemented, as it just panics by default. /// _must_ be re-implemented, as it just panics by default.
@ -88,7 +88,7 @@ pub trait Env: Sized {
/// This will error if the database file could not be opened. /// This will error if the database file could not be opened.
/// ///
/// This is the only [`Env`] function that will return /// This is the only [`Env`] function that will return
/// an [`InitError`] instead of a [`RuntimeError`]. /// an [`InitError`] instead of a [`crate::RuntimeError`].
fn open(config: Config) -> Result<Self, InitError>; fn open(config: Config) -> Result<Self, InitError>;
/// Return the [`Config`] that this database was [`Env::open`]ed with. /// Return the [`Config`] that this database was [`Env::open`]ed with.
@ -107,7 +107,7 @@ pub trait Env: Sized {
/// ///
/// # Errors /// # Errors
/// If there is a synchronization error, this should return an error. /// If there is a synchronization error, this should return an error.
fn sync(&self) -> Result<(), RuntimeError>; fn sync(&self) -> DbResult<()>;
/// Resize the database's memory map to a /// Resize the database's memory map to a
/// new (bigger) size using a [`ResizeAlgorithm`]. /// new (bigger) size using a [`ResizeAlgorithm`].
@ -218,14 +218,14 @@ pub trait EnvInner<'env> {
/// Create a read-only transaction. /// Create a read-only transaction.
/// ///
/// # Errors /// # Errors
/// This will only return [`RuntimeError::Io`] if it errors. /// This will only return [`crate::RuntimeError::Io`] if it errors.
fn tx_ro(&self) -> Result<Self::Ro<'_>, RuntimeError>; fn tx_ro(&self) -> DbResult<Self::Ro<'_>>;
/// Create a read/write transaction. /// Create a read/write transaction.
/// ///
/// # Errors /// # Errors
/// This will only return [`RuntimeError::Io`] if it errors. /// This will only return [`crate::RuntimeError::Io`] if it errors.
fn tx_rw(&self) -> Result<Self::Rw<'_>, RuntimeError>; fn tx_rw(&self) -> DbResult<Self::Rw<'_>>;
/// Open a database in read-only mode. /// Open a database in read-only mode.
/// ///
@ -269,17 +269,17 @@ pub trait EnvInner<'env> {
/// ``` /// ```
/// ///
/// # Errors /// # Errors
/// This will only return [`RuntimeError::Io`] on normal errors. /// This will only return [`crate::RuntimeError::Io`] on normal errors.
/// ///
/// If the specified table is not created upon before this function is called, /// If the specified table is not created upon before this function is called,
/// this will return [`RuntimeError::TableNotFound`]. /// this will return [`crate::RuntimeError::TableNotFound`].
/// ///
/// # Invariant /// # Invariant
#[doc = doc_heed_create_db_invariant!()] #[doc = doc_heed_create_db_invariant!()]
fn open_db_ro<T: Table>( fn open_db_ro<T: Table>(
&self, &self,
tx_ro: &Self::Ro<'_>, tx_ro: &Self::Ro<'_>,
) -> Result<impl DatabaseRo<T> + DatabaseIter<T>, RuntimeError>; ) -> DbResult<impl DatabaseRo<T> + DatabaseIter<T>>;
/// Open a database in read/write mode. /// Open a database in read/write mode.
/// ///
@ -293,25 +293,22 @@ pub trait EnvInner<'env> {
/// passed as a generic to this function. /// passed as a generic to this function.
/// ///
/// # Errors /// # Errors
/// This will only return [`RuntimeError::Io`] on errors. /// This will only return [`crate::RuntimeError::Io`] on errors.
/// ///
/// # Invariant /// # Invariant
#[doc = doc_heed_create_db_invariant!()] #[doc = doc_heed_create_db_invariant!()]
fn open_db_rw<T: Table>( fn open_db_rw<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> DbResult<impl DatabaseRw<T>>;
&self,
tx_rw: &Self::Rw<'_>,
) -> Result<impl DatabaseRw<T>, RuntimeError>;
/// Create a database table. /// Create a database table.
/// ///
/// This will create the database [`Table`] passed as a generic to this function. /// This will create the database [`Table`] passed as a generic to this function.
/// ///
/// # Errors /// # Errors
/// This will only return [`RuntimeError::Io`] on errors. /// This will only return [`crate::RuntimeError::Io`] on errors.
/// ///
/// # Invariant /// # Invariant
#[doc = doc_heed_create_db_invariant!()] #[doc = doc_heed_create_db_invariant!()]
fn create_db<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> Result<(), RuntimeError>; fn create_db<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> DbResult<()>;
/// Clear all `(key, value)`'s from a database table. /// Clear all `(key, value)`'s from a database table.
/// ///
@ -322,9 +319,9 @@ pub trait EnvInner<'env> {
/// function's effects can be aborted using [`TxRw::abort`]. /// function's effects can be aborted using [`TxRw::abort`].
/// ///
/// # Errors /// # Errors
/// This will return [`RuntimeError::Io`] on normal errors. /// This will return [`crate::RuntimeError::Io`] on normal errors.
/// ///
/// If the specified table is not created upon before this function is called, /// If the specified table is not created upon before this function is called,
/// this will return [`RuntimeError::TableNotFound`]. /// this will return [`crate::RuntimeError::TableNotFound`].
fn clear_db<T: Table>(&self, tx_rw: &mut Self::Rw<'_>) -> Result<(), RuntimeError>; fn clear_db<T: Table>(&self, tx_rw: &mut Self::Rw<'_>) -> DbResult<()>;
} }
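
`open_db_ro`/`open_db_rw` above return `impl DatabaseRo<T>` / `impl DatabaseRw<T>` from trait methods, so each backend keeps its concrete handle type private. A stand-in sketch of that return-position `impl Trait` shape (illustrative names, not the crate's API):

// Minimal stand-ins for a database handle trait and an environment trait.
trait Database {
    fn len(&self) -> u64;
}

struct VecDb(Vec<u8>);

impl Database for VecDb {
    fn len(&self) -> u64 {
        self.0.len() as u64
    }
}

trait EnvInner {
    // Each implementation picks its own concrete handle type;
    // callers only see `impl Database`.
    fn open_db_ro(&self) -> Result<impl Database, String>;
}

struct MemEnv(Vec<u8>);

impl EnvInner for MemEnv {
    fn open_db_ro(&self) -> Result<impl Database, String> {
        Ok(VecDb(self.0.clone()))
    }
}

fn main() {
    let env = MemEnv(vec![1, 2, 3]);
    let db = env.open_db_ro().unwrap();
    assert_eq!(db.len(), 3);
}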

View file

@ -7,6 +7,9 @@ use std::fmt::Debug;
/// Alias for a thread-safe boxed error. /// Alias for a thread-safe boxed error.
type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>; type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>;
/// [`Result`] with [`RuntimeError`] as the error.
pub type DbResult<T> = Result<T, RuntimeError>;
//---------------------------------------------------------------------------------------------------- InitError //---------------------------------------------------------------------------------------------------- InitError
/// Errors that occur during ([`Env::open`](crate::env::Env::open)). /// Errors that occur during ([`Env::open`](crate::env::Env::open)).
/// ///
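
The new `DbResult` alias is what the rest of this diff migrates signatures to. A minimal sketch of the pattern with a stand-in error enum (not the crate's real `RuntimeError`):

// Stand-in error type for illustration only.
#[derive(Debug)]
enum RuntimeError {
    KeyNotFound,
}

/// `Result` with `RuntimeError` as the error type.
type DbResult<T> = Result<T, RuntimeError>;

// The same signature written with and without the alias:
fn first_long() -> Result<(u64, Vec<u8>), RuntimeError> {
    Err(RuntimeError::KeyNotFound)
}

fn first_short() -> DbResult<(u64, Vec<u8>)> {
    Err(RuntimeError::KeyNotFound)
}

fn main() {
    assert!(matches!(first_long(), Err(RuntimeError::KeyNotFound)));
    assert!(matches!(first_short(), Err(RuntimeError::KeyNotFound)));
}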

View file

@ -50,7 +50,7 @@ pub use constants::{
}; };
pub use database::{DatabaseIter, DatabaseRo, DatabaseRw}; pub use database::{DatabaseIter, DatabaseRo, DatabaseRw};
pub use env::{Env, EnvInner}; pub use env::{Env, EnvInner};
pub use error::{InitError, RuntimeError}; pub use error::{DbResult, InitError, RuntimeError};
pub use key::{Key, KeyCompare}; pub use key::{Key, KeyCompare};
pub use storable::{Storable, StorableBytes, StorableStr, StorableVec}; pub use storable::{Storable, StorableBytes, StorableStr, StorableVec};
pub use table::Table; pub use table::Table;

View file

@ -1,7 +1,6 @@
//! Database table abstraction; `trait Table`. //! Database table abstraction; `trait Table`.
//---------------------------------------------------------------------------------------------------- Import //---------------------------------------------------------------------------------------------------- Import
use crate::{key::Key, storable::Storable}; use crate::{key::Key, storable::Storable};
//---------------------------------------------------------------------------------------------------- Table //---------------------------------------------------------------------------------------------------- Table

View file

@ -211,7 +211,7 @@ macro_rules! define_tables {
/// ///
/// # Errors /// # Errors
/// This returns errors on regular database errors. /// This returns errors on regular database errors.
fn all_tables_empty(&self) -> Result<bool, $crate::RuntimeError>; fn all_tables_empty(&self) -> $crate::DbResult<bool>;
} }
/// Object containing all opened [`Table`](cuprate_database::Table)s in read + iter mode. /// Object containing all opened [`Table`](cuprate_database::Table)s in read + iter mode.
@ -293,7 +293,7 @@ macro_rules! define_tables {
} }
)* )*
fn all_tables_empty(&self) -> Result<bool, $crate::RuntimeError> { fn all_tables_empty(&self) -> $crate::DbResult<bool> {
$( $(
if !$crate::DatabaseRo::is_empty(&self.$index)? { if !$crate::DatabaseRo::is_empty(&self.$index)? {
return Ok(false); return Ok(false);
@ -369,7 +369,7 @@ macro_rules! define_tables {
/// ///
/// # Errors /// # Errors
/// This will only return [`cuprate_database::RuntimeError::Io`] if it errors. /// This will only return [`cuprate_database::RuntimeError::Io`] if it errors.
fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result<impl TablesIter, $crate::RuntimeError>; fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> $crate::DbResult<impl TablesIter>;
/// Open all tables in read-write mode. /// Open all tables in read-write mode.
/// ///
@ -378,7 +378,7 @@ macro_rules! define_tables {
/// ///
/// # Errors /// # Errors
/// This will only return [`cuprate_database::RuntimeError::Io`] on errors. /// This will only return [`cuprate_database::RuntimeError::Io`] on errors.
fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> Result<impl TablesMut, $crate::RuntimeError>; fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult<impl TablesMut>;
/// Create all database tables. /// Create all database tables.
/// ///
@ -386,7 +386,7 @@ macro_rules! define_tables {
/// ///
/// # Errors /// # Errors
/// This will only return [`cuprate_database::RuntimeError::Io`] on errors. /// This will only return [`cuprate_database::RuntimeError::Io`] on errors.
fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> Result<(), $crate::RuntimeError>; fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult<()>;
} }
impl<'env, Ei> OpenTables<'env> for Ei impl<'env, Ei> OpenTables<'env> for Ei
@ -396,19 +396,19 @@ macro_rules! define_tables {
type Ro<'tx> = <Ei as $crate::EnvInner<'env>>::Ro<'tx>; type Ro<'tx> = <Ei as $crate::EnvInner<'env>>::Ro<'tx>;
type Rw<'tx> = <Ei as $crate::EnvInner<'env>>::Rw<'tx>; type Rw<'tx> = <Ei as $crate::EnvInner<'env>>::Rw<'tx>;
fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result<impl TablesIter, $crate::RuntimeError> { fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> $crate::DbResult<impl TablesIter> {
Ok(($( Ok(($(
Self::open_db_ro::<[<$table:camel>]>(self, tx_ro)?, Self::open_db_ro::<[<$table:camel>]>(self, tx_ro)?,
)*)) )*))
} }
fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> Result<impl TablesMut, $crate::RuntimeError> { fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult<impl TablesMut> {
Ok(($( Ok(($(
Self::open_db_rw::<[<$table:camel>]>(self, tx_rw)?, Self::open_db_rw::<[<$table:camel>]>(self, tx_rw)?,
)*)) )*))
} }
fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> Result<(), $crate::RuntimeError> { fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult<()> {
let result = Ok(($( let result = Ok(($(
Self::create_db::<[<$table:camel>]>(self, tx_rw), Self::create_db::<[<$table:camel>]>(self, tx_rw),
)*)); )*));
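
The generated `open_tables`/`open_tables_mut` above amount to opening every table in order, failing fast with `?`, and returning the handles as one tuple. A stand-in sketch of that shape (illustrative types only, not the macro expansion itself):

type DbResult<T> = Result<T, String>;

// Pretend handle for table number N.
struct Table<const N: usize>;

fn open_table<const N: usize>() -> DbResult<Table<N>> {
    Ok(Table::<N>)
}

// Open every table, propagate the first error, return all handles as one tuple.
fn open_tables() -> DbResult<(Table<0>, Table<1>, Table<2>)> {
    Ok((open_table::<0>()?, open_table::<1>()?, open_table::<2>()?))
}

fn main() {
    assert!(open_tables().is_ok());
}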

View file

@ -1,7 +1,7 @@
//! Database transaction abstraction; `trait TxRo`, `trait TxRw`. //! Database transaction abstraction; `trait TxRo`, `trait TxRw`.
//---------------------------------------------------------------------------------------------------- Import //---------------------------------------------------------------------------------------------------- Import
use crate::error::RuntimeError; use crate::error::DbResult;
//---------------------------------------------------------------------------------------------------- TxRo //---------------------------------------------------------------------------------------------------- TxRo
/// Read-only database transaction. /// Read-only database transaction.
@ -16,7 +16,7 @@ pub trait TxRo<'tx> {
/// ///
/// # Errors /// # Errors
/// This operation will always return `Ok(())` with the `redb` backend. /// This operation will always return `Ok(())` with the `redb` backend.
fn commit(self) -> Result<(), RuntimeError>; fn commit(self) -> DbResult<()>;
} }
//---------------------------------------------------------------------------------------------------- TxRw //---------------------------------------------------------------------------------------------------- TxRw
@ -32,12 +32,12 @@ pub trait TxRw<'tx> {
/// This operation will always return `Ok(())` with the `redb` backend. /// This operation will always return `Ok(())` with the `redb` backend.
/// ///
/// If `Env::MANUAL_RESIZE == true`, /// If `Env::MANUAL_RESIZE == true`,
/// [`RuntimeError::ResizeNeeded`] may be returned. /// [`crate::RuntimeError::ResizeNeeded`] may be returned.
fn commit(self) -> Result<(), RuntimeError>; fn commit(self) -> DbResult<()>;
/// Abort the transaction, erasing any writes that have occurred. /// Abort the transaction, erasing any writes that have occurred.
/// ///
/// # Errors /// # Errors
/// This operation will always return `Ok(())` with the `heed` backend. /// This operation will always return `Ok(())` with the `heed` backend.
fn abort(self) -> Result<(), RuntimeError>; fn abort(self) -> DbResult<()>;
} }

View file

@ -7,7 +7,7 @@ use futures::channel::oneshot;
use rayon::ThreadPool; use rayon::ThreadPool;
use tower::Service; use tower::Service;
use cuprate_database::{ConcreteEnv, RuntimeError}; use cuprate_database::{ConcreteEnv, DbResult, RuntimeError};
use cuprate_helper::asynch::InfallibleOneshotReceiver; use cuprate_helper::asynch::InfallibleOneshotReceiver;
/// The [`rayon::ThreadPool`] service. /// The [`rayon::ThreadPool`] service.
@ -24,7 +24,7 @@ pub struct DatabaseReadService<Req, Res> {
pool: Arc<ThreadPool>, pool: Arc<ThreadPool>,
/// The function used to handle request. /// The function used to handle request.
inner_handler: Arc<dyn Fn(Req) -> Result<Res, RuntimeError> + Send + Sync + 'static>, inner_handler: Arc<dyn Fn(Req) -> DbResult<Res> + Send + Sync + 'static>,
} }
// Deriving [`Clone`] means `Req` & `Res` need to be `Clone`, even if they aren't. // Deriving [`Clone`] means `Req` & `Res` need to be `Clone`, even if they aren't.
@ -51,7 +51,7 @@ where
pub fn new( pub fn new(
env: Arc<ConcreteEnv>, env: Arc<ConcreteEnv>,
pool: Arc<ThreadPool>, pool: Arc<ThreadPool>,
req_handler: impl Fn(&ConcreteEnv, Req) -> Result<Res, RuntimeError> + Send + Sync + 'static, req_handler: impl Fn(&ConcreteEnv, Req) -> DbResult<Res> + Send + Sync + 'static,
) -> Self { ) -> Self {
let inner_handler = Arc::new(move |req| req_handler(&env, req)); let inner_handler = Arc::new(move |req| req_handler(&env, req));
@ -69,9 +69,9 @@ where
{ {
type Response = Res; type Response = Res;
type Error = RuntimeError; type Error = RuntimeError;
type Future = InfallibleOneshotReceiver<Result<Self::Response, Self::Error>>; type Future = InfallibleOneshotReceiver<DbResult<Self::Response>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<DbResult<()>> {
Poll::Ready(Ok(())) Poll::Ready(Ok(()))
} }
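
The read service above stores its handler type-erased: the environment is captured once and every request is dispatched through an `Arc<dyn Fn(Req) -> DbResult<Res>>`. A sketch of that erasing step with stand-in types (not the crate's `DatabaseReadService`):

use std::sync::Arc;

type DbResult<T> = Result<T, String>;

// Stand-in for the real environment type.
struct ConcreteEnv;

// Capture the environment once and erase the handler's concrete type,
// as the `inner_handler` construction in the hunk above does.
fn erase_handler<Req, Res>(
    env: Arc<ConcreteEnv>,
    req_handler: impl Fn(&ConcreteEnv, Req) -> DbResult<Res> + Send + Sync + 'static,
) -> Arc<dyn Fn(Req) -> DbResult<Res> + Send + Sync> {
    Arc::new(move |req| req_handler(&env, req))
}

fn main() {
    let handler = erase_handler(Arc::new(ConcreteEnv), |_env, req: u8| Ok(u32::from(req) * 2));
    // Call through a plain reference to the erased closure.
    let f: &(dyn Fn(u8) -> DbResult<u32> + Send + Sync) = handler.as_ref();
    assert_eq!(f(3), Ok(6));
}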

View file

@ -6,7 +6,7 @@ use std::{
use futures::channel::oneshot; use futures::channel::oneshot;
use cuprate_database::{ConcreteEnv, Env, RuntimeError}; use cuprate_database::{ConcreteEnv, DbResult, Env, RuntimeError};
use cuprate_helper::asynch::InfallibleOneshotReceiver; use cuprate_helper::asynch::InfallibleOneshotReceiver;
//---------------------------------------------------------------------------------------------------- Constants //---------------------------------------------------------------------------------------------------- Constants
@ -26,8 +26,7 @@ pub struct DatabaseWriteHandle<Req, Res> {
/// Sender channel to the database write thread-pool. /// Sender channel to the database write thread-pool.
/// ///
/// We provide the response channel for the thread-pool. /// We provide the response channel for the thread-pool.
pub(super) sender: pub(super) sender: crossbeam::channel::Sender<(Req, oneshot::Sender<DbResult<Res>>)>,
crossbeam::channel::Sender<(Req, oneshot::Sender<Result<Res, RuntimeError>>)>,
} }
impl<Req, Res> Clone for DatabaseWriteHandle<Req, Res> { impl<Req, Res> Clone for DatabaseWriteHandle<Req, Res> {
@ -48,7 +47,7 @@ where
#[inline(never)] // Only called once. #[inline(never)] // Only called once.
pub fn init( pub fn init(
env: Arc<ConcreteEnv>, env: Arc<ConcreteEnv>,
inner_handler: impl Fn(&ConcreteEnv, &Req) -> Result<Res, RuntimeError> + Send + 'static, inner_handler: impl Fn(&ConcreteEnv, &Req) -> DbResult<Res> + Send + 'static,
) -> Self { ) -> Self {
// Initialize `Request/Response` channels. // Initialize `Request/Response` channels.
let (sender, receiver) = crossbeam::channel::unbounded(); let (sender, receiver) = crossbeam::channel::unbounded();
@ -66,10 +65,10 @@ where
impl<Req, Res> tower::Service<Req> for DatabaseWriteHandle<Req, Res> { impl<Req, Res> tower::Service<Req> for DatabaseWriteHandle<Req, Res> {
type Response = Res; type Response = Res;
type Error = RuntimeError; type Error = RuntimeError;
type Future = InfallibleOneshotReceiver<Result<Res, RuntimeError>>; type Future = InfallibleOneshotReceiver<DbResult<Res>>;
#[inline] #[inline]
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<DbResult<()>> {
Poll::Ready(Ok(())) Poll::Ready(Ok(()))
} }
@ -89,8 +88,8 @@ impl<Req, Res> tower::Service<Req> for DatabaseWriteHandle<Req, Res> {
/// The main function of the writer thread. /// The main function of the writer thread.
fn database_writer<Req, Res>( fn database_writer<Req, Res>(
env: &ConcreteEnv, env: &ConcreteEnv,
receiver: &crossbeam::channel::Receiver<(Req, oneshot::Sender<Result<Res, RuntimeError>>)>, receiver: &crossbeam::channel::Receiver<(Req, oneshot::Sender<DbResult<Res>>)>,
inner_handler: impl Fn(&ConcreteEnv, &Req) -> Result<Res, RuntimeError>, inner_handler: impl Fn(&ConcreteEnv, &Req) -> DbResult<Res>,
) where ) where
Req: Send + 'static, Req: Send + 'static,
Res: Debug + Send + 'static, Res: Debug + Send + 'static,
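
The write half funnels every request through a channel to a single writer thread, which answers each one on its own response channel. A simplified sketch using std channels in place of crossbeam and the futures oneshot (illustrative names, not the crate's types):

use std::sync::mpsc;
use std::thread;

type DbResult<T> = Result<T, String>;

// A request is paired with the sender used to deliver its response.
type Job<Req, Res> = (Req, mpsc::Sender<DbResult<Res>>);

fn spawn_writer<Req, Res>(
    handler: impl Fn(&Req) -> DbResult<Res> + Send + 'static,
) -> mpsc::Sender<Job<Req, Res>>
where
    Req: Send + 'static,
    Res: Send + 'static,
{
    let (tx, rx) = mpsc::channel::<Job<Req, Res>>();
    thread::spawn(move || {
        // All writes are serialized through this one thread,
        // each request getting its reply on its own channel.
        for (req, reply) in rx {
            let _ = reply.send(handler(&req));
        }
    });
    tx
}

fn main() {
    let writer = spawn_writer(|req: &u32| Ok(req + 1));
    let (reply_tx, reply_rx) = mpsc::channel();
    writer.send((41, reply_tx)).unwrap();
    assert_eq!(reply_rx.recv().unwrap(), Ok(42));
}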

View file

@ -1,7 +1,7 @@
//! Tx-pool key image ops. //! Tx-pool key image ops.
use monero_serai::transaction::Input; use monero_serai::transaction::Input;
use cuprate_database::{DatabaseRw, RuntimeError}; use cuprate_database::{DatabaseRw, DbResult};
use crate::{ops::TxPoolWriteError, tables::SpentKeyImages, types::TransactionHash}; use crate::{ops::TxPoolWriteError, tables::SpentKeyImages, types::TransactionHash};
@ -34,7 +34,7 @@ pub(super) fn add_tx_key_images(
pub(super) fn remove_tx_key_images( pub(super) fn remove_tx_key_images(
inputs: &[Input], inputs: &[Input],
kis_table: &mut impl DatabaseRw<SpentKeyImages>, kis_table: &mut impl DatabaseRw<SpentKeyImages>,
) -> Result<(), RuntimeError> { ) -> DbResult<()> {
for ki in inputs.iter().map(ki_from_input) { for ki in inputs.iter().map(ki_from_input) {
kis_table.delete(&ki)?; kis_table.delete(&ki)?;
} }
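
The loop above derives one key image per input and deletes it from the spent-key-images table. A stand-in sketch with a plain map standing in for the table (hypothetical types, not the real `SpentKeyImages` schema):

use std::collections::BTreeMap;

type KeyImage = [u8; 32];
type TxHash = [u8; 32];
type DbResult<T> = Result<T, String>;

// Delete every key image belonging to a transaction's inputs,
// erroring if one is unexpectedly absent.
fn remove_tx_key_images(
    key_images: &[KeyImage],
    kis_table: &mut BTreeMap<KeyImage, TxHash>,
) -> DbResult<()> {
    for ki in key_images {
        kis_table
            .remove(ki)
            .ok_or_else(|| "key image not found".to_string())?;
    }
    Ok(())
}

fn main() {
    let mut table = BTreeMap::from([([1u8; 32], [9u8; 32])]);
    assert!(remove_tx_key_images(&[[1u8; 32]], &mut table).is_ok());
    assert!(table.is_empty());
}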

Some files were not shown because too many files have changed in this diff.