diff --git a/Cargo.lock b/Cargo.lock
index d5d41b03..a3213304 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -29,6 +29,15 @@ dependencies = [
  "zerocopy",
 ]
 
+[[package]]
+name = "aho-corasick"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+dependencies = [
+ "memchr",
+]
+
 [[package]]
 name = "android-tzdata"
 version = "0.1.1"
@@ -44,6 +53,12 @@ dependencies = [
  "libc",
 ]
 
+[[package]]
+name = "anes"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
+
 [[package]]
 name = "anstyle"
 version = "1.0.10"
@@ -68,6 +83,16 @@ version = "0.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
 
+[[package]]
+name = "assert-json-diff"
+version = "2.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12"
+dependencies = [
+ "serde",
+ "serde_json",
+]
+
 [[package]]
 name = "async-stream"
 version = "0.3.6"
@@ -337,6 +362,12 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "cast"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
+
 [[package]]
 name = "cc"
 version = "1.1.31"
@@ -370,6 +401,33 @@ dependencies = [
  "windows-targets 0.52.6",
 ]
 
+[[package]]
+name = "ciborium"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
+dependencies = [
+ "ciborium-io",
+ "ciborium-ll",
+ "serde",
+]
+
+[[package]]
+name = "ciborium-io"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
+
+[[package]]
+name = "ciborium-ll"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
+dependencies = [
+ "ciborium-io",
+ "half",
+]
+
 [[package]]
 name = "clap"
 version = "4.5.20"
@@ -469,6 +527,42 @@ dependencies = [
  "cfg-if",
 ]
 
+[[package]]
+name = "criterion"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f"
+dependencies = [
+ "anes",
+ "cast",
+ "ciborium",
+ "clap",
+ "criterion-plot",
+ "is-terminal",
+ "itertools",
+ "num-traits",
+ "once_cell",
+ "oorandom",
+ "plotters",
+ "rayon",
+ "regex",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "tinytemplate",
+ "walkdir",
+]
+
+[[package]]
+name = "criterion-plot"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
+dependencies = [
+ "cast",
+ "itertools",
+]
+
 [[package]]
 name = "crossbeam"
 version = "0.8.4"
@@ -525,6 +619,12 @@ version = "0.8.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80"
 
+[[package]]
+name = "crunchy"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
+
 [[package]]
 name = "crypto-bigint"
 version = "0.5.5"
@@ -574,6 +674,30 @@ dependencies = [
  "tokio",
 ]
 
+[[package]]
+name = "cuprate-benchmark"
+version = "0.0.0"
+dependencies = [
+ "cfg-if",
+ "cuprate-benchmark-example",
+ "cuprate-benchmark-lib",
+ "serde",
+ "serde_json",
+ "tracing",
+ "tracing-subscriber",
+]
+
+[[package]]
+name = "cuprate-benchmark-example"
+version = "0.0.0"
+dependencies = [
+ "cuprate-benchmark-lib",
+]
+
+[[package]]
+name = "cuprate-benchmark-lib"
+version = "0.0.0"
+
 [[package]]
 name = "cuprate-blockchain"
 version = "0.0.0"
@@ -677,6 +801,25 @@ dependencies = [
 name = "cuprate-constants"
 version = "0.1.0"
 
+[[package]]
+name = "cuprate-criterion-example"
+version = "0.0.0"
+dependencies = [
+ "criterion",
+ "function_name",
+ "serde_json",
+]
+
+[[package]]
+name = "cuprate-criterion-json-rpc"
+version = "0.0.0"
+dependencies = [
+ "criterion",
+ "cuprate-json-rpc",
+ "function_name",
+ "serde_json",
+]
+
 [[package]]
 name = "cuprate-cryptonight"
 version = "0.1.0"
@@ -1015,6 +1158,17 @@ dependencies = [
  "thiserror",
 ]
 
+[[package]]
+name = "cuprate-zmq-types"
+version = "0.1.0"
+dependencies = [
+ "assert-json-diff",
+ "cuprate-types",
+ "hex",
+ "serde",
+ "serde_json",
+]
+
 [[package]]
 name = "cuprated"
 version = "0.0.1"
@@ -1296,6 +1450,21 @@ dependencies = [
  "percent-encoding",
 ]
 
+[[package]]
+name = "function_name"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1ab577a896d09940b5fe12ec5ae71f9d8211fff62c919c03a3750a9901e98a7"
+dependencies = [
+ "function_name-proc-macro",
+]
+
+[[package]]
+name = "function_name-proc-macro"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "673464e1e314dd67a0fd9544abc99e8eb28d0c7e3b69b033bcff9b2d00b87333"
+
 [[package]]
 name = "funty"
 version = "2.0.0"
@@ -1445,6 +1614,16 @@ dependencies = [
  "tracing",
 ]
 
+[[package]]
+name = "half"
+version = "2.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888"
+dependencies = [
+ "cfg-if",
+ "crunchy",
+]
+
 [[package]]
 name = "hashbrown"
 version = "0.14.5"
@@ -1672,6 +1851,26 @@ dependencies = [
  "hashbrown 0.15.0",
 ]
 
+[[package]]
+name = "is-terminal"
+version = "0.4.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b"
+dependencies = [
+ "hermit-abi",
+ "libc",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "itertools"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
+dependencies = [
+ "either",
+]
+
 [[package]]
 name = "itoa"
 version = "1.0.11"
@@ -1768,6 +1967,15 @@ version = "0.4.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
 
+[[package]]
+name = "matchers"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
+dependencies = [
+ "regex-automata 0.1.10",
+]
+
 [[package]]
 name = "matchit"
 version = "0.7.3"
@@ -2027,6 +2235,12 @@ version = "1.20.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"
 
+[[package]]
+name = "oorandom"
+version = "11.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
+
 [[package]]
 name = "openssl-probe"
 version = "0.1.5"
@@ -2164,6 +2378,34 @@ version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
 
+[[package]]
+name = "plotters"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3"
+dependencies = [
+ "num-traits",
+ "plotters-backend",
+ "plotters-svg",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "plotters-backend"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7"
+
+[[package]]
+name = "plotters-svg"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705"
+dependencies = [
+ "plotters-backend",
+]
+
 [[package]]
 name = "powerfmt"
 version = "0.2.0"
@@ -2244,7 +2486,7 @@ dependencies = [
  "rand",
  "rand_chacha",
  "rand_xorshift",
- "regex-syntax",
+ "regex-syntax 0.8.5",
  "rusty-fork",
  "tempfile",
  "unarray",
@@ -2410,6 +2652,44 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "regex"
+version = "1.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata 0.4.7",
+ "regex-syntax 0.8.5",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
+dependencies = [
+ "regex-syntax 0.6.29",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax 0.8.5",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
+
 [[package]]
 name = "regex-syntax"
 version = "0.8.5"
@@ -2537,6 +2817,15 @@ version = "1.0.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
 
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
 [[package]]
 name = "schannel"
 version = "0.1.26"
@@ -2912,9 +3201,9 @@ checksum = "a693d0c8cf16973fac5a93fbe47b8c6452e7097d4fcac49f3d7a18e39c76e62e"
 
 [[package]]
 name = "time"
-version = "0.3.36"
+version = "0.3.37"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"
+checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21"
 dependencies = [
  "deranged",
  "itoa",
@@ -2933,14 +3222,24 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
 
 [[package]]
 name = "time-macros"
-version = "0.2.18"
+version = "0.2.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"
+checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de"
 dependencies = [
  "num-conv",
  "time-core",
 ]
 
+[[package]]
+name = "tinytemplate"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
+dependencies = [
+ "serde",
+ "serde_json",
+]
+
 [[package]]
 name = "tinyvec"
 version = "1.8.0"
@@ -3188,10 +3487,14 @@ version = "0.3.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b"
 dependencies = [
+ "matchers",
  "nu-ansi-term",
+ "once_cell",
+ "regex",
  "sharded-slab",
  "smallvec",
  "thread_local",
+ "tracing",
  "tracing-core",
  "tracing-log",
 ]
@@ -3297,6 +3600,16 @@ dependencies = [
  "libc",
 ]
 
+[[package]]
+name = "walkdir"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
+dependencies = [
+ "same-file",
+ "winapi-util",
+]
+
 [[package]]
 name = "want"
 version = "0.3.1"
@@ -3367,6 +3680,16 @@ version = "0.2.95"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d"
 
+[[package]]
+name = "web-sys"
+version = "0.3.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
 [[package]]
 name = "webpki-roots"
 version = "0.26.6"
@@ -3392,6 +3715,15 @@ version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
 
+[[package]]
+name = "winapi-util"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b"
+dependencies = [
+ "windows-sys 0.52.0",
+]
+
 [[package]]
 name = "winapi-x86_64-pc-windows-gnu"
 version = "0.4.0"
diff --git a/Cargo.toml b/Cargo.toml
index 304669a2..87f5221d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,39 +1,61 @@
 [workspace]
 resolver = "2"
-
 members = [
+	# Binaries
 	"binaries/cuprated",
-	"constants",
+
+	# Benchmarks
+	"benches/benchmark/bin",
+	"benches/benchmark/lib",
+	"benches/benchmark/example",
+	"benches/criterion/example",
+	"benches/criterion/cuprate-json-rpc",
+
+	# Consensus
 	"consensus",
 	"consensus/context",
 	"consensus/fast-sync",
 	"consensus/rules",
-	"cryptonight",
-	"helper",
+
+	# Net
 	"net/epee-encoding",
 	"net/fixed-bytes",
 	"net/levin",
 	"net/wire",
+
+	# P2P
 	"p2p/p2p",
 	"p2p/p2p-core",
 	"p2p/bucket",
 	"p2p/dandelion-tower",
 	"p2p/async-buffer",
 	"p2p/address-book",
+
+	# Storage
 	"storage/blockchain",
 	"storage/service",
 	"storage/txpool",
 	"storage/database",
-	"pruning",
-	"test-utils",
-	"types",
+
+	# RPC
 	"rpc/json-rpc",
 	"rpc/types",
 	"rpc/interface",
+
+	# ZMQ
+	"zmq/types",
+
+	# Misc
+	"constants",
+	"cryptonight",
+	"helper",
+	"pruning",
+	"test-utils",
+	"types",
 ]
 
 [profile.release]
-panic         = "abort" 
+panic         = "abort"
 lto           = true   # Build with LTO
 strip         = "none" # Keep panic stack traces
 codegen-units = 1      # Optimize for binary speed over compile times
@@ -54,33 +76,36 @@ opt-level = 3
 
 [workspace.dependencies]
 # Cuprate members
-cuprate-fast-sync           = { path = "consensus/fast-sync", default-features = false }
-cuprate-consensus-rules     = { path = "consensus/rules",     default-features = false }
-cuprate-constants           = { path = "constants",           default-features = false }
-cuprate-consensus           = { path = "consensus",           default-features = false }
-cuprate-consensus-context   = { path = "consensus/context",   default-features = false }
-cuprate-cryptonight         = { path = "cryptonight",         default-features = false }
-cuprate-helper              = { path = "helper",              default-features = false }
-cuprate-epee-encoding       = { path = "net/epee-encoding",   default-features = false }
-cuprate-fixed-bytes         = { path = "net/fixed-bytes",     default-features = false }
-cuprate-levin               = { path = "net/levin",           default-features = false }
-cuprate-wire                = { path = "net/wire",            default-features = false }
-cuprate-p2p                 = { path = "p2p/p2p",             default-features = false }
-cuprate-p2p-core            = { path = "p2p/p2p-core",        default-features = false }
-cuprate-p2p-bucket          = { path = "p2p/p2p-bucket",      default-features = false }
-cuprate-dandelion-tower     = { path = "p2p/dandelion-tower", default-features = false }
-cuprate-async-buffer        = { path = "p2p/async-buffer",    default-features = false }
-cuprate-address-book        = { path = "p2p/address-book",    default-features = false }
-cuprate-blockchain          = { path = "storage/blockchain",  default-features = false }
-cuprate-database            = { path = "storage/database",    default-features = false }
-cuprate-database-service    = { path = "storage/service",     default-features = false }
-cuprate-txpool              = { path = "storage/txpool",      default-features = false }
-cuprate-pruning             = { path = "pruning",             default-features = false }
-cuprate-test-utils          = { path = "test-utils",          default-features = false }
-cuprate-types               = { path = "types",               default-features = false }
-cuprate-json-rpc            = { path = "rpc/json-rpc",        default-features = false }
-cuprate-rpc-types           = { path = "rpc/types",           default-features = false }
-cuprate-rpc-interface       = { path = "rpc/interface",       default-features = false }
+cuprate-benchmark-lib     = { path = "benches/benchmark/lib",     default-features = false }
+cuprate-benchmark-example = { path = "benches/benchmark/example", default-features = false }
+cuprate-fast-sync         = { path = "consensus/fast-sync",       default-features = false }
+cuprate-consensus-rules   = { path = "consensus/rules",           default-features = false }
+cuprate-constants         = { path = "constants",                 default-features = false }
+cuprate-consensus         = { path = "consensus",                 default-features = false }
+cuprate-consensus-context = { path = "consensus/context",         default-features = false }
+cuprate-cryptonight       = { path = "cryptonight",               default-features = false }
+cuprate-helper            = { path = "helper",                    default-features = false }
+cuprate-epee-encoding     = { path = "net/epee-encoding",         default-features = false }
+cuprate-fixed-bytes       = { path = "net/fixed-bytes",           default-features = false }
+cuprate-levin             = { path = "net/levin",                 default-features = false }
+cuprate-wire              = { path = "net/wire",                  default-features = false }
+cuprate-p2p               = { path = "p2p/p2p",                   default-features = false }
+cuprate-p2p-core          = { path = "p2p/p2p-core",              default-features = false }
+cuprate-p2p-bucket        = { path = "p2p/p2p-bucket",            default-features = false }
+cuprate-dandelion-tower   = { path = "p2p/dandelion-tower",       default-features = false }
+cuprate-async-buffer      = { path = "p2p/async-buffer",          default-features = false }
+cuprate-address-book      = { path = "p2p/address-book",          default-features = false }
+cuprate-blockchain        = { path = "storage/blockchain",        default-features = false }
+cuprate-database          = { path = "storage/database",          default-features = false }
+cuprate-database-service  = { path = "storage/service",           default-features = false }
+cuprate-txpool            = { path = "storage/txpool",            default-features = false }
+cuprate-pruning           = { path = "pruning",                   default-features = false }
+cuprate-test-utils        = { path = "test-utils",                default-features = false }
+cuprate-types             = { path = "types",                     default-features = false }
+cuprate-json-rpc          = { path = "rpc/json-rpc",              default-features = false }
+cuprate-rpc-types         = { path = "rpc/types",                 default-features = false }
+cuprate-rpc-interface     = { path = "rpc/interface",             default-features = false }
+cuprate-zmq-types         = { path = "zmq/types",                 default-features = false }
 
 # External dependencies
 anyhow                = { version = "1", default-features = false }
@@ -127,6 +152,8 @@ tracing-subscriber    = { version = "0.3", default-features = false }
 tracing               = { version = "0.1", default-features = false }
 
 ## workspace.dev-dependencies
+criterion                 = { version = "0.5" }
+function_name             = { version = "0.3" }
 monero-rpc                = { git = "https://github.com/Cuprate/serai.git", rev = "e6fdef6" }
 monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "e6fdef6" }
 tempfile                  = { version = "3" }
@@ -256,6 +283,9 @@ rest_pat_in_fully_bound_structs = "deny"
 redundant_type_annotations = "deny"
 infinite_loop = "deny"
 zero_repeat_side_effects = "deny"
+non_zero_suggestions = "deny"
+manual_is_power_of_two = "deny"
+used_underscore_items = "deny"
 
 # Warm
 cast_possible_truncation = "deny"
diff --git a/benches/README.md b/benches/README.md
index 46409041..af6bb932 100644
--- a/benches/README.md
+++ b/benches/README.md
@@ -1 +1,5 @@
-# TODO
+# Benches
+This directory contains Cuprate's benchmarks and benchmarking utilities.
+
+See the [`Benchmarking` section in the Architecture book](https://architecture.cuprate.org/benchmarking/intro.html)
+to see how to create and run these benchmarks.
\ No newline at end of file
diff --git a/benches/benchmark/bin/Cargo.toml b/benches/benchmark/bin/Cargo.toml
new file mode 100644
index 00000000..36d0b2c7
--- /dev/null
+++ b/benches/benchmark/bin/Cargo.toml
@@ -0,0 +1,43 @@
+[package]
+name        = "cuprate-benchmark"
+version     = "0.0.0"
+edition     = "2021"
+description = "Cuprate's benchmarking binary"
+license     = "MIT"
+authors     = ["hinto-janai"]
+repository  = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin"
+keywords    = ["cuprate", "benchmarking", "binary"]
+
+[features]
+# All new benchmarks should be added here!
+all = ["example"]
+
+# Non-benchmark features.
+default = []
+json    = []
+trace   = []
+debug   = []
+warn    = []
+info    = []
+error   = []
+
+# Benchmark features.
+# New benchmarks should be added here!
+example = [
+	"dep:cuprate-benchmark-example"
+]
+
+[dependencies]
+cuprate-benchmark-lib     = { workspace = true }
+cuprate-benchmark-example = { workspace = true, optional = true }
+
+cfg-if             = { workspace = true }
+serde              = { workspace = true, features = ["derive"] }
+serde_json         = { workspace = true, features = ["std"] }
+tracing            = { workspace = true, features = ["std", "attributes"] }
+tracing-subscriber = { workspace = true, features = ["fmt", "std", "env-filter"] }
+
+[dev-dependencies]
+
+[lints]
+workspace = true
\ No newline at end of file
diff --git a/benches/benchmark/bin/README.md b/benches/benchmark/bin/README.md
new file mode 100644
index 00000000..ad0700fc
--- /dev/null
+++ b/benches/benchmark/bin/README.md
@@ -0,0 +1,27 @@
+## `cuprate-benchmark`
+This crate links all benchmarks together into a single binary that can be run as: `cuprate-benchmark`.
+
+`cuprate-benchmark` will run all enabled benchmarks sequentially and print data at the end.
+
+## Benchmarks
+Benchmarks are opt-in and enabled via features.
+
+| Feature  | Enables which benchmark crate? |
+|----------|--------------------------------|
+| example  | cuprate-benchmark-example      |
+| database | cuprate-benchmark-database     |
+
+## Features
+These are features that aren't for enabling benchmarks, but rather for other things.
+
+Since `cuprate-benchmark` is built right before it is run,
+these features almost act like command line arguments.
+
+| Features | Does what |
+|----------|-----------|
+| json     | Prints JSON timings instead of a markdown table
+| trace    | Use the `trace` log-level
+| debug    | Use the `debug` log-level
+| warn     | Use the `warn` log-level
+| info     | Use the `info` log-level (default)
+| error    | Use the `error` log-level
\ No newline at end of file
diff --git a/benches/benchmark/bin/src/log.rs b/benches/benchmark/bin/src/log.rs
new file mode 100644
index 00000000..455f1309
--- /dev/null
+++ b/benches/benchmark/bin/src/log.rs
@@ -0,0 +1,29 @@
+use cfg_if::cfg_if;
+use tracing::{info, instrument, Level};
+use tracing_subscriber::FmtSubscriber;
+
+/// Initializes the `tracing` logger.
+#[instrument]
+pub(crate) fn init_logger() {
+    const LOG_LEVEL: Level = {
+        cfg_if! {
+            if #[cfg(feature = "trace")] {
+                Level::TRACE
+            } else if #[cfg(feature = "debug")] {
+                Level::DEBUG
+            } else if #[cfg(feature = "warn")] {
+                Level::WARN
+            } else if #[cfg(feature = "info")] {
+                Level::INFO
+            } else if #[cfg(feature = "error")] {
+                Level::ERROR
+            } else {
+                Level::INFO
+            }
+        }
+    };
+
+    FmtSubscriber::builder().with_max_level(LOG_LEVEL).init();
+
+    info!("Log level: {LOG_LEVEL}");
+}
diff --git a/benches/benchmark/bin/src/main.rs b/benches/benchmark/bin/src/main.rs
new file mode 100644
index 00000000..02c480a0
--- /dev/null
+++ b/benches/benchmark/bin/src/main.rs
@@ -0,0 +1,49 @@
+#![doc = include_str!("../README.md")]
+#![allow(
+    unused_crate_dependencies,
+    reason = "this crate imports many potentially unused dependencies"
+)]
+
+mod log;
+mod print;
+mod run;
+mod timings;
+
+use cfg_if::cfg_if;
+
+/// What `main()` does:
+/// 1. Run all enabled benchmarks
+/// 2. Record benchmark timings
+/// 3. Print timing data
+///
+/// To add a new benchmark to be run here:
+/// 1. Copy + paste a `cfg_if` block
+/// 2. Change it to your benchmark's feature flag
+/// 3. Change it to your benchmark's type
+#[allow(
+    clippy::allow_attributes,
+    unused_variables,
+    unused_mut,
+    unreachable_code,
+    reason = "clippy does not account for all cfg()s"
+)]
+fn main() {
+    log::init_logger();
+
+    let mut timings = timings::Timings::new();
+
+    cfg_if! {
+        if #[cfg(not(any(feature = "example")))] {
+            println!("No feature specified. Use `--features $BENCHMARK_FEATURE` when building.");
+            return;
+        }
+    }
+
+    cfg_if! {
+        if #[cfg(feature = "example")] {
+            run::run_benchmark::<cuprate_benchmark_example::Example>(&mut timings);
+        }
+    }
+
+    print::print_timings(&timings);
+}
diff --git a/benches/benchmark/bin/src/print.rs b/benches/benchmark/bin/src/print.rs
new file mode 100644
index 00000000..36a5f05a
--- /dev/null
+++ b/benches/benchmark/bin/src/print.rs
@@ -0,0 +1,38 @@
+#![expect(dead_code, reason = "code hidden behind feature flags")]
+
+use cfg_if::cfg_if;
+
+use crate::timings::Timings;
+
+/// Print the final markdown table of benchmark timings.
+pub(crate) fn print_timings(timings: &Timings) {
+    println!("\nFinished all benchmarks, printing results:");
+
+    cfg_if! {
+        if #[cfg(feature = "json")] {
+            print_timings_json(timings);
+        } else {
+            print_timings_markdown(timings);
+        }
+    }
+}
+
+/// Default timing formatting.
+pub(crate) fn print_timings_markdown(timings: &Timings) {
+    let mut s = String::new();
+    s.push_str("| Benchmark                          | Time (seconds) |\n");
+    s.push_str("|------------------------------------|----------------|");
+
+    #[expect(clippy::iter_over_hash_type)]
+    for (k, v) in timings {
+        s += &format!("\n| {k:<34} | {v:<14} |");
+    }
+
+    println!("\n{s}");
+}
+
+/// Enabled via `json` feature.
+pub(crate) fn print_timings_json(timings: &Timings) {
+    let json = serde_json::to_string_pretty(timings).unwrap();
+    println!("\n{json}");
+}
diff --git a/benches/benchmark/bin/src/run.rs b/benches/benchmark/bin/src/run.rs
new file mode 100644
index 00000000..05a220f9
--- /dev/null
+++ b/benches/benchmark/bin/src/run.rs
@@ -0,0 +1,36 @@
+use tracing::{info, instrument, trace};
+
+use cuprate_benchmark_lib::Benchmark;
+
+use crate::timings::Timings;
+
+/// Run a [`Benchmark`] and record its timing.
+#[instrument(skip_all)]
+pub(crate) fn run_benchmark<B: Benchmark>(timings: &mut Timings) {
+    // Get the benchmark name.
+    let name = B::name();
+    trace!("Running benchmark: {name}");
+
+    // Setup the benchmark input.
+    let input = B::SETUP();
+
+    // Sleep before running the benchmark.
+    trace!("Pre-benchmark, sleeping for: {:?}", B::PRE_SLEEP_DURATION);
+    std::thread::sleep(B::PRE_SLEEP_DURATION);
+
+    // Run/time the benchmark.
+    let now = std::time::Instant::now();
+    B::MAIN(input);
+    let time = now.elapsed().as_secs_f32();
+
+    // Record and print the benchmark timing; duplicate names would clobber entries.
+    info!("{name:>34} ... {time}");
+    assert!(
+        timings.insert(name, time).is_none(),
+        "There were 2 benchmarks with the same name - this collides the final output: {name}",
+    );
+
+    // Sleep for a cooldown period after the benchmark run.
+    trace!("Post-benchmark, sleeping for: {:?}", B::POST_SLEEP_DURATION);
+    std::thread::sleep(B::POST_SLEEP_DURATION);
+}
diff --git a/benches/benchmark/bin/src/timings.rs b/benches/benchmark/bin/src/timings.rs
new file mode 100644
index 00000000..34a07952
--- /dev/null
+++ b/benches/benchmark/bin/src/timings.rs
@@ -0,0 +1,5 @@
+/// Benchmark timing data.
+///
+/// - Key = benchmark name
+/// - Value = benchmark time in seconds
+pub(crate) type Timings = std::collections::HashMap<&'static str, f32>;
diff --git a/benches/benchmark/example/Cargo.toml b/benches/benchmark/example/Cargo.toml
new file mode 100644
index 00000000..5728bcd4
--- /dev/null
+++ b/benches/benchmark/example/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name        = "cuprate-benchmark-example"
+version     = "0.0.0"
+edition     = "2021"
+description = "Example showcasing Cuprate's benchmarking harness"
+license     = "MIT"
+authors     = ["hinto-janai"]
+repository  = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/example"
+keywords    = ["cuprate", "benchmarking", "example"]
+
+[dependencies]
+cuprate-benchmark-lib = { path = "../lib" }
+
+[dev-dependencies]
+
+[lints]
+workspace = true
\ No newline at end of file
diff --git a/benches/benchmark/example/README.md b/benches/benchmark/example/README.md
new file mode 100644
index 00000000..be6b7167
--- /dev/null
+++ b/benches/benchmark/example/README.md
@@ -0,0 +1,3 @@
+## `cuprate-benchmark-example`
+This crate contains a short example benchmark that shows how to implement and use
+`cuprate-benchmark-lib` so that it can be run by `cuprate-benchmark`.
\ No newline at end of file
diff --git a/benches/benchmark/example/src/lib.rs b/benches/benchmark/example/src/lib.rs
new file mode 100644
index 00000000..cc704a7f
--- /dev/null
+++ b/benches/benchmark/example/src/lib.rs
@@ -0,0 +1,42 @@
+#![doc = include_str!("../README.md")]
+
+use std::hint::black_box;
+
+use cuprate_benchmark_lib::Benchmark;
+
+/// Marker struct that implements [`Benchmark`]
+pub struct Example;
+
+/// The input to our benchmark function.
+pub type ExampleBenchmarkInput = u64;
+
+/// The setup function that creates the input.
+pub const fn example_benchmark_setup() -> ExampleBenchmarkInput {
+    1
+}
+
+/// The main benchmarking function.
+#[expect(clippy::unit_arg)]
+pub fn example_benchmark_main(input: ExampleBenchmarkInput) {
+    // In this case, we're simply benchmarking the
+    // performance of simple arithmetic on the input data.
+
+    fn math(input: ExampleBenchmarkInput, number: u64) {
+        let x = input;
+        let x = black_box(x * number);
+        let x = black_box(x / number);
+        let x = black_box(x + number);
+        let _ = black_box(x - number);
+    }
+
+    for number in 1..100_000_000 {
+        black_box(math(input, number));
+    }
+}
+
+// This implementation will be run by `cuprate-benchmark`.
+impl Benchmark for Example {
+    type Input = ExampleBenchmarkInput;
+    const SETUP: fn() -> Self::Input = example_benchmark_setup;
+    const MAIN: fn(Self::Input) = example_benchmark_main;
+}
diff --git a/benches/benchmark/lib/Cargo.toml b/benches/benchmark/lib/Cargo.toml
new file mode 100644
index 00000000..b0771f09
--- /dev/null
+++ b/benches/benchmark/lib/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name        = "cuprate-benchmark-lib"
+version     = "0.0.0"
+edition     = "2021"
+description = "Cuprate's benchmarking library"
+license     = "MIT"
+authors     = ["hinto-janai"]
+repository  = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib"
+keywords    = ["cuprate", "benchmarking", "library"]
+
+[features]
+
+[dependencies]
+
+[dev-dependencies]
+
+[lints]
+workspace = true
\ No newline at end of file
diff --git a/benches/benchmark/lib/README.md b/benches/benchmark/lib/README.md
new file mode 100644
index 00000000..9ea79ae4
--- /dev/null
+++ b/benches/benchmark/lib/README.md
@@ -0,0 +1,15 @@
+## `cuprate-benchmark-lib`
+This crate is the glue between
+[`cuprate-benchmark`](https://github.com/Cuprate/cuprate/tree/benches/benches/benchmark/bin)
+and all the benchmark crates.
+
+It defines the [`crate::Benchmark`] trait, which is the behavior of all benchmarks.
+
+See the [`cuprate-benchmark-example`](https://github.com/Cuprate/cuprate/tree/benches/benches/benchmark/example)
+crate to see an example implementation of this trait.
+
+After implementing this trait, a few steps must
+be done such that the `cuprate-benchmark` binary
+can actually run your benchmark crate; see the
+[`Benchmarking` section in the Architecture book](https://architecture.cuprate.org/benchmarking/intro.html)
+to see how to do this.
\ No newline at end of file
diff --git a/benches/benchmark/lib/src/benchmark.rs b/benches/benchmark/lib/src/benchmark.rs
new file mode 100644
index 00000000..4dca5504
--- /dev/null
+++ b/benches/benchmark/lib/src/benchmark.rs
@@ -0,0 +1,45 @@
+//! Benchmarking trait.
+
+use std::time::Duration;
+
+/// A benchmarking function and its inputs.
+pub trait Benchmark {
+    /// The benchmark's name.
+    ///
+    /// This is automatically implemented
+    /// as the name of the [`Self`] type.
+    //
+    // FIXME: use `const` instead of `fn` when stable
+    // <https://github.com/rust-lang/rust/issues/63084>
+    fn name() -> &'static str {
+        std::any::type_name::<Self>()
+    }
+
+    /// Input to the main benchmarking function.
+    ///
+    /// This is passed to [`Self::MAIN`].
+    type Input;
+
+    /// Setup function to generate the input.
+    ///
+    /// This function is not timed.
+    const SETUP: fn() -> Self::Input;
+
+    /// The main function to benchmark.
+    ///
+    /// The start of the timer begins right before
+    /// this function is called and ends after the
+    /// function returns.
+    const MAIN: fn(Self::Input);
+
+    /// `cuprate-benchmark` will sleep for this [`Duration`] after
+    /// creating the [`Self::Input`], but before starting [`Self::MAIN`].
+    ///
+    /// 1 second by default.
+    const PRE_SLEEP_DURATION: Duration = Duration::from_secs(1);
+
+    /// `cuprate-benchmark` will sleep for this [`Duration`] after [`Self::MAIN`].
+    ///
+    /// 1 second by default.
+    const POST_SLEEP_DURATION: Duration = Duration::from_secs(1);
+}
diff --git a/benches/benchmark/lib/src/lib.rs b/benches/benchmark/lib/src/lib.rs
new file mode 100644
index 00000000..a6bec820
--- /dev/null
+++ b/benches/benchmark/lib/src/lib.rs
@@ -0,0 +1,5 @@
+#![doc = include_str!("../README.md")]
+
+mod benchmark;
+
+pub use benchmark::Benchmark;
diff --git a/benches/criterion/cuprate-json-rpc/Cargo.toml b/benches/criterion/cuprate-json-rpc/Cargo.toml
new file mode 100644
index 00000000..a0cae64e
--- /dev/null
+++ b/benches/criterion/cuprate-json-rpc/Cargo.toml
@@ -0,0 +1,23 @@
+[package]
+name        = "cuprate-criterion-json-rpc"
+version     = "0.0.0"
+edition     = "2021"
+description = "Criterion benchmarking for cuprate-json-rpc"
+license     = "MIT"
+authors     = ["hinto-janai"]
+repository  = "https://github.com/Cuprate/cuprate/tree/main/benches/criterion/cuprate-json-rpc"
+keywords    = ["cuprate", "json-rpc", "criterion", "benchmark"]
+
+[dependencies]
+cuprate-json-rpc = { workspace = true }
+
+criterion     = { workspace = true }
+function_name = { workspace = true }
+serde_json    = { workspace = true, features = ["default"] }
+
+[[bench]]
+name    = "main"
+harness = false
+
+[lints]
+workspace = true
\ No newline at end of file
diff --git a/benches/criterion/cuprate-json-rpc/benches/main.rs b/benches/criterion/cuprate-json-rpc/benches/main.rs
new file mode 100644
index 00000000..a7249430
--- /dev/null
+++ b/benches/criterion/cuprate-json-rpc/benches/main.rs
@@ -0,0 +1,8 @@
+//! Benchmarks for `cuprate-json-rpc`.
+#![allow(unused_crate_dependencies)]
+
+mod response;
+
+criterion::criterion_main! {
+    response::serde,
+}
diff --git a/benches/criterion/cuprate-json-rpc/benches/response.rs b/benches/criterion/cuprate-json-rpc/benches/response.rs
new file mode 100644
index 00000000..890958ec
--- /dev/null
+++ b/benches/criterion/cuprate-json-rpc/benches/response.rs
@@ -0,0 +1,110 @@
+//! Benchmarks for [`Response`].
+#![allow(unused_attributes, unused_crate_dependencies)]
+
+use criterion::{black_box, criterion_group, criterion_main, Criterion};
+use function_name::named;
+use serde_json::{from_str, to_string_pretty};
+
+use cuprate_json_rpc::{Id, Response};
+
+// `serde` benchmarks on `Response`.
+//
+// These are benchmarked as `Response` has a custom serde implementation.
+criterion_group! {
+    name = serde;
+    config = Criterion::default();
+    targets =
+    response_from_str_u8,
+    response_from_str_u64,
+    response_from_str_string_5_len,
+    response_from_str_string_10_len,
+    response_from_str_string_100_len,
+    response_from_str_string_500_len,
+    response_to_string_pretty_u8,
+    response_to_string_pretty_u64,
+    response_to_string_pretty_string_5_len,
+    response_to_string_pretty_string_10_len,
+    response_to_string_pretty_string_100_len,
+    response_to_string_pretty_string_500_len,
+    response_from_str_bad_field_1,
+    response_from_str_bad_field_5,
+    response_from_str_bad_field_10,
+    response_from_str_bad_field_100,
+    response_from_str_missing_field,
+}
+criterion_main!(serde);
+
+/// Generate `from_str` deserialization benchmark functions for [`Response`].
+macro_rules! impl_from_str_benchmark {
+    (
+        $(
+            $fn_name:ident => $request_type:ty => $request_string:literal,
+        )*
+    ) => {
+        $(
+            #[named]
+            fn $fn_name(c: &mut Criterion) {
+                let request_string = $request_string;
+
+                c.bench_function(function_name!(), |b| {
+                    b.iter(|| {
+                        let _r = from_str::<Response<$request_type>>(
+                            black_box(request_string)
+                        );
+                    });
+                });
+            }
+        )*
+    };
+}
+
+impl_from_str_benchmark! {
+    response_from_str_u8 => u8 => r#"{"jsonrpc":"2.0","id":123,"result":0}"#,
+    response_from_str_u64 => u64 => r#"{"jsonrpc":"2.0","id":123,"result":0}"#,
+    response_from_str_string_5_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"hello"}"#,
+    response_from_str_string_10_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"hellohello"}"#,
+    response_from_str_string_100_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld"}"#,
+    response_from_str_string_500_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld"}"#,
+
+    // The custom serde currently looks at all fields.
+    // These are for testing the performance if the serde
+    // has to parse through a bunch of unrelated fields.
+    response_from_str_bad_field_1 => u8 => r#"{"bad_field":0,"jsonrpc":"2.0","id":123,"result":0}"#,
+    response_from_str_bad_field_5 => u8 => r#"{"bad_field_1":0,"bad_field_2":0,"bad_field_3":0,"bad_field_4":0,"bad_field_5":0,"jsonrpc":"2.0","id":123,"result":0}"#,
+    response_from_str_bad_field_10 => u8 => r#"{"bad_field_1":0,"bad_field_2":0,"bad_field_3":0,"bad_field_4":0,"bad_field_5":0,"bad_field_6":0,"bad_field_7":0,"bad_field_8":0,"bad_field_9":0,"bad_field_10":0,"jsonrpc":"2.0","id":123,"result":0}"#,
+    response_from_str_bad_field_100 => u8 => r#"{"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0,"38":0,"39":0,"40":0,"41":0,"42":0,"43":0,"44":0,"45":0,"46":0,"47":0,"48":0,"49":0,"50":0,"51":0,"52":0,"53":0,"54":0,"55":0,"56":0,"57":0,"58":0,"59":0,"60":0,"61":0,"62":0,"63":0,"64":0,"65":0,"66":0,"67":0,"68":0,"69":0,"70":0,"71":0,"72":0,"73":0,"74":0,"75":0,"76":0,"77":0,"78":0,"79":0,"80":0,"81":0,"82":0,"83":0,"84":0,"85":0,"86":0,"87":0,"88":0,"89":0,"90":0,"91":0,"92":0,"93":0,"94":0,"95":0,"96":0,"97":0,"98":0,"99":0,"100":0,"jsonrpc":"2.0","id":123,"result":0}"#,
+
+    // This one is missing the `jsonrpc` field.
+    response_from_str_missing_field => u8 => r#"{"id":123,"result":0}"#,
+}
+
+/// Generate `to_string_pretty` serialization benchmark functions for [`Response`].
+macro_rules! impl_to_string_pretty_benchmark {
+    (
+        $(
+            $fn_name:ident => $request_constructor:expr_2021,
+        )*
+    ) => {
+        $(
+            #[named]
+            fn $fn_name(c: &mut Criterion) {
+                let request = $request_constructor;
+
+                c.bench_function(function_name!(), |b| {
+                    b.iter(|| {
+                        let _s = to_string_pretty(black_box(&request)).unwrap();
+                    });
+                });
+            }
+        )*
+    };
+}
+
+impl_to_string_pretty_benchmark! {
+    response_to_string_pretty_u8 => Response::<u8>::ok(Id::Null, 0),
+    response_to_string_pretty_u64 => Response::<u64>::ok(Id::Null, 0),
+    response_to_string_pretty_string_5_len => Response::ok(Id::Null, String::from("hello")),
+    response_to_string_pretty_string_10_len => Response::ok(Id::Null, String::from("hellohello")),
+    response_to_string_pretty_string_100_len => Response::ok(Id::Null, String::from("helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld")),
+    response_to_string_pretty_string_500_len => Response::ok(Id::Null, String::from("helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld")),
+}
diff --git a/benches/criterion/cuprate-json-rpc/src/lib.rs b/benches/criterion/cuprate-json-rpc/src/lib.rs
new file mode 100644
index 00000000..b29887aa
--- /dev/null
+++ b/benches/criterion/cuprate-json-rpc/src/lib.rs
@@ -0,0 +1,2 @@
+//! Benchmark lib for `cuprate-json-rpc`.
+#![allow(unused_crate_dependencies, reason = "used in benchmarks")]
diff --git a/benches/criterion/example/Cargo.toml b/benches/criterion/example/Cargo.toml
new file mode 100644
index 00000000..43e60119
--- /dev/null
+++ b/benches/criterion/example/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name        = "cuprate-criterion-example"
+version     = "0.0.0"
+edition     = "2021"
+description = "Criterion benchmarking example for Cuprate"
+license     = "MIT"
+authors     = ["hinto-janai"]
+repository  = "https://github.com/Cuprate/cuprate/tree/main/benches/criterion/example"
+keywords    = ["cuprate", "criterion", "benchmark", "example"]
+
+[dependencies]
+criterion     = { workspace = true }
+function_name = { workspace = true }
+serde_json    = { workspace = true, features = ["default"] }
+
+[[bench]]
+name    = "main"
+harness = false
+
+[lints]
+workspace = true
\ No newline at end of file
diff --git a/benches/criterion/example/README.md b/benches/criterion/example/README.md
new file mode 100644
index 00000000..cf1983ff
--- /dev/null
+++ b/benches/criterion/example/README.md
@@ -0,0 +1,14 @@
+## `cuprate-criterion-example`
+An example of using Criterion for benchmarking Cuprate crates.
+
+Consider copy+pasting this crate to use as a base when creating new Criterion benchmark crates.
+
+## `src/`
+Benchmark crates have a `benches/` run by `cargo bench`, but they are also crates themselves,
+as in, they have a `src` folder that `benches/` can pull code from.
+
+The `src` directories in these benchmarking crates are usually filled with
+helper functions, types, etc, that are used repeatedly in the benchmarks.
+
+## `benches/`
+These are the actual benchmarks run by `cargo bench`.
diff --git a/benches/criterion/example/benches/example.rs b/benches/criterion/example/benches/example.rs
new file mode 100644
index 00000000..7ea8e9a1
--- /dev/null
+++ b/benches/criterion/example/benches/example.rs
@@ -0,0 +1,48 @@
+//! Benchmarks.
+#![allow(unused_attributes, unused_crate_dependencies)]
+
+use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
+use function_name::named;
+
+use cuprate_criterion_example::SomeHardToCreateObject;
+
+// This is how you register criterion benchmarks.
+criterion_group! {
+    name = benches;
+    config = Criterion::default();
+    targets = benchmark_1, benchmark_range,
+}
+criterion_main!(benches);
+
+/// Benchmark a single input.
+///
+/// <https://bheisler.github.io/criterion.rs/book/user_guide/benchmarking_with_inputs.html#benchmarking-with-one-input>
+#[named]
+fn benchmark_1(c: &mut Criterion) {
+    // It is recommended to use `function_name!()` as a benchmark
+    // identifier instead of manually re-typing the function name.
+    c.bench_function(function_name!(), |b| {
+        b.iter(|| {
+            black_box(SomeHardToCreateObject::from(1));
+        });
+    });
+}
+
+/// Benchmark a range of inputs.
+///
+/// <https://bheisler.github.io/criterion.rs/book/user_guide/benchmarking_with_inputs.html#benchmarking-with-a-range-of-values>
+#[named]
+fn benchmark_range(c: &mut Criterion) {
+    let mut group = c.benchmark_group(function_name!());
+
+    for i in 0..4 {
+        group.throughput(Throughput::Elements(i));
+        group.bench_with_input(BenchmarkId::from_parameter(i), &i, |b, &i| {
+            b.iter(|| {
+                black_box(SomeHardToCreateObject::from(i));
+            });
+        });
+    }
+
+    group.finish();
+}
diff --git a/benches/criterion/example/benches/main.rs b/benches/criterion/example/benches/main.rs
new file mode 100644
index 00000000..d4f0bf80
--- /dev/null
+++ b/benches/criterion/example/benches/main.rs
@@ -0,0 +1,10 @@
+//! Benchmarks examples.
+#![allow(unused_crate_dependencies)]
+
+// All modules within `benches/` are `mod`ed here.
+mod example;
+
+// And all the Criterion benchmarks are registered like so:
+criterion::criterion_main! {
+    example::benches,
+}
diff --git a/benches/criterion/example/src/lib.rs b/benches/criterion/example/src/lib.rs
new file mode 100644
index 00000000..0f732a47
--- /dev/null
+++ b/benches/criterion/example/src/lib.rs
@@ -0,0 +1,13 @@
+#![doc = include_str!("../README.md")] // See the README for crate documentation.
+#![allow(unused_crate_dependencies, reason = "used in benchmarks")]
+
+/// Shared type that all benchmarks can use.
+#[expect(dead_code)]
+pub struct SomeHardToCreateObject(u64);
+
+impl From<u64> for SomeHardToCreateObject {
+    /// Shared function that all benchmarks can use.
+    fn from(value: u64) -> Self {
+        Self(value)
+    }
+}
diff --git a/binaries/cuprated/Cuprated.toml b/binaries/cuprated/Cuprated.toml
new file mode 100644
index 00000000..d248ce1f
--- /dev/null
+++ b/binaries/cuprated/Cuprated.toml
@@ -0,0 +1,67 @@
+#     ____                      _
+#    / ___|   _ _ __  _ __ __ _| |_ ___
+#   | |  | | | | '_ \| '__/ _` | __/ _ \
+#   | |__| |_| | |_) | | | (_| | ||  __/
+#    \____\__,_| .__/|_|  \__,_|\__\___|
+#              |_|
+#
+
+## The network to run on, valid values: "Mainnet", "Testnet", "Stagenet".
+network = "Mainnet"
+
+## Tracing config.
+[tracing]
+## The minimum level for log events to be displayed.
+level = "info"
+
+## Clear-net config.
+[p2p.clear_net]
+## The number of outbound connections we should make and maintain.
+outbound_connections = 64
+## The number of extra connections we should make under load from the rest of Cuprate, i.e. when syncing.
+extra_outbound_connections = 8
+## The maximum number of inbound connections we should allow.
+max_inbound_connections = 128
+## The percent of outbound connections that should be to nodes we have not connected to before.
+gray_peers_percent = 0.7
+## The port to accept connections on, if left `0` no connections will be accepted.
+p2p_port = 0
+## The IP address to listen to connections on.
+listen_on = "0.0.0.0"
+
+## The Clear-net addressbook config.
+[p2p.clear_net.address_book_config]
+## The size of the white peer list, which contains peers we have made a connection to before.
+max_white_list_length = 1_000
+## The size of the gray peer list, which contains peers we have not made a connection to before.
+max_gray_list_length = 5_000
+## The amount of time between address book saves.
+peer_save_period = { secs = 90, nanos = 0 }
+
+## The block downloader config.
+[p2p.block_downloader]
+## The size of the buffer of sequential blocks waiting to be verified and added to the chain (bytes).
+buffer_bytes = 50_000_000
+## The size of the queue of blocks which are waiting for a parent block to be downloaded (bytes).
+in_progress_queue_bytes = 50_000_000
+## The target size of a batch of blocks (bytes), must not exceed 100MB.
+target_batch_bytes = 5_000_000
+## The amount of time between checking the pool of connected peers for free peers to download blocks.
+check_client_pool_interval = { secs = 30, nanos = 0 }
+
+## Storage config
+[storage]
+## The amount of reader threads to spawn.
+reader_threads = "OnePerThread"
+
+## Txpool storage config.
+[storage.txpool]
+## The database sync mode for the txpool.
+sync_mode = "Async"
+## The maximum size of all the txs in the pool (bytes).
+max_txpool_byte_size = 100_000_000
+
+## Blockchain storage config.
+[storage.blockchain]
+## The database sync mode for the blockchain.
+sync_mode = "Async"
diff --git a/binaries/cuprated/src/config.rs b/binaries/cuprated/src/config.rs
index 77688e8c..36d5092a 100644
--- a/binaries/cuprated/src/config.rs
+++ b/binaries/cuprated/src/config.rs
@@ -18,7 +18,6 @@ use cuprate_p2p::block_downloader::BlockDownloaderConfig;
 use cuprate_p2p_core::{ClearNet, ClearNetServerCfg};
 
 mod args;
-mod default;
 mod fs;
 mod p2p;
 mod storage;
@@ -32,13 +31,14 @@ use tracing_config::TracingConfig;
 /// Reads the args & config file, returning a [`Config`].
 pub fn read_config_and_args() -> Config {
     let args = args::Args::parse();
+    args.do_quick_requests();
 
     let config: Config = if let Some(config_file) = &args.config_file {
         // If a config file was set in the args try to read it and exit if we can't.
         match Config::read_from_path(config_file) {
             Ok(config) => config,
             Err(e) => {
-                tracing::error!("Failed to read config from file: {e}");
+                eprintln!("Failed to read config from file: {e}");
                 std::process::exit(1);
             }
         }
@@ -56,7 +56,7 @@ pub fn read_config_and_args() -> Config {
             })
             .inspect_err(|e| {
                 tracing::debug!("Failed to read config from config dir: {e}");
-                println!("Failed to find/read config file, using default config.");
+                eprintln!("Failed to find/read config file, using default config.");
             })
             .unwrap_or_default()
     };
@@ -93,10 +93,10 @@ impl Config {
         let file_text = read_to_string(file.as_ref())?;
 
         Ok(toml::from_str(&file_text)
-            .inspect(|_| println!("Using config at: {}", file.as_ref().to_string_lossy()))
+            .inspect(|_| eprintln!("Using config at: {}", file.as_ref().to_string_lossy()))
             .inspect_err(|e| {
-                println!("{e}");
-                println!(
+                eprintln!("{e}");
+                eprintln!(
                     "Failed to parse config file at: {}",
                     file.as_ref().to_string_lossy()
                 );
diff --git a/binaries/cuprated/src/config/args.rs b/binaries/cuprated/src/config/args.rs
index c2cd6233..c4c2f9fd 100644
--- a/binaries/cuprated/src/config/args.rs
+++ b/binaries/cuprated/src/config/args.rs
@@ -1,10 +1,10 @@
-use std::{io::Write, path::PathBuf};
+use std::{io::Write, path::PathBuf, process::exit};
 
 use clap::builder::TypedValueParser;
 
 use cuprate_helper::network::Network;
 
-use crate::config::{default::create_default_config_file, Config};
+use crate::{config::Config, constants::EXAMPLE_CONFIG};
 
 /// Cuprate Args.
 #[derive(clap::Parser, Debug)]
@@ -24,21 +24,26 @@ pub struct Args {
     /// The PATH of the `cuprated` config file.
     #[arg(long)]
     pub config_file: Option<PathBuf>,
-    /// Generate a config file and place it in the given PATH.
+    /// Generate a config file and print it to stdout.
     #[arg(long)]
-    pub generate_config: Option<PathBuf>,
+    pub generate_config: bool,
 }
 
 impl Args {
+    /// Complete any quick requests asked for in [`Args`].
+    ///
+    /// May cause the process to [`exit`].
+    pub fn do_quick_requests(&self) {
+        if self.generate_config {
+            println!("{EXAMPLE_CONFIG}");
+            exit(0);
+        }
+    }
+
     /// Apply the [`Args`] to the given [`Config`].
     ///
     /// This may exit the program if a config value was set that requires an early exit.
-    pub fn apply_args(&self, mut config: Config) -> Config {
-        if let Some(config_folder) = self.generate_config.as_ref() {
-            // This will create the config file and exit.
-            create_default_config_file(config_folder)
-        };
-
+    pub const fn apply_args(&self, mut config: Config) -> Config {
         config.network = self.network;
 
         if let Some(outbound_connections) = self.outbound_connections {
diff --git a/binaries/cuprated/src/config/p2p.rs b/binaries/cuprated/src/config/p2p.rs
index aaf7dda7..51f8d0d6 100644
--- a/binaries/cuprated/src/config/p2p.rs
+++ b/binaries/cuprated/src/config/p2p.rs
@@ -6,7 +6,7 @@ use std::{
 
 use serde::{Deserialize, Serialize};
 
-use cuprate_helper::{fs::addressbook_path, network::Network};
+use cuprate_helper::{fs::address_book_path, network::Network};
 
 /// P2P config.
 #[derive(Default, Deserialize, Serialize)]
@@ -23,22 +23,22 @@ pub struct P2PConfig {
 pub struct BlockDownloaderConfig {
     /// The size in bytes of the buffer between the block downloader and the place which
     /// is consuming the downloaded blocks.
-    pub buffer_size: usize,
+    pub buffer_bytes: usize,
     /// The size of the in progress queue (in bytes) at which we stop requesting more blocks.
-    pub in_progress_queue_size: usize,
+    pub in_progress_queue_bytes: usize,
     /// The [`Duration`] between checking the client pool for free peers.
     pub check_client_pool_interval: Duration,
     /// The target size of a single batch of blocks (in bytes).
-    pub target_batch_size: usize,
+    pub target_batch_bytes: usize,
 }
 
 impl From<BlockDownloaderConfig> for cuprate_p2p::block_downloader::BlockDownloaderConfig {
     fn from(value: BlockDownloaderConfig) -> Self {
         Self {
-            buffer_size: value.buffer_size,
-            in_progress_queue_size: value.in_progress_queue_size,
+            buffer_bytes: value.buffer_bytes,
+            in_progress_queue_bytes: value.in_progress_queue_bytes,
             check_client_pool_interval: value.check_client_pool_interval,
-            target_batch_size: value.target_batch_size,
+            target_batch_bytes: value.target_batch_bytes,
             initial_batch_len: 1,
         }
     }
@@ -47,10 +47,10 @@ impl From<BlockDownloaderConfig> for cuprate_p2p::block_downloader::BlockDownloa
 impl Default for BlockDownloaderConfig {
     fn default() -> Self {
         Self {
-            buffer_size: 50_000_000,
-            in_progress_queue_size: 50_000_000,
+            buffer_bytes: 50_000_000,
+            in_progress_queue_bytes: 50_000_000,
             check_client_pool_interval: Duration::from_secs(30),
-            target_batch_size: 5_000_000,
+            target_batch_bytes: 5_000_000,
         }
     }
 }
@@ -102,7 +102,7 @@ impl SharedNetConfig {
         cuprate_address_book::AddressBookConfig {
             max_white_list_length: self.address_book_config.max_white_list_length,
             max_gray_list_length: self.address_book_config.max_gray_list_length,
-            peer_store_directory: addressbook_path(cache_dir, network),
+            peer_store_directory: address_book_path(cache_dir, network),
             peer_save_period: self.address_book_config.peer_save_period,
         }
     }
diff --git a/binaries/cuprated/src/constants.rs b/binaries/cuprated/src/constants.rs
index a42248b6..057e8bd0 100644
--- a/binaries/cuprated/src/constants.rs
+++ b/binaries/cuprated/src/constants.rs
@@ -18,11 +18,12 @@ pub const VERSION_BUILD: &str = if cfg!(debug_assertions) {
 pub const PANIC_CRITICAL_SERVICE_ERROR: &str =
     "A service critical to Cuprate's function returned an unexpected error.";
 
-pub const EXAMPLE_CONFIG: &str = include_str!("../../../Cuprated.toml");
+pub const EXAMPLE_CONFIG: &str = include_str!("../Cuprated.toml");
 
 #[cfg(test)]
 mod test {
     use super::*;
+    use crate::config::Config;
 
     #[test]
     fn version() {
@@ -37,4 +38,9 @@ mod test {
             assert_eq!(VERSION_BUILD, "0.0.1-release");
         }
     }
+
+    #[test]
+    fn generate_config_text_is_valid() {
+        let config: Config = toml::from_str(EXAMPLE_CONFIG).unwrap();
+    }
 }
diff --git a/binaries/cuprated/src/p2p/request_handler.rs b/binaries/cuprated/src/p2p/request_handler.rs
index cb0757c6..7d72fa37 100644
--- a/binaries/cuprated/src/p2p/request_handler.rs
+++ b/binaries/cuprated/src/p2p/request_handler.rs
@@ -38,7 +38,7 @@ use cuprate_p2p_core::{
 use cuprate_txpool::service::TxpoolReadHandle;
 use cuprate_types::{
     blockchain::{BlockchainReadRequest, BlockchainResponse},
-    BlockCompleteEntry, MissingTxsInBlock, TransactionBlobs,
+    BlockCompleteEntry, TransactionBlobs, TxsInBlock,
 };
 use cuprate_wire::protocol::{
     ChainRequest, ChainResponse, FluffyMissingTransactionsRequest, GetObjectsRequest,
@@ -56,9 +56,7 @@ use crate::{
 #[derive(Clone)]
 pub struct P2pProtocolRequestHandlerMaker {
     pub blockchain_read_handle: BlockchainReadHandle,
-
     pub blockchain_context_service: BlockChainContextService,
-
     pub txpool_read_handle: TxpoolReadHandle,
 
     /// The [`IncomingTxHandler`], wrapped in an [`Option`] as there is a cyclic reference between [`P2pProtocolRequestHandlerMaker`]
@@ -115,13 +113,9 @@ where
 #[derive(Clone)]
 pub struct P2pProtocolRequestHandler<N: NetZoneAddress> {
     peer_information: PeerInformation<N>,
-
     blockchain_read_handle: BlockchainReadHandle,
-
     blockchain_context_service: BlockChainContextService,
-
     txpool_read_handle: TxpoolReadHandle,
-
     incoming_tx_handler: IncomingTxHandler,
 }
 
@@ -196,7 +190,7 @@ async fn get_objects(
         .call(BlockchainReadRequest::BlockCompleteEntries(block_hashes))
         .await?
     else {
-        panic!("blockchain returned wrong response!");
+        unreachable!();
     };
 
     Ok(ProtocolResponse::GetObjects(GetObjectsResponse {
@@ -233,18 +227,18 @@ async fn get_chain(
         .call(BlockchainReadRequest::NextChainEntry(block_hashes, 10_000))
         .await?
     else {
-        panic!("blockchain returned wrong response!");
+        unreachable!();
     };
 
-    if start_height == 0 {
+    let Some(start_height) = start_height else {
         anyhow::bail!("The peers chain has a different genesis block than ours.");
-    }
+    };
 
     let (cumulative_difficulty_low64, cumulative_difficulty_top64) =
         split_u128_into_low_high_bits(cumulative_difficulty);
 
     Ok(ProtocolResponse::GetChain(ChainResponse {
-        start_height: usize_to_u64(start_height),
+        start_height: usize_to_u64(std::num::NonZero::get(start_height)),
         total_height: usize_to_u64(chain_height),
         cumulative_difficulty_low64,
         cumulative_difficulty_top64,
@@ -271,19 +265,19 @@ async fn fluffy_missing_txs(
     // deallocate the backing `Bytes`.
     drop(request);
 
-    let BlockchainResponse::MissingTxsInBlock(res) = blockchain_read_handle
+    let BlockchainResponse::TxsInBlock(res) = blockchain_read_handle
         .ready()
         .await?
-        .call(BlockchainReadRequest::MissingTxsInBlock {
+        .call(BlockchainReadRequest::TxsInBlock {
             block_hash,
             tx_indexes,
         })
         .await?
     else {
-        panic!("blockchain returned wrong response!");
+        unreachable!();
     };
 
-    let Some(MissingTxsInBlock { block, txs }) = res else {
+    let Some(TxsInBlock { block, txs }) = res else {
         anyhow::bail!("The peer requested txs out of range.");
     };
 
@@ -412,11 +406,7 @@ where
     };
 
     // Drop all the data except the stuff we still need.
-    let NewTransactions {
-        txs,
-        dandelionpp_fluff: _,
-        padding: _,
-    } = request;
+    let NewTransactions { txs, .. } = request;
 
     let res = incoming_tx_handler
         .ready()
diff --git a/binaries/cuprated/src/statics.rs b/binaries/cuprated/src/statics.rs
index 9839608f..2d7338da 100644
--- a/binaries/cuprated/src/statics.rs
+++ b/binaries/cuprated/src/statics.rs
@@ -13,7 +13,7 @@ use std::{
 macro_rules! define_init_lazylock_statics {
     ($(
         $( #[$attr:meta] )*
-        $name:ident: $t:ty = $init_fn:expr;
+        $name:ident: $t:ty = $init_fn:expr_2021;
     )*) => {
         /// Initialize global static `LazyLock` data.
         pub fn init_lazylock_statics() {
diff --git a/books/architecture/src/SUMMARY.md b/books/architecture/src/SUMMARY.md
index 0961d8fc..a99d099f 100644
--- a/books/architecture/src/SUMMARY.md
+++ b/books/architecture/src/SUMMARY.md
@@ -143,9 +143,16 @@
 
 ---
 
-- [⚪️ Benchmarking](benchmarking/intro.md)
-    - [⚪️ Criterion](benchmarking/criterion.md)
-    - [⚪️ Harness](benchmarking/harness.md)
+- [🟢 Benchmarking](benchmarking/intro.md)
+    - [🟢 Criterion](benchmarking/criterion/intro.md)
+        - [🟢 Creating](benchmarking/criterion/creating.md)
+        - [🟢 Running](benchmarking/criterion/running.md)
+    - [🟢 `cuprate-benchmark`](benchmarking/cuprate/intro.md)
+        - [🟢 Creating](benchmarking/cuprate/creating.md)
+        - [🟢 Running](benchmarking/cuprate/running.md)
+
+---
+
 - [⚪️ Testing](testing/intro.md)
     - [⚪️ Monero data](testing/monero-data.md)
     - [⚪️ RPC client](testing/rpc-client.md)
diff --git a/books/architecture/src/appendix/crates.md b/books/architecture/src/appendix/crates.md
index ac2780e1..5124180f 100644
--- a/books/architecture/src/appendix/crates.md
+++ b/books/architecture/src/appendix/crates.md
@@ -54,6 +54,11 @@ cargo doc --open --package cuprate-blockchain
 | [`cuprate-rpc-interface`](https://doc.cuprate.org/cuprate_rpc_interface) | [`rpc/interface/`](https://github.com/Cuprate/cuprate/tree/main/rpc/interface) | RPC interface & routing
 | [`cuprate-rpc-handler`](https://doc.cuprate.org/cuprate_rpc_handler) | [`rpc/handler/`](https://github.com/Cuprate/cuprate/tree/main/rpc/handler) | RPC inner handlers
 
+## ZMQ
+| Crate | In-tree path | Purpose |
+|-------|--------------|---------|
+| [`cuprate-zmq-types`](https://doc.cuprate.org/cuprate_zmq_types) | [`zmq/types/`](https://github.com/Cuprate/cuprate/tree/main/zmq/types) | Message types for ZMQ Pub/Sub interface
+
 ## 1-off crates
 | Crate | In-tree path | Purpose |
 |-------|--------------|---------|
@@ -63,3 +68,11 @@ cargo doc --open --package cuprate-blockchain
 | [`cuprate-helper`](https://doc.cuprate.org/cuprate_helper) | [`helper/`](https://github.com/Cuprate/cuprate/tree/main/helper) | Kitchen-sink helper crate for Cuprate
 | [`cuprate-test-utils`](https://doc.cuprate.org/cuprate_test_utils) | [`test-utils/`](https://github.com/Cuprate/cuprate/tree/main/test-utils) | Testing utilities for Cuprate
 | [`cuprate-types`](https://doc.cuprate.org/cuprate_types) | [`types/`](https://github.com/Cuprate/cuprate/tree/main/types) | Shared types across Cuprate
+
+## Benchmarks
+| Crate | In-tree path | Purpose |
+|-------|--------------|---------|
+| [`cuprate-benchmark`](https://doc.cuprate.org/cuprate_benchmark) | [`benches/benchmark/bin/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin) | Cuprate benchmarking binary
+| [`cuprate-benchmark-lib`](https://doc.cuprate.org/cuprate_benchmark_lib) | [`benches/benchmark/lib/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib) | Cuprate benchmarking library
+| `cuprate-benchmark-*` | [`benches/benchmark/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/) | Benchmark for a Cuprate crate that uses `cuprate-benchmark`
+| `cuprate-criterion-*` | [`benches/criterion/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) | Benchmark for a Cuprate crate that uses [Criterion](https://bheisler.github.io/criterion.rs/book)
\ No newline at end of file
diff --git a/books/architecture/src/benchmarking/criterion.md b/books/architecture/src/benchmarking/criterion.md
deleted file mode 100644
index e9d61e65..00000000
--- a/books/architecture/src/benchmarking/criterion.md
+++ /dev/null
@@ -1 +0,0 @@
-# ⚪️ Criterion
diff --git a/books/architecture/src/benchmarking/criterion/creating.md b/books/architecture/src/benchmarking/criterion/creating.md
new file mode 100644
index 00000000..01009042
--- /dev/null
+++ b/books/architecture/src/benchmarking/criterion/creating.md
@@ -0,0 +1,21 @@
+# Creating
+Creating a new Criterion-based benchmarking crate for one of Cuprate's crates is relatively simple,
+although, it requires knowledge of how to use Criterion first:
+
+1. Read the `Getting Started` section of <https://bheisler.github.io/criterion.rs/book>
+2. Copy [`benches/criterion/example`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/example) as base
+3. Get started
+
+## Naming
+New benchmark crates using Criterion should:
+- Be in [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/)
+- Be in the `cuprate-criterion-$CRATE_NAME` format
+
+For a real example, see:
+[`cuprate-criterion-json-rpc`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/cuprate-json-rpc).
+
+## Workspace
+Finally, make sure to add the benchmark crate to the workspace
+[`Cargo.toml`](https://github.com/Cuprate/cuprate/blob/main/Cargo.toml) file.
+
+Your benchmark is now ready to be run.
\ No newline at end of file
diff --git a/books/architecture/src/benchmarking/criterion/intro.md b/books/architecture/src/benchmarking/criterion/intro.md
new file mode 100644
index 00000000..b7a79b21
--- /dev/null
+++ b/books/architecture/src/benchmarking/criterion/intro.md
@@ -0,0 +1,4 @@
+# Criterion
+Each sub-directory in [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) is a crate that uses [Criterion](https://bheisler.github.io/criterion.rs/book) for timing single functions and/or groups of functions.
+
+They are generally small in scope.
\ No newline at end of file
diff --git a/books/architecture/src/benchmarking/criterion/running.md b/books/architecture/src/benchmarking/criterion/running.md
new file mode 100644
index 00000000..14067f67
--- /dev/null
+++ b/books/architecture/src/benchmarking/criterion/running.md
@@ -0,0 +1,15 @@
+# Running
+To run all Criterion benchmarks, run this from the repository root:
+```bash
+cargo bench
+```
+
+To run specific package(s), use:
+```bash
+cargo bench --package $CRITERION_BENCHMARK_CRATE_NAME
+```
+
+For example:
+```bash
+cargo bench --package cuprate-criterion-json-rpc
+```
\ No newline at end of file
diff --git a/books/architecture/src/benchmarking/cuprate/creating.md b/books/architecture/src/benchmarking/cuprate/creating.md
new file mode 100644
index 00000000..76eab789
--- /dev/null
+++ b/books/architecture/src/benchmarking/cuprate/creating.md
@@ -0,0 +1,57 @@
+# Creating
+New benchmarks are plugged into `cuprate-benchmark` by:
+1. Implementing `cuprate_benchmark_lib::Benchmark`
+1. Registering the benchmark in the `cuprate_benchmark` binary
+
+See [`benches/benchmark/example`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/example)
+for an example.
+
+## Creating the benchmark crate
+Before plugging into `cuprate-benchmark`, your actual benchmark crate must be created:
+
+1. Create a new crate inside `benches/benchmark` (consider copying `benches/benchmark/example` as a base)
+1. Pull in `cuprate_benchmark_lib` as a dependency
+1. Create a benchmark
+1. Implement `cuprate_benchmark_lib::Benchmark`
+
+New benchmark crates using `cuprate-benchmark` should:
+- Be in [`benches/benchmark/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/)
+- Be in the `cuprate-benchmark-$CRATE_NAME` format
+
+For a real example, see:
+[`cuprate-benchmark-database`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/cuprate-database).
+
+## `cuprate_benchmark_lib::Benchmark`
+This is the trait that standardizes all benchmarks run under `cuprate-benchmark`.
+
+It must be implemented by your benchmarking crate.
+
+See `cuprate-benchmark-lib` crate documentation for a user-guide: <https://doc.cuprate.org/cuprate_benchmark_lib>.
+
+## Adding a feature to `cuprate-benchmark`
+After your benchmark's behavior is defined, it must be registered
+in the binary that is actually run: `cuprate-benchmark`.
+
+If your benchmark is new, add a new crate feature to [`cuprate-benchmark`'s Cargo.toml file](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin/Cargo.toml) with an optional dependency to your benchmarking crate.
+
+Please remember to edit the feature table in the
+[`README.md`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin/README.md) as well!
+
+## Adding to `cuprate-benchmark`'s `main()`
+After adding your crate's feature, add a conditional line to the `main()`
+function that runs the benchmark if the feature is enabled:
+
+For example, if your crate's name is `egg`:
+```rust
+cfg_if! {
+	if #[cfg(feature = "egg")] {
+		run::run_benchmark::<cuprate_benchmark_egg::Benchmark>(&mut timings);
+	}
+}
+```
+
+## Workspace
+Finally, make sure to add the benchmark crate to the workspace
+[`Cargo.toml`](https://github.com/Cuprate/cuprate/blob/main/Cargo.toml) file.
+
+Your benchmark is now ready to be run.
\ No newline at end of file
diff --git a/books/architecture/src/benchmarking/cuprate/intro.md b/books/architecture/src/benchmarking/cuprate/intro.md
new file mode 100644
index 00000000..25efb460
--- /dev/null
+++ b/books/architecture/src/benchmarking/cuprate/intro.md
@@ -0,0 +1,37 @@
+# cuprate-benchmark
+Cuprate has 2 custom crates for general benchmarking:
+- `cuprate-benchmark`; the actual binary crate that is run
+- `cuprate-benchmark-lib`; the library that other crates hook into
+
+The abstract purpose of `cuprate-benchmark` is very simple:
+1. Set-up the benchmark
+1. Start timer
+1. Run benchmark
+1. Output data
+
+`cuprate-benchmark` runs the benchmarks found in [`benches/benchmark/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark).
+
+`cuprate-benchmark-lib` defines the `Benchmark` trait that all
+benchmark crates implement to "plug-in" to the benchmarking harness.
+
+## Diagram
+A diagram displaying the relation between `cuprate-benchmark` and related crates.
+
+```
+                    ┌─────────────────────┐
+                    │ cuprate_benchmark   │
+                    │ (actual binary run) │
+                    └──────────┬──────────┘
+            ┌──────────────────┴───────────────────┐
+            │ cuprate_benchmark_lib                │
+            │ ┌───────────────────────────────────┐│
+            │ │ trait Benchmark                   ││
+            │ └───────────────────────────────────┘│
+            └──────────────────┬───────────────────┘
+┌───────────────────────────┐  │   ┌───────────────────────────┐
+│ cuprate_benchmark_example ├──┼───┤ cuprate_benchmark_*       │
+└───────────────────────────┘  │   └───────────────────────────┘
+┌───────────────────────────┐  │   ┌───────────────────────────┐
+│ cuprate_benchmark_*       ├──┴───┤ cuprate_benchmark_*       │
+└───────────────────────────┘      └───────────────────────────┘
+```
\ No newline at end of file
diff --git a/books/architecture/src/benchmarking/cuprate/running.md b/books/architecture/src/benchmarking/cuprate/running.md
new file mode 100644
index 00000000..b776163e
--- /dev/null
+++ b/books/architecture/src/benchmarking/cuprate/running.md
@@ -0,0 +1,16 @@
+# Running
+`cuprate-benchmark` benchmarks are run with this command:
+```bash
+cargo run --release --package cuprate-benchmark --features $BENCHMARK_CRATE_FEATURE
+```
+
+For example, to run the example benchmark:
+```bash
+cargo run --release --package cuprate-benchmark --features example
+```
+
+Use the `all` feature to run all benchmarks:
+```bash
+# Run all benchmarks
+cargo run --release --package cuprate-benchmark --features all
+```
diff --git a/books/architecture/src/benchmarking/harness.md b/books/architecture/src/benchmarking/harness.md
deleted file mode 100644
index 6f82b523..00000000
--- a/books/architecture/src/benchmarking/harness.md
+++ /dev/null
@@ -1 +0,0 @@
-# ⚪️ Harness
diff --git a/books/architecture/src/benchmarking/intro.md b/books/architecture/src/benchmarking/intro.md
index f043a0ba..e6ab6b12 100644
--- a/books/architecture/src/benchmarking/intro.md
+++ b/books/architecture/src/benchmarking/intro.md
@@ -1 +1,22 @@
-# ⚪️ Benchmarking
+# Benchmarking
+Cuprate has 2 types of benchmarks:
+- [Criterion](https://bheisler.github.io/criterion.rs/book) benchmarks
+- `cuprate-benchmark` benchmarks
+
+Criterion is used for micro benchmarks; they time single functions, groups of functions, and generally are small in scope.
+
+`cuprate-benchmark` and [`cuprate-benchmark-lib`](https://doc.cuprate.org/cuprate_benchmark_lib) are custom in-house crates Cuprate uses for macro benchmarks; these test sub-systems, sections of a sub-system, or otherwise larger or more complicated code that isn't well-suited for micro benchmarks.
+
+## File layout and purpose
+All benchmarking related files are in the [`benches/`](https://github.com/Cuprate/cuprate/tree/main/benches) folder.
+
+This directory is organized like such:
+
+| Directory                     | Purpose |
+|-------------------------------|---------|
+| [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) | Criterion (micro) benchmarks
+| `benches/criterion/cuprate-*` | Criterion benchmarks for the crate with the same name
+| [`benches/benchmark/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark) | Cuprate's custom benchmarking files
+| [`benches/benchmark/bin`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin) | The `cuprate-benchmark` crate; the actual binary run that links all benchmarks
+| [`benches/benchmark/lib`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib) | The `cuprate-benchmark-lib` crate; the benchmarking framework all benchmarks plug into
+| `benches/benchmark/cuprate-*` | `cuprate-benchmark` benchmarks for the crate with the same name
diff --git a/consensus/context/src/difficulty.rs b/consensus/context/src/difficulty.rs
index 1b61eb9e..3bbcb059 100644
--- a/consensus/context/src/difficulty.rs
+++ b/consensus/context/src/difficulty.rs
@@ -328,8 +328,8 @@ fn next_difficulty(
         time_span = 1;
     }
 
-    // TODO: do checked operations here and unwrap so we don't silently overflow?
-    (windowed_work * u128::from(hf.block_time().as_secs()) + time_span - 1) / time_span
+    // TODO: do `checked_mul` here and unwrap so we don't silently overflow?
+    (windowed_work * u128::from(hf.block_time().as_secs())).div_ceil(time_span)
 }
 
 /// Get the start and end of the window to calculate difficulty.
diff --git a/consensus/fast-sync/src/create.rs b/consensus/fast-sync/src/create.rs
index 8c47b8e5..9410f60a 100644
--- a/consensus/fast-sync/src/create.rs
+++ b/consensus/fast-sync/src/create.rs
@@ -9,7 +9,7 @@ use clap::Parser;
 use tower::{Service, ServiceExt};
 
 use cuprate_blockchain::{
-    config::ConfigBuilder, cuprate_database::RuntimeError, service::BlockchainReadHandle,
+    config::ConfigBuilder, cuprate_database::DbResult, service::BlockchainReadHandle,
 };
 use cuprate_types::{
     blockchain::{BlockchainReadRequest, BlockchainResponse},
@@ -23,7 +23,7 @@ const BATCH_SIZE: usize = 512;
 async fn read_batch(
     handle: &mut BlockchainReadHandle,
     height_from: usize,
-) -> Result<Vec<BlockId>, RuntimeError> {
+) -> DbResult<Vec<BlockId>> {
     let mut block_ids = Vec::<BlockId>::with_capacity(BATCH_SIZE);
 
     for height in height_from..(height_from + BATCH_SIZE) {
diff --git a/cryptonight/src/util.rs b/cryptonight/src/util.rs
index 7fbf5cb4..de8b70b2 100644
--- a/cryptonight/src/util.rs
+++ b/cryptonight/src/util.rs
@@ -49,7 +49,7 @@ pub(crate) fn subarray_copy<T: AsRef<[U]> + ?Sized, U: Copy, const LEN: usize>(
 /// A mutable reference to a fixed-size subarray of type `[U; LEN]`.
 ///
 /// # Panics
-/// Panics if `start + LEN > array.as_ref().len()`.
+/// Panics if `start + LEN > array.as_mut().len()`.
 #[inline]
 pub(crate) fn subarray_mut<T: AsMut<[U]> + ?Sized, U, const LEN: usize>(
     array: &mut T,
diff --git a/helper/src/fs.rs b/helper/src/fs.rs
index f1a64ecd..3eb168e3 100644
--- a/helper/src/fs.rs
+++ b/helper/src/fs.rs
@@ -220,13 +220,13 @@ pub fn logs_path(data_dir: &Path, network: Network) -> PathBuf {
 /// This is the PATH used for any Cuprate address-book files.
 ///
 /// ```rust
-/// use cuprate_helper::{network::Network, fs::{CUPRATE_CACHE_DIR, addressbook_path}};
+/// use cuprate_helper::{network::Network, fs::{CUPRATE_CACHE_DIR, address_book_path}};
 ///
-/// assert_eq!(addressbook_path(&**CUPRATE_CACHE_DIR, Network::Mainnet).as_path(), CUPRATE_CACHE_DIR.join("addressbook"));
-/// assert_eq!(addressbook_path(&**CUPRATE_CACHE_DIR, Network::Stagenet).as_path(), CUPRATE_CACHE_DIR.join(Network::Stagenet.to_string()).join("addressbook"));
-/// assert_eq!(addressbook_path(&**CUPRATE_CACHE_DIR, Network::Testnet).as_path(), CUPRATE_CACHE_DIR.join(Network::Testnet.to_string()).join("addressbook"));
+/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Mainnet).as_path(), CUPRATE_CACHE_DIR.join("addressbook"));
+/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Stagenet).as_path(), CUPRATE_CACHE_DIR.join(Network::Stagenet.to_string()).join("addressbook"));
+/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Testnet).as_path(), CUPRATE_CACHE_DIR.join(Network::Testnet.to_string()).join("addressbook"));
 /// ```
-pub fn addressbook_path(cache_dir: &Path, network: Network) -> PathBuf {
+pub fn address_book_path(cache_dir: &Path, network: Network) -> PathBuf {
     path_with_network(cache_dir, network).join("addressbook")
 }
 
diff --git a/net/epee-encoding/src/macros.rs b/net/epee-encoding/src/macros.rs
index 38dcc45d..bb1afefd 100644
--- a/net/epee-encoding/src/macros.rs
+++ b/net/epee-encoding/src/macros.rs
@@ -76,14 +76,14 @@ macro_rules! epee_object {
     // All this does is return the second (right) arg if present otherwise the left is returned.
     (
         @internal_try_right_then_left
-        $a:expr, $b:expr
+        $a:expr_2021, $b:expr_2021
     ) => {
         $b
     };
 
     (
         @internal_try_right_then_left
-        $a:expr,
+        $a:expr_2021,
     ) => {
         $a
     };
@@ -122,7 +122,7 @@ macro_rules! epee_object {
     // ------------------------------------------------------------------------ Entry Point
     (
         $obj:ident,
-        $($field: ident $(($alt_name: literal))?: $ty:ty $(as $ty_as:ty )? $(= $default:expr)?  $(=> $read_fn:expr, $write_fn:expr, $should_write_fn:expr)?, )*
+        $($field: ident $(($alt_name: literal))?: $ty:ty $(as $ty_as:ty )? $(= $default:expr_2021)?  $(=> $read_fn:expr_2021, $write_fn:expr_2021, $should_write_fn:expr_2021)?, )*
         $(!flatten: $flat_field: ident: $flat_ty:ty ,)*
 
     ) => {
diff --git a/p2p/address-book/src/store.rs b/p2p/address-book/src/store.rs
index 98521e97..47994ae5 100644
--- a/p2p/address-book/src/store.rs
+++ b/p2p/address-book/src/store.rs
@@ -42,11 +42,7 @@ pub(crate) fn save_peers_to_disk<Z: BorshNetworkZone>(
     let file = cfg
         .peer_store_directory
         .join(format!("{}_p2p_state", Z::NAME));
-
-    spawn_blocking(move || {
-        fs::create_dir_all(file.parent().unwrap())?;
-        fs::write(&file, &data)
-    })
+    spawn_blocking(move || fs::write(&file, &data))
 }
 
 pub(crate) async fn read_peers_from_disk<Z: BorshNetworkZone>(
diff --git a/p2p/async-buffer/src/lib.rs b/p2p/async-buffer/src/lib.rs
index 0e2ced24..8174481e 100644
--- a/p2p/async-buffer/src/lib.rs
+++ b/p2p/async-buffer/src/lib.rs
@@ -157,7 +157,7 @@ pub struct BufferSinkSend<'a, T> {
     item: Option<T>,
 }
 
-impl<'a, T> Future for BufferSinkSend<'a, T> {
+impl<T> Future for BufferSinkSend<'_, T> {
     type Output = Result<(), BufferError>;
 
     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
@@ -183,7 +183,7 @@ pub struct BufferSinkReady<'a, T> {
     size_needed: usize,
 }
 
-impl<'a, T> Future for BufferSinkReady<'a, T> {
+impl<T> Future for BufferSinkReady<'_, T> {
     type Output = ();
 
     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
diff --git a/p2p/dandelion-tower/src/tests/mod.rs b/p2p/dandelion-tower/src/tests/mod.rs
index 601ee252..ae9bee1e 100644
--- a/p2p/dandelion-tower/src/tests/mod.rs
+++ b/p2p/dandelion-tower/src/tests/mod.rs
@@ -12,6 +12,7 @@ use crate::{
     OutboundPeer, State,
 };
 
+#[expect(clippy::type_complexity)]
 pub(crate) fn mock_discover_svc<Req: Send + 'static>() -> (
     impl Stream<
         Item = Result<
diff --git a/p2p/p2p-core/src/lib.rs b/p2p/p2p-core/src/lib.rs
index 26e10686..e5746932 100644
--- a/p2p/p2p-core/src/lib.rs
+++ b/p2p/p2p-core/src/lib.rs
@@ -121,7 +121,6 @@ pub trait NetZoneAddress:
     ///
     /// - TODO: IP zone banning?
     /// - TODO: rename this to Host.
-
     type BanID: Debug + Hash + Eq + Clone + Copy + Send + 'static;
 
     /// Changes the port of this address to `port`.
diff --git a/p2p/p2p/src/block_downloader.rs b/p2p/p2p/src/block_downloader.rs
index ee335c9e..db103000 100644
--- a/p2p/p2p/src/block_downloader.rs
+++ b/p2p/p2p/src/block_downloader.rs
@@ -62,13 +62,13 @@ pub struct BlockBatch {
 pub struct BlockDownloaderConfig {
     /// The size in bytes of the buffer between the block downloader and the place which
     /// is consuming the downloaded blocks.
-    pub buffer_size: usize,
+    pub buffer_bytes: usize,
     /// The size of the in progress queue (in bytes) at which we stop requesting more blocks.
-    pub in_progress_queue_size: usize,
+    pub in_progress_queue_bytes: usize,
     /// The [`Duration`] between checking the client pool for free peers.
     pub check_client_pool_interval: Duration,
     /// The target size of a single batch of blocks (in bytes).
-    pub target_batch_size: usize,
+    pub target_batch_bytes: usize,
     /// The initial amount of blocks to request (in number of blocks)
     pub initial_batch_len: usize,
 }
@@ -145,7 +145,7 @@ where
         + 'static,
     C::Future: Send + 'static,
 {
-    let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_size);
+    let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_bytes);
 
     let block_downloader = BlockDownloader::new(peer_set, our_chain_svc, buffer_appender, config);
 
@@ -381,7 +381,7 @@ where
         }
 
         // If our ready queue is too large send duplicate requests for the blocks we are waiting on.
-        if self.block_queue.size() >= self.config.in_progress_queue_size {
+        if self.block_queue.size() >= self.config.in_progress_queue_bytes {
             return self.request_inflight_batch_again(client);
         }
 
@@ -565,7 +565,7 @@ where
                     self.amount_of_blocks_to_request = calculate_next_block_batch_size(
                         block_batch.size,
                         block_batch.blocks.len(),
-                        self.config.target_batch_size,
+                        self.config.target_batch_bytes,
                     );
 
                     tracing::debug!(
diff --git a/p2p/p2p/src/block_downloader/download_batch.rs b/p2p/p2p/src/block_downloader/download_batch.rs
index ef621ce8..7b6e4c96 100644
--- a/p2p/p2p/src/block_downloader/download_batch.rs
+++ b/p2p/p2p/src/block_downloader/download_batch.rs
@@ -146,9 +146,9 @@ fn deserialize_batch(
 
             // Check the height lines up as expected.
             // This must happen after the hash check.
-            if !block
+            if block
                 .number()
-                .is_some_and(|height| height == expected_height)
+                .is_none_or(|height| height != expected_height)
             {
                 tracing::warn!(
                     "Invalid chain, expected height: {expected_height}, got height: {:?}",
diff --git a/p2p/p2p/src/block_downloader/tests.rs b/p2p/p2p/src/block_downloader/tests.rs
index 2d00358e..707b858d 100644
--- a/p2p/p2p/src/block_downloader/tests.rs
+++ b/p2p/p2p/src/block_downloader/tests.rs
@@ -66,10 +66,10 @@ proptest! {
                         genesis: *blockchain.blocks.first().unwrap().0
                     },
                     BlockDownloaderConfig {
-                        buffer_size: 1_000,
-                        in_progress_queue_size: 10_000,
+                        buffer_bytes: 1_000,
+                        in_progress_queue_bytes: 10_000,
                         check_client_pool_interval: Duration::from_secs(5),
-                        target_batch_size: 5_000,
+                        target_batch_bytes: 5_000,
                         initial_batch_len: 1,
                 });
 
diff --git a/p2p/p2p/src/broadcast.rs b/p2p/p2p/src/broadcast.rs
index fc73efbc..38aba323 100644
--- a/p2p/p2p/src/broadcast.rs
+++ b/p2p/p2p/src/broadcast.rs
@@ -57,6 +57,7 @@ impl Default for BroadcastConfig {
 /// - The [`BroadcastSvc`]
 /// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **outbound** peers.
 /// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **inbound** peers.
+#[expect(clippy::type_complexity)]
 pub(crate) fn init_broadcast_channels<N: NetworkZone>(
     config: BroadcastConfig,
 ) -> (
diff --git a/pruning/src/lib.rs b/pruning/src/lib.rs
index cd31598a..e49aedb1 100644
--- a/pruning/src/lib.rs
+++ b/pruning/src/lib.rs
@@ -327,7 +327,7 @@ impl DecompressedPruningSeed {
     ///
     /// This function will also error if `block_height` > `blockchain_height`
     ///
-    pub fn get_next_unpruned_block(
+    pub const fn get_next_unpruned_block(
         &self,
         block_height: usize,
         blockchain_height: usize,
diff --git a/rpc/interface/src/route/bin.rs b/rpc/interface/src/route/bin.rs
index f7e3a01c..2fd9963c 100644
--- a/rpc/interface/src/route/bin.rs
+++ b/rpc/interface/src/route/bin.rs
@@ -68,7 +68,7 @@ macro_rules! generate_endpoints_with_no_input {
 /// - [`generate_endpoints_with_input`]
 /// - [`generate_endpoints_with_no_input`]
 macro_rules! generate_endpoints_inner {
-    ($variant:ident, $handler:ident, $request:expr) => {
+    ($variant:ident, $handler:ident, $request:expr_2021) => {
         paste::paste! {
             {
                 // Check if restricted.
diff --git a/rpc/interface/src/route/other.rs b/rpc/interface/src/route/other.rs
index 3ff84487..19a58d93 100644
--- a/rpc/interface/src/route/other.rs
+++ b/rpc/interface/src/route/other.rs
@@ -71,7 +71,7 @@ macro_rules! generate_endpoints_with_no_input {
 /// - [`generate_endpoints_with_input`]
 /// - [`generate_endpoints_with_no_input`]
 macro_rules! generate_endpoints_inner {
-    ($variant:ident, $handler:ident, $request:expr) => {
+    ($variant:ident, $handler:ident, $request:expr_2021) => {
         paste::paste! {
             {
                 // Check if restricted.
diff --git a/rpc/types/src/bin.rs b/rpc/types/src/bin.rs
index 7b941918..414214ca 100644
--- a/rpc/types/src/bin.rs
+++ b/rpc/types/src/bin.rs
@@ -9,26 +9,19 @@ use cuprate_fixed_bytes::ByteArrayVec;
 use serde::{Deserialize, Serialize};
 
 #[cfg(feature = "epee")]
-use cuprate_epee_encoding::{
-    container_as_blob::ContainerAsBlob,
-    epee_object, error,
-    macros::bytes::{Buf, BufMut},
-    read_epee_value, write_field, EpeeObject, EpeeObjectBuilder,
-};
+use cuprate_epee_encoding::container_as_blob::ContainerAsBlob;
 
 use cuprate_types::BlockCompleteEntry;
 
 use crate::{
     base::AccessResponseBase,
-    macros::{define_request, define_request_and_response, define_request_and_response_doc},
-    misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolTxInfo, Status},
+    macros::define_request_and_response,
+    misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolInfo},
     rpc_call::RpcCallValue,
 };
 
 #[cfg(any(feature = "epee", feature = "serde"))]
 use crate::defaults::{default_false, default_zero};
-#[cfg(feature = "epee")]
-use crate::misc::PoolInfoExtent;
 
 //---------------------------------------------------------------------------------------------------- Definitions
 define_request_and_response! {
@@ -115,15 +108,14 @@ define_request_and_response! {
     }
 }
 
-//---------------------------------------------------------------------------------------------------- GetBlocks
-define_request! {
-    #[doc = define_request_and_response_doc!(
-        "response" => GetBlocksResponse,
-        get_blocksbin,
-        cc73fe71162d564ffda8e549b79a350bca53c454,
-        core_rpc_server_commands_defs, h, 162, 262,
-    )]
-    GetBlocksRequest {
+define_request_and_response! {
+    get_blocksbin,
+    cc73fe71162d564ffda8e549b79a350bca53c454 =>
+    core_rpc_server_commands_defs.h => 162..=262,
+
+    GetBlocks,
+
+    Request {
         requested_info: u8 = default_zero::<u8>(), "default_zero",
         // FIXME: This is a `std::list` in `monerod` because...?
         block_ids: ByteArrayVec<32>,
@@ -131,259 +123,17 @@ define_request! {
         prune: bool,
         no_miner_tx: bool = default_false(), "default_false",
         pool_info_since: u64 = default_zero::<u64>(), "default_zero",
-    }
-}
+    },
 
-#[doc = define_request_and_response_doc!(
-    "request" => GetBlocksRequest,
-    get_blocksbin,
-    cc73fe71162d564ffda8e549b79a350bca53c454,
-    core_rpc_server_commands_defs, h, 162, 262,
-)]
-///
-/// This response's variant depends upon [`PoolInfoExtent`].
-#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub enum GetBlocksResponse {
-    /// Will always serialize a [`PoolInfoExtent::None`] field.
-    PoolInfoNone(GetBlocksResponsePoolInfoNone),
-    /// Will always serialize a [`PoolInfoExtent::Incremental`] field.
-    PoolInfoIncremental(GetBlocksResponsePoolInfoIncremental),
-    /// Will always serialize a [`PoolInfoExtent::Full`] field.
-    PoolInfoFull(GetBlocksResponsePoolInfoFull),
-}
-
-impl Default for GetBlocksResponse {
-    fn default() -> Self {
-        Self::PoolInfoNone(GetBlocksResponsePoolInfoNone::default())
-    }
-}
-
-/// Data within [`GetBlocksResponse::PoolInfoNone`].
-#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct GetBlocksResponsePoolInfoNone {
-    pub status: Status,
-    pub untrusted: bool,
-    pub blocks: Vec<BlockCompleteEntry>,
-    pub start_height: u64,
-    pub current_height: u64,
-    pub output_indices: Vec<BlockOutputIndices>,
-    pub daemon_time: u64,
-}
-
-#[cfg(feature = "epee")]
-epee_object! {
-    GetBlocksResponsePoolInfoNone,
-    status: Status,
-    untrusted: bool,
-    blocks: Vec<BlockCompleteEntry>,
-    start_height: u64,
-    current_height: u64,
-    output_indices: Vec<BlockOutputIndices>,
-    daemon_time: u64,
-}
-
-/// Data within [`GetBlocksResponse::PoolInfoIncremental`].
-#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct GetBlocksResponsePoolInfoIncremental {
-    pub status: Status,
-    pub untrusted: bool,
-    pub blocks: Vec<BlockCompleteEntry>,
-    pub start_height: u64,
-    pub current_height: u64,
-    pub output_indices: Vec<BlockOutputIndices>,
-    pub daemon_time: u64,
-    pub added_pool_txs: Vec<PoolTxInfo>,
-    pub remaining_added_pool_txids: ByteArrayVec<32>,
-    pub removed_pool_txids: ByteArrayVec<32>,
-}
-
-#[cfg(feature = "epee")]
-epee_object! {
-    GetBlocksResponsePoolInfoIncremental,
-    status: Status,
-    untrusted: bool,
-    blocks: Vec<BlockCompleteEntry>,
-    start_height: u64,
-    current_height: u64,
-    output_indices: Vec<BlockOutputIndices>,
-    daemon_time: u64,
-    added_pool_txs: Vec<PoolTxInfo>,
-    remaining_added_pool_txids: ByteArrayVec<32>,
-    removed_pool_txids: ByteArrayVec<32>,
-}
-
-/// Data within [`GetBlocksResponse::PoolInfoFull`].
-#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct GetBlocksResponsePoolInfoFull {
-    pub status: Status,
-    pub untrusted: bool,
-    pub blocks: Vec<BlockCompleteEntry>,
-    pub start_height: u64,
-    pub current_height: u64,
-    pub output_indices: Vec<BlockOutputIndices>,
-    pub daemon_time: u64,
-    pub added_pool_txs: Vec<PoolTxInfo>,
-    pub remaining_added_pool_txids: ByteArrayVec<32>,
-}
-
-#[cfg(feature = "epee")]
-epee_object! {
-    GetBlocksResponsePoolInfoFull,
-    status: Status,
-    untrusted: bool,
-    blocks: Vec<BlockCompleteEntry>,
-    start_height: u64,
-    current_height: u64,
-    output_indices: Vec<BlockOutputIndices>,
-    daemon_time: u64,
-    added_pool_txs: Vec<PoolTxInfo>,
-    remaining_added_pool_txids: ByteArrayVec<32>,
-}
-
-#[cfg(feature = "epee")]
-/// [`EpeeObjectBuilder`] for [`GetBlocksResponse`].
-///
-/// Not for public usage.
-#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
-#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-pub struct __GetBlocksResponseEpeeBuilder {
-    pub status: Option<Status>,
-    pub untrusted: Option<bool>,
-    pub blocks: Option<Vec<BlockCompleteEntry>>,
-    pub start_height: Option<u64>,
-    pub current_height: Option<u64>,
-    pub output_indices: Option<Vec<BlockOutputIndices>>,
-    pub daemon_time: Option<u64>,
-    pub pool_info_extent: Option<PoolInfoExtent>,
-    pub added_pool_txs: Option<Vec<PoolTxInfo>>,
-    pub remaining_added_pool_txids: Option<ByteArrayVec<32>>,
-    pub removed_pool_txids: Option<ByteArrayVec<32>>,
-}
-
-#[cfg(feature = "epee")]
-impl EpeeObjectBuilder<GetBlocksResponse> for __GetBlocksResponseEpeeBuilder {
-    fn add_field<B: Buf>(&mut self, name: &str, r: &mut B) -> error::Result<bool> {
-        macro_rules! read_epee_field {
-            ($($field:ident),*) => {
-                match name {
-                    $(
-                        stringify!($field) => { self.$field = Some(read_epee_value(r)?); },
-                    )*
-                    _ => return Ok(false),
-                }
-            };
-        }
-
-        read_epee_field! {
-            status,
-            untrusted,
-            blocks,
-            start_height,
-            current_height,
-            output_indices,
-            daemon_time,
-            pool_info_extent,
-            added_pool_txs,
-            remaining_added_pool_txids,
-            removed_pool_txids
-        }
-
-        Ok(true)
-    }
-
-    fn finish(self) -> error::Result<GetBlocksResponse> {
-        const ELSE: error::Error = error::Error::Format("Required field was not found!");
-
-        let status = self.status.ok_or(ELSE)?;
-        let untrusted = self.untrusted.ok_or(ELSE)?;
-        let blocks = self.blocks.ok_or(ELSE)?;
-        let start_height = self.start_height.ok_or(ELSE)?;
-        let current_height = self.current_height.ok_or(ELSE)?;
-        let output_indices = self.output_indices.ok_or(ELSE)?;
-        let daemon_time = self.daemon_time.ok_or(ELSE)?;
-        let pool_info_extent = self.pool_info_extent.ok_or(ELSE)?;
-
-        let this = match pool_info_extent {
-            PoolInfoExtent::None => {
-                GetBlocksResponse::PoolInfoNone(GetBlocksResponsePoolInfoNone {
-                    status,
-                    untrusted,
-                    blocks,
-                    start_height,
-                    current_height,
-                    output_indices,
-                    daemon_time,
-                })
-            }
-            PoolInfoExtent::Incremental => {
-                GetBlocksResponse::PoolInfoIncremental(GetBlocksResponsePoolInfoIncremental {
-                    status,
-                    untrusted,
-                    blocks,
-                    start_height,
-                    current_height,
-                    output_indices,
-                    daemon_time,
-                    added_pool_txs: self.added_pool_txs.ok_or(ELSE)?,
-                    remaining_added_pool_txids: self.remaining_added_pool_txids.ok_or(ELSE)?,
-                    removed_pool_txids: self.removed_pool_txids.ok_or(ELSE)?,
-                })
-            }
-            PoolInfoExtent::Full => {
-                GetBlocksResponse::PoolInfoFull(GetBlocksResponsePoolInfoFull {
-                    status,
-                    untrusted,
-                    blocks,
-                    start_height,
-                    current_height,
-                    output_indices,
-                    daemon_time,
-                    added_pool_txs: self.added_pool_txs.ok_or(ELSE)?,
-                    remaining_added_pool_txids: self.remaining_added_pool_txids.ok_or(ELSE)?,
-                })
-            }
-        };
-
-        Ok(this)
-    }
-}
-
-#[cfg(feature = "epee")]
-impl EpeeObject for GetBlocksResponse {
-    type Builder = __GetBlocksResponseEpeeBuilder;
-
-    fn number_of_fields(&self) -> u64 {
-        // [`PoolInfoExtent`] + inner struct fields.
-        let inner_fields = match self {
-            Self::PoolInfoNone(s) => s.number_of_fields(),
-            Self::PoolInfoIncremental(s) => s.number_of_fields(),
-            Self::PoolInfoFull(s) => s.number_of_fields(),
-        };
-
-        1 + inner_fields
-    }
-
-    fn write_fields<B: BufMut>(self, w: &mut B) -> error::Result<()> {
-        match self {
-            Self::PoolInfoNone(s) => {
-                s.write_fields(w)?;
-                write_field(PoolInfoExtent::None.to_u8(), "pool_info_extent", w)?;
-            }
-            Self::PoolInfoIncremental(s) => {
-                s.write_fields(w)?;
-                write_field(PoolInfoExtent::Incremental.to_u8(), "pool_info_extent", w)?;
-            }
-            Self::PoolInfoFull(s) => {
-                s.write_fields(w)?;
-                write_field(PoolInfoExtent::Full.to_u8(), "pool_info_extent", w)?;
-            }
-        }
-
-        Ok(())
+    // TODO: add `top_block_hash` field
+    // <https://github.com/monero-project/monero/blame/893916ad091a92e765ce3241b94e706ad012b62a/src/rpc/core_rpc_server_commands_defs.h#L263>
+    AccessResponseBase {
+        blocks: Vec<BlockCompleteEntry>,
+        start_height: u64,
+        current_height: u64,
+        output_indices: Vec<BlockOutputIndices>,
+        daemon_time: u64,
+        pool_info: PoolInfo,
     }
 }
 
diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs
index cb55e64a..a454cab4 100644
--- a/rpc/types/src/json.rs
+++ b/rpc/types/src/json.rs
@@ -37,7 +37,7 @@ macro_rules! serde_doc_test {
     (
         // `const` string from `cuprate_test_utils::rpc::data`
         //  v
-        $cuprate_test_utils_rpc_const:ident => $expected:expr
+        $cuprate_test_utils_rpc_const:ident => $expected:expr_2021
         //                                     ^
         //                     Expected value as an expression
     ) => {
diff --git a/rpc/types/src/macros.rs b/rpc/types/src/macros.rs
index 85f4272e..db1b5d8d 100644
--- a/rpc/types/src/macros.rs
+++ b/rpc/types/src/macros.rs
@@ -77,7 +77,7 @@ macro_rules! define_request_and_response {
                 $( #[$request_field_attr:meta] )* // Field attribute.
                 $request_field:ident: $request_field_type:ty // field_name: field type
                 $(as $request_field_type_as:ty)? // (optional) alternative type (de)serialization
-                $(= $request_field_type_default:expr, $request_field_type_default_string:literal)?, // (optional) default value
+                $(= $request_field_type_default:expr_2021, $request_field_type_default_string:literal)?, // (optional) default value
             )*
         },
 
@@ -89,7 +89,7 @@ macro_rules! define_request_and_response {
                 $( #[$response_field_attr:meta] )*
                 $response_field:ident: $response_field_type:ty
                 $(as $response_field_type_as:ty)?
-                $(= $response_field_type_default:expr, $response_field_type_default_string:literal)?,
+                $(= $response_field_type_default:expr_2021, $response_field_type_default_string:literal)?,
             )*
         }
     ) => { paste::paste! {
@@ -229,7 +229,7 @@ macro_rules! define_request {
                 // field_name: FieldType
                 $field:ident: $field_type:ty
                 $(as $field_as:ty)?
-                $(= $field_default:expr, $field_default_string:literal)?,
+                $(= $field_default:expr_2021, $field_default_string:literal)?,
                 // The $field_default is an optional extra token that represents
                 // a default value to pass to [`cuprate_epee_encoding::epee_object`],
                 // see it for usage.
@@ -286,7 +286,7 @@ macro_rules! define_response {
                 $( #[$field_attr:meta] )*
                 $field:ident: $field_type:ty
                 $(as $field_as:ty)?
-                $(= $field_default:expr, $field_default_string:literal)?,
+                $(= $field_default:expr_2021, $field_default_string:literal)?,
             )*
         }
     ) => {
@@ -323,7 +323,7 @@ macro_rules! define_response {
                 $( #[$field_attr:meta] )*
                 $field:ident: $field_type:ty
                 $(as $field_as:ty)?
-                $(= $field_default:expr, $field_default_string:literal)?,
+                $(= $field_default:expr_2021, $field_default_string:literal)?,
             )*
         }
     ) => {
diff --git a/rpc/types/src/misc/misc.rs b/rpc/types/src/misc/misc.rs
index 8f7467ba..2d88f2a4 100644
--- a/rpc/types/src/misc/misc.rs
+++ b/rpc/types/src/misc/misc.rs
@@ -11,11 +11,11 @@ use serde::{Deserialize, Serialize};
 #[cfg(feature = "epee")]
 use cuprate_epee_encoding::epee_object;
 
-use crate::macros::monero_definition_link;
-
 #[cfg(any(feature = "epee", feature = "serde"))]
 use crate::defaults::default_zero;
 
+use crate::macros::monero_definition_link;
+
 //---------------------------------------------------------------------------------------------------- Macros
 /// This macro (local to this file) defines all the misc types.
 ///
@@ -37,7 +37,7 @@ macro_rules! define_struct_and_impl_epee {
             $(
                 $( #[$field_attr:meta] )* // Field attributes
                 // Field name => the type => optional `epee_object` default value.
-                $field_name:ident: $field_type:ty $(= $field_default:expr)?,
+                $field_name:ident: $field_type:ty $(= $field_default:expr_2021)?,
             )*
         }
     ) => {
diff --git a/rpc/types/src/misc/mod.rs b/rpc/types/src/misc/mod.rs
index e09f8477..49767560 100644
--- a/rpc/types/src/misc/mod.rs
+++ b/rpc/types/src/misc/mod.rs
@@ -17,6 +17,7 @@ mod distribution;
 mod key_image_spent_status;
 #[expect(clippy::module_inception)]
 mod misc;
+mod pool_info;
 mod pool_info_extent;
 mod status;
 mod tx_entry;
@@ -30,6 +31,7 @@ pub use misc::{
     OutputDistributionData, Peer, PoolTxInfo, PublicNode, SetBan, Span, SpentKeyImageInfo,
     SyncInfoPeer, TxBacklogEntry, TxInfo, TxOutputIndices, TxpoolHisto, TxpoolStats,
 };
+pub use pool_info::PoolInfo;
 pub use pool_info_extent::PoolInfoExtent;
 pub use status::Status;
 pub use tx_entry::TxEntry;
diff --git a/rpc/types/src/misc/pool_info.rs b/rpc/types/src/misc/pool_info.rs
new file mode 100644
index 00000000..e9ba8753
--- /dev/null
+++ b/rpc/types/src/misc/pool_info.rs
@@ -0,0 +1,171 @@
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Serialize};
+
+#[cfg(feature = "epee")]
+use crate::misc::PoolInfoExtent;
+#[cfg(feature = "epee")]
+use cuprate_epee_encoding::{
+    epee_object, error,
+    macros::bytes::{Buf, BufMut},
+    read_epee_value, write_field, EpeeObject, EpeeObjectBuilder,
+};
+
+use cuprate_fixed_bytes::ByteArrayVec;
+
+use crate::misc::PoolTxInfo;
+
+//---------------------------------------------------------------------------------------------------- PoolInfo
+#[doc = crate::macros::monero_definition_link!(
+    cc73fe71162d564ffda8e549b79a350bca53c454,
+    "rpc/core_rpc_server_commands_defs.h",
+    223..=228
+)]
+/// Used in [`crate::bin::GetBlocksResponse`].
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[repr(u8)]
+pub enum PoolInfo {
+    #[default]
+    None,
+    Incremental(PoolInfoIncremental),
+    Full(PoolInfoFull),
+}
+
+//---------------------------------------------------------------------------------------------------- Internal data
+/// Data within [`PoolInfo::Incremental`].
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct PoolInfoIncremental {
+    pub added_pool_txs: Vec<PoolTxInfo>,
+    pub remaining_added_pool_txids: ByteArrayVec<32>,
+    pub removed_pool_txids: ByteArrayVec<32>,
+}
+
+#[cfg(feature = "epee")]
+epee_object! {
+    PoolInfoIncremental,
+    added_pool_txs: Vec<PoolTxInfo>,
+    remaining_added_pool_txids: ByteArrayVec<32>,
+    removed_pool_txids: ByteArrayVec<32>,
+}
+
+/// Data within [`PoolInfo::Full`].
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct PoolInfoFull {
+    pub added_pool_txs: Vec<PoolTxInfo>,
+    pub remaining_added_pool_txids: ByteArrayVec<32>,
+}
+
+#[cfg(feature = "epee")]
+epee_object! {
+    PoolInfoFull,
+    added_pool_txs: Vec<PoolTxInfo>,
+    remaining_added_pool_txids: ByteArrayVec<32>,
+}
+
+//---------------------------------------------------------------------------------------------------- PoolInfo epee impl
+#[cfg(feature = "epee")]
+/// [`EpeeObjectBuilder`] for [`GetBlocksResponse`].
+///
+/// Not for public usage.
+#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct __PoolInfoEpeeBuilder {
+    /// This is a distinct field in `monerod`,
+    /// which as represented in this library with [`PoolInfo`]'s `u8` tag.
+    pub pool_info_extent: Option<PoolInfoExtent>,
+
+    pub added_pool_txs: Option<Vec<PoolTxInfo>>,
+    pub remaining_added_pool_txids: Option<ByteArrayVec<32>>,
+    pub removed_pool_txids: Option<ByteArrayVec<32>>,
+}
+
+// Custom epee implementation.
+//
+// HACK/INVARIANT:
+// If any data within [`PoolInfo`] changes, the below code should be changed as well.
+#[cfg(feature = "epee")]
+impl EpeeObjectBuilder<PoolInfo> for __PoolInfoEpeeBuilder {
+    fn add_field<B: Buf>(&mut self, name: &str, r: &mut B) -> error::Result<bool> {
+        macro_rules! read_epee_field {
+            ($($field:ident),*) => {
+                match name {
+                    $(
+                        stringify!($field) => { self.$field = Some(read_epee_value(r)?); },
+                    )*
+                    _ => return Ok(false),
+                }
+            };
+        }
+
+        read_epee_field! {
+            pool_info_extent,
+            added_pool_txs,
+            remaining_added_pool_txids,
+            removed_pool_txids
+        }
+
+        Ok(true)
+    }
+
+    fn finish(self) -> error::Result<PoolInfo> {
+        // INVARIANT:
+        // `monerod` omits serializing the field itself when a container is empty,
+        // `unwrap_or_default()` is used over `error()` in these cases.
+        // Some of the uses are when values have default fallbacks: `pool_info_extent`.
+
+        let pool_info_extent = self.pool_info_extent.unwrap_or_default();
+        let this = match pool_info_extent {
+            PoolInfoExtent::None => PoolInfo::None,
+            PoolInfoExtent::Incremental => PoolInfo::Incremental(PoolInfoIncremental {
+                added_pool_txs: self.added_pool_txs.unwrap_or_default(),
+                remaining_added_pool_txids: self.remaining_added_pool_txids.unwrap_or_default(),
+                removed_pool_txids: self.removed_pool_txids.unwrap_or_default(),
+            }),
+            PoolInfoExtent::Full => PoolInfo::Full(PoolInfoFull {
+                added_pool_txs: self.added_pool_txs.unwrap_or_default(),
+                remaining_added_pool_txids: self.remaining_added_pool_txids.unwrap_or_default(),
+            }),
+        };
+
+        Ok(this)
+    }
+}
+
+#[cfg(feature = "epee")]
+impl EpeeObject for PoolInfo {
+    type Builder = __PoolInfoEpeeBuilder;
+
+    fn number_of_fields(&self) -> u64 {
+        // Inner struct fields.
+        let inner_fields = match self {
+            Self::None => 0,
+            Self::Incremental(s) => s.number_of_fields(),
+            Self::Full(s) => s.number_of_fields(),
+        };
+
+        // [`PoolInfoExtent`] + inner struct fields
+        1 + inner_fields
+    }
+
+    fn write_fields<B: BufMut>(self, w: &mut B) -> error::Result<()> {
+        const FIELD: &str = "pool_info_extent";
+
+        match self {
+            Self::None => {
+                write_field(PoolInfoExtent::None.to_u8(), FIELD, w)?;
+            }
+            Self::Incremental(s) => {
+                s.write_fields(w)?;
+                write_field(PoolInfoExtent::Incremental.to_u8(), FIELD, w)?;
+            }
+            Self::Full(s) => {
+                s.write_fields(w)?;
+                write_field(PoolInfoExtent::Full.to_u8(), FIELD, w)?;
+            }
+        }
+
+        Ok(())
+    }
+}
diff --git a/rpc/types/src/misc/tx_entry.rs b/rpc/types/src/misc/tx_entry.rs
index 86d02075..59dd4604 100644
--- a/rpc/types/src/misc/tx_entry.rs
+++ b/rpc/types/src/misc/tx_entry.rs
@@ -2,8 +2,6 @@
 
 //---------------------------------------------------------------------------------------------------- Use
 #[cfg(feature = "serde")]
-use crate::serde::{serde_false, serde_true};
-#[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 
 #[cfg(feature = "epee")]
@@ -13,6 +11,9 @@ use cuprate_epee_encoding::{
     EpeeObject, EpeeObjectBuilder,
 };
 
+#[cfg(feature = "serde")]
+use crate::serde::{serde_false, serde_true};
+
 //---------------------------------------------------------------------------------------------------- TxEntry
 #[doc = crate::macros::monero_definition_link!(
     cc73fe71162d564ffda8e549b79a350bca53c454,
diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs
index 3694041c..d5cbe82b 100644
--- a/rpc/types/src/other.rs
+++ b/rpc/types/src/other.rs
@@ -65,7 +65,7 @@ macro_rules! serde_doc_test {
     (
         // `const` string from `cuprate_test_utils::rpc::data`
         //  v
-        $cuprate_test_utils_rpc_const:ident => $expected:expr
+        $cuprate_test_utils_rpc_const:ident => $expected:expr_2021
         //                                     ^
         //                     Expected value as an expression
     ) => {
diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml
index 46250541..c9359248 100644
--- a/storage/blockchain/Cargo.toml
+++ b/storage/blockchain/Cargo.toml
@@ -34,7 +34,7 @@ serde            = { workspace = true, optional = true }
 tower        = { workspace = true }
 thread_local = { workspace = true }
 rayon        = { workspace = true }
-bytes = "1.7.2"
+bytes        = { workspace = true }
 
 [dev-dependencies]
 cuprate-constants  = { workspace = true }
diff --git a/storage/blockchain/src/ops/alt_block/block.rs b/storage/blockchain/src/ops/alt_block/block.rs
index 6bd01cb3..480bd7d8 100644
--- a/storage/blockchain/src/ops/alt_block/block.rs
+++ b/storage/blockchain/src/ops/alt_block/block.rs
@@ -1,7 +1,7 @@
 use bytemuck::TransparentWrapper;
 use monero_serai::block::{Block, BlockHeader};
 
-use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec};
+use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, StorableVec};
 use cuprate_helper::map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits};
 use cuprate_types::{AltBlockInformation, Chain, ChainId, ExtendedBlockHeader, HardFork};
 
@@ -21,7 +21,7 @@ use crate::{
 pub fn flush_alt_blocks<'a, E: cuprate_database::EnvInner<'a>>(
     env_inner: &E,
     tx_rw: &mut E::Rw<'_>,
-) -> Result<(), RuntimeError> {
+) -> DbResult<()> {
     use crate::tables::{
         AltBlockBlobs, AltBlockHeights, AltBlocksInfo, AltChainInfos, AltTransactionBlobs,
         AltTransactionInfos,
@@ -47,10 +47,7 @@ pub fn flush_alt_blocks<'a, E: cuprate_database::EnvInner<'a>>(
 /// - `alt_block.height` is == `0`
 /// - `alt_block.txs.len()` != `alt_block.block.transactions.len()`
 ///
-pub fn add_alt_block(
-    alt_block: &AltBlockInformation,
-    tables: &mut impl TablesMut,
-) -> Result<(), RuntimeError> {
+pub fn add_alt_block(alt_block: &AltBlockInformation, tables: &mut impl TablesMut) -> DbResult<()> {
     let alt_block_height = AltBlockHeight {
         chain_id: alt_block.chain_id.into(),
         height: alt_block.height,
@@ -100,7 +97,7 @@ pub fn add_alt_block(
 pub fn get_alt_block(
     alt_block_height: &AltBlockHeight,
     tables: &impl Tables,
-) -> Result<AltBlockInformation, RuntimeError> {
+) -> DbResult<AltBlockInformation> {
     let block_info = tables.alt_blocks_info().get(alt_block_height)?;
 
     let block_blob = tables.alt_block_blobs().get(alt_block_height)?.0;
@@ -111,7 +108,7 @@ pub fn get_alt_block(
         .transactions
         .iter()
         .map(|tx_hash| get_alt_transaction(tx_hash, tables))
-        .collect::<Result<_, RuntimeError>>()?;
+        .collect::<DbResult<_>>()?;
 
     Ok(AltBlockInformation {
         block,
@@ -141,7 +138,7 @@ pub fn get_alt_block_hash(
     block_height: &BlockHeight,
     alt_chain: ChainId,
     tables: &impl Tables,
-) -> Result<BlockHash, RuntimeError> {
+) -> DbResult<BlockHash> {
     let alt_chains = tables.alt_chain_infos();
 
     // First find what [`ChainId`] this block would be stored under.
@@ -188,7 +185,7 @@ pub fn get_alt_block_hash(
 pub fn get_alt_block_extended_header_from_height(
     height: &AltBlockHeight,
     table: &impl Tables,
-) -> Result<ExtendedBlockHeader, RuntimeError> {
+) -> DbResult<ExtendedBlockHeader> {
     let block_info = table.alt_blocks_info().get(height)?;
 
     let block_blob = table.alt_block_blobs().get(height)?.0;
diff --git a/storage/blockchain/src/ops/alt_block/chain.rs b/storage/blockchain/src/ops/alt_block/chain.rs
index 5b5f3cb1..676fd7f2 100644
--- a/storage/blockchain/src/ops/alt_block/chain.rs
+++ b/storage/blockchain/src/ops/alt_block/chain.rs
@@ -1,6 +1,6 @@
 use std::cmp::{max, min};
 
-use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError};
+use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, RuntimeError};
 use cuprate_types::{Chain, ChainId};
 
 use crate::{
@@ -21,7 +21,7 @@ pub fn update_alt_chain_info(
     alt_block_height: &AltBlockHeight,
     prev_hash: &BlockHash,
     tables: &mut impl TablesMut,
-) -> Result<(), RuntimeError> {
+) -> DbResult<()> {
     let parent_chain = match tables.alt_block_heights().get(prev_hash) {
         Ok(alt_parent_height) => Chain::Alt(alt_parent_height.chain_id.into()),
         Err(RuntimeError::KeyNotFound) => Chain::Main,
@@ -74,7 +74,7 @@ pub fn get_alt_chain_history_ranges(
     range: std::ops::Range<BlockHeight>,
     alt_chain: ChainId,
     alt_chain_infos: &impl DatabaseRo<AltChainInfos>,
-) -> Result<Vec<(Chain, std::ops::Range<BlockHeight>)>, RuntimeError> {
+) -> DbResult<Vec<(Chain, std::ops::Range<BlockHeight>)>> {
     let mut ranges = Vec::with_capacity(5);
 
     let mut i = range.end;
diff --git a/storage/blockchain/src/ops/alt_block/tx.rs b/storage/blockchain/src/ops/alt_block/tx.rs
index 4185c6cb..b410fed9 100644
--- a/storage/blockchain/src/ops/alt_block/tx.rs
+++ b/storage/blockchain/src/ops/alt_block/tx.rs
@@ -1,7 +1,7 @@
 use bytemuck::TransparentWrapper;
 use monero_serai::transaction::Transaction;
 
-use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec};
+use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, RuntimeError, StorableVec};
 use cuprate_types::VerifiedTransactionInformation;
 
 use crate::{
@@ -22,7 +22,7 @@ use crate::{
 pub fn add_alt_transaction_blob(
     tx: &VerifiedTransactionInformation,
     tables: &mut impl TablesMut,
-) -> Result<(), RuntimeError> {
+) -> DbResult<()> {
     tables.alt_transaction_infos_mut().put(
         &tx.tx_hash,
         &AltTransactionInfo {
@@ -51,7 +51,7 @@ pub fn add_alt_transaction_blob(
 pub fn get_alt_transaction(
     tx_hash: &TxHash,
     tables: &impl Tables,
-) -> Result<VerifiedTransactionInformation, RuntimeError> {
+) -> DbResult<VerifiedTransactionInformation> {
     let tx_info = tables.alt_transaction_infos().get(tx_hash)?;
 
     let tx_blob = match tables.alt_transaction_blobs().get(tx_hash) {
diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs
index 94c1fc64..2dc88aa4 100644
--- a/storage/blockchain/src/ops/block.rs
+++ b/storage/blockchain/src/ops/block.rs
@@ -9,7 +9,7 @@ use monero_serai::{
 };
 
 use cuprate_database::{
-    RuntimeError, StorableVec, {DatabaseIter, DatabaseRo, DatabaseRw},
+    DbResult, RuntimeError, StorableVec, {DatabaseIter, DatabaseRo, DatabaseRw},
 };
 use cuprate_helper::cast::usize_to_u64;
 use cuprate_helper::{
@@ -44,12 +44,9 @@ use crate::{
 /// # Panics
 /// This function will panic if:
 /// - `block.height > u32::MAX` (not normally possible)
-/// - `block.height` is not != [`chain_height`]
+/// - `block.height` is != [`chain_height`]
 // no inline, too big.
-pub fn add_block(
-    block: &VerifiedBlockInformation,
-    tables: &mut impl TablesMut,
-) -> Result<(), RuntimeError> {
+pub fn add_block(block: &VerifiedBlockInformation, tables: &mut impl TablesMut) -> DbResult<()> {
     //------------------------------------------------------ Check preconditions first
 
     // Cast height to `u32` for storage (handled at top of function).
@@ -155,7 +152,7 @@ pub fn add_block(
 pub fn pop_block(
     move_to_alt_chain: Option<ChainId>,
     tables: &mut impl TablesMut,
-) -> Result<(BlockHeight, BlockHash, Block), RuntimeError> {
+) -> DbResult<(BlockHeight, BlockHash, Block)> {
     //------------------------------------------------------ Block Info
     // Remove block data from tables.
     let (block_height, block_info) = tables.block_infos_mut().pop_last()?;
@@ -197,7 +194,7 @@ pub fn pop_block(
                     tx,
                 })
             })
-            .collect::<Result<Vec<VerifiedTransactionInformation>, RuntimeError>>()?;
+            .collect::<DbResult<Vec<VerifiedTransactionInformation>>>()?;
 
         alt_block::add_alt_block(
             &AltBlockInformation {
@@ -226,6 +223,7 @@ pub fn pop_block(
 
     Ok((block_height, block_info.block_hash, block))
 }
+
 //---------------------------------------------------------------------------------------------------- `get_block_blob_with_tx_indexes`
 /// Retrieve a block's raw bytes, the index of the miner transaction and the number of non miner-txs in the block.
 ///
@@ -234,11 +232,8 @@ pub fn get_block_blob_with_tx_indexes(
     block_height: &BlockHeight,
     tables: &impl Tables,
 ) -> Result<(Vec<u8>, u64, usize), RuntimeError> {
-    use monero_serai::io::write_varint;
+    let miner_tx_idx = tables.block_infos().get(block_height)?.mining_tx_index;
 
-    let block_info = tables.block_infos().get(block_height)?;
-
-    let miner_tx_idx = block_info.mining_tx_index;
     let block_txs = tables.block_txs_hashes().get(block_height)?.0;
     let numb_txs = block_txs.len();
 
@@ -250,10 +245,10 @@ pub fn get_block_blob_with_tx_indexes(
     block.append(&mut miner_tx_blob);
 
     // Add the blocks tx hashes.
-    write_varint(&block_txs.len(), &mut block)
+    monero_serai::io::write_varint(&block_txs.len(), &mut block)
         .expect("The number of txs per block will not exceed u64::MAX");
 
-    let block_txs_bytes = bytemuck::cast_slice(&block_txs);
+    let block_txs_bytes = bytemuck::must_cast_slice(&block_txs);
     block.extend_from_slice(block_txs_bytes);
 
     Ok((block, miner_tx_idx, numb_txs))
@@ -275,7 +270,7 @@ pub fn get_block_complete_entry(
 
     let tx_blobs = tables
         .tx_blobs_iter()
-        .get_range(first_tx_idx..=usize_to_u64(numb_non_miner_txs))?
+        .get_range(first_tx_idx..(usize_to_u64(numb_non_miner_txs) + first_tx_idx))?
         .map(|tx_blob| Ok(Bytes::from(tx_blob?.0)))
         .collect::<Result<_, RuntimeError>>()?;
 
@@ -301,7 +296,7 @@ pub fn get_block_complete_entry(
 pub fn get_block_extended_header(
     block_hash: &BlockHash,
     tables: &impl Tables,
-) -> Result<ExtendedBlockHeader, RuntimeError> {
+) -> DbResult<ExtendedBlockHeader> {
     get_block_extended_header_from_height(&tables.block_heights().get(block_hash)?, tables)
 }
 
@@ -315,7 +310,7 @@ pub fn get_block_extended_header(
 pub fn get_block_extended_header_from_height(
     block_height: &BlockHeight,
     tables: &impl Tables,
-) -> Result<ExtendedBlockHeader, RuntimeError> {
+) -> DbResult<ExtendedBlockHeader> {
     let block_info = tables.block_infos().get(block_height)?;
     let block_header_blob = tables.block_header_blobs().get(block_height)?.0;
     let block_header = BlockHeader::read(&mut block_header_blob.as_slice())?;
@@ -341,7 +336,7 @@ pub fn get_block_extended_header_from_height(
 #[inline]
 pub fn get_block_extended_header_top(
     tables: &impl Tables,
-) -> Result<(ExtendedBlockHeader, BlockHeight), RuntimeError> {
+) -> DbResult<(ExtendedBlockHeader, BlockHeight)> {
     let height = chain_height(tables.block_heights())?.saturating_sub(1);
     let header = get_block_extended_header_from_height(&height, tables)?;
     Ok((header, height))
@@ -354,7 +349,7 @@ pub fn get_block_extended_header_top(
 pub fn get_block_info(
     block_height: &BlockHeight,
     table_block_infos: &impl DatabaseRo<BlockInfos>,
-) -> Result<BlockInfo, RuntimeError> {
+) -> DbResult<BlockInfo> {
     table_block_infos.get(block_height)
 }
 
@@ -364,7 +359,7 @@ pub fn get_block_info(
 pub fn get_block_height(
     block_hash: &BlockHash,
     table_block_heights: &impl DatabaseRo<BlockHeights>,
-) -> Result<BlockHeight, RuntimeError> {
+) -> DbResult<BlockHeight> {
     table_block_heights.get(block_hash)
 }
 
@@ -379,7 +374,7 @@ pub fn get_block_height(
 pub fn block_exists(
     block_hash: &BlockHash,
     table_block_heights: &impl DatabaseRo<BlockHeights>,
-) -> Result<bool, RuntimeError> {
+) -> DbResult<bool> {
     table_block_heights.contains(block_hash)
 }
 
diff --git a/storage/blockchain/src/ops/blockchain.rs b/storage/blockchain/src/ops/blockchain.rs
index c6cd4040..54dd752a 100644
--- a/storage/blockchain/src/ops/blockchain.rs
+++ b/storage/blockchain/src/ops/blockchain.rs
@@ -1,7 +1,7 @@
 //! Blockchain functions - chain height, generated coins, etc.
 
 //---------------------------------------------------------------------------------------------------- Import
-use cuprate_database::{DatabaseRo, RuntimeError};
+use cuprate_database::{DatabaseRo, DbResult, RuntimeError};
 
 use crate::{
     ops::{block::block_exists, macros::doc_error},
@@ -22,9 +22,7 @@ use crate::{
 /// So the height of a new block would be `chain_height()`.
 #[doc = doc_error!()]
 #[inline]
-pub fn chain_height(
-    table_block_heights: &impl DatabaseRo<BlockHeights>,
-) -> Result<BlockHeight, RuntimeError> {
+pub fn chain_height(table_block_heights: &impl DatabaseRo<BlockHeights>) -> DbResult<BlockHeight> {
     #[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")]
     table_block_heights.len().map(|height| height as usize)
 }
@@ -45,7 +43,7 @@ pub fn chain_height(
 #[inline]
 pub fn top_block_height(
     table_block_heights: &impl DatabaseRo<BlockHeights>,
-) -> Result<BlockHeight, RuntimeError> {
+) -> DbResult<BlockHeight> {
     match table_block_heights.len()? {
         0 => Err(RuntimeError::KeyNotFound),
         #[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")]
@@ -70,7 +68,7 @@ pub fn top_block_height(
 pub fn cumulative_generated_coins(
     block_height: &BlockHeight,
     table_block_infos: &impl DatabaseRo<BlockInfos>,
-) -> Result<u64, RuntimeError> {
+) -> DbResult<u64> {
     match table_block_infos.get(block_height) {
         Ok(block_info) => Ok(block_info.cumulative_generated_coins),
         Err(RuntimeError::KeyNotFound) if block_height == &0 => Ok(0),
@@ -84,7 +82,7 @@ pub fn cumulative_generated_coins(
 /// if the wrong order is specified the return value is meaningless.
 ///
 /// For chronologically ordered chains this will return the index of the first unknown, for reverse
-/// chronologically ordered chains this will return the index of the fist known.
+/// chronologically ordered chains this will return the index of the first known.
 ///
 /// If all blocks are known for chronologically ordered chains or unknown for reverse chronologically
 /// ordered chains then the length of the chain will be returned.
@@ -98,17 +96,16 @@ pub fn find_split_point(
     let mut err = None;
 
     // Do a binary search to find the first unknown/known block in the batch.
-    let idx =
-        block_ids.partition_point(
-            |block_id| match block_exists(block_id, table_block_heights) {
-                Ok(exists) => exists & chronological_order,
-                Err(e) => {
-                    err.get_or_insert(e);
-                    // if this happens the search is scrapped, just return `false` back.
-                    false
-                }
-            },
-        );
+    let idx = block_ids.partition_point(|block_id| {
+        match block_exists(block_id, table_block_heights) {
+            Ok(exists) => exists == chronological_order,
+            Err(e) => {
+                err.get_or_insert(e);
+                // if this happens the search is scrapped, just return `false` back.
+                false
+            }
+        }
+    });
 
     if let Some(e) = err {
         return Err(e);
diff --git a/storage/blockchain/src/ops/key_image.rs b/storage/blockchain/src/ops/key_image.rs
index 19444d6b..5f179129 100644
--- a/storage/blockchain/src/ops/key_image.rs
+++ b/storage/blockchain/src/ops/key_image.rs
@@ -1,7 +1,7 @@
 //! Key image functions.
 
 //---------------------------------------------------------------------------------------------------- Import
-use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError};
+use cuprate_database::{DatabaseRo, DatabaseRw, DbResult};
 
 use crate::{
     ops::macros::{doc_add_block_inner_invariant, doc_error},
@@ -17,7 +17,7 @@ use crate::{
 pub fn add_key_image(
     key_image: &KeyImage,
     table_key_images: &mut impl DatabaseRw<KeyImages>,
-) -> Result<(), RuntimeError> {
+) -> DbResult<()> {
     table_key_images.put(key_image, &())
 }
 
@@ -28,7 +28,7 @@ pub fn add_key_image(
 pub fn remove_key_image(
     key_image: &KeyImage,
     table_key_images: &mut impl DatabaseRw<KeyImages>,
-) -> Result<(), RuntimeError> {
+) -> DbResult<()> {
     table_key_images.delete(key_image)
 }
 
@@ -38,7 +38,7 @@ pub fn remove_key_image(
 pub fn key_image_exists(
     key_image: &KeyImage,
     table_key_images: &impl DatabaseRo<KeyImages>,
-) -> Result<bool, RuntimeError> {
+) -> DbResult<bool> {
     table_key_images.contains(key_image)
 }
 
diff --git a/storage/blockchain/src/ops/macros.rs b/storage/blockchain/src/ops/macros.rs
index 18ec5068..9c6ef7dd 100644
--- a/storage/blockchain/src/ops/macros.rs
+++ b/storage/blockchain/src/ops/macros.rs
@@ -8,7 +8,7 @@
 macro_rules! doc_error {
     () => {
         r#"# Errors
-This function returns [`RuntimeError::KeyNotFound`] if the input (if applicable) doesn't exist or other `RuntimeError`'s on database errors."#
+This function returns [`cuprate_database::RuntimeError::KeyNotFound`] if the input (if applicable) doesn't exist or other `RuntimeError`'s on database errors."#
     };
 }
 pub(super) use doc_error;
diff --git a/storage/blockchain/src/ops/output.rs b/storage/blockchain/src/ops/output.rs
index 14c209ab..96d94bb1 100644
--- a/storage/blockchain/src/ops/output.rs
+++ b/storage/blockchain/src/ops/output.rs
@@ -5,7 +5,7 @@ use curve25519_dalek::edwards::CompressedEdwardsY;
 use monero_serai::transaction::Timelock;
 
 use cuprate_database::{
-    RuntimeError, {DatabaseRo, DatabaseRw},
+    DbResult, RuntimeError, {DatabaseRo, DatabaseRw},
 };
 use cuprate_helper::crypto::compute_zero_commitment;
 use cuprate_helper::map::u64_to_timelock;
@@ -30,7 +30,7 @@ pub fn add_output(
     amount: Amount,
     output: &Output,
     tables: &mut impl TablesMut,
-) -> Result<PreRctOutputId, RuntimeError> {
+) -> DbResult<PreRctOutputId> {
     // FIXME: this would be much better expressed with a
     // `btree_map::Entry`-like API, fix `trait DatabaseRw`.
     let num_outputs = match tables.num_outputs().get(&amount) {
@@ -61,7 +61,7 @@ pub fn add_output(
 pub fn remove_output(
     pre_rct_output_id: &PreRctOutputId,
     tables: &mut impl TablesMut,
-) -> Result<(), RuntimeError> {
+) -> DbResult<()> {
     // Decrement the amount index by 1, or delete the entry out-right.
     // FIXME: this would be much better expressed with a
     // `btree_map::Entry`-like API, fix `trait DatabaseRw`.
@@ -86,7 +86,7 @@ pub fn remove_output(
 pub fn get_output(
     pre_rct_output_id: &PreRctOutputId,
     table_outputs: &impl DatabaseRo<Outputs>,
-) -> Result<Output, RuntimeError> {
+) -> DbResult<Output> {
     table_outputs.get(pre_rct_output_id)
 }
 
@@ -95,7 +95,7 @@ pub fn get_output(
 /// This returns the amount of pre-RCT outputs currently stored.
 #[doc = doc_error!()]
 #[inline]
-pub fn get_num_outputs(table_outputs: &impl DatabaseRo<Outputs>) -> Result<u64, RuntimeError> {
+pub fn get_num_outputs(table_outputs: &impl DatabaseRo<Outputs>) -> DbResult<u64> {
     table_outputs.len()
 }
 
@@ -110,7 +110,7 @@ pub fn get_num_outputs(table_outputs: &impl DatabaseRo<Outputs>) -> Result<u64,
 pub fn add_rct_output(
     rct_output: &RctOutput,
     table_rct_outputs: &mut impl DatabaseRw<RctOutputs>,
-) -> Result<AmountIndex, RuntimeError> {
+) -> DbResult<AmountIndex> {
     let amount_index = get_rct_num_outputs(table_rct_outputs)?;
     table_rct_outputs.put(&amount_index, rct_output)?;
     Ok(amount_index)
@@ -123,7 +123,7 @@ pub fn add_rct_output(
 pub fn remove_rct_output(
     amount_index: &AmountIndex,
     table_rct_outputs: &mut impl DatabaseRw<RctOutputs>,
-) -> Result<(), RuntimeError> {
+) -> DbResult<()> {
     table_rct_outputs.delete(amount_index)
 }
 
@@ -133,7 +133,7 @@ pub fn remove_rct_output(
 pub fn get_rct_output(
     amount_index: &AmountIndex,
     table_rct_outputs: &impl DatabaseRo<RctOutputs>,
-) -> Result<RctOutput, RuntimeError> {
+) -> DbResult<RctOutput> {
     table_rct_outputs.get(amount_index)
 }
 
@@ -142,9 +142,7 @@ pub fn get_rct_output(
 /// This returns the amount of RCT outputs currently stored.
 #[doc = doc_error!()]
 #[inline]
-pub fn get_rct_num_outputs(
-    table_rct_outputs: &impl DatabaseRo<RctOutputs>,
-) -> Result<u64, RuntimeError> {
+pub fn get_rct_num_outputs(table_rct_outputs: &impl DatabaseRo<RctOutputs>) -> DbResult<u64> {
     table_rct_outputs.len()
 }
 
@@ -155,7 +153,7 @@ pub fn output_to_output_on_chain(
     output: &Output,
     amount: Amount,
     table_tx_unlock_time: &impl DatabaseRo<TxUnlockTime>,
-) -> Result<OutputOnChain, RuntimeError> {
+) -> DbResult<OutputOnChain> {
     let commitment = compute_zero_commitment(amount);
 
     let time_lock = if output
@@ -191,7 +189,7 @@ pub fn output_to_output_on_chain(
 pub fn rct_output_to_output_on_chain(
     rct_output: &RctOutput,
     table_tx_unlock_time: &impl DatabaseRo<TxUnlockTime>,
-) -> Result<OutputOnChain, RuntimeError> {
+) -> DbResult<OutputOnChain> {
     // INVARIANT: Commitments stored are valid when stored by the database.
     let commitment = CompressedEdwardsY::from_slice(&rct_output.commitment)
         .unwrap()
@@ -223,10 +221,7 @@ pub fn rct_output_to_output_on_chain(
 ///
 /// Note that this still support RCT outputs, in that case, [`PreRctOutputId::amount`] should be `0`.
 #[doc = doc_error!()]
-pub fn id_to_output_on_chain(
-    id: &PreRctOutputId,
-    tables: &impl Tables,
-) -> Result<OutputOnChain, RuntimeError> {
+pub fn id_to_output_on_chain(id: &PreRctOutputId, tables: &impl Tables) -> DbResult<OutputOnChain> {
     // v2 transactions.
     if id.amount == 0 {
         let rct_output = get_rct_output(&id.amount_index, tables.rct_outputs())?;
diff --git a/storage/blockchain/src/ops/property.rs b/storage/blockchain/src/ops/property.rs
index 7810000a..3dbb9509 100644
--- a/storage/blockchain/src/ops/property.rs
+++ b/storage/blockchain/src/ops/property.rs
@@ -3,10 +3,9 @@
 //! SOMEDAY: the database `properties` table is not yet implemented.
 
 //---------------------------------------------------------------------------------------------------- Import
+use cuprate_database::DbResult;
 use cuprate_pruning::PruningSeed;
 
-use cuprate_database::RuntimeError;
-
 use crate::ops::macros::doc_error;
 
 //---------------------------------------------------------------------------------------------------- Free Functions
@@ -20,7 +19,7 @@ use crate::ops::macros::doc_error;
 /// // SOMEDAY
 /// ```
 #[inline]
-pub const fn get_blockchain_pruning_seed() -> Result<PruningSeed, RuntimeError> {
+pub const fn get_blockchain_pruning_seed() -> DbResult<PruningSeed> {
     // SOMEDAY: impl pruning.
     // We need a DB properties table.
     Ok(PruningSeed::NotPruned)
@@ -36,7 +35,7 @@ pub const fn get_blockchain_pruning_seed() -> Result<PruningSeed, RuntimeError>
 /// // SOMEDAY
 /// ```
 #[inline]
-pub const fn db_version() -> Result<u64, RuntimeError> {
+pub const fn db_version() -> DbResult<u64> {
     // SOMEDAY: We need a DB properties table.
     Ok(crate::constants::DATABASE_VERSION)
 }
diff --git a/storage/blockchain/src/ops/tx.rs b/storage/blockchain/src/ops/tx.rs
index 5a60ad53..0312f215 100644
--- a/storage/blockchain/src/ops/tx.rs
+++ b/storage/blockchain/src/ops/tx.rs
@@ -4,7 +4,7 @@
 use bytemuck::TransparentWrapper;
 use monero_serai::transaction::{Input, Timelock, Transaction};
 
-use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec};
+use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, RuntimeError, StorableVec};
 use cuprate_helper::crypto::compute_zero_commitment;
 
 use crate::{
@@ -52,7 +52,7 @@ pub fn add_tx(
     tx_hash: &TxHash,
     block_height: &BlockHeight,
     tables: &mut impl TablesMut,
-) -> Result<TxId, RuntimeError> {
+) -> DbResult<TxId> {
     let tx_id = get_num_tx(tables.tx_ids_mut())?;
 
     //------------------------------------------------------ Transaction data
@@ -129,7 +129,7 @@ pub fn add_tx(
                 )?
                 .amount_index)
             })
-            .collect::<Result<Vec<_>, RuntimeError>>()?,
+            .collect::<DbResult<Vec<_>>>()?,
         Transaction::V2 { prefix, proofs } => prefix
             .outputs
             .iter()
@@ -186,10 +186,7 @@ pub fn add_tx(
 ///
 #[doc = doc_error!()]
 #[inline]
-pub fn remove_tx(
-    tx_hash: &TxHash,
-    tables: &mut impl TablesMut,
-) -> Result<(TxId, Transaction), RuntimeError> {
+pub fn remove_tx(tx_hash: &TxHash, tables: &mut impl TablesMut) -> DbResult<(TxId, Transaction)> {
     //------------------------------------------------------ Transaction data
     let tx_id = tables.tx_ids_mut().take(tx_hash)?;
     let tx_blob = tables.tx_blobs_mut().take(&tx_id)?;
@@ -267,7 +264,7 @@ pub fn get_tx(
     tx_hash: &TxHash,
     table_tx_ids: &impl DatabaseRo<TxIds>,
     table_tx_blobs: &impl DatabaseRo<TxBlobs>,
-) -> Result<Transaction, RuntimeError> {
+) -> DbResult<Transaction> {
     get_tx_from_id(&table_tx_ids.get(tx_hash)?, table_tx_blobs)
 }
 
@@ -277,7 +274,7 @@ pub fn get_tx(
 pub fn get_tx_from_id(
     tx_id: &TxId,
     table_tx_blobs: &impl DatabaseRo<TxBlobs>,
-) -> Result<Transaction, RuntimeError> {
+) -> DbResult<Transaction> {
     let tx_blob = table_tx_blobs.get(tx_id)?.0;
     Ok(Transaction::read(&mut tx_blob.as_slice())?)
 }
@@ -294,7 +291,7 @@ pub fn get_tx_from_id(
 /// - etc
 #[doc = doc_error!()]
 #[inline]
-pub fn get_num_tx(table_tx_ids: &impl DatabaseRo<TxIds>) -> Result<u64, RuntimeError> {
+pub fn get_num_tx(table_tx_ids: &impl DatabaseRo<TxIds>) -> DbResult<u64> {
     table_tx_ids.len()
 }
 
@@ -304,10 +301,7 @@ pub fn get_num_tx(table_tx_ids: &impl DatabaseRo<TxIds>) -> Result<u64, RuntimeE
 /// Returns `true` if it does, else `false`.
 #[doc = doc_error!()]
 #[inline]
-pub fn tx_exists(
-    tx_hash: &TxHash,
-    table_tx_ids: &impl DatabaseRo<TxIds>,
-) -> Result<bool, RuntimeError> {
+pub fn tx_exists(tx_hash: &TxHash, table_tx_ids: &impl DatabaseRo<TxIds>) -> DbResult<bool> {
     table_tx_ids.contains(tx_hash)
 }
 
diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs
index 01fc92d0..84b1b921 100644
--- a/storage/blockchain/src/service/read.rs
+++ b/storage/blockchain/src/service/read.rs
@@ -22,12 +22,14 @@ use rayon::{
 };
 use thread_local::ThreadLocal;
 
-use cuprate_database::{ConcreteEnv, DatabaseIter, DatabaseRo, Env, EnvInner, RuntimeError};
+use cuprate_database::{
+    ConcreteEnv, DatabaseIter, DatabaseRo, DbResult, Env, EnvInner, RuntimeError,
+};
 use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads};
 use cuprate_helper::map::combine_low_high_bits_to_u128;
 use cuprate_types::{
     blockchain::{BlockchainReadRequest, BlockchainResponse},
-    Chain, ChainId, ExtendedBlockHeader, MissingTxsInBlock, OutputHistogramInput, OutputOnChain,
+    Chain, ChainId, ExtendedBlockHeader, OutputHistogramInput, OutputOnChain, TxsInBlock,
 };
 
 use crate::{
@@ -118,10 +120,10 @@ fn map_request(
         R::CompactChainHistory => compact_chain_history(env),
         R::NextChainEntry(block_hashes, amount) => next_chain_entry(env, &block_hashes, amount),
         R::FindFirstUnknown(block_ids) => find_first_unknown(env, &block_ids),
-        R::MissingTxsInBlock {
+        R::TxsInBlock {
             block_hash,
             tx_indexes,
-        } => missing_txs_in_block(env, block_hash, tx_indexes),
+        } => txs_in_block(env, block_hash, tx_indexes),
         R::AltBlocksInChain(chain_id) => alt_blocks_in_chain(env, chain_id),
         R::Block { height } => block(env, height),
         R::BlockByHash(hash) => block_by_hash(env, hash),
@@ -224,7 +226,7 @@ fn block_complete_entries(env: &ConcreteEnv, block_hashes: Vec<BlockHash>) -> Re
                 res => res.map(Either::Right),
             }
         })
-        .collect::<Result<_, _>>()?;
+        .collect::<DbResult<_>>()?;
 
     let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
     let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
@@ -345,7 +347,7 @@ fn block_extended_header_in_range(
                 let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
                 get_block_extended_header_from_height(&block_height, tables)
             })
-            .collect::<Result<Vec<ExtendedBlockHeader>, RuntimeError>>()?,
+            .collect::<DbResult<Vec<ExtendedBlockHeader>>>()?,
         Chain::Alt(chain_id) => {
             let ranges = {
                 let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
@@ -375,7 +377,7 @@ fn block_extended_header_in_range(
                         }
                     })
                 })
-                .collect::<Result<Vec<_>, _>>()?
+                .collect::<DbResult<Vec<_>>>()?
         }
     };
 
@@ -421,7 +423,7 @@ fn outputs(env: &ConcreteEnv, outputs: HashMap<Amount, HashSet<AmountIndex>>) ->
 
     // The 2nd mapping function.
     // This is pulled out from the below `map()` for readability.
-    let inner_map = |amount, amount_index| -> Result<(AmountIndex, OutputOnChain), RuntimeError> {
+    let inner_map = |amount, amount_index| -> DbResult<(AmountIndex, OutputOnChain)> {
         let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
         let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
 
@@ -444,10 +446,10 @@ fn outputs(env: &ConcreteEnv, outputs: HashMap<Amount, HashSet<AmountIndex>>) ->
                 amount_index_set
                     .into_par_iter()
                     .map(|amount_index| inner_map(amount, amount_index))
-                    .collect::<Result<HashMap<AmountIndex, OutputOnChain>, RuntimeError>>()?,
+                    .collect::<DbResult<HashMap<AmountIndex, OutputOnChain>>>()?,
             ))
         })
-        .collect::<Result<HashMap<Amount, HashMap<AmountIndex, OutputOnChain>>, RuntimeError>>()?;
+        .collect::<DbResult<HashMap<Amount, HashMap<AmountIndex, OutputOnChain>>>>()?;
 
     Ok(BlockchainResponse::Outputs(map))
 }
@@ -496,7 +498,7 @@ fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec<Amount>) -> Respon
                 }
             }
         })
-        .collect::<Result<HashMap<Amount, usize>, RuntimeError>>()?;
+        .collect::<DbResult<HashMap<Amount, usize>>>()?;
 
     Ok(BlockchainResponse::NumberOutputsWithAmount(map))
 }
@@ -562,7 +564,7 @@ fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult {
         .map(compact_history_index_to_height_offset::<INITIAL_BLOCKS>)
         .map_while(|i| top_block_height.checked_sub(i))
         .map(|height| Ok(get_block_info(&height, &table_block_infos)?.block_hash))
-        .collect::<Result<Vec<_>, RuntimeError>>()?;
+        .collect::<DbResult<Vec<_>>>()?;
 
     if compact_history_genesis_not_included::<INITIAL_BLOCKS>(top_block_height) {
         block_ids.push(get_block_info(&0, &table_block_infos)?.block_hash);
@@ -598,7 +600,7 @@ fn next_chain_entry(
     // This will happen if we have a different genesis block.
     if idx == block_ids.len() {
         return Ok(BlockchainResponse::NextChainEntry {
-            start_height: 0,
+            start_height: None,
             chain_height: 0,
             block_ids: vec![],
             block_weights: vec![],
@@ -621,7 +623,7 @@ fn next_chain_entry(
 
             Ok((block_info.block_hash, block_info.weight))
         })
-        .collect::<Result<(Vec<_>, Vec<_>), RuntimeError>>()?;
+        .collect::<DbResult<(Vec<_>, Vec<_>)>>()?;
 
     let top_block_info = table_block_infos.get(&(chain_height - 1))?;
 
@@ -632,7 +634,7 @@ fn next_chain_entry(
     };
 
     Ok(BlockchainResponse::NextChainEntry {
-        start_height: first_known_height,
+        start_height: std::num::NonZero::new(first_known_height),
         chain_height,
         block_ids,
         block_weights,
@@ -669,12 +671,8 @@ fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseRes
     })
 }
 
-/// [`BlockchainReadRequest::MissingTxsInBlock`]
-fn missing_txs_in_block(
-    env: &ConcreteEnv,
-    block_hash: [u8; 32],
-    missing_txs: Vec<u64>,
-) -> ResponseResult {
+/// [`BlockchainReadRequest::TxsInBlock`]
+fn txs_in_block(env: &ConcreteEnv, block_hash: [u8; 32], missing_txs: Vec<u64>) -> ResponseResult {
     // Single-threaded, no `ThreadLocal` required.
     let env_inner = env.env_inner();
     let tx_ro = env_inner.tx_ro()?;
@@ -686,17 +684,18 @@ fn missing_txs_in_block(
     let first_tx_index = miner_tx_index + 1;
 
     if numb_txs < missing_txs.len() {
-        return Ok(BlockchainResponse::MissingTxsInBlock(None));
+        return Ok(BlockchainResponse::TxsInBlock(None));
     }
 
     let txs = missing_txs
         .into_iter()
         .map(|index_offset| Ok(tables.tx_blobs().get(&(first_tx_index + index_offset))?.0))
-        .collect::<Result<_, RuntimeError>>()?;
+        .collect::<DbResult<_>>()?;
 
-    Ok(BlockchainResponse::MissingTxsInBlock(Some(
-        MissingTxsInBlock { block, txs },
-    )))
+    Ok(BlockchainResponse::TxsInBlock(Some(TxsInBlock {
+        block,
+        txs,
+    })))
 }
 
 /// [`BlockchainReadRequest::AltBlocksInChain`]
@@ -736,7 +735,7 @@ fn alt_blocks_in_chain(env: &ConcreteEnv, chain_id: ChainId) -> ResponseResult {
                 )
             })
         })
-        .collect::<Result<_, _>>()?;
+        .collect::<DbResult<_>>()?;
 
     Ok(BlockchainResponse::AltBlocksInChain(blocks))
 }
diff --git a/storage/blockchain/src/service/types.rs b/storage/blockchain/src/service/types.rs
index 9cd86e9c..190e9f6f 100644
--- a/storage/blockchain/src/service/types.rs
+++ b/storage/blockchain/src/service/types.rs
@@ -1,7 +1,7 @@
 //! Database service type aliases.
 
 //---------------------------------------------------------------------------------------------------- Use
-use cuprate_database::RuntimeError;
+use cuprate_database::DbResult;
 use cuprate_database_service::{DatabaseReadService, DatabaseWriteHandle};
 use cuprate_types::blockchain::{
     BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest,
@@ -11,7 +11,7 @@ use cuprate_types::blockchain::{
 /// The actual type of the response.
 ///
 /// Either our [`BlockchainResponse`], or a database error occurred.
-pub(super) type ResponseResult = Result<BlockchainResponse, RuntimeError>;
+pub(super) type ResponseResult = DbResult<BlockchainResponse>;
 
 /// The blockchain database write service.
 pub type BlockchainWriteHandle = DatabaseWriteHandle<BlockchainWriteRequest, BlockchainResponse>;
diff --git a/storage/blockchain/src/service/write.rs b/storage/blockchain/src/service/write.rs
index 07162d2a..84c2538f 100644
--- a/storage/blockchain/src/service/write.rs
+++ b/storage/blockchain/src/service/write.rs
@@ -2,7 +2,7 @@
 //---------------------------------------------------------------------------------------------------- Import
 use std::sync::Arc;
 
-use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError, TxRw};
+use cuprate_database::{ConcreteEnv, DatabaseRo, DbResult, Env, EnvInner, TxRw};
 use cuprate_database_service::DatabaseWriteHandle;
 use cuprate_types::{
     blockchain::{BlockchainResponse, BlockchainWriteRequest},
@@ -36,7 +36,7 @@ pub fn init_write_service(env: Arc<ConcreteEnv>) -> BlockchainWriteHandle {
 fn handle_blockchain_request(
     env: &ConcreteEnv,
     req: &BlockchainWriteRequest,
-) -> Result<BlockchainResponse, RuntimeError> {
+) -> DbResult<BlockchainResponse> {
     match req {
         BlockchainWriteRequest::WriteBlock(block) => write_block(env, block),
         BlockchainWriteRequest::WriteAltBlock(alt_block) => write_alt_block(env, alt_block),
diff --git a/storage/database/src/backend/heed/database.rs b/storage/database/src/backend/heed/database.rs
index c985d0de..15f16b45 100644
--- a/storage/database/src/backend/heed/database.rs
+++ b/storage/database/src/backend/heed/database.rs
@@ -6,7 +6,7 @@ use std::{cell::RefCell, ops::RangeBounds};
 use crate::{
     backend::heed::types::HeedDb,
     database::{DatabaseIter, DatabaseRo, DatabaseRw},
-    error::RuntimeError,
+    error::{DbResult, RuntimeError},
     table::Table,
 };
 
@@ -54,16 +54,13 @@ fn get<T: Table>(
     db: &HeedDb<T::Key, T::Value>,
     tx_ro: &heed::RoTxn<'_>,
     key: &T::Key,
-) -> Result<T::Value, RuntimeError> {
+) -> DbResult<T::Value> {
     db.get(tx_ro, key)?.ok_or(RuntimeError::KeyNotFound)
 }
 
 /// Shared [`DatabaseRo::len()`].
 #[inline]
-fn len<T: Table>(
-    db: &HeedDb<T::Key, T::Value>,
-    tx_ro: &heed::RoTxn<'_>,
-) -> Result<u64, RuntimeError> {
+fn len<T: Table>(db: &HeedDb<T::Key, T::Value>, tx_ro: &heed::RoTxn<'_>) -> DbResult<u64> {
     Ok(db.len(tx_ro)?)
 }
 
@@ -72,7 +69,7 @@ fn len<T: Table>(
 fn first<T: Table>(
     db: &HeedDb<T::Key, T::Value>,
     tx_ro: &heed::RoTxn<'_>,
-) -> Result<(T::Key, T::Value), RuntimeError> {
+) -> DbResult<(T::Key, T::Value)> {
     db.first(tx_ro)?.ok_or(RuntimeError::KeyNotFound)
 }
 
@@ -81,16 +78,13 @@ fn first<T: Table>(
 fn last<T: Table>(
     db: &HeedDb<T::Key, T::Value>,
     tx_ro: &heed::RoTxn<'_>,
-) -> Result<(T::Key, T::Value), RuntimeError> {
+) -> DbResult<(T::Key, T::Value)> {
     db.last(tx_ro)?.ok_or(RuntimeError::KeyNotFound)
 }
 
 /// Shared [`DatabaseRo::is_empty()`].
 #[inline]
-fn is_empty<T: Table>(
-    db: &HeedDb<T::Key, T::Value>,
-    tx_ro: &heed::RoTxn<'_>,
-) -> Result<bool, RuntimeError> {
+fn is_empty<T: Table>(db: &HeedDb<T::Key, T::Value>, tx_ro: &heed::RoTxn<'_>) -> DbResult<bool> {
     Ok(db.is_empty(tx_ro)?)
 }
 
@@ -100,7 +94,7 @@ impl<T: Table> DatabaseIter<T> for HeedTableRo<'_, T> {
     fn get_range<'a, Range>(
         &'a self,
         range: Range,
-    ) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + 'a, RuntimeError>
+    ) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + 'a>
     where
         Range: RangeBounds<T::Key> + 'a,
     {
@@ -108,24 +102,17 @@ impl<T: Table> DatabaseIter<T> for HeedTableRo<'_, T> {
     }
 
     #[inline]
-    fn iter(
-        &self,
-    ) -> Result<impl Iterator<Item = Result<(T::Key, T::Value), RuntimeError>> + '_, RuntimeError>
-    {
+    fn iter(&self) -> DbResult<impl Iterator<Item = DbResult<(T::Key, T::Value)>> + '_> {
         Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?)))
     }
 
     #[inline]
-    fn keys(
-        &self,
-    ) -> Result<impl Iterator<Item = Result<T::Key, RuntimeError>> + '_, RuntimeError> {
+    fn keys(&self) -> DbResult<impl Iterator<Item = DbResult<T::Key>> + '_> {
         Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?.0)))
     }
 
     #[inline]
-    fn values(
-        &self,
-    ) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + '_, RuntimeError> {
+    fn values(&self) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + '_> {
         Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?.1)))
     }
 }
@@ -134,27 +121,27 @@ impl<T: Table> DatabaseIter<T> for HeedTableRo<'_, T> {
 // SAFETY: `HeedTableRo: !Send` as it holds a reference to `heed::RoTxn: Send + !Sync`.
 unsafe impl<T: Table> DatabaseRo<T> for HeedTableRo<'_, T> {
     #[inline]
-    fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> {
+    fn get(&self, key: &T::Key) -> DbResult<T::Value> {
         get::<T>(&self.db, self.tx_ro, key)
     }
 
     #[inline]
-    fn len(&self) -> Result<u64, RuntimeError> {
+    fn len(&self) -> DbResult<u64> {
         len::<T>(&self.db, self.tx_ro)
     }
 
     #[inline]
-    fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn first(&self) -> DbResult<(T::Key, T::Value)> {
         first::<T>(&self.db, self.tx_ro)
     }
 
     #[inline]
-    fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn last(&self) -> DbResult<(T::Key, T::Value)> {
         last::<T>(&self.db, self.tx_ro)
     }
 
     #[inline]
-    fn is_empty(&self) -> Result<bool, RuntimeError> {
+    fn is_empty(&self) -> DbResult<bool> {
         is_empty::<T>(&self.db, self.tx_ro)
     }
 }
@@ -164,45 +151,45 @@ unsafe impl<T: Table> DatabaseRo<T> for HeedTableRo<'_, T> {
 // `HeedTableRw`'s write transaction is `!Send`.
 unsafe impl<T: Table> DatabaseRo<T> for HeedTableRw<'_, '_, T> {
     #[inline]
-    fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> {
+    fn get(&self, key: &T::Key) -> DbResult<T::Value> {
         get::<T>(&self.db, &self.tx_rw.borrow(), key)
     }
 
     #[inline]
-    fn len(&self) -> Result<u64, RuntimeError> {
+    fn len(&self) -> DbResult<u64> {
         len::<T>(&self.db, &self.tx_rw.borrow())
     }
 
     #[inline]
-    fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn first(&self) -> DbResult<(T::Key, T::Value)> {
         first::<T>(&self.db, &self.tx_rw.borrow())
     }
 
     #[inline]
-    fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn last(&self) -> DbResult<(T::Key, T::Value)> {
         last::<T>(&self.db, &self.tx_rw.borrow())
     }
 
     #[inline]
-    fn is_empty(&self) -> Result<bool, RuntimeError> {
+    fn is_empty(&self) -> DbResult<bool> {
         is_empty::<T>(&self.db, &self.tx_rw.borrow())
     }
 }
 
 impl<T: Table> DatabaseRw<T> for HeedTableRw<'_, '_, T> {
     #[inline]
-    fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError> {
+    fn put(&mut self, key: &T::Key, value: &T::Value) -> DbResult<()> {
         Ok(self.db.put(&mut self.tx_rw.borrow_mut(), key, value)?)
     }
 
     #[inline]
-    fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError> {
+    fn delete(&mut self, key: &T::Key) -> DbResult<()> {
         self.db.delete(&mut self.tx_rw.borrow_mut(), key)?;
         Ok(())
     }
 
     #[inline]
-    fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError> {
+    fn take(&mut self, key: &T::Key) -> DbResult<T::Value> {
         // LMDB/heed does not return the value on deletion.
         // So, fetch it first - then delete.
         let value = get::<T>(&self.db, &self.tx_rw.borrow(), key)?;
@@ -216,7 +203,7 @@ impl<T: Table> DatabaseRw<T> for HeedTableRw<'_, '_, T> {
     }
 
     #[inline]
-    fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn pop_first(&mut self) -> DbResult<(T::Key, T::Value)> {
         let tx_rw = &mut self.tx_rw.borrow_mut();
 
         // Get the value first...
@@ -235,7 +222,7 @@ impl<T: Table> DatabaseRw<T> for HeedTableRw<'_, '_, T> {
     }
 
     #[inline]
-    fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn pop_last(&mut self) -> DbResult<(T::Key, T::Value)> {
         let tx_rw = &mut self.tx_rw.borrow_mut();
 
         // Get the value first...
diff --git a/storage/database/src/backend/heed/env.rs b/storage/database/src/backend/heed/env.rs
index 568379e5..b603013b 100644
--- a/storage/database/src/backend/heed/env.rs
+++ b/storage/database/src/backend/heed/env.rs
@@ -18,7 +18,7 @@ use crate::{
     config::{Config, SyncMode},
     database::{DatabaseIter, DatabaseRo, DatabaseRw},
     env::{Env, EnvInner},
-    error::{InitError, RuntimeError},
+    error::{DbResult, InitError, RuntimeError},
     key::{Key, KeyCompare},
     resize::ResizeAlgorithm,
     table::Table,
@@ -203,7 +203,7 @@ impl Env for ConcreteEnv {
         &self.config
     }
 
-    fn sync(&self) -> Result<(), RuntimeError> {
+    fn sync(&self) -> DbResult<()> {
         Ok(self.env.read().unwrap().force_sync()?)
     }
 
@@ -253,12 +253,12 @@ where
     type Rw<'a> = RefCell<heed::RwTxn<'a>>;
 
     #[inline]
-    fn tx_ro(&self) -> Result<Self::Ro<'_>, RuntimeError> {
+    fn tx_ro(&self) -> DbResult<Self::Ro<'_>> {
         Ok(self.read_txn()?)
     }
 
     #[inline]
-    fn tx_rw(&self) -> Result<Self::Rw<'_>, RuntimeError> {
+    fn tx_rw(&self) -> DbResult<Self::Rw<'_>> {
         Ok(RefCell::new(self.write_txn()?))
     }
 
@@ -266,7 +266,7 @@ where
     fn open_db_ro<T: Table>(
         &self,
         tx_ro: &Self::Ro<'_>,
-    ) -> Result<impl DatabaseRo<T> + DatabaseIter<T>, RuntimeError> {
+    ) -> DbResult<impl DatabaseRo<T> + DatabaseIter<T>> {
         // Open up a read-only database using our table's const metadata.
         //
         // INVARIANT: LMDB caches the ordering / comparison function from [`EnvInner::create_db`],
@@ -281,10 +281,7 @@ where
     }
 
     #[inline]
-    fn open_db_rw<T: Table>(
-        &self,
-        tx_rw: &Self::Rw<'_>,
-    ) -> Result<impl DatabaseRw<T>, RuntimeError> {
+    fn open_db_rw<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> DbResult<impl DatabaseRw<T>> {
         // Open up a read/write database using our table's const metadata.
         //
         // INVARIANT: LMDB caches the ordering / comparison function from [`EnvInner::create_db`],
@@ -296,7 +293,7 @@ where
         })
     }
 
-    fn create_db<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> Result<(), RuntimeError> {
+    fn create_db<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> DbResult<()> {
         // Create a database using our:
         // - [`Table`]'s const metadata.
         // - (potentially) our [`Key`] comparison function
@@ -328,7 +325,7 @@ where
     }
 
     #[inline]
-    fn clear_db<T: Table>(&self, tx_rw: &mut Self::Rw<'_>) -> Result<(), RuntimeError> {
+    fn clear_db<T: Table>(&self, tx_rw: &mut Self::Rw<'_>) -> DbResult<()> {
         let tx_rw = tx_rw.get_mut();
 
         // Open the table. We don't care about flags or key
diff --git a/storage/database/src/backend/heed/transaction.rs b/storage/database/src/backend/heed/transaction.rs
index d32f3707..b7c0f54d 100644
--- a/storage/database/src/backend/heed/transaction.rs
+++ b/storage/database/src/backend/heed/transaction.rs
@@ -4,31 +4,31 @@ use std::cell::RefCell;
 
 //---------------------------------------------------------------------------------------------------- Import
 use crate::{
-    error::RuntimeError,
+    error::DbResult,
     transaction::{TxRo, TxRw},
 };
 
 //---------------------------------------------------------------------------------------------------- TxRo
 impl TxRo<'_> for heed::RoTxn<'_> {
-    fn commit(self) -> Result<(), RuntimeError> {
+    fn commit(self) -> DbResult<()> {
         Ok(heed::RoTxn::commit(self)?)
     }
 }
 
 //---------------------------------------------------------------------------------------------------- TxRw
 impl TxRo<'_> for RefCell<heed::RwTxn<'_>> {
-    fn commit(self) -> Result<(), RuntimeError> {
+    fn commit(self) -> DbResult<()> {
         TxRw::commit(self)
     }
 }
 
 impl TxRw<'_> for RefCell<heed::RwTxn<'_>> {
-    fn commit(self) -> Result<(), RuntimeError> {
+    fn commit(self) -> DbResult<()> {
         Ok(heed::RwTxn::commit(self.into_inner())?)
     }
 
     /// This function is infallible.
-    fn abort(self) -> Result<(), RuntimeError> {
+    fn abort(self) -> DbResult<()> {
         heed::RwTxn::abort(self.into_inner());
         Ok(())
     }
diff --git a/storage/database/src/backend/redb/database.rs b/storage/database/src/backend/redb/database.rs
index dafb2417..0be58ef0 100644
--- a/storage/database/src/backend/redb/database.rs
+++ b/storage/database/src/backend/redb/database.rs
@@ -11,7 +11,7 @@ use crate::{
         types::{RedbTableRo, RedbTableRw},
     },
     database::{DatabaseIter, DatabaseRo, DatabaseRw},
-    error::RuntimeError,
+    error::{DbResult, RuntimeError},
     table::Table,
 };
 
@@ -25,7 +25,7 @@ use crate::{
 fn get<T: Table + 'static>(
     db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
     key: &T::Key,
-) -> Result<T::Value, RuntimeError> {
+) -> DbResult<T::Value> {
     Ok(db.get(key)?.ok_or(RuntimeError::KeyNotFound)?.value())
 }
 
@@ -33,7 +33,7 @@ fn get<T: Table + 'static>(
 #[inline]
 fn len<T: Table>(
     db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
-) -> Result<u64, RuntimeError> {
+) -> DbResult<u64> {
     Ok(db.len()?)
 }
 
@@ -41,7 +41,7 @@ fn len<T: Table>(
 #[inline]
 fn first<T: Table>(
     db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
-) -> Result<(T::Key, T::Value), RuntimeError> {
+) -> DbResult<(T::Key, T::Value)> {
     let (key, value) = db.first()?.ok_or(RuntimeError::KeyNotFound)?;
     Ok((key.value(), value.value()))
 }
@@ -50,7 +50,7 @@ fn first<T: Table>(
 #[inline]
 fn last<T: Table>(
     db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
-) -> Result<(T::Key, T::Value), RuntimeError> {
+) -> DbResult<(T::Key, T::Value)> {
     let (key, value) = db.last()?.ok_or(RuntimeError::KeyNotFound)?;
     Ok((key.value(), value.value()))
 }
@@ -59,7 +59,7 @@ fn last<T: Table>(
 #[inline]
 fn is_empty<T: Table>(
     db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
-) -> Result<bool, RuntimeError> {
+) -> DbResult<bool> {
     Ok(db.is_empty()?)
 }
 
@@ -69,7 +69,7 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
     fn get_range<'a, Range>(
         &'a self,
         range: Range,
-    ) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + 'a, RuntimeError>
+    ) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + 'a>
     where
         Range: RangeBounds<T::Key> + 'a,
     {
@@ -80,10 +80,7 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
     }
 
     #[inline]
-    fn iter(
-        &self,
-    ) -> Result<impl Iterator<Item = Result<(T::Key, T::Value), RuntimeError>> + '_, RuntimeError>
-    {
+    fn iter(&self) -> DbResult<impl Iterator<Item = DbResult<(T::Key, T::Value)>> + '_> {
         Ok(ReadableTable::iter(self)?.map(|result| {
             let (key, value) = result?;
             Ok((key.value(), value.value()))
@@ -91,9 +88,7 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
     }
 
     #[inline]
-    fn keys(
-        &self,
-    ) -> Result<impl Iterator<Item = Result<T::Key, RuntimeError>> + '_, RuntimeError> {
+    fn keys(&self) -> DbResult<impl Iterator<Item = DbResult<T::Key>> + '_> {
         Ok(ReadableTable::iter(self)?.map(|result| {
             let (key, _value) = result?;
             Ok(key.value())
@@ -101,9 +96,7 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
     }
 
     #[inline]
-    fn values(
-        &self,
-    ) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + '_, RuntimeError> {
+    fn values(&self) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + '_> {
         Ok(ReadableTable::iter(self)?.map(|result| {
             let (_key, value) = result?;
             Ok(value.value())
@@ -115,27 +108,27 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
 // SAFETY: Both `redb`'s transaction and table types are `Send + Sync`.
 unsafe impl<T: Table + 'static> DatabaseRo<T> for RedbTableRo<T::Key, T::Value> {
     #[inline]
-    fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> {
+    fn get(&self, key: &T::Key) -> DbResult<T::Value> {
         get::<T>(self, key)
     }
 
     #[inline]
-    fn len(&self) -> Result<u64, RuntimeError> {
+    fn len(&self) -> DbResult<u64> {
         len::<T>(self)
     }
 
     #[inline]
-    fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn first(&self) -> DbResult<(T::Key, T::Value)> {
         first::<T>(self)
     }
 
     #[inline]
-    fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn last(&self) -> DbResult<(T::Key, T::Value)> {
         last::<T>(self)
     }
 
     #[inline]
-    fn is_empty(&self) -> Result<bool, RuntimeError> {
+    fn is_empty(&self) -> DbResult<bool> {
         is_empty::<T>(self)
     }
 }
@@ -144,27 +137,27 @@ unsafe impl<T: Table + 'static> DatabaseRo<T> for RedbTableRo<T::Key, T::Value>
 // SAFETY: Both `redb`'s transaction and table types are `Send + Sync`.
 unsafe impl<T: Table + 'static> DatabaseRo<T> for RedbTableRw<'_, T::Key, T::Value> {
     #[inline]
-    fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> {
+    fn get(&self, key: &T::Key) -> DbResult<T::Value> {
         get::<T>(self, key)
     }
 
     #[inline]
-    fn len(&self) -> Result<u64, RuntimeError> {
+    fn len(&self) -> DbResult<u64> {
         len::<T>(self)
     }
 
     #[inline]
-    fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn first(&self) -> DbResult<(T::Key, T::Value)> {
         first::<T>(self)
     }
 
     #[inline]
-    fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn last(&self) -> DbResult<(T::Key, T::Value)> {
         last::<T>(self)
     }
 
     #[inline]
-    fn is_empty(&self) -> Result<bool, RuntimeError> {
+    fn is_empty(&self) -> DbResult<bool> {
         is_empty::<T>(self)
     }
 }
@@ -173,19 +166,19 @@ impl<T: Table + 'static> DatabaseRw<T> for RedbTableRw<'_, T::Key, T::Value> {
     // `redb` returns the value after function calls so we end with Ok(()) instead.
 
     #[inline]
-    fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError> {
+    fn put(&mut self, key: &T::Key, value: &T::Value) -> DbResult<()> {
         redb::Table::insert(self, key, value)?;
         Ok(())
     }
 
     #[inline]
-    fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError> {
+    fn delete(&mut self, key: &T::Key) -> DbResult<()> {
         redb::Table::remove(self, key)?;
         Ok(())
     }
 
     #[inline]
-    fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError> {
+    fn take(&mut self, key: &T::Key) -> DbResult<T::Value> {
         if let Some(value) = redb::Table::remove(self, key)? {
             Ok(value.value())
         } else {
@@ -194,13 +187,13 @@ impl<T: Table + 'static> DatabaseRw<T> for RedbTableRw<'_, T::Key, T::Value> {
     }
 
     #[inline]
-    fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn pop_first(&mut self) -> DbResult<(T::Key, T::Value)> {
         let (key, value) = redb::Table::pop_first(self)?.ok_or(RuntimeError::KeyNotFound)?;
         Ok((key.value(), value.value()))
     }
 
     #[inline]
-    fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError> {
+    fn pop_last(&mut self) -> DbResult<(T::Key, T::Value)> {
         let (key, value) = redb::Table::pop_last(self)?.ok_or(RuntimeError::KeyNotFound)?;
         Ok((key.value(), value.value()))
     }
diff --git a/storage/database/src/backend/redb/env.rs b/storage/database/src/backend/redb/env.rs
index a405ea72..4bd49d68 100644
--- a/storage/database/src/backend/redb/env.rs
+++ b/storage/database/src/backend/redb/env.rs
@@ -6,7 +6,7 @@ use crate::{
     config::{Config, SyncMode},
     database::{DatabaseIter, DatabaseRo, DatabaseRw},
     env::{Env, EnvInner},
-    error::{InitError, RuntimeError},
+    error::{DbResult, InitError, RuntimeError},
     table::Table,
     TxRw,
 };
@@ -105,7 +105,7 @@ impl Env for ConcreteEnv {
         &self.config
     }
 
-    fn sync(&self) -> Result<(), RuntimeError> {
+    fn sync(&self) -> DbResult<()> {
         // `redb`'s syncs are tied with write transactions,
         // so just create one, don't do anything and commit.
         let mut tx_rw = self.env.begin_write()?;
@@ -127,12 +127,12 @@ where
     type Rw<'a> = redb::WriteTransaction;
 
     #[inline]
-    fn tx_ro(&self) -> Result<redb::ReadTransaction, RuntimeError> {
+    fn tx_ro(&self) -> DbResult<redb::ReadTransaction> {
         Ok(self.0.begin_read()?)
     }
 
     #[inline]
-    fn tx_rw(&self) -> Result<redb::WriteTransaction, RuntimeError> {
+    fn tx_rw(&self) -> DbResult<redb::WriteTransaction> {
         // `redb` has sync modes on the TX level, unlike heed,
         // which sets it at the Environment level.
         //
@@ -146,7 +146,7 @@ where
     fn open_db_ro<T: Table>(
         &self,
         tx_ro: &Self::Ro<'_>,
-    ) -> Result<impl DatabaseRo<T> + DatabaseIter<T>, RuntimeError> {
+    ) -> DbResult<impl DatabaseRo<T> + DatabaseIter<T>> {
         // Open up a read-only database using our `T: Table`'s const metadata.
         let table: redb::TableDefinition<'static, StorableRedb<T::Key>, StorableRedb<T::Value>> =
             redb::TableDefinition::new(T::NAME);
@@ -155,10 +155,7 @@ where
     }
 
     #[inline]
-    fn open_db_rw<T: Table>(
-        &self,
-        tx_rw: &Self::Rw<'_>,
-    ) -> Result<impl DatabaseRw<T>, RuntimeError> {
+    fn open_db_rw<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> DbResult<impl DatabaseRw<T>> {
         // Open up a read/write database using our `T: Table`'s const metadata.
         let table: redb::TableDefinition<'static, StorableRedb<T::Key>, StorableRedb<T::Value>> =
             redb::TableDefinition::new(T::NAME);
@@ -168,14 +165,14 @@ where
         Ok(tx_rw.open_table(table)?)
     }
 
-    fn create_db<T: Table>(&self, tx_rw: &redb::WriteTransaction) -> Result<(), RuntimeError> {
+    fn create_db<T: Table>(&self, tx_rw: &redb::WriteTransaction) -> DbResult<()> {
         // INVARIANT: `redb` creates tables if they don't exist.
         self.open_db_rw::<T>(tx_rw)?;
         Ok(())
     }
 
     #[inline]
-    fn clear_db<T: Table>(&self, tx_rw: &mut redb::WriteTransaction) -> Result<(), RuntimeError> {
+    fn clear_db<T: Table>(&self, tx_rw: &mut redb::WriteTransaction) -> DbResult<()> {
         let table: redb::TableDefinition<
             'static,
             StorableRedb<<T as Table>::Key>,
diff --git a/storage/database/src/backend/redb/storable.rs b/storage/database/src/backend/redb/storable.rs
index abf2e71b..f0412efb 100644
--- a/storage/database/src/backend/redb/storable.rs
+++ b/storage/database/src/backend/redb/storable.rs
@@ -34,8 +34,14 @@ impl<T> redb::Value for StorableRedb<T>
 where
     T: Storable + 'static,
 {
-    type SelfType<'a> = T where Self: 'a;
-    type AsBytes<'a> = &'a [u8] where Self: 'a;
+    type SelfType<'a>
+        = T
+    where
+        Self: 'a;
+    type AsBytes<'a>
+        = &'a [u8]
+    where
+        Self: 'a;
 
     #[inline]
     fn fixed_width() -> Option<usize> {
diff --git a/storage/database/src/backend/redb/transaction.rs b/storage/database/src/backend/redb/transaction.rs
index 5048851d..8d93986d 100644
--- a/storage/database/src/backend/redb/transaction.rs
+++ b/storage/database/src/backend/redb/transaction.rs
@@ -2,14 +2,14 @@
 
 //---------------------------------------------------------------------------------------------------- Import
 use crate::{
-    error::RuntimeError,
+    error::DbResult,
     transaction::{TxRo, TxRw},
 };
 
 //---------------------------------------------------------------------------------------------------- TxRo
 impl TxRo<'_> for redb::ReadTransaction {
     /// This function is infallible.
-    fn commit(self) -> Result<(), RuntimeError> {
+    fn commit(self) -> DbResult<()> {
         // `redb`'s read transactions cleanup automatically when all references are dropped.
         //
         // There is `close()`:
@@ -22,11 +22,11 @@ impl TxRo<'_> for redb::ReadTransaction {
 
 //---------------------------------------------------------------------------------------------------- TxRw
 impl TxRw<'_> for redb::WriteTransaction {
-    fn commit(self) -> Result<(), RuntimeError> {
+    fn commit(self) -> DbResult<()> {
         Ok(self.commit()?)
     }
 
-    fn abort(self) -> Result<(), RuntimeError> {
+    fn abort(self) -> DbResult<()> {
         Ok(self.abort()?)
     }
 }
diff --git a/storage/database/src/config/sync_mode.rs b/storage/database/src/config/sync_mode.rs
index 5a0cba52..dbb34e7f 100644
--- a/storage/database/src/config/sync_mode.rs
+++ b/storage/database/src/config/sync_mode.rs
@@ -9,7 +9,6 @@
 //! based on these values.
 
 //---------------------------------------------------------------------------------------------------- Import
-
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 
diff --git a/storage/database/src/database.rs b/storage/database/src/database.rs
index 6fbb7aaa..c019972b 100644
--- a/storage/database/src/database.rs
+++ b/storage/database/src/database.rs
@@ -3,7 +3,10 @@
 //---------------------------------------------------------------------------------------------------- Import
 use std::ops::RangeBounds;
 
-use crate::{error::RuntimeError, table::Table};
+use crate::{
+    error::{DbResult, RuntimeError},
+    table::Table,
+};
 
 //---------------------------------------------------------------------------------------------------- DatabaseIter
 /// Generic post-fix documentation for `DatabaseIter` methods.
@@ -48,27 +51,22 @@ pub trait DatabaseIter<T: Table> {
     fn get_range<'a, Range>(
         &'a self,
         range: Range,
-    ) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + 'a, RuntimeError>
+    ) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + 'a>
     where
         Range: RangeBounds<T::Key> + 'a;
 
     /// Get an [`Iterator`] that returns the `(key, value)` types for this database.
     #[doc = doc_iter!()]
     #[expect(clippy::iter_not_returning_iterator)]
-    fn iter(
-        &self,
-    ) -> Result<impl Iterator<Item = Result<(T::Key, T::Value), RuntimeError>> + '_, RuntimeError>;
+    fn iter(&self) -> DbResult<impl Iterator<Item = DbResult<(T::Key, T::Value)>> + '_>;
 
     /// Get an [`Iterator`] that returns _only_ the `key` type for this database.
     #[doc = doc_iter!()]
-    fn keys(&self)
-        -> Result<impl Iterator<Item = Result<T::Key, RuntimeError>> + '_, RuntimeError>;
+    fn keys(&self) -> DbResult<impl Iterator<Item = DbResult<T::Key>> + '_>;
 
     /// Get an [`Iterator`] that returns _only_ the `value` type for this database.
     #[doc = doc_iter!()]
-    fn values(
-        &self,
-    ) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + '_, RuntimeError>;
+    fn values(&self) -> DbResult<impl Iterator<Item = DbResult<T::Value>> + '_>;
 }
 
 //---------------------------------------------------------------------------------------------------- DatabaseRo
@@ -76,7 +74,7 @@ pub trait DatabaseIter<T: Table> {
 macro_rules! doc_database {
     () => {
         r"# Errors
-This will return [`RuntimeError::KeyNotFound`] if:
+This will return [`crate::RuntimeError::KeyNotFound`] if:
 - Input does not exist OR
 - Database is empty"
     };
@@ -111,7 +109,7 @@ This will return [`RuntimeError::KeyNotFound`] if:
 pub unsafe trait DatabaseRo<T: Table> {
     /// Get the value corresponding to a key.
     #[doc = doc_database!()]
-    fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError>;
+    fn get(&self, key: &T::Key) -> DbResult<T::Value>;
 
     /// Returns `true` if the database contains a value for the specified key.
     ///
@@ -120,7 +118,7 @@ pub unsafe trait DatabaseRo<T: Table> {
     /// as in that case, `Ok(false)` will be returned.
     ///
     /// Other errors may still occur.
-    fn contains(&self, key: &T::Key) -> Result<bool, RuntimeError> {
+    fn contains(&self, key: &T::Key) -> DbResult<bool> {
         match self.get(key) {
             Ok(_) => Ok(true),
             Err(RuntimeError::KeyNotFound) => Ok(false),
@@ -132,21 +130,21 @@ pub unsafe trait DatabaseRo<T: Table> {
     ///
     /// # Errors
     /// This will never return [`RuntimeError::KeyNotFound`].
-    fn len(&self) -> Result<u64, RuntimeError>;
+    fn len(&self) -> DbResult<u64>;
 
     /// Returns the first `(key, value)` pair in the database.
     #[doc = doc_database!()]
-    fn first(&self) -> Result<(T::Key, T::Value), RuntimeError>;
+    fn first(&self) -> DbResult<(T::Key, T::Value)>;
 
     /// Returns the last `(key, value)` pair in the database.
     #[doc = doc_database!()]
-    fn last(&self) -> Result<(T::Key, T::Value), RuntimeError>;
+    fn last(&self) -> DbResult<(T::Key, T::Value)>;
 
     /// Returns `true` if the database contains no `(key, value)` pairs.
     ///
     /// # Errors
     /// This can only return [`RuntimeError::Io`] on errors.
-    fn is_empty(&self) -> Result<bool, RuntimeError>;
+    fn is_empty(&self) -> DbResult<bool>;
 }
 
 //---------------------------------------------------------------------------------------------------- DatabaseRw
@@ -161,7 +159,7 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
     #[doc = doc_database!()]
     ///
     /// This will never [`RuntimeError::KeyExists`].
-    fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError>;
+    fn put(&mut self, key: &T::Key, value: &T::Value) -> DbResult<()>;
 
     /// Delete a key-value pair in the database.
     ///
@@ -170,7 +168,7 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
     #[doc = doc_database!()]
     ///
     /// This will never [`RuntimeError::KeyExists`].
-    fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError>;
+    fn delete(&mut self, key: &T::Key) -> DbResult<()>;
 
     /// Delete and return a key-value pair in the database.
     ///
@@ -178,7 +176,7 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
     /// it will serialize the `T::Value` and return it.
     ///
     #[doc = doc_database!()]
-    fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError>;
+    fn take(&mut self, key: &T::Key) -> DbResult<T::Value>;
 
     /// Fetch the value, and apply a function to it - or delete the entry.
     ///
@@ -192,7 +190,7 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
     /// - If `f` returns `None`, the entry will be [`DatabaseRw::delete`]d
     ///
     #[doc = doc_database!()]
-    fn update<F>(&mut self, key: &T::Key, mut f: F) -> Result<(), RuntimeError>
+    fn update<F>(&mut self, key: &T::Key, mut f: F) -> DbResult<()>
     where
         F: FnMut(T::Value) -> Option<T::Value>,
     {
@@ -207,10 +205,10 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
     /// Removes and returns the first `(key, value)` pair in the database.
     ///
     #[doc = doc_database!()]
-    fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError>;
+    fn pop_first(&mut self) -> DbResult<(T::Key, T::Value)>;
 
     /// Removes and returns the last `(key, value)` pair in the database.
     ///
     #[doc = doc_database!()]
-    fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError>;
+    fn pop_last(&mut self) -> DbResult<(T::Key, T::Value)>;
 }
diff --git a/storage/database/src/env.rs b/storage/database/src/env.rs
index 1ae6aa1f..56b92cbd 100644
--- a/storage/database/src/env.rs
+++ b/storage/database/src/env.rs
@@ -6,7 +6,7 @@ use std::num::NonZeroUsize;
 use crate::{
     config::Config,
     database::{DatabaseIter, DatabaseRo, DatabaseRw},
-    error::{InitError, RuntimeError},
+    error::{DbResult, InitError},
     resize::ResizeAlgorithm,
     table::Table,
     transaction::{TxRo, TxRw},
@@ -39,7 +39,7 @@ pub trait Env: Sized {
     ///
     /// # Invariant
     /// If this is `false`, that means this [`Env`]
-    /// must _never_ return a [`RuntimeError::ResizeNeeded`].
+    /// must _never_ return a [`crate::RuntimeError::ResizeNeeded`].
     ///
     /// If this is `true`, [`Env::resize_map`] & [`Env::current_map_size`]
     /// _must_ be re-implemented, as it just panics by default.
@@ -88,7 +88,7 @@ pub trait Env: Sized {
     /// This will error if the database file could not be opened.
     ///
     /// This is the only [`Env`] function that will return
-    /// an [`InitError`] instead of a [`RuntimeError`].
+    /// an [`InitError`] instead of a [`crate::RuntimeError`].
     fn open(config: Config) -> Result<Self, InitError>;
 
     /// Return the [`Config`] that this database was [`Env::open`]ed with.
@@ -107,7 +107,7 @@ pub trait Env: Sized {
     ///
     /// # Errors
     /// If there is a synchronization error, this should return an error.
-    fn sync(&self) -> Result<(), RuntimeError>;
+    fn sync(&self) -> DbResult<()>;
 
     /// Resize the database's memory map to a
     /// new (bigger) size using a [`ResizeAlgorithm`].
@@ -218,14 +218,14 @@ pub trait EnvInner<'env> {
     /// Create a read-only transaction.
     ///
     /// # Errors
-    /// This will only return [`RuntimeError::Io`] if it errors.
-    fn tx_ro(&self) -> Result<Self::Ro<'_>, RuntimeError>;
+    /// This will only return [`crate::RuntimeError::Io`] if it errors.
+    fn tx_ro(&self) -> DbResult<Self::Ro<'_>>;
 
     /// Create a read/write transaction.
     ///
     /// # Errors
-    /// This will only return [`RuntimeError::Io`] if it errors.
-    fn tx_rw(&self) -> Result<Self::Rw<'_>, RuntimeError>;
+    /// This will only return [`crate::RuntimeError::Io`] if it errors.
+    fn tx_rw(&self) -> DbResult<Self::Rw<'_>>;
 
     /// Open a database in read-only mode.
     ///
@@ -269,17 +269,17 @@ pub trait EnvInner<'env> {
     /// ```
     ///
     /// # Errors
-    /// This will only return [`RuntimeError::Io`] on normal errors.
+    /// This will only return [`crate::RuntimeError::Io`] on normal errors.
     ///
     /// If the specified table is not created upon before this function is called,
-    /// this will return [`RuntimeError::TableNotFound`].
+    /// this will return [`crate::RuntimeError::TableNotFound`].
     ///
     /// # Invariant
     #[doc = doc_heed_create_db_invariant!()]
     fn open_db_ro<T: Table>(
         &self,
         tx_ro: &Self::Ro<'_>,
-    ) -> Result<impl DatabaseRo<T> + DatabaseIter<T>, RuntimeError>;
+    ) -> DbResult<impl DatabaseRo<T> + DatabaseIter<T>>;
 
     /// Open a database in read/write mode.
     ///
@@ -293,25 +293,22 @@ pub trait EnvInner<'env> {
     /// passed as a generic to this function.
     ///
     /// # Errors
-    /// This will only return [`RuntimeError::Io`] on errors.
+    /// This will only return [`crate::RuntimeError::Io`] on errors.
     ///
     /// # Invariant
     #[doc = doc_heed_create_db_invariant!()]
-    fn open_db_rw<T: Table>(
-        &self,
-        tx_rw: &Self::Rw<'_>,
-    ) -> Result<impl DatabaseRw<T>, RuntimeError>;
+    fn open_db_rw<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> DbResult<impl DatabaseRw<T>>;
 
     /// Create a database table.
     ///
     /// This will create the database [`Table`] passed as a generic to this function.
     ///
     /// # Errors
-    /// This will only return [`RuntimeError::Io`] on errors.
+    /// This will only return [`crate::RuntimeError::Io`] on errors.
     ///
     /// # Invariant
     #[doc = doc_heed_create_db_invariant!()]
-    fn create_db<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> Result<(), RuntimeError>;
+    fn create_db<T: Table>(&self, tx_rw: &Self::Rw<'_>) -> DbResult<()>;
 
     /// Clear all `(key, value)`'s from a database table.
     ///
@@ -322,9 +319,9 @@ pub trait EnvInner<'env> {
     /// function's effects can be aborted using [`TxRw::abort`].
     ///
     /// # Errors
-    /// This will return [`RuntimeError::Io`] on normal errors.
+    /// This will return [`crate::RuntimeError::Io`] on normal errors.
     ///
     /// If the specified table is not created upon before this function is called,
-    /// this will return [`RuntimeError::TableNotFound`].
-    fn clear_db<T: Table>(&self, tx_rw: &mut Self::Rw<'_>) -> Result<(), RuntimeError>;
+    /// this will return [`crate::RuntimeError::TableNotFound`].
+    fn clear_db<T: Table>(&self, tx_rw: &mut Self::Rw<'_>) -> DbResult<()>;
 }
diff --git a/storage/database/src/error.rs b/storage/database/src/error.rs
index 3471ac74..82f80b9a 100644
--- a/storage/database/src/error.rs
+++ b/storage/database/src/error.rs
@@ -7,6 +7,9 @@ use std::fmt::Debug;
 /// Alias for a thread-safe boxed error.
 type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>;
 
+/// [`Result`] with [`RuntimeError`] as the error.
+pub type DbResult<T> = Result<T, RuntimeError>;
+
 //---------------------------------------------------------------------------------------------------- InitError
 /// Errors that occur during ([`Env::open`](crate::env::Env::open)).
 ///
diff --git a/storage/database/src/lib.rs b/storage/database/src/lib.rs
index 45bfc53c..8e48fca0 100644
--- a/storage/database/src/lib.rs
+++ b/storage/database/src/lib.rs
@@ -50,7 +50,7 @@ pub use constants::{
 };
 pub use database::{DatabaseIter, DatabaseRo, DatabaseRw};
 pub use env::{Env, EnvInner};
-pub use error::{InitError, RuntimeError};
+pub use error::{DbResult, InitError, RuntimeError};
 pub use key::{Key, KeyCompare};
 pub use storable::{Storable, StorableBytes, StorableStr, StorableVec};
 pub use table::Table;
diff --git a/storage/database/src/table.rs b/storage/database/src/table.rs
index 3ad0e793..6d0daa20 100644
--- a/storage/database/src/table.rs
+++ b/storage/database/src/table.rs
@@ -1,7 +1,6 @@
 //! Database table abstraction; `trait Table`.
 
 //---------------------------------------------------------------------------------------------------- Import
-
 use crate::{key::Key, storable::Storable};
 
 //---------------------------------------------------------------------------------------------------- Table
diff --git a/storage/database/src/tables.rs b/storage/database/src/tables.rs
index 83a00e16..56203ad0 100644
--- a/storage/database/src/tables.rs
+++ b/storage/database/src/tables.rs
@@ -211,7 +211,7 @@ macro_rules! define_tables {
             ///
             /// # Errors
             /// This returns errors on regular database errors.
-            fn all_tables_empty(&self) -> Result<bool, $crate::RuntimeError>;
+            fn all_tables_empty(&self) -> $crate::DbResult<bool>;
         }
 
         /// Object containing all opened [`Table`](cuprate_database::Table)s in read + iter mode.
@@ -293,7 +293,7 @@ macro_rules! define_tables {
                 }
             )*
 
-            fn all_tables_empty(&self) -> Result<bool, $crate::RuntimeError> {
+            fn all_tables_empty(&self) -> $crate::DbResult<bool> {
                 $(
                      if !$crate::DatabaseRo::is_empty(&self.$index)? {
                         return Ok(false);
@@ -369,7 +369,7 @@ macro_rules! define_tables {
             ///
             /// # Errors
             /// This will only return [`cuprate_database::RuntimeError::Io`] if it errors.
-            fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result<impl TablesIter, $crate::RuntimeError>;
+            fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> $crate::DbResult<impl TablesIter>;
 
             /// Open all tables in read-write mode.
             ///
@@ -378,7 +378,7 @@ macro_rules! define_tables {
             ///
             /// # Errors
             /// This will only return [`cuprate_database::RuntimeError::Io`] on errors.
-            fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> Result<impl TablesMut, $crate::RuntimeError>;
+            fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult<impl TablesMut>;
 
             /// Create all database tables.
             ///
@@ -386,7 +386,7 @@ macro_rules! define_tables {
             ///
             /// # Errors
             /// This will only return [`cuprate_database::RuntimeError::Io`] on errors.
-            fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> Result<(), $crate::RuntimeError>;
+            fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult<()>;
         }
 
         impl<'env, Ei> OpenTables<'env> for Ei
@@ -396,19 +396,19 @@ macro_rules! define_tables {
             type Ro<'tx> = <Ei as $crate::EnvInner<'env>>::Ro<'tx>;
             type Rw<'tx> = <Ei as $crate::EnvInner<'env>>::Rw<'tx>;
 
-            fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result<impl TablesIter, $crate::RuntimeError> {
+            fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> $crate::DbResult<impl TablesIter> {
                 Ok(($(
                     Self::open_db_ro::<[<$table:camel>]>(self, tx_ro)?,
                 )*))
             }
 
-            fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> Result<impl TablesMut, $crate::RuntimeError> {
+            fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult<impl TablesMut> {
                 Ok(($(
                     Self::open_db_rw::<[<$table:camel>]>(self, tx_rw)?,
                 )*))
             }
 
-            fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> Result<(), $crate::RuntimeError> {
+            fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult<()> {
                 let result = Ok(($(
                     Self::create_db::<[<$table:camel>]>(self, tx_rw),
                 )*));
diff --git a/storage/database/src/transaction.rs b/storage/database/src/transaction.rs
index 8f33983d..16d1c518 100644
--- a/storage/database/src/transaction.rs
+++ b/storage/database/src/transaction.rs
@@ -1,7 +1,7 @@
 //! Database transaction abstraction; `trait TxRo`, `trait TxRw`.
 
 //---------------------------------------------------------------------------------------------------- Import
-use crate::error::RuntimeError;
+use crate::error::DbResult;
 
 //---------------------------------------------------------------------------------------------------- TxRo
 /// Read-only database transaction.
@@ -16,7 +16,7 @@ pub trait TxRo<'tx> {
     ///
     /// # Errors
     /// This operation will always return `Ok(())` with the `redb` backend.
-    fn commit(self) -> Result<(), RuntimeError>;
+    fn commit(self) -> DbResult<()>;
 }
 
 //---------------------------------------------------------------------------------------------------- TxRw
@@ -32,12 +32,12 @@ pub trait TxRw<'tx> {
     /// This operation will always return `Ok(())` with the `redb` backend.
     ///
     /// If `Env::MANUAL_RESIZE == true`,
-    /// [`RuntimeError::ResizeNeeded`] may be returned.
-    fn commit(self) -> Result<(), RuntimeError>;
+    /// [`crate::RuntimeError::ResizeNeeded`] may be returned.
+    fn commit(self) -> DbResult<()>;
 
     /// Abort the transaction, erasing any writes that have occurred.
     ///
     /// # Errors
     /// This operation will always return `Ok(())` with the `heed` backend.
-    fn abort(self) -> Result<(), RuntimeError>;
+    fn abort(self) -> DbResult<()>;
 }
diff --git a/storage/service/src/service/read.rs b/storage/service/src/service/read.rs
index 0ab68539..187ffa4c 100644
--- a/storage/service/src/service/read.rs
+++ b/storage/service/src/service/read.rs
@@ -7,7 +7,7 @@ use futures::channel::oneshot;
 use rayon::ThreadPool;
 use tower::Service;
 
-use cuprate_database::{ConcreteEnv, RuntimeError};
+use cuprate_database::{ConcreteEnv, DbResult, RuntimeError};
 use cuprate_helper::asynch::InfallibleOneshotReceiver;
 
 /// The [`rayon::ThreadPool`] service.
@@ -24,7 +24,7 @@ pub struct DatabaseReadService<Req, Res> {
     pool: Arc<ThreadPool>,
 
     /// The function used to handle request.
-    inner_handler: Arc<dyn Fn(Req) -> Result<Res, RuntimeError> + Send + Sync + 'static>,
+    inner_handler: Arc<dyn Fn(Req) -> DbResult<Res> + Send + Sync + 'static>,
 }
 
 // Deriving [`Clone`] means `Req` & `Res` need to be `Clone`, even if they aren't.
@@ -51,7 +51,7 @@ where
     pub fn new(
         env: Arc<ConcreteEnv>,
         pool: Arc<ThreadPool>,
-        req_handler: impl Fn(&ConcreteEnv, Req) -> Result<Res, RuntimeError> + Send + Sync + 'static,
+        req_handler: impl Fn(&ConcreteEnv, Req) -> DbResult<Res> + Send + Sync + 'static,
     ) -> Self {
         let inner_handler = Arc::new(move |req| req_handler(&env, req));
 
@@ -69,9 +69,9 @@ where
 {
     type Response = Res;
     type Error = RuntimeError;
-    type Future = InfallibleOneshotReceiver<Result<Self::Response, Self::Error>>;
+    type Future = InfallibleOneshotReceiver<DbResult<Self::Response>>;
 
-    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<DbResult<()>> {
         Poll::Ready(Ok(()))
     }
 
diff --git a/storage/service/src/service/write.rs b/storage/service/src/service/write.rs
index 607c4aa6..6bcd7255 100644
--- a/storage/service/src/service/write.rs
+++ b/storage/service/src/service/write.rs
@@ -6,7 +6,7 @@ use std::{
 
 use futures::channel::oneshot;
 
-use cuprate_database::{ConcreteEnv, Env, RuntimeError};
+use cuprate_database::{ConcreteEnv, DbResult, Env, RuntimeError};
 use cuprate_helper::asynch::InfallibleOneshotReceiver;
 
 //---------------------------------------------------------------------------------------------------- Constants
@@ -26,8 +26,7 @@ pub struct DatabaseWriteHandle<Req, Res> {
     /// Sender channel to the database write thread-pool.
     ///
     /// We provide the response channel for the thread-pool.
-    pub(super) sender:
-        crossbeam::channel::Sender<(Req, oneshot::Sender<Result<Res, RuntimeError>>)>,
+    pub(super) sender: crossbeam::channel::Sender<(Req, oneshot::Sender<DbResult<Res>>)>,
 }
 
 impl<Req, Res> Clone for DatabaseWriteHandle<Req, Res> {
@@ -48,7 +47,7 @@ where
     #[inline(never)] // Only called once.
     pub fn init(
         env: Arc<ConcreteEnv>,
-        inner_handler: impl Fn(&ConcreteEnv, &Req) -> Result<Res, RuntimeError> + Send + 'static,
+        inner_handler: impl Fn(&ConcreteEnv, &Req) -> DbResult<Res> + Send + 'static,
     ) -> Self {
         // Initialize `Request/Response` channels.
         let (sender, receiver) = crossbeam::channel::unbounded();
@@ -66,10 +65,10 @@ where
 impl<Req, Res> tower::Service<Req> for DatabaseWriteHandle<Req, Res> {
     type Response = Res;
     type Error = RuntimeError;
-    type Future = InfallibleOneshotReceiver<Result<Res, RuntimeError>>;
+    type Future = InfallibleOneshotReceiver<DbResult<Res>>;
 
     #[inline]
-    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<DbResult<()>> {
         Poll::Ready(Ok(()))
     }
 
@@ -89,8 +88,8 @@ impl<Req, Res> tower::Service<Req> for DatabaseWriteHandle<Req, Res> {
 /// The main function of the writer thread.
 fn database_writer<Req, Res>(
     env: &ConcreteEnv,
-    receiver: &crossbeam::channel::Receiver<(Req, oneshot::Sender<Result<Res, RuntimeError>>)>,
-    inner_handler: impl Fn(&ConcreteEnv, &Req) -> Result<Res, RuntimeError>,
+    receiver: &crossbeam::channel::Receiver<(Req, oneshot::Sender<DbResult<Res>>)>,
+    inner_handler: impl Fn(&ConcreteEnv, &Req) -> DbResult<Res>,
 ) where
     Req: Send + 'static,
     Res: Debug + Send + 'static,
diff --git a/storage/txpool/src/ops/key_images.rs b/storage/txpool/src/ops/key_images.rs
index 04aa1b44..76cae141 100644
--- a/storage/txpool/src/ops/key_images.rs
+++ b/storage/txpool/src/ops/key_images.rs
@@ -1,7 +1,7 @@
 //! Tx-pool key image ops.
 use monero_serai::transaction::Input;
 
-use cuprate_database::{DatabaseRw, RuntimeError};
+use cuprate_database::{DatabaseRw, DbResult};
 
 use crate::{ops::TxPoolWriteError, tables::SpentKeyImages, types::TransactionHash};
 
@@ -34,7 +34,7 @@ pub(super) fn add_tx_key_images(
 pub(super) fn remove_tx_key_images(
     inputs: &[Input],
     kis_table: &mut impl DatabaseRw<SpentKeyImages>,
-) -> Result<(), RuntimeError> {
+) -> DbResult<()> {
     for ki in inputs.iter().map(ki_from_input) {
         kis_table.delete(&ki)?;
     }
diff --git a/storage/txpool/src/ops/tx_read.rs b/storage/txpool/src/ops/tx_read.rs
index 55690750..24101f77 100644
--- a/storage/txpool/src/ops/tx_read.rs
+++ b/storage/txpool/src/ops/tx_read.rs
@@ -5,7 +5,7 @@ use std::sync::Mutex;
 
 use monero_serai::transaction::Transaction;
 
-use cuprate_database::{DatabaseRo, RuntimeError};
+use cuprate_database::{DatabaseRo, DbResult};
 use cuprate_types::{TransactionVerificationData, TxVersion};
 
 use crate::{
@@ -17,7 +17,7 @@ use crate::{
 pub fn get_transaction_verification_data(
     tx_hash: &TransactionHash,
     tables: &impl Tables,
-) -> Result<TransactionVerificationData, RuntimeError> {
+) -> DbResult<TransactionVerificationData> {
     let tx_blob = tables.transaction_blobs().get(tx_hash)?.0;
 
     let tx_info = tables.transaction_infos().get(tx_hash)?;
@@ -45,7 +45,7 @@ pub fn get_transaction_verification_data(
 pub fn in_stem_pool(
     tx_hash: &TransactionHash,
     tx_infos: &impl DatabaseRo<TransactionInfos>,
-) -> Result<bool, RuntimeError> {
+) -> DbResult<bool> {
     Ok(tx_infos
         .get(tx_hash)?
         .flags
diff --git a/storage/txpool/src/ops/tx_write.rs b/storage/txpool/src/ops/tx_write.rs
index dc5ab463..8f426fb7 100644
--- a/storage/txpool/src/ops/tx_write.rs
+++ b/storage/txpool/src/ops/tx_write.rs
@@ -4,7 +4,7 @@
 use bytemuck::TransparentWrapper;
 use monero_serai::transaction::{NotPruned, Transaction};
 
-use cuprate_database::{DatabaseRw, RuntimeError, StorableVec};
+use cuprate_database::{DatabaseRw, DbResult, StorableVec};
 use cuprate_types::TransactionVerificationData;
 
 use crate::{
@@ -67,10 +67,7 @@ pub fn add_transaction(
 }
 
 /// Removes a transaction from the transaction pool.
-pub fn remove_transaction(
-    tx_hash: &TransactionHash,
-    tables: &mut impl TablesMut,
-) -> Result<(), RuntimeError> {
+pub fn remove_transaction(tx_hash: &TransactionHash, tables: &mut impl TablesMut) -> DbResult<()> {
     // Remove the tx blob from table 0.
     let tx_blob = tables.transaction_blobs_mut().take(tx_hash)?.0;
 
diff --git a/storage/txpool/src/service/read.rs b/storage/txpool/src/service/read.rs
index 0de1e7d0..44a29b3c 100644
--- a/storage/txpool/src/service/read.rs
+++ b/storage/txpool/src/service/read.rs
@@ -11,7 +11,7 @@ use std::{
 
 use rayon::ThreadPool;
 
-use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError};
+use cuprate_database::{ConcreteEnv, DatabaseRo, DbResult, Env, EnvInner, RuntimeError};
 use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads};
 
 use crate::{
@@ -137,7 +137,7 @@ fn filter_known_tx_blob_hashes(
 
     // A closure that returns `true` if a tx with a certain blob hash is unknown.
     // This also fills in `stem_tx_hashes`.
-    let mut tx_unknown = |blob_hash| -> Result<bool, RuntimeError> {
+    let mut tx_unknown = |blob_hash| -> DbResult<bool> {
         match tx_blob_hashes.get(&blob_hash) {
             Ok(tx_hash) => {
                 if in_stem_pool(&tx_hash, &tx_infos)? {
diff --git a/storage/txpool/src/service/types.rs b/storage/txpool/src/service/types.rs
index 5c6b97ce..af1ca98b 100644
--- a/storage/txpool/src/service/types.rs
+++ b/storage/txpool/src/service/types.rs
@@ -2,7 +2,7 @@
 //!
 //! Only used internally for our [`tower::Service`] impls.
 
-use cuprate_database::RuntimeError;
+use cuprate_database::DbResult;
 use cuprate_database_service::{DatabaseReadService, DatabaseWriteHandle};
 
 use crate::service::interface::{
@@ -12,7 +12,7 @@ use crate::service::interface::{
 /// The actual type of the response.
 ///
 /// Either our [`TxpoolReadResponse`], or a database error occurred.
-pub(super) type ReadResponseResult = Result<TxpoolReadResponse, RuntimeError>;
+pub(super) type ReadResponseResult = DbResult<TxpoolReadResponse>;
 
 /// The transaction pool database write service.
 pub type TxpoolWriteHandle = DatabaseWriteHandle<TxpoolWriteRequest, TxpoolWriteResponse>;
diff --git a/storage/txpool/src/service/write.rs b/storage/txpool/src/service/write.rs
index 13ab81fa..23c5a8a4 100644
--- a/storage/txpool/src/service/write.rs
+++ b/storage/txpool/src/service/write.rs
@@ -1,6 +1,8 @@
 use std::sync::Arc;
 
-use cuprate_database::{ConcreteEnv, DatabaseRo, DatabaseRw, Env, EnvInner, RuntimeError, TxRw};
+use cuprate_database::{
+    ConcreteEnv, DatabaseRo, DatabaseRw, DbResult, Env, EnvInner, RuntimeError, TxRw,
+};
 use cuprate_database_service::DatabaseWriteHandle;
 use cuprate_types::TransactionVerificationData;
 
@@ -25,7 +27,7 @@ pub(super) fn init_write_service(env: Arc<ConcreteEnv>) -> TxpoolWriteHandle {
 fn handle_txpool_request(
     env: &ConcreteEnv,
     req: &TxpoolWriteRequest,
-) -> Result<TxpoolWriteResponse, RuntimeError> {
+) -> DbResult<TxpoolWriteResponse> {
     match req {
         TxpoolWriteRequest::AddTransaction { tx, state_stem } => {
             add_transaction(env, tx, *state_stem)
@@ -50,7 +52,7 @@ fn add_transaction(
     env: &ConcreteEnv,
     tx: &TransactionVerificationData,
     state_stem: bool,
-) -> Result<TxpoolWriteResponse, RuntimeError> {
+) -> DbResult<TxpoolWriteResponse> {
     let env_inner = env.env_inner();
     let tx_rw = env_inner.tx_rw()?;
 
@@ -83,7 +85,7 @@ fn add_transaction(
 fn remove_transaction(
     env: &ConcreteEnv,
     tx_hash: &TransactionHash,
-) -> Result<TxpoolWriteResponse, RuntimeError> {
+) -> DbResult<TxpoolWriteResponse> {
     let env_inner = env.env_inner();
     let tx_rw = env_inner.tx_rw()?;
 
@@ -105,10 +107,7 @@ fn remove_transaction(
 }
 
 /// [`TxpoolWriteRequest::Promote`]
-fn promote(
-    env: &ConcreteEnv,
-    tx_hash: &TransactionHash,
-) -> Result<TxpoolWriteResponse, RuntimeError> {
+fn promote(env: &ConcreteEnv, tx_hash: &TransactionHash) -> DbResult<TxpoolWriteResponse> {
     let env_inner = env.env_inner();
     let tx_rw = env_inner.tx_rw()?;
 
@@ -134,10 +133,7 @@ fn promote(
 }
 
 /// [`TxpoolWriteRequest::NewBlock`]
-fn new_block(
-    env: &ConcreteEnv,
-    spent_key_images: &[KeyImage],
-) -> Result<TxpoolWriteResponse, RuntimeError> {
+fn new_block(env: &ConcreteEnv, spent_key_images: &[KeyImage]) -> DbResult<TxpoolWriteResponse> {
     let env_inner = env.env_inner();
     let tx_rw = env_inner.tx_rw()?;
 
diff --git a/test-utils/src/data/constants.rs b/test-utils/src/data/constants.rs
index fff04416..78413edf 100644
--- a/test-utils/src/data/constants.rs
+++ b/test-utils/src/data/constants.rs
@@ -104,7 +104,7 @@ macro_rules! const_tx_blob {
         hash: $hash:literal, // Transaction hash
         data_path: $data_path:literal, // Path to the transaction blob
         version: $version:literal, // Transaction version
-        timelock: $timelock:expr, // Transaction's timelock (use the real type `Timelock`)
+        timelock: $timelock:expr_2021, // Transaction's timelock (use the real type `Timelock`)
         input_len: $input_len:literal, // Amount of inputs
         output_len: $output_len:literal, // Amount of outputs
     ) => {
diff --git a/test-utils/src/rpc/data/macros.rs b/test-utils/src/rpc/data/macros.rs
index 63a214c6..5f87c53a 100644
--- a/test-utils/src/rpc/data/macros.rs
+++ b/test-utils/src/rpc/data/macros.rs
@@ -25,11 +25,11 @@ macro_rules! define_request_and_response {
 
         // The request type (and any doc comments, derives, etc).
         $( #[$request_attr:meta] )*
-        Request = $request:expr;
+        Request = $request:expr_2021;
 
         // The response type (and any doc comments, derives, etc).
         $( #[$response_attr:meta] )*
-        Response = $response:expr;
+        Response = $response:expr_2021;
     ) => { paste::paste! {
         #[doc = $crate::rpc::data::macros::define_request_and_response_doc!(
             "response" => [<$name:upper _RESPONSE>],
diff --git a/types/src/blockchain.rs b/types/src/blockchain.rs
index 258d526f..7518935d 100644
--- a/types/src/blockchain.rs
+++ b/types/src/blockchain.rs
@@ -11,9 +11,7 @@ use std::{
 use monero_serai::block::Block;
 
 use crate::{
-    types::{
-        Chain, ExtendedBlockHeader, MissingTxsInBlock, OutputOnChain, VerifiedBlockInformation,
-    },
+    types::{Chain, ExtendedBlockHeader, OutputOnChain, TxsInBlock, VerifiedBlockInformation},
     AltBlockInformation, BlockCompleteEntry, ChainId, ChainInfo, CoinbaseTxSum,
     OutputHistogramEntry, OutputHistogramInput,
 };
@@ -122,7 +120,7 @@ pub enum BlockchainReadRequest {
     FindFirstUnknown(Vec<[u8; 32]>),
 
     /// A request for transactions from a specific block.
-    MissingTxsInBlock {
+    TxsInBlock {
         /// The block to get transactions from.
         block_hash: [u8; 32],
         /// The indexes of the transactions from the block.
@@ -287,10 +285,10 @@ pub enum BlockchainResponse {
 
     /// Response to [`BlockchainReadRequest::NextChainEntry`].
     ///
-    /// If all blocks were unknown `start_height` will be `0`, the other fields will be meaningless.
+    /// If all blocks were unknown `start_height` will be [`None`], the other fields will be meaningless.
     NextChainEntry {
-        /// The start height of this entry, `0` if we could not find the split point.
-        start_height: usize,
+        /// The start height of this entry, [`None`] if we could not find the split point.
+        start_height: Option<std::num::NonZero<usize>>,
         /// The current chain height.
         chain_height: usize,
         /// The next block hashes in the entry.
@@ -310,10 +308,10 @@ pub enum BlockchainResponse {
     /// This will be [`None`] if all blocks were known.
     FindFirstUnknown(Option<(usize, usize)>),
 
-    /// The response for [`BlockchainReadRequest::MissingTxsInBlock`].
+    /// The response for [`BlockchainReadRequest::TxsInBlock`].
     ///
     /// Will return [`None`] if the request contained an index out of range.
-    MissingTxsInBlock(Option<MissingTxsInBlock>),
+    TxsInBlock(Option<TxsInBlock>),
 
     /// The response for [`BlockchainReadRequest::AltBlocksInChain`].
     ///
diff --git a/types/src/lib.rs b/types/src/lib.rs
index 51d37d6c..7aaf0b9e 100644
--- a/types/src/lib.rs
+++ b/types/src/lib.rs
@@ -26,7 +26,7 @@ pub use transaction_verification_data::{
 pub use types::{
     AddAuxPow, AltBlockInformation, AuxPow, Chain, ChainId, ChainInfo, CoinbaseTxSum,
     ExtendedBlockHeader, FeeEstimate, HardForkInfo, MinerData, MinerDataTxBacklogEntry,
-    MissingTxsInBlock, OutputHistogramEntry, OutputHistogramInput, OutputOnChain,
+    OutputHistogramEntry, OutputHistogramInput, OutputOnChain, TxsInBlock,
     VerifiedBlockInformation, VerifiedTransactionInformation,
 };
 
diff --git a/types/src/types.rs b/types/src/types.rs
index ebb02c56..8a5b5aad 100644
--- a/types/src/types.rs
+++ b/types/src/types.rs
@@ -259,9 +259,9 @@ pub struct AddAuxPow {
     pub aux_pow: Vec<AuxPow>,
 }
 
-/// The inner response for a request for missing txs.
+/// The inner response for a request for txs in a block.
 #[derive(Clone, Debug, PartialEq, Eq)]
-pub struct MissingTxsInBlock {
+pub struct TxsInBlock {
     pub block: Vec<u8>,
     pub txs: Vec<Vec<u8>>,
 }
diff --git a/zmq/types/Cargo.toml b/zmq/types/Cargo.toml
new file mode 100644
index 00000000..78e7d00a
--- /dev/null
+++ b/zmq/types/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "cuprate-zmq-types"
+version = "0.1.0"
+edition = "2021"
+description = "Types for the ZMQ Pub/Sub API"
+license = "MIT"
+authors = ["dimalinux"]
+repository = "https://github.com/Cuprate/cuprate/tree/main/zmq/types"
+
+[dependencies]
+serde = { workspace = true, features = ["derive"] }
+hex = { workspace = true, features = ["std", "serde"] }
+cuprate-types = { workspace = true, features = ["hex"] }
+
+[dev-dependencies]
+serde_json = { workspace = true, features = ["std"] }
+assert-json-diff = "2.0.2"
+
+[lints]
+workspace = true
diff --git a/zmq/types/src/json_message_types.rs b/zmq/types/src/json_message_types.rs
new file mode 100644
index 00000000..2699600f
--- /dev/null
+++ b/zmq/types/src/json_message_types.rs
@@ -0,0 +1,646 @@
+//! Objects for JSON serialization and deserialization in message bodies of
+//! the ZMQ pub/sub interface. Handles JSON for the following subscriptions:
+//! * `json-full-txpool_add` (`Vec<TxPoolAdd>`)
+//! * `json-minimal-txpool_add` (`Vec<TxPoolAddMin>`)
+//! * `json-full-chain_main` (`Vec<ChainMain>`)
+//! * `json-minimal-chain_main` (`ChainMainMin`)
+//! * `json-full-miner_data` (`MinerData`)
+use cuprate_types::hex::HexBytes;
+use serde::{Deserialize, Serialize};
+
+/// ZMQ `json-full-txpool_add` packets contain an array of `TxPoolAdd`.
+///
+/// Each `TxPoolAdd` object represents a new transaction in the mempool that was
+/// not previously seen in a block. Miner coinbase transactions *are not*
+/// included. `do-not-relay` transactions *are* included. Values are not
+/// republished during a re-org.
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct TxPoolAdd {
+    /// transaction version number. `2` indicates Ring CT (all sub-variants).
+    pub version: u8,
+    /// if not `0` and less than `500_000_000`, this is the block height when
+    /// transaction output(s) are spendable; if >= `500_000_000` this is roughly
+    /// the unix epoch block timestamp when the output(s) are spendable.
+    pub unlock_time: u64,
+    /// transaction inputs (key images) with separate rings for each input
+    pub inputs: Vec<PoolInput>,
+    /// transaction outputs
+    pub outputs: Vec<Output>,
+    /// extra data for the transaction with variable size, but limited to `1060`
+    /// bytes (`2120` hex nibbles).
+    #[serde(with = "hex::serde")]
+    pub extra: Vec<u8>,
+    /// obsolete, empty array in JSON
+    signatures: [Obsolete; 0],
+    /// ring confidential transaction data
+    pub ringct: PoolRingCt,
+}
+
+/// ZMQ `json-minimal-txpool_add` subscriber messages contain an array of
+/// `TxPoolAddMin` JSON objects. See `TxPoolAdd` for information on which
+/// transactions are published to subscribers.
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct TxPoolAddMin {
+    /// transaction ID
+    pub id: HexBytes<32>,
+    /// size of the full transaction blob
+    pub blob_size: u64,
+    /// metric used to calculate transaction fee
+    pub weight: u64,
+    /// mining fee included in the transaction in piconeros
+    pub fee: u64,
+}
+
+/// ZMQ `json-full-chain_main` subscriber messages contain an array of
+/// `ChainMain` JSON objects. Each `ChainMain` object represents a new block.
+/// Push messages only contain more than one block if a re-org occurred.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct ChainMain {
+    /// major version of the monero protocol at this block's height
+    pub major_version: u8,
+    /// minor version of the monero protocol at this block's height
+    pub minor_version: u8,
+    /// epoch time, decided by the miner, at which the block was mined
+    pub timestamp: u64,
+    /// block id of the previous block
+    pub prev_id: HexBytes<32>,
+    /// cryptographic random one-time number used in mining a Monero block
+    pub nonce: u32,
+    /// coinbase transaction information
+    pub miner_tx: MinerTx,
+    /// non-coinbase transaction IDs in the block (can be empty)
+    pub tx_hashes: Vec<HexBytes<32>>,
+}
+
+/// ZMQ `json-minimal-chain_main` subscriber messages contain a single
+/// `ChainMainMin` JSON object. Unlike the full version, only the topmost
+/// block is sent in the case of a re-org.
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct ChainMainMin {
+    /// height of the block
+    pub first_height: u64,
+    /// block id of the previous block
+    pub first_prev_id: HexBytes<32>,
+    /// block ID of the current block is the 0th entry; additional block IDs
+    /// will only be included if this is the topmost block of a re-org.
+    pub ids: Vec<HexBytes<32>>,
+}
+
+/// ZMQ `json-full-miner_data` subscriber messages contain a single
+/// `MinerData` object that provides the necessary data to create a
+/// custom block template. There is no min version of this object.
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct MinerData {
+    /// major version of the monero protocol for the next mined block
+    pub major_version: u8,
+    /// height on which to mine
+    pub height: u64,
+    /// block id of the most recent block on which to mine the next block
+    pub prev_id: HexBytes<32>,
+    /// hash of block to use as seed for Random-X proof-of-work
+    pub seed_hash: HexBytes<32>,
+    /// least-significant 64 bits of the 128-bit network difficulty
+    #[serde(with = "hex_difficulty")]
+    pub difficulty: u64,
+    /// median adjusted block size of the latest 100000 blocks
+    pub median_weight: u64,
+    /// fixed at `u64::MAX` in perpetuity as Monero has already reached tail emission
+    pub already_generated_coins: u64,
+    /// mineable mempool transactions
+    pub tx_backlog: Vec<TxBacklog>,
+}
+
+/// Holds a single input for the `TxPoolAdd` `inputs` array.
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct PoolInput {
+    pub to_key: ToKey,
+}
+
+/// Same as `PoolInput` (adds an extra JSON name layer)
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct ToKey {
+    /// obsolete field (always 0), non-coinbase TX amounts are now encrypted
+    amount: u64,
+    /// integer offsets for ring members
+    pub key_offsets: Vec<u64>,
+    /// key image for the given input
+    pub key_image: HexBytes<32>,
+}
+
+/// Holds the block height of the coinbase transaction.
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct MinerInput {
+    /// namespace layer around the block height
+    pub r#gen: Gen,
+}
+
+/// Additional namespace layer around the block height in `ChainMain`; gen is
+/// another name for a coinbase transaction
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct Gen {
+    /// block height when the coinbase transaction was created
+    pub height: u64,
+}
+
+/// Transaction output data used by both `TxPoolAdd` and `MinerTx`
+#[derive(Debug, Default, Clone, Copy, Serialize, Deserialize)]
+pub struct Output {
+    /// zero for non-coinbase transactions which use encrypted amounts or
+    /// an amount in piconeros for coinbase transactions
+    pub amount: u64,
+    /// public key of the output destination
+    pub to_tagged_key: ToTaggedKey,
+}
+
+/// Holds the public key of an output destination with its view tag.
+#[derive(Debug, Default, Clone, Copy, Serialize, Deserialize)]
+pub struct ToTaggedKey {
+    /// public key used to indicate the destination of a transaction output
+    pub key: HexBytes<32>,
+    /// 1st byte of a shared secret used to reduce wallet synchronization time
+    pub view_tag: HexBytes<1>,
+}
+
+/// Ring CT information used inside `TxPoolAdd`
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct PoolRingCt {
+    /// ring CT type; `6` is CLSAG Bulletproof Plus
+    pub r#type: u8,
+    /// encrypted amount values of the transaction outputs
+    pub encrypted: Vec<Encrypted>,
+    /// Ring CT commitments, 1 per transaction input
+    pub commitments: Vec<HexBytes<32>>,
+    /// mining fee in piconeros
+    pub fee: u64,
+    /// data to validate the transaction that can be pruned from older blocks
+    pub prunable: Prunable,
+}
+
+/// Ring CT information used inside `MinerTx`. Miner coinbase transactions don't
+/// use Ring CT, so this only holds a type field that is always zero.
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+struct MinerRingCt {
+    /// always zero to indicate that Ring CT is not used
+    r#type: u8,
+}
+
+/// Holds the encrypted amount of a non-coinbase transaction output.
+#[derive(Debug, Default, Clone, Copy, Serialize, Deserialize)]
+pub struct Encrypted {
+    /// obsolete field, but present as zeros in JSON; this does not represent
+    /// the newer deterministically derived mask
+    mask: HexBytes<32>,
+    /// encrypted amount of the transaction output
+    pub amount: HexBytes<32>,
+}
+
+/// Data needed to validate a transaction that can optionally be pruned from
+/// older blocks.
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct Prunable {
+    /// obsolete, empty array in JSON
+    range_proofs: [Obsolete; 0],
+    /// obsolete, empty array in JSON
+    bulletproofs: [Obsolete; 0],
+    /// Bulletproofs+ data used to validate a Ring CT transaction
+    pub bulletproofs_plus: [BulletproofPlus; 1],
+    /// obsolete, empty array in JSON
+    mlsags: [Obsolete; 0],
+    /// CLSAG signatures; 1 per transaction input
+    pub clsags: Vec<Clsag>,
+    /// Ring CT pseudo output commitments; 1 per transaction input (*not*
+    /// output)
+    pub pseudo_outs: Vec<HexBytes<32>>,
+}
+
+/// Bulletproofs+ data used to validate the legitimacy of a Ring CT transaction.
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+#[expect(non_snake_case)]
+pub struct BulletproofPlus {
+    pub V: Vec<HexBytes<32>>,
+    pub A: HexBytes<32>,
+    pub A1: HexBytes<32>,
+    pub B: HexBytes<32>,
+    pub r1: HexBytes<32>,
+    pub s1: HexBytes<32>,
+    pub d1: HexBytes<32>,
+    pub L: Vec<HexBytes<32>>,
+    pub R: Vec<HexBytes<32>>,
+}
+
+/// Placeholder element type so obsolete fields can be deserialized
+/// to the empty vector for backwards compatibility.
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
+struct Obsolete;
+
+/// CLSAG signature fields
+#[expect(non_snake_case)]
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct Clsag {
+    pub s: Vec<HexBytes<32>>,
+    pub c1: HexBytes<32>,
+    pub D: HexBytes<32>,
+}
+
+/// Part of the new block information in `ChainMain`
+#[derive(Debug, Serialize, Deserialize)]
+pub struct MinerTx {
+    /// transaction version number
+    pub version: u8,
+    /// block height when the coinbase transaction becomes spendable (currently
+    /// 60 blocks above the coinbase transaction height)
+    pub unlock_time: u64,
+    /// contains the block height in `inputs[0].gen.height` and nothing else as
+    /// coinbase transactions have no inputs
+    pub inputs: [MinerInput; 1],
+    /// transaction outputs
+    pub outputs: Vec<Output>,
+    /// extra data for the transaction with variable size; not limited to `1060`
+    /// bytes like the extra field of non-coinbase transactions
+    #[serde(with = "hex::serde")]
+    pub extra: Vec<u8>,
+    /// obsolete, empty array in JSON
+    signatures: [Obsolete; 0],
+    /// only for JSON compatibility; miners don't use Ring CT
+    ringct: MinerRingCt,
+}
+
+/// Holds a transaction entry in the `MinerData` `tx_backlog` field.
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct TxBacklog {
+    /// transaction ID
+    pub id: HexBytes<32>,
+    /// metric used to calculate transaction fee
+    pub weight: u64,
+    /// mining fee in piconeros
+    pub fee: u64,
+}
+
+mod hex_difficulty {
+    //! Serializes the u64 difficulty field of `MinerData` in the same way as
+    //! monerod. The difficulty value is inside a string, in big-endian hex,
+    //! and has a 0x prefix with no leading zeros.
+    use serde::{Deserialize, Deserializer, Serializer};
+
+    /// Render the difficulty as a `"0x…"` lowercase-hex JSON string.
+    #[expect(clippy::trivially_copy_pass_by_ref)]
+    pub(super) fn serialize<S>(difficulty: &u64, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let hex_string = format!("0x{difficulty:x}");
+        serializer.serialize_str(&hex_string)
+    }
+
+    /// Parse a hex string back into a `u64`; a missing `0x` prefix is tolerated.
+    pub(super) fn deserialize<'de, D>(deserializer: D) -> Result<u64, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let text = String::deserialize(deserializer)?;
+        let digits = text.strip_prefix("0x").unwrap_or(&text);
+        u64::from_str_radix(digits, 16).map_err(serde::de::Error::custom)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    //! Round-trip tests: every fixture is deserialized from a captured
+    //! monerod JSON document and serialized back; the result must be
+    //! structurally identical to the original (`assert_json_eq!`).
+    use assert_json_diff::assert_json_eq;
+    use serde_json::{self, json};
+
+    use super::*;
+
+    /// Full `txpool_add` message: one RingCT type-6 transaction with
+    /// Bulletproofs+ range proof and a CLSAG signature.
+    #[test]
+    fn test_txpooladd_json() {
+        let json1 = json!([
+          {
+            "version": 2,
+            "unlock_time": 0,
+            "inputs": [
+              {
+                "to_key": {
+                  "amount": 0,
+                  "key_offsets": [
+                    82773133,
+                    30793552,
+                    578803,
+                    620532,
+                    114291,
+                    291870,
+                    111275,
+                    86455,
+                    19769,
+                    1238,
+                    15164,
+                    11374,
+                    5240,
+                    3547,
+                    7423,
+                    4198
+                  ],
+                  "key_image": "89c060b57bba20c0b795bda4b618749e04eba5b40b30062b071dff6e8dd9071d"
+                }
+              }
+            ],
+            "outputs": [
+              {
+                "amount": 0,
+                "to_tagged_key": {
+                  "key": "05b4ff4c3ced6ba078a078af8fee5916512a1893f2b6d9373fb90e0eb4040095",
+                  "view_tag": "7a"
+                }
+              },
+              {
+                "amount": 0,
+                "to_tagged_key": {
+                  "key": "60250376bca49bf24cef45c12738b86347df10954cd35630e81b90bf01e922af",
+                  "view_tag": "b8"
+                }
+              }
+            ],
+            "extra": "01154b87b3334ce9f99d04635eae4e31252a20ba22acb96ff0764a03dc91d203ed020901be80cbce0723d0b4",
+            "signatures": [],
+            "ringct": {
+              "type": 6,
+              "encrypted": [
+                {
+                  "mask": "0000000000000000000000000000000000000000000000000000000000000000",
+                  "amount": "a956be1858615454000000000000000000000000000000000000000000000000"
+                },
+                {
+                  "mask": "0000000000000000000000000000000000000000000000000000000000000000",
+                  "amount": "72972be61af1210b000000000000000000000000000000000000000000000000"
+                }
+              ],
+              "commitments": [
+                "cc2a17e43f0b183235a06e8582fcaaa7c21a07732077e66d4dcfaa0db691ea20",
+                "04e3cd1d3430bb7a1d9ede5ce9ec0ef2f6f9dd9fd31fb95c9e0b3148f1a660c8"
+              ],
+              "fee": 30660000,
+              "prunable": {
+                "range_proofs": [],
+                "bulletproofs": [],
+                "bulletproofs_plus": [
+                  {
+                    "V": [
+                      "0196c1e9ba57ae053ae19c1bfd49e13146bd4b6e49401582f8a5a6f65ae560d0",
+                      "aecd14b0e2d788315023601947c12d7e9227d8a1a0aee41f0b34fe196d96119f"
+                    ],
+                    "A": "8011fb75ba56d16b1ef1193e1fdfdb81e6b83afd726087427163857e8fcdf08e",
+                    "A1": "ab91ab6863fbdee1fb71791e5297d007269f1b2cc050df40628ee7d0a1a5f3cb",
+                    "B": "df1d082111b51d479b7fa72f6363bb731207c9343a528dc05b5798af56702521",
+                    "r1": "2e212ae9ad704611a39b9b242453d2408045b303738b51d6f88f9dba06233401",
+                    "s1": "36be53973fd971edff1f43cc5d04dda78d2b01f4caeaf38bbe195b04e309b30d",
+                    "d1": "592116ca54b2d3ca0e9f222ffcc5fd63d3c992470473911fc70822f37672350a",
+                    "L": [
+                      "98f1e11d62b90c665a8a96fb1b10332e37a790ea1e01a9e8ec8de74b7b27b0df",
+                      "3a14689f3d743a3be719df9af28ca2f0f398e3a2731d5d6f342d0485bf81a525",
+                      "bcb9e389fd494db66e4c796ff03795daa131426c0776ded6d37bfae51f29623d",
+                      "5aa7e1f2bfcfcd74ac8305ce59a7baf5a901f84f8fbdd3a2d639e4058f35e98b",
+                      "5939aa7ea012f88a26bab20270ce5b164c1880f793dc249ec215a0783b4d4ca7",
+                      "08286f78d1bb0d7fc2efc7a3ac314707a4a1ac9656656d496180e131c1748496",
+                      "7fc1de780305601aab95fda4b005927a4643f222e28407c31ad46cc935b7a27c"
+                    ],
+                    "R": [
+                      "69b4f329c0a5f8ae05891ac5ac35b947a7442b66e5b5693c99435deac3a62662",
+                      "a193038cb8dc9d22abe6577fe44271c1693176cb636f9d101723670fb5ca5cda",
+                      "90670e7083e503c2989b6548500234740dabf3451b0bd376979e03ca0cb5e50c",
+                      "6ab149089f73799811f631eab272bd6c8f190f38efff4d49577364956d0148bf",
+                      "62f2178cbdc760a0d3787b5fd42161c3c98394c2ff2b88efc039df59d2116e5d",
+                      "536f91da278f730f2524260d2778dc5959d40a5c724dd789d35bbd309eabd933",
+                      "e47c5c8181e692f3ad91733e7d9a52f8b7e3f5016c5e65f789eea367a13f16cd"
+                    ]
+                  }
+                ],
+                "mlsags": [],
+                "clsags": [
+                  {
+                    "s": [
+                      "f70840a8d65da85e962d2ce5ed1293ae3de83318b464363db85505d99e317b01",
+                      "b7c1125be139b4ed201ce85b8453920306cac7c5da11e0f8c0fd7702f15c6a06",
+                      "5a04335699f5a816eed1cab79085814dbcf3be5cef51b078b1c3e0210bbba606",
+                      "e4743e114fd6352ea29e0b48ac96688edaba1d5d0634c34301756902eeb1fb0e",
+                      "34aae87ab091082356d2815a7c8e973124245ebc6d163b9f01fbfeb360edcf04",
+                      "d2d0b6ddb44ed42096affec08ea9cd77d2c7cdc5b2e1e964f836d3717640ec00",
+                      "79b34258c8be04ddd955389f7ee3b912286c23492c519a5687b81d770619620e",
+                      "3c889c19693463160d6c7e642c46f5d41db052ee3358c7dcb4826f48bca26607",
+                      "da04927a438fd0d9674e64f0c016f30fde27f251d3466f29dcd5b3d757fec90c",
+                      "f3e08d83b11ca6529bc18748d3f732c325fca8ff79f69f0ed754bcd529898102",
+                      "f00d7125909a9a8cc5283ffc7727fce945e85828459eecb836c7aedca414350e",
+                      "0a635a193af37be1c9519309f25eaf9f37b7bc5892864646d8d2a2187fcec601",
+                      "0c4154d575dff3699bd41f0c354601de6535161755bd2164526076f37e2c6908",
+                      "f7b21e2698333285ea10a95edbe80fe0bb8740c30b35c25bd2002e3693867e02",
+                      "a637f338ff2ed65fa96e5529abc575fc2a35ed1a3f62a9e7be495069d8438800",
+                      "f7c355f1c3a663978c5fe1c9337aabd4085ee537a61eec2c5c1e837cb3728c09"
+                    ],
+                    "c1": "c5dd25e0e32dbefa6ac1d0dc9072620eb97a99224462cdd163287f2b60b9810b",
+                    "D": "c4fa3f939ccf02e4c8842cbd417cf3690421986e558734a0a029f8a86d2791a8"
+                  }
+                ],
+                "pseudo_outs": [
+                  "bcb08920f5476d74294aeb89c8001123bffd2f2ab84e105d553b807674c595ce"
+                ]
+              }
+            }
+          }
+        ]);
+
+        // deserialize → serialize must reproduce the fixture exactly
+        let tx_pool_adds: Vec<TxPoolAdd> = serde_json::from_value(json1.clone()).unwrap();
+        let json2 = serde_json::to_value(&tx_pool_adds).unwrap();
+        assert_json_eq!(json1, json2);
+    }
+
+    /// Minimal `txpool_add` message (`TxPoolAddMin`): id, sizes and fee only.
+    #[test]
+    fn test_txpooladd_min_json() {
+        let json1 = json!([
+          {
+            "id": "b5086746e805d875cbbbbb49e19aac29d9b75019f656fab8516cdf64ac5cd346",
+            "blob_size": 1533,
+            "weight": 1533,
+            "fee": 30660000
+          }
+        ]);
+
+        let tx_pool_adds: Vec<TxPoolAddMin> = serde_json::from_value(json1.clone()).unwrap();
+        let json2 = serde_json::to_value(&tx_pool_adds).unwrap();
+        assert_json_eq!(json1, json2);
+    }
+
+    /// Full `chain_main` message: block header, coinbase (miner) transaction
+    /// with RingCT type 0, and the list of included tx hashes.
+    #[test]
+    fn test_chain_main_json() {
+        let json1 = json!([
+          {
+            "major_version": 16,
+            "minor_version": 16,
+            "timestamp": 1726973843,
+            "prev_id": "ce3731311b7e4c1e58a2fe902dbb5c60bb2c0decc163d5397fa52a260d7f09c1",
+            "nonce": 537273946,
+            "miner_tx": {
+              "version": 2,
+              "unlock_time": 3242818,
+              "inputs": [
+                {
+                  "gen": {
+                    "height": 3242758
+                  }
+                }
+              ],
+              "outputs": [
+                {
+                  "amount": 618188180000_u64,
+                  "to_tagged_key": {
+                    "key": "83faf44df7e9fb4cf54a8dd6a63868507d1a1896bdb35ea9110d739d5da6cf21",
+                    "view_tag": "38"
+                  }
+                }
+              ],
+              "extra": "010e3356a86dbb339354afbc693408dfe8648bffd0b276e6a431861eb73643d88d02115162e362c98e2d00000000000000000000",
+              "signatures": [],
+              "ringct": {
+                "type": 0
+              }
+            },
+            "tx_hashes": [
+              "2c1b67d3f10b21270cac116e6d5278dc4024ee2d727e4ad56d6dedb1abc0270c",
+              "c2cfec0de23229a2ab80ca464cef66fc1cad53647a444f048834ec236c38c867",
+              "03c7649af2373c0f739d3c2eff9ee1580986b460d2abdd5e2aa332281e52da7e",
+              "1e0834cc658599e786040bdcd9b589a5e8d975233b72279d04ece1a3dd5572b0",
+              "ba65c30150e906a8799ee99bb2e6481873e42ed8b025cf967c5798528ddc81b4",
+              "6fc7b1da1cf433edafb142173e9ac13fe05142a36d8a72e9efdf7a3b94da11d6",
+              "847c06dcda4540d45cae868d4d031781bd87d9bfa4b2186a611428f52e68ccee",
+              "79f87a1b2fc17295d2cf25b6a65dd17fd8630829ee50f9c48f15e4a24e72d872",
+              "32b4f7ce6d864006b274dbd73fc8058151d0fd2dd0bb4b423120e32451fd59eb",
+              "430fe7fa00b63b68b301a4e4810bef2b5be1f651dba8c360e86eac61227382e7",
+              "9f8d2bf5e39071abccb336404ea72ab85cb731500a1d386a3bf537b9046df29d",
+              "f63893b8226ca28c290cb65541d60c1675dda1e2a77a629b6b2d7c3886240b23",
+              "ee8608b6e80cce14beaf95f747f4da8e40e40a49ad1adc20038843a6da3df3c6",
+              "05783765c150ed1e46d6380726e7ca1f788305754e553f5f2d49b9f09aaaf88d",
+              "20b4b95e62f45b72014d6ab14edb0b31e273cdc8c8d106068dd32ef6e92fc0a2",
+              "9230fb0a9dce8e2ca7e109ebf3480838251691de8ed73ea91f74723c5cf19bac",
+              "d59cf84a25f56ec0f1352bb05645efe9b9326598c4f7c5bc39a87eb7a20c48fc",
+              "465deb73c48a460df71861d61666dabb906648035a1fecfd0e988ee37616c655",
+              "5767bc633729ba4555561510f3db739431b16744234dcd549a0d346eaa6685b1",
+              "2c8d9af5d5774de96e67835ac5adbc6ca5579125b08bc907b395645eea6410ec",
+              "d385c884a0687c3360725dd3a3f6acf6f64bf38d8eeea1644d80bc23b13ee870",
+              "b2bc7e9fa9c1da08a8b6ee58505611c05bc388fd30aece00e9a0041470f7e950",
+              "69a4a79b50d42d372e91c6608c2652d1d5ddd343526c387ef6cf1e3c158b1765",
+              "ef508dfa79bbedd226835c42a9d000a64cc4abe0250c9aa55fd968224e2b45c3",
+              "0413c3b3fc621c472e10a102d77456db506f0df10a909833aed0c6738fb31eeb",
+              "e0c52d6d649c2f1abce4c6ffce4dd75a23308afbb6abe33af53da232c40caf5f",
+              "cd1fd68d2a15002ca6236083ff6ae165c8fd922f410da79640a4342fd8ebd1c8",
+              "ba746f80ca4ee496f4188ba278f1ed69a913238450d52bd2e2f3d3bf6fdd43d3",
+              "13c964bc13a55621b7bbbfe9a6d703536d951bfa19eedee93dd1286020959021",
+              "41a6f8d0df227a401a9bd6f5c0fbc21ed89f515ea5c8434a087e8b880080ee1f",
+              "41c2b5994284790b1ee158f7b87aa1231c14975d6456a91ff6f93c6f81277965",
+              "7e6b7f169cc6cab88e652771157cf8c2eb6f69dffb6939a79b34c6554fe6c00b",
+              "619517d9d138bf95c6b77eb801526b8419616de2b8618ccfd3b6d1c10364bc64",
+              "52cca64fb20fc2f6d06034a1a2d9b5665972ebc2569ec69f8d473caada309add",
+              "219c106d09da5a27b339ea0f070da090779b31ef9ccfa90d6d25e7388341eff9",
+              "e07ce6e96e73cff80c9cc4c1b349ad1ef53cff210b876d4e7afd89fcc8b2e5dd",
+              "e98f2a00b2892cd65c0252d956d88a4bb8024c7db98ca003c127b097f097f276",
+              "ed81aa398071fe495e37095e51ff50053e132bd11f27ba9c06ac4bf4063b756f",
+              "667d29a0cefa311e06fcfc22c98ef75edf81deb6c8a812492eb255a049c826db",
+              "8b16e8cbc1765247456bd67a3106498f686401b7529dc0f6b03360caf8671135",
+              "013e443e63259748f6d1a5653374826618ba066b7febcf55c829333f0dd9a6c3",
+              "517a05d82de59a973eb4d343c45558841c9165ccd75ca7c9d2e1a35f80c26c15",
+              "af74d5dd44cfed8f40f853a6fc405dae23d547482296f8dbbc13c1aed2c3d8c5",
+              "b5086746e805d875cbbbbb49e19aac29d9b75019f656fab8516cdf64ac5cd346",
+              "cfcda18d058656797a1272b384774dcfc26a504a24298aa49ba060eb6b4a19e0",
+              "1f380660a99030cc45f85ba8ee0e0541035c0fde719c84aa692796328974c9dd",
+              "53127181a0301a27b3a2749dc997556b211d949a99aa34d1c52d5c54220f49d2",
+              "5d50a66df97f4decc4ecc3f5030589ef966d5af84a995f7fb14f1c02ae9704db",
+              "cdab9628acdb57c460e292660e7a07caf2ddbcffdfff92f3e5e4fb12119a11ca",
+              "e740a098a74d7a66a821c4ac3c5f913a82fc7445b5593cc5fa3e48ad1b4589b1",
+              "760549176fec210cfe0ff58eabbf2670cf33b4cd3942a3b60a98bf8f328a6d01",
+              "961b0956aa6303ed8ca1687d93ed46b9aa8a0203ec4ce0cbc2e86b364fbfb613",
+              "b9db041b2c3bfc6b5b0facb638b0b4643eec76b060039a6b11fb43682ed77a97",
+              "1011c321eb386b9975e8124bdb130790dcf4ac0021da3103cabbf7dfa18ccea7",
+              "6a9d3d15be4b25bd544d96bb1d7685e53f9484735bb22994feffb9037009aeeb",
+              "bf20d6193890cf7fdead9e3b60197564c663b5a62eda782a49d4aa7819bb9665",
+              "472d28f9d25a95e625eb808ff3827e7f6792009e1ba0b3b21951f3058b65a75d",
+              "e3931b2b66da07f983d2235d9d0b3a3098008458bdc0c1ad4370fae73e1eaa9e",
+              "e18a0dea6382c95aa4089a971190683b171e9405c06fd4111924144600f3bcf3",
+              "1a336bcf24026307821b76b9ca18b178c285c591c5df9906e3ffbd2050ccd356",
+              "8ca2d0e5ae9b9981bb8b76ba0da383c585664b2a2f4e861d58aab00c9b0cc808",
+              "e1866c27023ccea276034c4d572eab42713132e4fdb2aafa9488f6d74cd49303",
+              "3674cfafba4cdea5775a72a82e5d553bf180beab456b3cbaa7b41a1574fe1948",
+              "9bb400dd317425f40176c3094a5573037b0217e0b60761cb66a8fa15b63b36c3",
+              "c078048028aca3e9bc40f68f4d42ef25c6af2cef4da20bf3be70dd6a23b82d52",
+              "c28cc85f945085e70259ed02131ae3f8c5992e789c9c75c2c6e257306beaf26e",
+              "4c2b121795fe2b90fda84813543952382daa29c7b96edd9f96040df13e48e347",
+              "63c6fba30b5471fd60e715cbaf4448badafde68dbc42c54d96b56dd2c4bf2d15",
+              "a4240138ecfe736113581f318f261a01992eaa8fa5b7bd6938d9dbeb65aa85d7",
+              "b9d088a7b21f655d0cf50f8404e874f4d1655fb5565a354d2c0dd6d113619c66",
+              "9133e7e98a83f6e10a7fd44c104d9124d93e0d3e920f5c160873b394dd3a2fcb",
+              "953985dbd0ea6f86746e83be144ec2ff2897ef1f3506eede083b893e98dd63ea",
+              "83af840c4cad46de96c86fcf700ade32e73260d4a16cefa330cb5a722ef59fdf",
+              "eea3c0c2b016ea0c269f954fd8172c3d118f08103c9842b81b05290c9faf3780",
+              "ac43a363fdb81fa4f6df1cb06ba49a5f4eeef411957cf2afad55cbc1e79bc4d1",
+              "ca72cf7bda22aed15c16ca67e7b6cc57109cdc86d4ffe38fd71210a5380fcada",
+              "477dc1cd62106d9df6b37f8515579a48d01b310387087c08ce7062a8eb5df98d",
+              "d47b6dcd3b13288825c954df6c6e30eb683d1f79434beaee7172082f8ae74280",
+              "9c64ef20c69589c56fcc5f3a0d10f6957ecea248e44acb432aaf16a88eeef946",
+              "d2aa256bfd61bdb64ac38da6cbc3e77fb315bb9fbaf422087c10345377df44f6",
+              "8b9623e4513594a6eaeb3475ea7d0eb585dd8f6e20e21c316db0b942fada2336",
+              "860725ed0bd18c744e6b8b02888ad88be1cf23d7153131b220a0f9fbb76976bf",
+              "387cc6e807efc263a0ad6a30e6313a27d16abef038264d0afa0e6ad943be55da"
+            ]
+          }
+        ]);
+
+        let chain_main: Vec<ChainMain> = serde_json::from_value(json1.clone()).unwrap();
+        let json2 = serde_json::to_value(&chain_main).unwrap();
+        assert_json_eq!(json1, json2);
+    }
+
+    /// Minimal `chain_main` message (`ChainMainMin`): height, previous id,
+    /// and the new block ids only.
+    #[test]
+    fn test_chain_main_min_json() {
+        let json1 = json!({
+          "first_height": 3242758,
+          "first_prev_id": "ce3731311b7e4c1e58a2fe902dbb5c60bb2c0decc163d5397fa52a260d7f09c1",
+          "ids": [
+            "ee1238b884e64f7e438223aa8d42d0efc15e7640f1a432448fbad116dc72f1b2"
+          ]
+        });
+
+        let chain_main_min: ChainMainMin = serde_json::from_value(json1.clone()).unwrap();
+        let json2 = serde_json::to_value(&chain_main_min).unwrap();
+        assert_json_eq!(json1, json2);
+    }
+
+    /// `miner_data` message; exercises the `hex_difficulty` (de)serializer
+    /// ("0x…" string) and an `already_generated_coins` value of u64::MAX.
+    #[test]
+    fn test_miner_data_json() {
+        let json1 = json!({
+          "major_version": 16,
+          "height": 3242764,
+          "prev_id": "dc53c24683dca14586fb2909b9aa4a44adb524e010d438e2491e7d8cc1c80831",
+          "seed_hash": "526577d6e6689ba8736c16ccc76e6ce4ada3b0ceeaa3a2260b96ba188a17d705",
+          "difficulty": "0x526f2623ce",
+          "median_weight": 300000,
+          "already_generated_coins": 18446744073709551615_u64,
+          "tx_backlog": [
+            {
+              "id": "dbec64651bb4e83d0e9a05c2826bde605a940f12179fab0ab5dc8bc4392c776b",
+              "weight": 2905,
+              "fee": 929600000
+            },
+            {
+              "id": "ec5728dd1fbd98db1f93d612826e73b95f52cca49f247a6dbc35390f45766a7d",
+              "weight": 2222,
+              "fee": 44440000
+            },
+            {
+              "id": "41f613b1a470af494e0a705993e305dfaad3e365fcc0b0db0118256fc54559aa",
+              "weight": 2221,
+              "fee": 44420000
+            },
+            {
+              "id": "34fa33bf96dc2f825fe870e8f5402be6225c1623b345224e0dbc38b6407873de",
+              "weight": 2217,
+              "fee": 709440000
+            }
+          ]
+        });
+
+        let miner_data: MinerData = serde_json::from_value(json1.clone()).unwrap();
+        let json2 = serde_json::to_value(&miner_data).unwrap();
+        assert_json_eq!(json1, json2);
+    }
+}
diff --git a/zmq/types/src/lib.rs b/zmq/types/src/lib.rs
new file mode 100644
index 00000000..3f9562b6
--- /dev/null
+++ b/zmq/types/src/lib.rs
@@ -0,0 +1 @@
+pub mod json_message_types;