diff --git a/.github/actions/monerod-download/action.yml b/.github/actions/monerod-download/action.yml
new file mode 100644
index 00000000..c323d927
--- /dev/null
+++ b/.github/actions/monerod-download/action.yml
@@ -0,0 +1,62 @@
+#  MIT License
+#
+#  Copyright (c) 2022-2023 Luke Parker
+#  Copyright (c) Cuprate developers
+#
+#  Permission is hereby granted, free of charge, to any person obtaining a copy
+#  of this software and associated documentation files (the "Software"), to deal
+#  in the Software without restriction, including without limitation the rights
+#  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+#  copies of the Software, and to permit persons to whom the Software is
+#  furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice shall be included in all
+#  copies or substantial portions of the Software.
+#
+#  Initially taken from Serai Dex: <https://github.com/serai-dex/serai/blob/b823413c9b7ae6747b9af99e18379cfc49f4271a/.github/actions/monero/action.yml>.
+
+name: monerod-download
+description: Downloads the core Monero daemon
+
+inputs:
+  version:
+    description: "Version to download"
+    required: false
+    default: v0.18.3.3
+
+runs:
+  using: "composite"
+  steps:
+    - name: Monero Daemon Cache
+      id: cache-monerod
+      uses: actions/cache@v3
+      with:
+        path: |
+          monerod
+          monerod.exe
+        key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
+
+    - name: Download the Monero Daemon
+      if: steps.cache-monerod.outputs.cache-hit != 'true'
+      shell: bash
+      run: |
+        OS=${{ runner.os }}
+        ARCH=${{ runner.arch }}
+
+        case "$OS $ARCH" in
+          "Windows X64") FILE=monero-win-x64-${{ inputs.version }}.zip ;;
+          "Windows X86") FILE=monero-win-x86-${{ inputs.version }}.zip ;;
+          "Linux X64") FILE=monero-linux-x64-${{ inputs.version }}.tar.bz2 ;;
+          "Linux X86") FILE=monero-linux-x86-${{ inputs.version }}.tar.bz2 ;;
+          "macOS X64") FILE=monero-mac-x64-${{ inputs.version }}.tar.bz2 ;;
+          "macOS ARM64") FILE=monero-mac-armv8-${{ inputs.version }}.tar.bz2 ;;
+          *) exit 1 ;;
+        esac
+        curl -O -L https://downloads.getmonero.org/cli/$FILE
+        if [[ ${{ runner.os }} == Windows ]]; then
+          unzip $FILE
+          mv */monerod.exe monerod.exe
+        else
+          tar -xvf $FILE
+          mv */monerod monerod
+        fi
\ No newline at end of file
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ebf40b12..b45a5431 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -8,6 +8,7 @@ on:
   workflow_dispatch:
 
 env:
+  # Show colored output in CI.
   CARGO_TERM_COLOR: always
   # Show full panics.
   RUST_BACKTRACE: "full"
@@ -15,6 +16,8 @@ env:
   RUST_MIN_STACK: 8000000
   # Fail on documentation warnings.
   RUSTDOCFLAGS: '-D warnings'
+  # Enable debug information generation for build dependencies.
+  CARGO_PROFILE_DEV_BUILD_OVERRIDE_DEBUG: true
 
 jobs:
   # Run format separately.
@@ -53,10 +56,15 @@ jobs:
         include:
           - os: windows-latest
             shell: msys2 {0}
+            # GNU Windows is used as we need
+            # `unistd.h` and more in `cryptonight/`.
+            rust: stable-x86_64-pc-windows-gnu
           - os: macos-latest
             shell: bash
+            rust: stable
           - os: ubuntu-latest
             shell: bash
+            rust: stable
 
     defaults:
       run:
@@ -68,15 +76,21 @@ jobs:
       with:
         submodules: recursive
 
+    - name: Install Rust
+      uses: dtolnay/rust-toolchain@master
+      with:
+        toolchain: ${{ matrix.rust }}
+        components: clippy
+
     - name: Cache
       uses: actions/cache@v3
       with:
-        path: |
-          target
-          ~/.cargo
-          ~/.rustup
+        path: target
         key: ${{ matrix.os }}
 
+    - name: Download monerod
+      uses: ./.github/actions/monerod-download
+
     # Packages other than `Boost` used by `Monero` are listed here.
     # https://github.com/monero-project/monero/blob/c444a7e002036e834bfb4c68f04a121ce1af5825/.github/workflows/build.yml#L71
 
@@ -96,8 +110,21 @@ jobs:
         update: true
         install: mingw-w64-x86_64-toolchain mingw-w64-x86_64-boost msys2-runtime-devel git mingw-w64-x86_64-cmake mingw-w64-x86_64-ninja
 
+    # HACK: 2024-05-14
+    # GCC 14+ fails to build `lmdb-master-sys` with no clear error message:
+    # <https://github.com/Cuprate/cuprate/pull/127>
+    #
+    # - MSYS2 repos carry older versions of packages
+    # - pacman lets us manually downgrade from package files
+    # - Note that `gcc` requires `gcc-libs`
+    - name: Downgrade to GCC 13.2 (Windows)
+      if: matrix.os == 'windows-latest'
+      run: |
+        wget https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-gcc-13.2.0-6-any.pkg.tar.zst https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-gcc-libs-13.2.0-6-any.pkg.tar.zst
+        pacman -U --noconfirm mingw-w64-x86_64-gcc-13.2.0-6-any.pkg.tar.zst mingw-w64-x86_64-gcc-libs-13.2.0-6-any.pkg.tar.zst
+
     - name: Documentation
-      run: cargo doc --workspace --all-features
+      run: cargo doc --workspace --all-features --no-deps
 
     - name: Clippy (fail on warnings)
       run: cargo clippy --workspace --all-features --all-targets -- -D warnings
diff --git a/.gitignore b/.gitignore
index 90691967..5b939ef9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
 target/
 .vscode
+monerod
diff --git a/Cargo.lock b/Cargo.lock
index b469e148..2f95e836 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -17,22 +17,11 @@ version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
 
-[[package]]
-name = "aes"
-version = "0.8.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0"
-dependencies = [
- "cfg-if",
- "cipher",
- "cpufeatures",
-]
-
 [[package]]
 name = "ahash"
-version = "0.8.10"
+version = "0.8.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b79b82693f705137f8fb9b37871d99e4f9a7df12b917eed79c3d3954830a60b"
+checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
 dependencies = [
  "cfg-if",
  "once_cell",
@@ -42,9 +31,9 @@ dependencies = [
 
 [[package]]
 name = "allocator-api2"
-version = "0.2.16"
+version = "0.2.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"
+checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
 
 [[package]]
 name = "android-tzdata"
@@ -132,26 +121,26 @@ dependencies = [
 
 [[package]]
 name = "async-trait"
-version = "0.1.77"
+version = "0.1.80"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9"
+checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
 ]
 
 [[package]]
 name = "autocfg"
-version = "1.1.0"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80"
 
 [[package]]
 name = "backtrace"
-version = "0.3.69"
+version = "0.3.71"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837"
+checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d"
 dependencies = [
  "addr2line",
  "cc",
@@ -174,9 +163,9 @@ dependencies = [
 
 [[package]]
 name = "base64"
-version = "0.21.7"
+version = "0.22.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
+checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51"
 
 [[package]]
 name = "base64ct"
@@ -216,10 +205,11 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
 
 [[package]]
 name = "bitflags"
-version = "2.4.2"
+version = "2.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf"
+checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
 dependencies = [
+ "bytemuck",
  "serde",
 ]
 
@@ -255,9 +245,9 @@ dependencies = [
 
 [[package]]
 name = "borsh"
-version = "1.3.1"
+version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f58b559fd6448c6e2fd0adb5720cd98a2506594cafa4737ff98c396f3e82f667"
+checksum = "dbe5b10e214954177fb1dc9fbd20a1a2608fe99e6c832033bdc7cea287a20d77"
 dependencies = [
  "borsh-derive",
  "cfg_aliases",
@@ -265,42 +255,42 @@ dependencies = [
 
 [[package]]
 name = "borsh-derive"
-version = "1.3.1"
+version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7aadb5b6ccbd078890f6d7003694e33816e6b784358f18e15e7e6d9f065a57cd"
+checksum = "d7a8646f94ab393e43e8b35a2558b1624bed28b97ee09c5d15456e3c9463f46d"
 dependencies = [
  "once_cell",
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
  "syn_derive",
 ]
 
 [[package]]
 name = "bumpalo"
-version = "3.15.3"
+version = "3.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ea184aa71bb362a1157c896979544cc23974e08fd265f29ea96b59f0b4a555b"
+checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
 
 [[package]]
 name = "bytemuck"
-version = "1.14.3"
+version = "1.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2ef034f05691a48569bd920a96c81b9d91bbad1ab5ac7c4616c1f6ef36cb79f"
+checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15"
 dependencies = [
  "bytemuck_derive",
 ]
 
 [[package]]
 name = "bytemuck_derive"
-version = "1.5.0"
+version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "965ab7eb5f8f97d2a083c799f3a1b994fc397b2fe2da5d1da1626ce15a39f2b1"
+checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
 ]
 
 [[package]]
@@ -311,39 +301,15 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
 
 [[package]]
 name = "bytes"
-version = "1.5.0"
+version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223"
-
-[[package]]
-name = "bzip2"
-version = "0.4.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8"
-dependencies = [
- "bzip2-sys",
- "libc",
-]
-
-[[package]]
-name = "bzip2-sys"
-version = "0.1.11+1.0.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc"
-dependencies = [
- "cc",
- "libc",
- "pkg-config",
-]
+checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9"
 
 [[package]]
 name = "cc"
-version = "1.0.88"
+version = "1.0.96"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "02f341c093d19155a6e41631ce5971aac4e9a868262212153124c15fa22d1cdc"
-dependencies = [
- "libc",
-]
+checksum = "065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd"
 
 [[package]]
 name = "cfg-if"
@@ -359,31 +325,21 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
 
 [[package]]
 name = "chrono"
-version = "0.4.34"
+version = "0.4.38"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b"
+checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401"
 dependencies = [
  "android-tzdata",
  "iana-time-zone",
  "num-traits",
- "windows-targets 0.52.4",
-]
-
-[[package]]
-name = "cipher"
-version = "0.4.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad"
-dependencies = [
- "crypto-common",
- "inout",
+ "windows-targets 0.52.5",
 ]
 
 [[package]]
 name = "clap"
-version = "4.5.1"
+version = "4.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c918d541ef2913577a0f9566e9ce27cb35b6df072075769e0b26cb5a554520da"
+checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0"
 dependencies = [
  "clap_builder",
  "clap_derive",
@@ -391,9 +347,9 @@ dependencies = [
 
 [[package]]
 name = "clap_builder"
-version = "4.5.1"
+version = "4.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f3e7391dad68afb0c2ede1bf619f579a3dc9c2ec67f089baa397123a2f3d1eb"
+checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4"
 dependencies = [
  "anstream",
  "anstyle",
@@ -403,14 +359,14 @@ dependencies = [
 
 [[package]]
 name = "clap_derive"
-version = "4.5.0"
+version = "4.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47"
+checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64"
 dependencies = [
- "heck",
+ "heck 0.5.0",
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
 ]
 
 [[package]]
@@ -427,19 +383,13 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
 
 [[package]]
 name = "concurrent-queue"
-version = "2.4.0"
+version = "2.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363"
+checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973"
 dependencies = [
  "crossbeam-utils",
 ]
 
-[[package]]
-name = "constant_time_eq"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc"
-
 [[package]]
 name = "core-foundation"
 version = "0.9.4"
@@ -465,15 +415,6 @@ dependencies = [
  "libc",
 ]
 
-[[package]]
-name = "crc32fast"
-version = "1.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa"
-dependencies = [
- "cfg-if",
-]
-
 [[package]]
 name = "crossbeam"
 version = "0.8.4"
@@ -587,7 +528,7 @@ dependencies = [
  "rayon",
  "serde",
  "serde_json",
- "syn 2.0.52",
+ "syn 2.0.60",
  "thiserror",
  "thread_local",
  "tokio",
@@ -601,20 +542,30 @@ dependencies = [
 name = "cuprate-database"
 version = "0.0.0"
 dependencies = [
+ "bitflags 2.5.0",
  "bytemuck",
  "bytes",
  "cfg-if",
  "crossbeam",
  "cuprate-helper",
+ "cuprate-test-utils",
+ "cuprate-types",
+ "curve25519-dalek",
  "futures",
  "heed",
+ "hex",
+ "hex-literal",
+ "monero-pruning",
+ "monero-serai",
  "page_size",
  "paste",
+ "pretty_assertions",
  "rayon",
  "redb",
  "serde",
  "tempfile",
  "thiserror",
+ "thread_local",
  "tokio",
  "tokio-util",
  "tower",
@@ -629,11 +580,41 @@ dependencies = [
  "dirs",
  "futures",
  "libc",
+ "monero-serai",
  "rayon",
  "tokio",
  "windows",
 ]
 
+[[package]]
+name = "cuprate-p2p"
+version = "0.1.0"
+dependencies = [
+ "bytes",
+ "cuprate-helper",
+ "cuprate-test-utils",
+ "dashmap",
+ "fixed-bytes",
+ "futures",
+ "hex",
+ "indexmap 2.2.6",
+ "monero-address-book",
+ "monero-p2p",
+ "monero-pruning",
+ "monero-serai",
+ "monero-wire",
+ "pin-project",
+ "rand",
+ "rand_distr",
+ "rayon",
+ "thiserror",
+ "tokio",
+ "tokio-stream",
+ "tokio-util",
+ "tower",
+ "tracing",
+]
+
 [[package]]
 name = "cuprate-test-utils"
 version = "0.1.0"
@@ -641,18 +622,20 @@ dependencies = [
  "async-trait",
  "borsh",
  "bytes",
- "bzip2",
+ "cuprate-helper",
+ "cuprate-types",
  "futures",
  "hex",
+ "hex-literal",
  "monero-p2p",
  "monero-serai",
  "monero-wire",
- "reqwest",
- "tar",
+ "pretty_assertions",
+ "serde",
+ "serde_json",
  "tempfile",
  "tokio",
  "tokio-util",
- "zip",
 ]
 
 [[package]]
@@ -693,7 +676,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
 ]
 
 [[package]]
@@ -713,14 +696,39 @@ dependencies = [
 ]
 
 [[package]]
-name = "deranged"
-version = "0.3.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4"
+name = "dandelion_tower"
+version = "0.1.0"
 dependencies = [
- "powerfmt",
+ "futures",
+ "proptest",
+ "rand",
+ "rand_distr",
+ "thiserror",
+ "tokio",
+ "tokio-util",
+ "tower",
+ "tracing",
 ]
 
+[[package]]
+name = "dashmap"
+version = "5.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856"
+dependencies = [
+ "cfg-if",
+ "hashbrown 0.14.5",
+ "lock_api",
+ "once_cell",
+ "parking_lot_core",
+]
+
+[[package]]
+name = "diff"
+version = "0.1.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
+
 [[package]]
 name = "digest"
 version = "0.10.7"
@@ -783,27 +791,18 @@ dependencies = [
 
 [[package]]
 name = "doxygen-rs"
-version = "0.2.2"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bff670ea0c9bbb8414e7efa6e23ebde2b8f520a7eef78273a3918cf1903e7505"
+checksum = "415b6ec780d34dcf624666747194393603d0373b7141eef01d12ee58881507d9"
 dependencies = [
  "phf",
 ]
 
 [[package]]
 name = "either"
-version = "1.10.0"
+version = "1.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a"
-
-[[package]]
-name = "encoding_rs"
-version = "0.8.33"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1"
-dependencies = [
- "cfg-if",
-]
+checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2"
 
 [[package]]
 name = "epee-encoding"
@@ -857,9 +856,9 @@ dependencies = [
 
 [[package]]
 name = "fastrand"
-version = "2.0.1"
+version = "2.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5"
+checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a"
 
 [[package]]
 name = "ff"
@@ -874,21 +873,9 @@ dependencies = [
 
 [[package]]
 name = "fiat-crypto"
-version = "0.2.6"
+version = "0.2.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382"
-
-[[package]]
-name = "filetime"
-version = "0.2.23"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd"
-dependencies = [
- "cfg-if",
- "libc",
- "redox_syscall",
- "windows-sys 0.52.0",
-]
+checksum = "38793c55593b33412e3ae40c2c9781ffaa6f438f6f8c10f24e71846fbd7ae01e"
 
 [[package]]
 name = "fixed-bytes"
@@ -898,16 +885,6 @@ dependencies = [
  "thiserror",
 ]
 
-[[package]]
-name = "flate2"
-version = "1.0.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e"
-dependencies = [
- "crc32fast",
- "miniz_oxide",
-]
-
 [[package]]
 name = "flexible-transcript"
 version = "0.3.2"
@@ -927,21 +904,6 @@ version = "1.0.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
 
-[[package]]
-name = "foreign-types"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
-dependencies = [
- "foreign-types-shared",
-]
-
-[[package]]
-name = "foreign-types-shared"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
-
 [[package]]
 name = "form_urlencoded"
 version = "1.2.1"
@@ -1001,7 +963,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
 ]
 
 [[package]]
@@ -1046,9 +1008,9 @@ dependencies = [
 
 [[package]]
 name = "getrandom"
-version = "0.2.12"
+version = "0.2.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5"
+checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c"
 dependencies = [
  "cfg-if",
  "libc",
@@ -1072,25 +1034,6 @@ dependencies = [
  "subtle",
 ]
 
-[[package]]
-name = "h2"
-version = "0.3.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8"
-dependencies = [
- "bytes",
- "fnv",
- "futures-core",
- "futures-sink",
- "futures-util",
- "http 0.2.11",
- "indexmap 2.2.5",
- "slab",
- "tokio",
- "tokio-util",
- "tracing",
-]
-
 [[package]]
 name = "hashbrown"
 version = "0.12.3"
@@ -1099,9 +1042,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
 
 [[package]]
 name = "hashbrown"
-version = "0.14.3"
+version = "0.14.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"
+checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
 dependencies = [
  "ahash",
  "allocator-api2",
@@ -1124,13 +1067,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
 
 [[package]]
-name = "heed"
-version = "0.20.0-alpha.9"
+name = "heck"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9648a50991c86df7d00c56c268c27754fcf4c80be2ba57fc4a00dc928c6fe934"
+checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
+
+[[package]]
+name = "heed"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7a300b0deeb2957162d7752b0f063b3be1c88333af5bb4e7a57d8fb3716f50b"
 dependencies = [
- "bitflags 2.4.2",
- "bytemuck",
+ "bitflags 2.5.0",
  "byteorder",
  "heed-traits",
  "heed-types",
@@ -1145,15 +1093,15 @@ dependencies = [
 
 [[package]]
 name = "heed-traits"
-version = "0.20.0-alpha.9"
+version = "0.20.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ab0b7d9cde969ad36dde692e487dc89d97f7168bf6a7bd3b894ad4bf7278298"
+checksum = "eb3130048d404c57ce5a1ac61a903696e8fcde7e8c2991e9fcfc1f27c3ef74ff"
 
 [[package]]
 name = "heed-types"
-version = "0.20.0-alpha.9"
+version = "0.20.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0cb3567a7363f28b597bf6e9897b9466397951dd0e52df2c8196dd8a71af44a"
+checksum = "3cb0d6ba3700c9a57e83c013693e3eddb68a6d9b6781cacafc62a0d992e8ddb3"
 dependencies = [
  "bincode",
  "byteorder",
@@ -1191,37 +1139,15 @@ dependencies = [
 
 [[package]]
 name = "http"
-version = "0.2.11"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb"
+checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258"
 dependencies = [
  "bytes",
  "fnv",
  "itoa",
 ]
 
-[[package]]
-name = "http"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea"
-dependencies = [
- "bytes",
- "fnv",
- "itoa",
-]
-
-[[package]]
-name = "http-body"
-version = "0.4.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
-dependencies = [
- "bytes",
- "http 0.2.11",
- "pin-project-lite",
-]
-
 [[package]]
 name = "http-body"
 version = "1.0.0"
@@ -1229,19 +1155,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643"
 dependencies = [
  "bytes",
- "http 1.0.0",
+ "http",
 ]
 
 [[package]]
 name = "http-body-util"
-version = "0.1.0"
+version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840"
+checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d"
 dependencies = [
  "bytes",
- "futures-util",
- "http 1.0.0",
- "http-body 1.0.0",
+ "futures-core",
+ "http",
+ "http-body",
  "pin-project-lite",
 ]
 
@@ -1251,47 +1177,17 @@ version = "1.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904"
 
-[[package]]
-name = "httpdate"
-version = "1.0.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
-
 [[package]]
 name = "hyper"
-version = "0.14.28"
+version = "1.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80"
-dependencies = [
- "bytes",
- "futures-channel",
- "futures-core",
- "futures-util",
- "h2",
- "http 0.2.11",
- "http-body 0.4.6",
- "httparse",
- "httpdate",
- "itoa",
- "pin-project-lite",
- "socket2",
- "tokio",
- "tower-service",
- "tracing",
- "want",
-]
-
-[[package]]
-name = "hyper"
-version = "1.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a"
+checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d"
 dependencies = [
  "bytes",
  "futures-channel",
  "futures-util",
- "http 1.0.0",
- "http-body 1.0.0",
+ "http",
+ "http-body",
  "httparse",
  "itoa",
  "pin-project-lite",
@@ -1307,8 +1203,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c"
 dependencies = [
  "futures-util",
- "http 1.0.0",
- "hyper 1.2.0",
+ "http",
+ "hyper",
  "hyper-util",
  "rustls",
  "rustls-native-certs",
@@ -1318,19 +1214,6 @@ dependencies = [
  "tower-service",
 ]
 
-[[package]]
-name = "hyper-tls"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
-dependencies = [
- "bytes",
- "hyper 0.14.28",
- "native-tls",
- "tokio",
- "tokio-native-tls",
-]
-
 [[package]]
 name = "hyper-util"
 version = "0.1.3"
@@ -1340,9 +1223,9 @@ dependencies = [
  "bytes",
  "futures-channel",
  "futures-util",
- "http 1.0.0",
- "http-body 1.0.0",
- "hyper 1.2.0",
+ "http",
+ "http-body",
+ "hyper",
  "pin-project-lite",
  "socket2",
  "tokio",
@@ -1396,40 +1279,25 @@ dependencies = [
 
 [[package]]
 name = "indexmap"
-version = "2.2.5"
+version = "2.2.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4"
+checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26"
 dependencies = [
  "equivalent",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.5",
 ]
 
-[[package]]
-name = "inout"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5"
-dependencies = [
- "generic-array",
-]
-
-[[package]]
-name = "ipnet"
-version = "2.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3"
-
 [[package]]
 name = "itoa"
-version = "1.0.10"
+version = "1.0.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
+checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
 
 [[package]]
 name = "js-sys"
-version = "0.3.68"
+version = "0.3.69"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee"
+checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d"
 dependencies = [
  "wasm-bindgen",
 ]
@@ -1453,7 +1321,7 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
 name = "levin-cuprate"
 version = "0.1.0"
 dependencies = [
- "bitflags 2.4.2",
+ "bitflags 2.5.0",
  "bytes",
  "futures",
  "proptest",
@@ -1466,9 +1334,9 @@ dependencies = [
 
 [[package]]
 name = "libc"
-version = "0.2.153"
+version = "0.2.154"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
+checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346"
 
 [[package]]
 name = "libm"
@@ -1478,13 +1346,12 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"
 
 [[package]]
 name = "libredox"
-version = "0.0.1"
+version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8"
+checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
 dependencies = [
- "bitflags 2.4.2",
+ "bitflags 2.5.0",
  "libc",
- "redox_syscall",
 ]
 
 [[package]]
@@ -1495,21 +1362,20 @@ checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c"
 
 [[package]]
 name = "lmdb-master-sys"
-version = "0.1.0"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "629c123f5321b48fa4f8f4d3b868165b748d9ba79c7103fb58e3a94f736bcedd"
+checksum = "dc9048db3a58c0732d7236abc4909058f9d2708cfb6d7d047eb895fddec6419a"
 dependencies = [
  "cc",
  "doxygen-rs",
  "libc",
- "pkg-config",
 ]
 
 [[package]]
 name = "lock_api"
-version = "0.4.11"
+version = "0.4.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45"
+checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
 dependencies = [
  "autocfg",
  "scopeguard",
@@ -1533,9 +1399,9 @@ dependencies = [
 
 [[package]]
 name = "memchr"
-version = "2.7.1"
+version = "2.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149"
+checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d"
 
 [[package]]
 name = "merlin"
@@ -1549,12 +1415,6 @@ dependencies = [
  "zeroize",
 ]
 
-[[package]]
-name = "mime"
-version = "0.3.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
-
 [[package]]
 name = "miniz_oxide"
 version = "0.7.2"
@@ -1582,7 +1442,7 @@ dependencies = [
  "borsh",
  "cuprate-test-utils",
  "futures",
- "indexmap 2.2.5",
+ "indexmap 2.2.6",
  "monero-p2p",
  "monero-pruning",
  "monero-wire",
@@ -1676,7 +1536,7 @@ dependencies = [
  "hex-literal",
  "monero-generators",
  "multiexp",
- "pbkdf2 0.12.2",
+ "pbkdf2",
  "rand",
  "rand_chacha",
  "rand_core",
@@ -1696,6 +1556,7 @@ dependencies = [
 name = "monero-wire"
 version = "0.1.0"
 dependencies = [
+ "bitflags 2.5.0",
  "bytes",
  "epee-encoding",
  "fixed-bytes",
@@ -1717,24 +1578,6 @@ dependencies = [
  "zeroize",
 ]
 
-[[package]]
-name = "native-tls"
-version = "0.2.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e"
-dependencies = [
- "lazy_static",
- "libc",
- "log",
- "openssl",
- "openssl-probe",
- "openssl-sys",
- "schannel",
- "security-framework",
- "security-framework-sys",
- "tempfile",
-]
-
 [[package]]
 name = "nu-ansi-term"
 version = "0.46.0"
@@ -1745,12 +1588,6 @@ dependencies = [
  "winapi",
 ]
 
-[[package]]
-name = "num-conv"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
-
 [[package]]
 name = "num-traits"
 version = "0.2.18"
@@ -1786,50 +1623,12 @@ version = "1.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
 
-[[package]]
-name = "openssl"
-version = "0.10.64"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f"
-dependencies = [
- "bitflags 2.4.2",
- "cfg-if",
- "foreign-types",
- "libc",
- "once_cell",
- "openssl-macros",
- "openssl-sys",
-]
-
-[[package]]
-name = "openssl-macros"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.52",
-]
-
 [[package]]
 name = "openssl-probe"
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
 
-[[package]]
-name = "openssl-sys"
-version = "0.9.101"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dda2b0f344e78efc2facf7d195d098df0dd72151b26ab98da807afc26c198dff"
-dependencies = [
- "cc",
- "libc",
- "pkg-config",
- "vcpkg",
-]
-
 [[package]]
 name = "option-ext"
 version = "0.2.0"
@@ -1860,9 +1659,9 @@ checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae"
 
 [[package]]
 name = "parking_lot"
-version = "0.12.1"
+version = "0.12.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
+checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb"
 dependencies = [
  "lock_api",
  "parking_lot_core",
@@ -1870,26 +1669,15 @@ dependencies = [
 
 [[package]]
 name = "parking_lot_core"
-version = "0.9.9"
+version = "0.9.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e"
+checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
 dependencies = [
  "cfg-if",
  "libc",
  "redox_syscall",
  "smallvec",
- "windows-targets 0.48.5",
-]
-
-[[package]]
-name = "password-hash"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700"
-dependencies = [
- "base64ct",
- "rand_core",
- "subtle",
+ "windows-targets 0.52.5",
 ]
 
 [[package]]
@@ -1909,18 +1697,6 @@ version = "1.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c"
 
-[[package]]
-name = "pbkdf2"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917"
-dependencies = [
- "digest",
- "hmac",
- "password-hash 0.4.2",
- "sha2",
-]
-
 [[package]]
 name = "pbkdf2"
 version = "0.12.2"
@@ -1929,7 +1705,7 @@ checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2"
 dependencies = [
  "digest",
  "hmac",
- "password-hash 0.5.0",
+ "password-hash",
  "sha2",
 ]
 
@@ -1969,7 +1745,7 @@ dependencies = [
  "phf_shared",
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
 ]
 
 [[package]]
@@ -1983,29 +1759,29 @@ dependencies = [
 
 [[package]]
 name = "pin-project"
-version = "1.1.4"
+version = "1.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0"
+checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3"
 dependencies = [
  "pin-project-internal",
 ]
 
 [[package]]
 name = "pin-project-internal"
-version = "1.1.4"
+version = "1.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690"
+checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
 ]
 
 [[package]]
 name = "pin-project-lite"
-version = "0.2.13"
+version = "0.2.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
+checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
 
 [[package]]
 name = "pin-utils"
@@ -2013,23 +1789,11 @@ version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
 
-[[package]]
-name = "pkg-config"
-version = "0.3.30"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec"
-
 [[package]]
 name = "platforms"
-version = "3.3.0"
+version = "3.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c"
-
-[[package]]
-name = "powerfmt"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
+checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7"
 
 [[package]]
 name = "ppv-lite86"
@@ -2037,6 +1801,16 @@ version = "0.2.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
 
+[[package]]
+name = "pretty_assertions"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66"
+dependencies = [
+ "diff",
+ "yansi",
+]
+
 [[package]]
 name = "proc-macro-crate"
 version = "3.1.0"
@@ -2071,9 +1845,9 @@ dependencies = [
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.78"
+version = "1.0.81"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae"
+checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba"
 dependencies = [
  "unicode-ident",
 ]
@@ -2086,7 +1860,7 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf"
 dependencies = [
  "bit-set",
  "bit-vec",
- "bitflags 2.4.2",
+ "bitflags 2.5.0",
  "lazy_static",
  "num-traits",
  "rand",
@@ -2117,9 +1891,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
 
 [[package]]
 name = "quote"
-version = "1.0.35"
+version = "1.0.36"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
+checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
 dependencies = [
  "proc-macro2",
 ]
@@ -2191,9 +1965,9 @@ dependencies = [
 
 [[package]]
 name = "rayon"
-version = "1.9.0"
+version = "1.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd"
+checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
 dependencies = [
  "either",
  "rayon-core",
@@ -2211,27 +1985,27 @@ dependencies = [
 
 [[package]]
 name = "redb"
-version = "2.0.0"
+version = "2.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1100a056c5dcdd4e5513d5333385223b26ef1bf92f31eb38f407e8c20549256"
+checksum = "ed7508e692a49b6b2290b56540384ccae9b1fb4d77065640b165835b56ffe3bb"
 dependencies = [
  "libc",
 ]
 
 [[package]]
 name = "redox_syscall"
-version = "0.4.1"
+version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa"
+checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e"
 dependencies = [
- "bitflags 1.3.2",
+ "bitflags 2.5.0",
 ]
 
 [[package]]
 name = "redox_users"
-version = "0.4.4"
+version = "0.4.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4"
+checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891"
 dependencies = [
  "getrandom",
  "libredox",
@@ -2255,54 +2029,14 @@ checksum = "5fddb4f8d99b0a2ebafc65a87a69a7b9875e4b1ae1f00db265d300ef7f28bccc"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
 ]
 
 [[package]]
 name = "regex-syntax"
-version = "0.8.2"
+version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
-
-[[package]]
-name = "reqwest"
-version = "0.11.24"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251"
-dependencies = [
- "base64",
- "bytes",
- "encoding_rs",
- "futures-core",
- "futures-util",
- "h2",
- "http 0.2.11",
- "http-body 0.4.6",
- "hyper 0.14.28",
- "hyper-tls",
- "ipnet",
- "js-sys",
- "log",
- "mime",
- "native-tls",
- "once_cell",
- "percent-encoding",
- "pin-project-lite",
- "rustls-pemfile 1.0.4",
- "serde",
- "serde_json",
- "serde_urlencoded",
- "sync_wrapper",
- "system-configuration",
- "tokio",
- "tokio-native-tls",
- "tower-service",
- "url",
- "wasm-bindgen",
- "wasm-bindgen-futures",
- "web-sys",
- "winreg",
-]
+checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56"
 
 [[package]]
 name = "ring"
@@ -2336,11 +2070,11 @@ dependencies = [
 
 [[package]]
 name = "rustix"
-version = "0.38.31"
+version = "0.38.34"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949"
+checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f"
 dependencies = [
- "bitflags 2.4.2",
+ "bitflags 2.5.0",
  "errno",
  "libc",
  "linux-raw-sys",
@@ -2349,9 +2083,9 @@ dependencies = [
 
 [[package]]
 name = "rustls"
-version = "0.22.2"
+version = "0.22.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41"
+checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432"
 dependencies = [
  "ring",
  "rustls-pki-types",
@@ -2367,7 +2101,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792"
 dependencies = [
  "openssl-probe",
- "rustls-pemfile 2.1.0",
+ "rustls-pemfile",
  "rustls-pki-types",
  "schannel",
  "security-framework",
@@ -2375,18 +2109,9 @@ dependencies = [
 
 [[package]]
 name = "rustls-pemfile"
-version = "1.0.4"
+version = "2.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c"
-dependencies = [
- "base64",
-]
-
-[[package]]
-name = "rustls-pemfile"
-version = "2.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c333bb734fcdedcea57de1602543590f545f127dc8b533324318fd492c5c70b"
+checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d"
 dependencies = [
  "base64",
  "rustls-pki-types",
@@ -2394,15 +2119,15 @@ dependencies = [
 
 [[package]]
 name = "rustls-pki-types"
-version = "1.3.1"
+version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8"
+checksum = "beb461507cee2c2ff151784c52762cf4d9ff6a61f3e80968600ed24fa837fa54"
 
 [[package]]
 name = "rustls-webpki"
-version = "0.102.2"
+version = "0.102.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610"
+checksum = "f3bce581c0dd41bce533ce695a1437fa16a7ab5ac3ccfa99fe1a620a7885eabf"
 dependencies = [
  "ring",
  "rustls-pki-types",
@@ -2411,9 +2136,9 @@ dependencies = [
 
 [[package]]
 name = "rustversion"
-version = "1.0.14"
+version = "1.0.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4"
+checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47"
 
 [[package]]
 name = "rusty-fork"
@@ -2454,17 +2179,17 @@ version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f4a8caec23b7800fb97971a1c6ae365b6239aaeddfb934d6265f8505e795699d"
 dependencies = [
- "heck",
+ "heck 0.4.1",
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
 ]
 
 [[package]]
 name = "security-framework"
-version = "2.9.2"
+version = "2.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de"
+checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6"
 dependencies = [
  "bitflags 1.3.2",
  "core-foundation",
@@ -2475,9 +2200,9 @@ dependencies = [
 
 [[package]]
 name = "security-framework-sys"
-version = "2.9.1"
+version = "2.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a"
+checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef"
 dependencies = [
  "core-foundation-sys",
  "libc",
@@ -2491,58 +2216,35 @@ checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca"
 
 [[package]]
 name = "serde"
-version = "1.0.197"
+version = "1.0.199"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2"
+checksum = "0c9f6e76df036c77cd94996771fb40db98187f096dd0b9af39c6c6e452ba966a"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.197"
+version = "1.0.199"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
+checksum = "11bd257a6541e141e42ca6d24ae26f7714887b47e89aa739099104c7e4d3b7fc"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
 ]
 
 [[package]]
 name = "serde_json"
-version = "1.0.114"
+version = "1.0.116"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0"
+checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813"
 dependencies = [
  "itoa",
  "ryu",
  "serde",
 ]
 
-[[package]]
-name = "serde_urlencoded"
-version = "0.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"
-dependencies = [
- "form_urlencoded",
- "itoa",
- "ryu",
- "serde",
-]
-
-[[package]]
-name = "sha1"
-version = "0.10.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba"
-dependencies = [
- "cfg-if",
- "cpufeatures",
- "digest",
-]
-
 [[package]]
 name = "sha2"
 version = "0.10.8"
@@ -2575,9 +2277,9 @@ dependencies = [
 
 [[package]]
 name = "signal-hook-registry"
-version = "1.4.1"
+version = "1.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1"
+checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1"
 dependencies = [
  "libc",
 ]
@@ -2588,7 +2290,7 @@ version = "0.1.0"
 source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
 dependencies = [
  "http-body-util",
- "hyper 1.2.0",
+ "hyper",
  "hyper-rustls",
  "hyper-util",
  "tokio",
@@ -2612,15 +2314,15 @@ dependencies = [
 
 [[package]]
 name = "smallvec"
-version = "1.13.1"
+version = "1.13.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7"
+checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
 
 [[package]]
 name = "socket2"
-version = "0.5.6"
+version = "0.5.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871"
+checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c"
 dependencies = [
  "libc",
  "windows-sys 0.52.0",
@@ -2637,15 +2339,15 @@ name = "std-shims"
 version = "0.1.1"
 source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
 dependencies = [
- "hashbrown 0.14.3",
+ "hashbrown 0.14.5",
  "spin",
 ]
 
 [[package]]
 name = "strsim"
-version = "0.11.0"
+version = "0.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01"
+checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
 
 [[package]]
 name = "subtle"
@@ -2666,9 +2368,9 @@ dependencies = [
 
 [[package]]
 name = "syn"
-version = "2.0.52"
+version = "2.0.60"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07"
+checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -2684,15 +2386,9 @@ dependencies = [
  "proc-macro-error",
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
 ]
 
-[[package]]
-name = "sync_wrapper"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
-
 [[package]]
 name = "synchronoise"
 version = "1.0.1"
@@ -2702,44 +2398,12 @@ dependencies = [
  "crossbeam-queue",
 ]
 
-[[package]]
-name = "system-configuration"
-version = "0.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7"
-dependencies = [
- "bitflags 1.3.2",
- "core-foundation",
- "system-configuration-sys",
-]
-
-[[package]]
-name = "system-configuration-sys"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9"
-dependencies = [
- "core-foundation-sys",
- "libc",
-]
-
 [[package]]
 name = "tap"
 version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
 
-[[package]]
-name = "tar"
-version = "0.4.40"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b16afcea1f22891c49a00c751c7b63b2233284064f11a200fc624137c51e2ddb"
-dependencies = [
- "filetime",
- "libc",
- "xattr",
-]
-
 [[package]]
 name = "tempfile"
 version = "3.10.1"
@@ -2754,22 +2418,22 @@ dependencies = [
 
 [[package]]
 name = "thiserror"
-version = "1.0.57"
+version = "1.0.59"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b"
+checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa"
 dependencies = [
  "thiserror-impl",
 ]
 
 [[package]]
 name = "thiserror-impl"
-version = "1.0.57"
+version = "1.0.59"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81"
+checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
 ]
 
 [[package]]
@@ -2782,25 +2446,6 @@ dependencies = [
  "once_cell",
 ]
 
-[[package]]
-name = "time"
-version = "0.3.34"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749"
-dependencies = [
- "deranged",
- "num-conv",
- "powerfmt",
- "serde",
- "time-core",
-]
-
-[[package]]
-name = "time-core"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
-
 [[package]]
 name = "tiny-keccak"
 version = "2.0.2"
@@ -2827,9 +2472,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
 
 [[package]]
 name = "tokio"
-version = "1.36.0"
+version = "1.37.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931"
+checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787"
 dependencies = [
  "backtrace",
  "bytes",
@@ -2852,17 +2497,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.52",
-]
-
-[[package]]
-name = "tokio-native-tls"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2"
-dependencies = [
- "native-tls",
- "tokio",
+ "syn 2.0.60",
 ]
 
 [[package]]
@@ -2878,9 +2513,9 @@ dependencies = [
 
 [[package]]
 name = "tokio-stream"
-version = "0.1.14"
+version = "0.1.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842"
+checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af"
 dependencies = [
  "futures-core",
  "pin-project-lite",
@@ -2899,7 +2534,7 @@ dependencies = [
  "futures-io",
  "futures-sink",
  "futures-util",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.5",
  "pin-project-lite",
  "slab",
  "tokio",
@@ -2918,7 +2553,7 @@ version = "0.21.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1"
 dependencies = [
- "indexmap 2.2.5",
+ "indexmap 2.2.6",
  "toml_datetime",
  "winnow",
 ]
@@ -2976,7 +2611,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
 ]
 
 [[package]]
@@ -3082,12 +2717,6 @@ version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"
 
-[[package]]
-name = "vcpkg"
-version = "0.2.15"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
-
 [[package]]
 name = "version_check"
 version = "0.9.4"
@@ -3120,9 +2749,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
 
 [[package]]
 name = "wasm-bindgen"
-version = "0.2.91"
+version = "0.2.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f"
+checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8"
 dependencies = [
  "cfg-if",
  "wasm-bindgen-macro",
@@ -3130,36 +2759,24 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-backend"
-version = "0.2.91"
+version = "0.2.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b"
+checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da"
 dependencies = [
  "bumpalo",
  "log",
  "once_cell",
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
  "wasm-bindgen-shared",
 ]
 
-[[package]]
-name = "wasm-bindgen-futures"
-version = "0.4.41"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97"
-dependencies = [
- "cfg-if",
- "js-sys",
- "wasm-bindgen",
- "web-sys",
-]
-
 [[package]]
 name = "wasm-bindgen-macro"
-version = "0.2.91"
+version = "0.2.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed"
+checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726"
 dependencies = [
  "quote",
  "wasm-bindgen-macro-support",
@@ -3167,32 +2784,22 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-macro-support"
-version = "0.2.91"
+version = "0.2.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66"
+checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
  "wasm-bindgen-backend",
  "wasm-bindgen-shared",
 ]
 
 [[package]]
 name = "wasm-bindgen-shared"
-version = "0.2.91"
+version = "0.2.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838"
-
-[[package]]
-name = "web-sys"
-version = "0.3.68"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446"
-dependencies = [
- "js-sys",
- "wasm-bindgen",
-]
+checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96"
 
 [[package]]
 name = "winapi"
@@ -3218,12 +2825,12 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
 
 [[package]]
 name = "windows"
-version = "0.54.0"
+version = "0.56.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9252e5725dbed82865af151df558e754e4a3c2c30818359eb17465f1346a1b49"
+checksum = "1de69df01bdf1ead2f4ac895dc77c9351aefff65b2f3db429a343f9cbf05e132"
 dependencies = [
- "windows-core 0.54.0",
- "windows-targets 0.52.4",
+ "windows-core 0.56.0",
+ "windows-targets 0.52.5",
 ]
 
 [[package]]
@@ -3232,26 +2839,50 @@ version = "0.52.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9"
 dependencies = [
- "windows-targets 0.52.4",
+ "windows-targets 0.52.5",
 ]
 
 [[package]]
 name = "windows-core"
-version = "0.54.0"
+version = "0.56.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12661b9c89351d684a50a8a643ce5f608e20243b9fb84687800163429f161d65"
+checksum = "4698e52ed2d08f8658ab0c39512a7c00ee5fe2688c65f8c0a4f06750d729f2a6"
 dependencies = [
+ "windows-implement",
+ "windows-interface",
  "windows-result",
- "windows-targets 0.52.4",
+ "windows-targets 0.52.5",
+]
+
+[[package]]
+name = "windows-implement"
+version = "0.56.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6fc35f58ecd95a9b71c4f2329b911016e6bec66b3f2e6a4aad86bd2e99e2f9b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.60",
+]
+
+[[package]]
+name = "windows-interface"
+version = "0.56.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08990546bf4edef8f431fa6326e032865f27138718c587dc21bc0265bbcb57cc"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.60",
 ]
 
 [[package]]
 name = "windows-result"
-version = "0.1.0"
+version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd19df78e5168dfb0aedc343d1d1b8d422ab2db6756d2dc3fef75035402a3f64"
+checksum = "749f0da9cc72d82e600d8d2e44cadd0b9eedb9038f71a1c58556ac1c5791813b"
 dependencies = [
- "windows-targets 0.52.4",
+ "windows-targets 0.52.5",
 ]
 
 [[package]]
@@ -3269,7 +2900,7 @@ version = "0.52.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
 dependencies = [
- "windows-targets 0.52.4",
+ "windows-targets 0.52.5",
 ]
 
 [[package]]
@@ -3289,17 +2920,18 @@ dependencies = [
 
 [[package]]
 name = "windows-targets"
-version = "0.52.4"
+version = "0.52.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b"
+checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb"
 dependencies = [
- "windows_aarch64_gnullvm 0.52.4",
- "windows_aarch64_msvc 0.52.4",
- "windows_i686_gnu 0.52.4",
- "windows_i686_msvc 0.52.4",
- "windows_x86_64_gnu 0.52.4",
- "windows_x86_64_gnullvm 0.52.4",
- "windows_x86_64_msvc 0.52.4",
+ "windows_aarch64_gnullvm 0.52.5",
+ "windows_aarch64_msvc 0.52.5",
+ "windows_i686_gnu 0.52.5",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc 0.52.5",
+ "windows_x86_64_gnu 0.52.5",
+ "windows_x86_64_gnullvm 0.52.5",
+ "windows_x86_64_msvc 0.52.5",
 ]
 
 [[package]]
@@ -3310,9 +2942,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
 
 [[package]]
 name = "windows_aarch64_gnullvm"
-version = "0.52.4"
+version = "0.52.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9"
+checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263"
 
 [[package]]
 name = "windows_aarch64_msvc"
@@ -3322,9 +2954,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
 
 [[package]]
 name = "windows_aarch64_msvc"
-version = "0.52.4"
+version = "0.52.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675"
+checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6"
 
 [[package]]
 name = "windows_i686_gnu"
@@ -3334,9 +2966,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
 
 [[package]]
 name = "windows_i686_gnu"
-version = "0.52.4"
+version = "0.52.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3"
+checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9"
 
 [[package]]
 name = "windows_i686_msvc"
@@ -3346,9 +2984,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
 
 [[package]]
 name = "windows_i686_msvc"
-version = "0.52.4"
+version = "0.52.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02"
+checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf"
 
 [[package]]
 name = "windows_x86_64_gnu"
@@ -3358,9 +2996,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
 
 [[package]]
 name = "windows_x86_64_gnu"
-version = "0.52.4"
+version = "0.52.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03"
+checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9"
 
 [[package]]
 name = "windows_x86_64_gnullvm"
@@ -3370,9 +3008,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
 
 [[package]]
 name = "windows_x86_64_gnullvm"
-version = "0.52.4"
+version = "0.52.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177"
+checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596"
 
 [[package]]
 name = "windows_x86_64_msvc"
@@ -3382,9 +3020,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
 
 [[package]]
 name = "windows_x86_64_msvc"
-version = "0.52.4"
+version = "0.52.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8"
+checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0"
 
 [[package]]
 name = "winnow"
@@ -3395,16 +3033,6 @@ dependencies = [
  "memchr",
 ]
 
-[[package]]
-name = "winreg"
-version = "0.50.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1"
-dependencies = [
- "cfg-if",
- "windows-sys 0.48.0",
-]
-
 [[package]]
 name = "wyz"
 version = "0.5.1"
@@ -3415,15 +3043,10 @@ dependencies = [
 ]
 
 [[package]]
-name = "xattr"
-version = "1.3.1"
+name = "yansi"
+version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8da84f1a25939b27f6820d92aed108f83ff920fdf11a7b19366c27c4cda81d4f"
-dependencies = [
- "libc",
- "linux-raw-sys",
- "rustix",
-]
+checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
 
 [[package]]
 name = "zerocopy"
@@ -3442,7 +3065,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.52",
+ "syn 2.0.60",
 ]
 
 [[package]]
@@ -3462,54 +3085,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.52",
-]
-
-[[package]]
-name = "zip"
-version = "0.6.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261"
-dependencies = [
- "aes",
- "byteorder",
- "bzip2",
- "constant_time_eq",
- "crc32fast",
- "crossbeam-utils",
- "flate2",
- "hmac",
- "pbkdf2 0.11.0",
- "sha1",
- "time",
- "zstd",
-]
-
-[[package]]
-name = "zstd"
-version = "0.11.2+zstd.1.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4"
-dependencies = [
- "zstd-safe",
-]
-
-[[package]]
-name = "zstd-safe"
-version = "5.0.2+zstd.1.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db"
-dependencies = [
- "libc",
- "zstd-sys",
-]
-
-[[package]]
-name = "zstd-sys"
-version = "2.0.9+zstd.1.5.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656"
-dependencies = [
- "cc",
- "pkg-config",
+ "syn 2.0.60",
 ]
diff --git a/Cargo.toml b/Cargo.toml
index bd4ce6fa..987d93d3 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -11,7 +11,8 @@ members = [
 	"net/fixed-bytes",
 	"net/levin",
 	"net/monero-wire",
-	"p2p/async-buffer",
+	"p2p/cuprate-p2p",
+	"p2p/dandelion",
 	"p2p/monero-p2p",
 	"p2p/address-book",
 	"pruning",
@@ -50,6 +51,7 @@ crypto-bigint         = { version = "0.5.5", default-features = false }
 crossbeam             = { version = "0.8.4", default-features = false }
 curve25519-dalek      = { version = "4.1.1", default-features = false }
 dalek-ff-group        = { git = "https://github.com/Cuprate/serai.git", rev = "347d4cf", default-features = false }
+dashmap               = { version = "5.5.3", default-features = false }
 dirs                  = { version = "5.0.1", default-features = false }
 futures               = { version = "0.3.29", default-features = false }
 hex                   = { version = "0.4.3", default-features = false }
@@ -61,6 +63,7 @@ paste                 = { version = "1.0.14", default-features = false }
 pin-project           = { version = "1.1.3", default-features = false }
 randomx-rs            = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "0028464", default-features = false }
 rand                  = { version = "0.8.5", default-features = false }
+rand_distr            = { version = "0.4.3", default-features = false }
 rayon                 = { version = "1.9.0", default-features = false }
 serde_bytes           = { version = "0.11.12", default-features = false }
 serde_json            = { version = "1.0.108", default-features = false }
@@ -76,7 +79,7 @@ tracing               = { version = "0.1.40", default-features = false }
 
 ## workspace.dev-dependencies
 tempfile              = { version = "3" }
-reqwest               = { version = "0.11.24" }
+pretty_assertions     = { version = "1.4.0" }
 proptest              = { version = "1" }
 proptest-derive       = { version = "0.4.0" }
 
diff --git a/cryptonight/build.rs b/cryptonight/build.rs
index 0950d94c..465236ec 100644
--- a/cryptonight/build.rs
+++ b/cryptonight/build.rs
@@ -32,7 +32,10 @@ fn main() {
         //    29 | extern int ftime (struct timeb *__timebuf)
         //       |            ^~~~~
         // This flag doesn't work on MSVC and breaks CI.
-        .flag_if_supported("-Wno-deprecated-declarations");
+        .flag_if_supported("-Wno-deprecated-declarations")
+        // `#include <boost>` isn't found without this in macOS CI.
+        // <https://github.com/Cuprate/cuprate/pull/116>
+        .flag_if_supported("-I/opt/homebrew/include");
 
     // Optimization flags are automatically added.
     // https://docs.rs/cc/latest/cc/struct.Build.html#method.opt_level
diff --git a/database/Cargo.toml b/database/Cargo.toml
index 33c3937c..712dbb16 100644
--- a/database/Cargo.toml
+++ b/database/Cargo.toml
@@ -9,41 +9,51 @@ repository  = "https://github.com/Cuprate/cuprate/tree/main/database"
 keywords    = ["cuprate", "database"]
 
 [features]
-# default   = ["heed", "redb", "service"]
+default   = ["heed", "redb", "service"]
 # default   = ["redb", "service"]
-default   = ["redb-memory", "service"]
+# default   = ["redb-memory", "service"]
 heed       = ["dep:heed"]
 redb       = ["dep:redb"]
 redb-memory = ["redb"]
 service     = ["dep:crossbeam", "dep:futures", "dep:tokio", "dep:tokio-util", "dep:tower", "dep:rayon"]
 
 [dependencies]
+bitflags = { workspace = true, features = ["serde", "bytemuck"] }
 bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] }
 bytes    = { workspace = true }
 cfg-if   = { workspace = true }
 # FIXME:
 # We only need the `thread` feature if `service` is enabled.
 # Figure out how to enable features of an already pulled in dependency conditionally.
-cuprate-helper = { path = "../helper", features = ["fs", "thread"] }
-paste          = { workspace = true }
-page_size      = { version = "0.6.0" } # Needed for database resizes, they must be a multiple of the OS page size.
-thiserror      = { workspace = true }
+cuprate-helper   = { path = "../helper", features = ["fs", "thread", "map"] }
+cuprate-types    = { path = "../types", features = ["service"] }
+curve25519-dalek = { workspace = true }
+monero-pruning   = { path = "../pruning" }
+monero-serai     = { workspace = true, features = ["std"] }
+paste            = { workspace = true }
+page_size        = { version = "0.6.0" } # Needed for database resizes, they must be a multiple of the OS page size.
+thiserror        = { workspace = true }
 
 # `service` feature.
-crossbeam  = { workspace = true, features = ["std"], optional = true }
-futures    = { workspace = true, optional = true }
-tokio      = { workspace = true, features = ["full"], optional = true }
-tokio-util = { workspace = true, features = ["full"], optional = true }
-tower      = { workspace = true, features = ["full"], optional = true }
-rayon      = { workspace = true, optional = true }
+crossbeam    = { workspace = true, features = ["std"], optional = true }
+futures      = { workspace = true, optional = true }
+tokio        = { workspace = true, features = ["full"], optional = true }
+tokio-util   = { workspace = true, features = ["full"], optional = true }
+tower        = { workspace = true, features = ["full"], optional = true }
+thread_local = { workspace = true }
+rayon        = { workspace = true, optional = true }
 
 # Optional features.
-heed  = { version = "0.20.0-alpha.9", optional = true }
-redb  = { version = "2.0.0", optional = true }
+heed  = { version = "0.20.0", features = ["read-txn-no-tls"], optional = true }
+redb  = { version = "2.1.0", optional = true }
 serde = { workspace = true, optional = true }
 
 [dev-dependencies]
 bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] }
 cuprate-helper = { path = "../helper", features = ["thread"] }
+cuprate-test-utils = { path = "../test-utils" }
 page_size = { version = "0.6.0" }
 tempfile = { version = "3.10.0" }
+pretty_assertions = { workspace = true }
+hex = { workspace = true }
+hex-literal = { workspace = true }
\ No newline at end of file
diff --git a/database/README.md b/database/README.md
index 74d07149..293413ac 100644
--- a/database/README.md
+++ b/database/README.md
@@ -1,33 +1,48 @@
 # Database
 Cuprate's database implementation.
 
-<!-- Did you know markdown automatically increments number lists, even if they are all 1...? -->
-1. [Documentation](#documentation)
-1. [File Structure](#file-structure)
-    - [`src/`](#src)
-    - [`src/ops`](#src-ops)
-    - [`src/service/`](#src-service)
-    - [`src/backend/`](#src-backend)
-1. [Backends](#backends)
-    - [`heed`](#heed)
-    - [`redb`](#redb)
-    - [`redb-memory`](#redb-memory)
-    - [`sanakirja`](#sanakirja)
-    - [`MDBX`](#mdbx)
-1. [Layers](#layers)
-    - [Database](#database)
-    - [Trait](#trait)
-    - [ConcreteEnv](#concreteenv)
-    - [Thread-pool](#thread-pool)
-    - [Service](#service)
-1. [Resizing](#resizing)
-1. [Flushing](#flushing)
-1. [(De)serialization](#deserialization)
+- [1. Documentation](#1-documentation)
+- [2. File structure](#2-file-structure)
+    - [2.1 `src/`](#21-src)
+    - [2.2 `src/backend/`](#22-srcbackend)
+    - [2.3 `src/config/`](#23-srcconfig)
+    - [2.4 `src/ops/`](#24-srcops)
+    - [2.5 `src/service/`](#25-srcservice)
+- [3. Backends](#3-backends)
+    - [3.1 heed](#31-heed)
+    - [3.2 redb](#32-redb)
+    - [3.3 redb-memory](#33-redb-memory)
+    - [3.4 sanakirja](#34-sanakirja)
+    - [3.5 MDBX](#35-mdbx)
+- [4. Layers](#4-layers)
+    - [4.1 Backend](#41-backend)
+    - [4.2 Trait](#42-trait)
+    - [4.3 ConcreteEnv](#43-concreteenv)
+    - [4.4 ops](#44-ops)
+    - [4.5 service](#45-service)
+- [5. The service](#5-the-service)
+    - [5.1 Initialization](#51-initialization)
+    - [5.2 Requests](#52-requests)
+    - [5.3 Responses](#53-responses)
+    - [5.4 Thread model](#54-thread-model)
+    - [5.5 Shutdown](#55-shutdown)
+- [6. Syncing](#6-syncing)
+- [7. Resizing](#7-resizing)
+- [8. (De)serialization](#8-deserialization)
+- [9. Schema](#9-schema)
+    - [9.1 Tables](#91-tables)
+    - [9.2 Multimap tables](#92-multimap-tables)
+- [10. Known issues and tradeoffs](#10-known-issues-and-tradeoffs)
+    - [10.1 Traits abstracting backends](#101-traits-abstracting-backends)
+    - [10.2 Hot-swappable backends](#102-hot-swappable-backends)
+    - [10.3 Copying unaligned bytes](#103-copying-unaligned-bytes)
+    - [10.4 Endianness](#104-endianness)
+    - [10.5 Extra table data](#105-extra-table-data)
 
 ---
 
-# Documentation
-In general, documentation for `database/` is split into 3:
+## 1. Documentation
+Documentation for `database/` is split into 3 locations:
 
 | Documentation location    | Purpose |
 |---------------------------|---------|
@@ -35,7 +50,7 @@ In general, documentation for `database/` is split into 3:
 | `cuprate-database`        | Practical usage documentation/warnings/notes/etc
 | Source file `// comments` | Implementation-specific details (e.g, how many reader threads to spawn?)
 
-This README serves as the overview/design document.
+This README serves as the implementation design document.
 
 For actual practical usage, `cuprate-database`'s types and general usage are documented via standard Rust tooling.
 
@@ -59,66 +74,41 @@ The code within `src/` is also littered with some `grep`-able comments containin
 | `TODO`      | This must be implemented; There should be 0 of these in production code
 | `SOMEDAY`   | This should be implemented... someday
 
-# File Structure
+## 2. File structure
 A quick reference of the structure of the folders & files in `cuprate-database`.
 
 Note that `lib.rs/mod.rs` files are purely for re-exporting/visibility/lints, and contain no code. Each sub-directory has a corresponding `mod.rs`.
 
-## `src/`
+### 2.1 `src/`
 The top-level `src/` files.
 
-| File                | Purpose |
-|---------------------|---------|
-| `config.rs`         | Database `Env` configuration
-| `constants.rs`      | General constants used throughout `cuprate-database`
-| `database.rs`       | Abstracted database; `trait DatabaseR{o,w}`
-| `env.rs`            | Abstracted database environment; `trait Env`
-| `error.rs`          | Database error types
-| `free.rs`           | General free functions (related to the database)
-| `key.rs`            | Abstracted database keys; `trait Key`
-| `resize.rs`         | Database resizing algorithms
-| `storable.rs`       | Data (de)serialization; `trait Storable`
-| `table.rs`          | Database table abstraction; `trait Table`
-| `tables.rs`         | All the table definitions used by `cuprate-database`
-| `transaction.rs`    | Database transaction abstraction; `trait TxR{o,w}`
-| `types.rs`          | Database table schema types
+| File                   | Purpose |
+|------------------------|---------|
+| `constants.rs`         | General constants used throughout `cuprate-database`
+| `database.rs`          | Abstracted database; `trait DatabaseR{o,w}`
+| `env.rs`               | Abstracted database environment; `trait Env`
+| `error.rs`             | Database error types
+| `free.rs`              | General free functions (related to the database)
+| `key.rs`               | Abstracted database keys; `trait Key`
+| `resize.rs`            | Database resizing algorithms
+| `storable.rs`          | Data (de)serialization; `trait Storable`
+| `table.rs`             | Database table abstraction; `trait Table`
+| `tables.rs`            | All the table definitions used by `cuprate-database`
+| `tests.rs`             | Utilities for `cuprate_database` testing
+| `transaction.rs`       | Database transaction abstraction; `trait TxR{o,w}`
+| `types.rs`             | Database-specific types
+| `unsafe_unsendable.rs` | Marker type to impl `Send` for objects not `Send`
 
-## `src/ops/`
-This folder contains the `cupate_database::ops` module.
-
-TODO: more detailed descriptions.
-
-| File            | Purpose |
-|-----------------|---------|
-| `alt_block.rs`  | Alternative blocks
-| `block.rs`      | Blocks
-| `blockchain.rs` | Blockchain-related
-| `output.rs`     | Outputs
-| `property.rs`   | Properties
-| `spent_key.rs`  | Spent keys
-| `tx.rs`         | Transactions
-
-## `src/service/`
-This folder contains the `cupate_database::service` module.
-
-| File           | Purpose |
-|----------------|---------|
-| `free.rs`      | General free functions used (related to `cuprate_database::service`)
-| `read.rs`      | Read thread-pool definitions and logic
-| `request.rs`   | Read/write `Request`s to the database
-| `response.rs`  | Read/write `Response`'s from the database
-| `tests.rs`     | Thread-pool tests and test helper functions
-| `write.rs`     | Write thread-pool definitions and logic
-
-## `src/backend/`
-This folder contains the actual database crates used as the backend for `cuprate-database`.
+### 2.2 `src/backend/`
+This folder contains the implementation for actual databases used as the backend for `cuprate-database`.
 
 Each backend has its own folder.
 
-| Folder       | Purpose |
-|--------------|---------|
-| `heed/`      | Backend using using forked [`heed`](https://github.com/Cuprate/heed)
-| `sanakirja/` | Backend using [`sanakirja`](https://docs.rs/sanakirja)
+| Folder/File | Purpose |
+|-------------|---------|
+| `heed/`     | Backend using [`heed`](https://github.com/meilisearch/heed) (LMDB)
+| `redb/`     | Backend using [`redb`](https://github.com/cberner/redb)
+| `tests.rs`  | Backend-agnostic tests
 
 All backends follow the same file structure:
 
@@ -128,19 +118,53 @@ All backends follow the same file structure:
 | `env.rs`         | Implementation of `trait Env`
 | `error.rs`       | Implementation of backend's errors to `cuprate_database`'s error types
 | `storable.rs`    | Compatibility layer between `cuprate_database::Storable` and backend-specific (de)serialization
-| `tests.rs`       | Tests for the specific backend
 | `transaction.rs` | Implementation of `trait TxR{o,w}`
 | `types.rs`       | Type aliases for long backend-specific types
 
-# Backends
-`cuprate-database`'s `trait`s abstract over various actual databases.
+### 2.3 `src/config/`
+This folder contains the `cuprate_database::config` module; configuration options for the database.
 
-Each database's implementation is located in its respective file in `src/backend/${DATABASE_NAME}.rs`.
+| File                | Purpose |
+|---------------------|---------|
+| `config.rs`         | Main database `Config` struct
+| `reader_threads.rs` | Reader thread configuration for `service` thread-pool
+| `sync_mode.rs`      | Disk sync configuration for backends
 
-## `heed`
-The default database used is [`heed`](https://github.com/meilisearch/heed) (LMDB).
+### 2.4 `src/ops/`
+This folder contains the `cuprate_database::ops` module.
 
-`LMDB` should not need to be installed as `heed` has a build script that pulls it in automatically.
+These are higher-level functions abstracted over the database, that are Monero-related.
+
+| File            | Purpose |
+|-----------------|---------|
+| `block.rs`      | Block related (main functions)
+| `blockchain.rs` | Blockchain related (height, cumulative values, etc)
+| `key_image.rs`  | Key image related
+| `macros.rs`     | Macros specific to `ops/`
+| `output.rs`     | Output related
+| `property.rs`   | Database properties (pruned, version, etc)
+| `tx.rs`         | Transaction related
+
+### 2.5 `src/service/`
+This folder contains the `cuprate_database::service` module.
+
+The `async`hronous request/response API other Cuprate crates use instead of managing the database directly themselves.
+
+| File           | Purpose |
+|----------------|---------|
+| `free.rs`      | General free functions used (related to `cuprate_database::service`)
+| `read.rs`      | Read thread-pool definitions and logic
+| `tests.rs`     | Thread-pool tests and test helper functions
+| `types.rs`     | `cuprate_database::service`-related type aliases
+| `write.rs`     | Writer thread definitions and logic
+
+## 3. Backends
+`cuprate-database`'s `trait`s allow abstracting over the actual database, such that any backend in particular could be used.
+
+Each database's implementation for those `trait`'s are located in its respective folder in `src/backend/${DATABASE_NAME}/`.
+
+### 3.1 heed
+The default database used is [`heed`](https://github.com/meilisearch/heed) (LMDB). The upstream versions from [`crates.io`](https://crates.io/crates/heed) are used. `LMDB` should not need to be installed as `heed` has a build script that pulls it in automatically.
 
 `heed`'s filenames inside Cuprate's database folder (`~/.local/share/cuprate/database/`) are:
 
@@ -149,11 +173,11 @@ The default database used is [`heed`](https://github.com/meilisearch/heed) (LMDB
 | `data.mdb` | Main data file
 | `lock.mdb` | Database lock file
 
-TODO: document max readers limit: https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1372. Other potential processes (e.g. `xmrblocks`) that are also reading the `data.mdb` file need to be accounted for.
+`heed`-specific notes:
+- [There is a maximum reader limit](https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1372). Other potential processes (e.g. `xmrblocks`) that are also reading the `data.mdb` file need to be accounted for
+- [LMDB does not work on remote filesystem](https://github.com/LMDB/lmdb/blob/b8e54b4c31378932b69f1298972de54a565185b1/libraries/liblmdb/lmdb.h#L129)
 
-TODO: document DB on remote filesystem: https://github.com/LMDB/lmdb/blob/b8e54b4c31378932b69f1298972de54a565185b1/libraries/liblmdb/lmdb.h#L129.
-
-## `redb`
+### 3.2 redb
 The 2nd database backend is the 100% Rust [`redb`](https://github.com/cberner/redb).
 
 The upstream versions from [`crates.io`](https://crates.io/crates/redb) are used.
@@ -164,45 +188,411 @@ The upstream versions from [`crates.io`](https://crates.io/crates/redb) are used
 |-------------|---------|
 | `data.redb` | Main data file
 
-TODO: document DB on remote filesystem (does redb allow this?)
+<!-- TODO: document DB on remote filesystem (does redb allow this?) -->
 
-## `redb-memory`
-This backend is 100% the same as `redb`, although, it uses `redb::backend::InMemoryBackend` which is a key-value store that completely resides in memory instead of a file.
+### 3.3 redb-memory
+This backend is 100% the same as `redb`, although, it uses `redb::backend::InMemoryBackend` which is a database that completely resides in memory instead of a file.
 
 All other details about this should be the same as the normal `redb` backend.
 
-## `sanakirja`
+### 3.4 sanakirja
 [`sanakirja`](https://docs.rs/sanakirja) was a candidate as a backend, however there were problems with maximum value sizes.
 
 The default maximum value size is [1012 bytes](https://docs.rs/sanakirja/1.4.1/sanakirja/trait.Storable.html) which was too small for our requirements. Using [`sanakirja::Slice`](https://docs.rs/sanakirja/1.4.1/sanakirja/union.Slice.html) and [sanakirja::UnsizedStorage](https://docs.rs/sanakirja/1.4.1/sanakirja/trait.UnsizedStorable.html) was attempted, but there were bugs found when inserting a value in-between `512..=4096` bytes.
 
 As such, it is not implemented.
 
-## `MDBX`
-[`MDBX`](https://erthink.github.io/libmdbx) was a candidate as a backend, however MDBX deprecated the custom key/value comparison functions, this makes it a bit trickier to implement duplicate tables. It is also quite similar to the main backend LMDB (of which it was originally a fork of).
+### 3.5 MDBX
+[`MDBX`](https://erthink.github.io/libmdbx) was a candidate as a backend, however MDBX deprecated the custom key/value comparison functions, this makes it a bit trickier to implement [`9.2 Multimap tables`](#92-multimap-tables). It is also quite similar to the main backend LMDB (of which it was originally a fork of).
 
 As such, it is not implemented (yet).
 
-# Layers
-TODO: update with accurate information when ready, update image.
+## 4. Layers
+`cuprate_database` is logically abstracted into 5 layers, with each layer being built upon the last.
 
-## Database
-## Trait
-## ConcreteEnv
-## Thread
-## Service
+Starting from the lowest:
+1. Backend
+2. Trait
+3. ConcreteEnv
+4. `ops`
+5. `service`
 
-# Resizing
-TODO: document resize algorithm:
-- Exactly when it occurs
-- How much bytes are added
+<!-- TODO: insert image here after database/ split -->
 
-All backends follow the same algorithm.
+### 4.1 Backend
+This is the actual database backend implementation (or a Rust shim over one).
 
-# Flushing
-TODO: document disk flushing behavior.
-- Config options
-- Backend-specific behavior
+Examples:
+- `heed` (LMDB)
+- `redb`
 
-# (De)serialization
-TODO: document `Storable` and how databases (de)serialize types when storing/fetching.
+`cuprate_database` itself just uses a backend, it does not implement one.
+
+All backends have the following attributes:
+- [Embedded](https://en.wikipedia.org/wiki/Embedded_database)
+- [Multiversion concurrency control](https://en.wikipedia.org/wiki/Multiversion_concurrency_control)
+- [ACID](https://en.wikipedia.org/wiki/ACID)
+- Are `(key, value)` oriented and have the expected API (`get()`, `insert()`, `delete()`)
+- Are table oriented (`"table_name" -> (key, value)`)
+- Allows concurrent readers
+
+### 4.2 Trait
+`cuprate_database` provides a set of `trait`s that abstract over the various database backends.
+
+This allows the function signatures and behavior to stay the same but allows for swapping out databases in an easier fashion.
+
+All common behavior of the backend's are encapsulated here and used instead of using the backend directly.
+
+Examples:
+- [`trait Env`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/env.rs)
+- [`trait {TxRo, TxRw}`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/transaction.rs)
+- [`trait {DatabaseRo, DatabaseRw}`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/database.rs)
+
+For example, instead of calling `LMDB` or `redb`'s `get()` function directly, `DatabaseRo::get()` is called.
+
+### 4.3 ConcreteEnv
+This is the non-generic, concrete `struct` provided by `cuprate_database` that contains all the data necessary to operate the database. The actual database backend `ConcreteEnv` will use internally depends on which backend feature is used.
+
+`ConcreteEnv` implements `trait Env`, which opens the door to all the other traits.
+
+The equivalent objects in the backends themselves are:
+- [`heed::Env`](https://docs.rs/heed/0.20.0/heed/struct.Env.html)
+- [`redb::Database`](https://docs.rs/redb/2.1.0/redb/struct.Database.html)
+
+This is the main object used when handling the database directly, although that is not strictly necessary as a user if the [`4.5 service`](#45-service) layer is used.
+
+### 4.4 ops
+These are Monero-specific functions that use the abstracted `trait` forms of the database.
+
+Instead of dealing with the database directly:
+- `get()`
+- `delete()`
+
+the `ops` layer provides more abstract functions that deal with commonly used Monero operations:
+- `add_block()`
+- `pop_block()`
+
+### 4.5 service
+The final layer abstracts the database completely into a [Monero-specific `async` request/response API](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/types/src/service.rs#L18-L78) using [`tower::Service`](https://docs.rs/tower/latest/tower/trait.Service.html).
+
+For more information on this layer, see the next section: [`5. The service`](#5-the-service).
+
+## 5. The service
+The main API `cuprate_database` exposes for other crates to use is the `cuprate_database::service` module.
+
+This module exposes an `async` request/response API with `tower::Service`, backed by a threadpool, that allows reading/writing Monero-related data from/to the database.
+
+`cuprate_database::service` itself manages the database using a separate writer thread & reader thread-pool, and uses the previously mentioned [`4.4 ops`](#44-ops) functions when responding to requests.
+
+### 5.1 Initialization
+The service is started simply by calling: [`cuprate_database::service::init()`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/service/free.rs#L23).
+
+This function initializes the database, spawns threads, and returns a:
+- Read handle to the database (cloneable)
+- Write handle to the database (not cloneable)
+
+These "handles" implement the `tower::Service` trait, which allows sending requests and receiving responses `async`hronously.
+
+### 5.2 Requests
+Along with the 2 handles, there are 2 types of requests:
+- [`ReadRequest`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/types/src/service.rs#L23-L90)
+- [`WriteRequest`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/types/src/service.rs#L93-L105)
+
+`ReadRequest` is for retrieving various types of information from the database.
+
+`WriteRequest` currently only has 1 variant: to write a block to the database.
+
+### 5.3 Responses
+After sending one of the above requests using the read/write handle, the value returned is _not_ the response, yet an `async`hronous channel that will eventually return the response:
+```rust,ignore
+// Send a request.
+//                                   tower::Service::call()
+//                                          V
+let response_channel: Channel = read_handle.call(ReadRequest::ChainHeight)?;
+
+// Await the response.
+let response: ReadResponse = response_channel.await?;
+
+// Assert the response is what we expected.
+assert!(matches!(response, Response::ChainHeight(_)));
+```
+
+After `await`ing the returned channel, a `Response` will eventually be returned when the `service` threadpool has fetched the value from the database and sent it off.
+
+Both read/write requests variants match in name with `Response` variants, i.e.
+- `ReadRequest::ChainHeight` leads to `Response::ChainHeight`
+- `WriteRequest::WriteBlock` leads to `Response::WriteBlockOk`
+
+### 5.4 Thread model
+As mentioned in the [`4. Layers`](#4-layers) section, the base database abstractions themselves are not concerned with parallelism, they are mostly functions to be called from a single-thread.
+
+However, the `cuprate_database::service` API, _does_ have a thread model backing it.
+
+When [`cuprate_database::service`'s initialization function](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/free.rs#L33-L44) is called, threads will be spawned and maintained until the user drops (disconnects) the returned handles.
+
+The current behavior for thread count is:
+- [1 writer thread](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/write.rs#L52-L66)
+- [As many reader threads as there are system threads](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/read.rs#L104-L126)
+
+For example, on a system with 32-threads, `cuprate_database` will spawn:
+- 1 writer thread
+- 32 reader threads
+
+whose sole responsibility is to listen for database requests, access the database (potentially in parallel), and return a response.
+
+Note that the `1 system thread = 1 reader thread` model is only the default setting, the reader thread count can be configured by the user to be any number between `1 .. amount_of_system_threads`.
+
+The reader threads are managed by [`rayon`](https://docs.rs/rayon).
+
+For an example of where multiple reader threads are used: given a request that asks if any key-image within a set already exists, `cuprate_database` will [split that work between the threads with `rayon`](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/read.rs#L490-L503).
+
+### 5.5 Shutdown
+Once the read/write handles are `Drop`ed, the backing thread(pool) will gracefully exit, automatically.
+
+Note the writer thread and reader threadpool aren't connected whatsoever; dropping the write handle will make the writer thread exit, however, the reader handle is free to be held onto and can be continued to be read from - and vice-versa for the write handle.
+
+## 6. Syncing
+`cuprate_database`'s database has 5 disk syncing modes.
+
+1. FastThenSafe
+1. Safe
+1. Async
+1. Threshold
+1. Fast
+
+The default mode is `Safe`.
+
+This means that upon each transaction commit, all the data that was written will be fully synced to disk. This is the slowest, but safest mode of operation.
+
+Note that upon any database `Drop`, whether via `service` or dropping the database directly, the current implementation will sync to disk regardless of any configuration.
+
+For more information on the other modes, read the documentation [here](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/config/sync_mode.rs#L63-L144).
+
+## 7. Resizing
+Database backends that require manually resizing will, by default, use a similar algorithm as `monerod`'s.
+
+Note that this only relates to the `service` module, where the database is handled by `cuprate_database` itself, not the user. In the case of a user directly using `cuprate_database`, it is up to them on how to resize.
+
+Within `service`, the resizing logic defined [here](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/service/write.rs#L139-L201) does the following:
+
+- If there's not enough space to fit a write request's data, start a resize
+- Each resize adds around [`1_073_745_920`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L104-L160) bytes to the current map size
+- A resize will be attempted `3` times before failing
+
+There are other [resizing algorithms](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L38-L47) that define how the database's memory map grows, although currently the behavior of [`monerod`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L104-L160) is closely followed.
+
+## 8. (De)serialization
+All types stored inside the database are either bytes already, or are perfectly bitcast-able.
+
+As such, they do not incur heavy (de)serialization costs when storing/fetching them from the database. The main (de)serialization used is [`bytemuck`](https://docs.rs/bytemuck)'s traits and casting functions.
+
+The size & layout of types is stable across compiler versions, as they are set and determined with [`#[repr(C)]`](https://doc.rust-lang.org/nomicon/other-reprs.html#reprc) and `bytemuck`'s derive macros such as [`bytemuck::Pod`](https://docs.rs/bytemuck/latest/bytemuck/derive.Pod.html).
+
+Note that the data stored in the tables are still type-safe; we still refer to the key and values within our tables by the type.
+
+The main deserialization `trait` for database storage is: [`cuprate_database::Storable`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L16-L115).
+
+- Before storage, the type is [simply cast into bytes](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L125)
+- When fetching, the bytes are [simply cast into the type](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L130)
+
+When a type is cast into bytes, [the reference is cast](https://docs.rs/bytemuck/latest/bytemuck/fn.bytes_of.html), i.e. this is zero-cost serialization.
+
+However, it is worth noting that when bytes are cast into the type, [it is copied](https://docs.rs/bytemuck/latest/bytemuck/fn.pod_read_unaligned.html). This is due to byte alignment guarantee issues with both backends, see:
+- https://github.com/AltSysrq/lmdb-zero/issues/8
+- https://github.com/cberner/redb/issues/360
+
+Without this, `bytemuck` will panic with [`TargetAlignmentGreaterAndInputNotAligned`](https://docs.rs/bytemuck/latest/bytemuck/enum.PodCastError.html#variant.TargetAlignmentGreaterAndInputNotAligned) when casting.
+
+Copying the bytes fixes this problem, although it is more costly than necessary. However, in the main use-case for `cuprate_database` (the `service` module) the bytes would need to be owned regardless as the `Request/Response` API uses owned data types (`T`, `Vec<T>`, `HashMap<K, V>`, etc).
+
+Practically speaking, this means lower-level database functions that normally look like such:
+```rust
+fn get(key: &Key) -> &Value;
+```
+end up looking like this in `cuprate_database`:
+```rust
+fn get(key: &Key) -> Value;
+```
+
+Since each backend has its own (de)serialization methods, our types are wrapped in compatibility types that map our `Storable` functions into whatever is required for the backend, e.g:
+- [`StorableHeed<T>`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/backend/heed/storable.rs#L11-L45)
+- [`StorableRedb<T>`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/backend/redb/storable.rs#L11-L30)
+
+Compatibility structs also exist for any `Storable` containers:
+- [`StorableVec<T>`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L135-L191)
+- [`StorableBytes`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L208-L241)
+
+Again, it's unfortunate that these must be owned, although in `service`'s use-case, they would have to be owned anyway.
+
+## 9. Schema
+The following section contains Cuprate's database schema. It may change throughout the development of Cuprate; as such, nothing here is final.
+
+### 9.1 Tables
+The `CamelCase` names of the table headers documented here (e.g. `TxIds`) are the actual type name of the table within `cuprate_database`.
+
+Note that words written within `code blocks` mean that it is a real type defined and usable within `cuprate_database`. Other standard types like u64 and type aliases (TxId) are written normally.
+
+Within `cuprate_database::tables`, the below table is essentially defined as-is with [a macro](https://github.com/Cuprate/cuprate/blob/31ce89412aa174fc33754f22c9a6d9ef5ddeda28/database/src/tables.rs#L369-L470).
+
+Many of the data types stored are the same data types, although are different semantically, as such, a map of aliases used and their real data types is also provided below.
+
+| Alias                                              | Real Type |
+|----------------------------------------------------|-----------|
+| BlockHeight, Amount, AmountIndex, TxId, UnlockTime | u64
+| BlockHash, KeyImage, TxHash, PrunableHash          | [u8; 32]
+
+| Table             | Key                  | Value              | Description |
+|-------------------|----------------------|--------------------|-------------|
+| `BlockBlobs`      | BlockHeight          | `StorableVec<u8>`  | Maps a block's height to a serialized byte form of a block
+| `BlockHeights`    | BlockHash            | BlockHeight        | Maps a block's hash to its height
+| `BlockInfos`      | BlockHeight          | `BlockInfo`        | Contains metadata of all blocks
+| `KeyImages`       | KeyImage             | ()                 | This table is a set with no value, it stores transaction key images
+| `NumOutputs`      | Amount               | u64                | Maps an output's amount to the number of outputs with that amount
+| `Outputs`         | `PreRctOutputId`     | `Output`           | This table contains legacy CryptoNote outputs which have clear amounts. This table will not contain an output with 0 amount.
+| `PrunedTxBlobs`   | TxId                 | `StorableVec<u8>`  | Contains pruned transaction blobs (even if the database is not pruned)
+| `PrunableTxBlobs` | TxId                 | `StorableVec<u8>`  | Contains the prunable part of a transaction
+| `PrunableHashes`  | TxId                 | PrunableHash       | Contains the hash of the prunable part of a transaction
+| `RctOutputs`      | AmountIndex          | `RctOutput`        | Contains RingCT outputs mapped from their global RCT index
+| `TxBlobs`         | TxId                 | `StorableVec<u8>`  | Serialized transaction blobs (bytes)
+| `TxIds`           | TxHash               | TxId               | Maps a transaction's hash to its index/ID
+| `TxHeights`       | TxId                 | BlockHeight        | Maps a transaction's ID to the height of the block it comes from
+| `TxOutputs`       | TxId                 | `StorableVec<u64>` | Gives the amount indices of a transaction's outputs
+| `TxUnlockTime`    | TxId                 | UnlockTime         | Stores the unlock time of a transaction (only if it has a non-zero lock time)
+
+The definitions for aliases and types (e.g. `RctOutput`) are within the [`cuprate_database::types`](https://github.com/Cuprate/cuprate/blob/31ce89412aa174fc33754f22c9a6d9ef5ddeda28/database/src/types.rs#L51) module.
+
+<!-- TODO(Boog900): We could split this table again into `RingCT (non-miner) Outputs` and `RingCT (miner) Outputs` as for miner outputs we can store the amount instead of commitment saving 24 bytes per miner output. -->
+
+### 9.2 Multimap tables
+When referencing outputs, Monero will [use the amount and the amount index](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/blockchain_db/lmdb/db_lmdb.cpp#L3447-L3449). This means 2 keys are needed to reach an output.
+
+With LMDB you can set the `DUP_SORT` flag on a table and then set the key/value to:
+```rust
+Key = KEY_PART_1
+```
+```rust
+Value = {
+    KEY_PART_2,
+    VALUE // The actual value we are storing.
+}
+```
+
+Then you can set a custom value sorting function that only takes `KEY_PART_2` into account; this is how `monerod` does it.
+
+This requires that the underlying database supports:
+- multimap tables
+- custom sort functions on values
+- setting a cursor on a specific key/value
+
+---
+
+Another way to implement this is as follows:
+```rust
+Key = { KEY_PART_1, KEY_PART_2 }
+```
+```rust
+Value = VALUE
+```
+
+Then the key type is simply used to look up the value; this is how `cuprate_database` does it.
+
+For example, the key/value pair for outputs is:
+```rust
+PreRctOutputId => Output
+```
+where `PreRctOutputId` looks like this:
+```rust
+struct PreRctOutputId {
+    amount: u64,
+    amount_index: u64,
+}
+```
+
+## 10. Known issues and tradeoffs
+`cuprate_database` takes many tradeoffs, whether due to:
+- Prioritizing certain values over others
+- Not having a better solution
+- Being "good enough"
+
+This is a list of the larger ones, along with issues that don't have answers yet.
+
+### 10.1 Traits abstracting backends
+Although all database backends used are very similar, they have some crucial differences in small implementation details that must be worked around when conforming them to `cuprate_database`'s traits.
+
+Put simply: using `cuprate_database`'s traits is less efficient and more awkward than using the backend directly.
+
+For example:
+- [Data types must be wrapped in compatibility layers when they otherwise wouldn't be](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/backend/heed/env.rs#L101-L116)
+- [There are types that only apply to a specific backend, but are visible to all](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/error.rs#L86-L89)
+- [There are extra layers of abstraction to smoothen the differences between all backends](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/env.rs#L62-L68)
+- [Existing functionality of backends must be taken away, as it isn't supported in the others](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/database.rs#L27-L34)
+
+This is a _tradeoff_ that `cuprate_database` takes, as:
+- The backend itself is usually not the source of bottlenecks in the greater system, as such, small inefficiencies are OK
+- None of the lost functionality is crucial for operation
+- The ability to use, test, and swap between multiple database backends is [worth it](https://github.com/Cuprate/cuprate/pull/35#issuecomment-1952804393)
+
+### 10.2 Hot-swappable backends
+Using a different backend is really as simple as re-building `cuprate_database` with a different feature flag:
+```bash
+# Use LMDB.
+cargo build --package cuprate-database --features heed
+
+# Use redb.
+cargo build --package cuprate-database --features redb
+```
+
+This is "good enough" for now, however ideally, this hot-swapping of backends would be able to be done at _runtime_.
+
+As it is now, `cuprate_database` cannot compile both backends and swap based on user input at runtime; it must be compiled with a certain backend, which will produce a binary with only that backend.
+
+This also means things like [CI testing multiple backends is awkward](https://github.com/Cuprate/cuprate/blob/main/.github/workflows/ci.yml#L132-L136), as we must re-compile with different feature flags instead.
+
+### 10.3 Copying unaligned bytes
+As mentioned in [`8. (De)serialization`](#8-deserialization), bytes are _copied_ when they are turned into a type `T` due to unaligned bytes being returned from database backends.
+
+Using a regular reference cast results in an improperly aligned type `T`; [such a type even existing causes undefined behavior](https://doc.rust-lang.org/reference/behavior-considered-undefined.html). In our case, `bytemuck` saves us by panicking before this occurs. 
+
+Thus, when using `cuprate_database`'s database traits, an _owned_ `T` is returned.
+
+This is doubly unfortunate for `&[u8]` as this does not even need deserialization.
+
+For example, `StorableVec` could have been this:
+```rust
+enum StorableVec<'a, T: Storable> {
+    Owned(T),
+    Ref(&'a T),
+}
+```
+but this would require supporting types that must be copied regardless with the occasional `&[u8]` that can be returned without casting. This was hard to do so in a generic way, thus all `[u8]`'s are copied and returned as owned `StorableVec`s.
+
+This is a _tradeoff_ `cuprate_database` takes as:
+- `bytemuck::pod_read_unaligned` is cheap enough
+- The main API, `service`, needs to return owned values anyway
+- Having no references removes a lot of lifetime complexity
+
+The alternative is either:
+- Using proper (de)serialization instead of casting (which comes with its own costs)
+- Somehow fixing the alignment issues in the backends mentioned previously
+
+### 10.4 Endianness
+`cuprate_database`'s (de)serialization and storage of bytes are native-endian, as in, byte storage order will depend on the machine it is running on.
+
+As Cuprate's build-targets are all little-endian ([big-endian by default machines barely exist](https://en.wikipedia.org/wiki/Endianness#Hardware)), this doesn't matter much and the byte ordering can be seen as a constant.
+
+Practically, this means `cuprated`'s database files can be transferred across computers, as can `monerod`'s.
+
+### 10.5 Extra table data
+Some of `cuprate_database`'s tables differ from `monerod`'s tables, for example, the way [`9.2 Multimap tables`](#92-multimap-tables) tables are done requires that the primary key is stored _for all_ entries, compared to `monerod` only needing to store it once.
+
+For example:
+```rust
+// `monerod` only stores `amount: 1` once,
+// `cuprated` stores it each time it appears.
+struct PreRctOutputId { amount: 1, amount_index: 0 }
+struct PreRctOutputId { amount: 1, amount_index: 1 }
+```
+
+This means `cuprated`'s database will be slightly larger than `monerod`'s.
+
+The current method `cuprate_database` uses will be "good enough" until usage shows that it must be optimized as multimap tables are tricky to implement across all backends.
\ No newline at end of file
diff --git a/database/src/backend/heed/database.rs b/database/src/backend/heed/database.rs
index 9b3745c5..c985d0de 100644
--- a/database/src/backend/heed/database.rs
+++ b/database/src/backend/heed/database.rs
@@ -1,16 +1,10 @@
 //! Implementation of `trait Database` for `heed`.
 
 //---------------------------------------------------------------------------------------------------- Import
-use std::{
-    borrow::{Borrow, Cow},
-    cell::RefCell,
-    fmt::Debug,
-    ops::RangeBounds,
-    sync::RwLockReadGuard,
-};
+use std::{cell::RefCell, ops::RangeBounds};
 
 use crate::{
-    backend::heed::{storable::StorableHeed, types::HeedDb},
+    backend::heed::types::HeedDb,
     database::{DatabaseIter, DatabaseRo, DatabaseRw},
     error::RuntimeError,
     table::Table,
@@ -137,7 +131,8 @@ impl<T: Table> DatabaseIter<T> for HeedTableRo<'_, T> {
 }
 
 //---------------------------------------------------------------------------------------------------- DatabaseRo Impl
-impl<T: Table> DatabaseRo<T> for HeedTableRo<'_, T> {
+// SAFETY: `HeedTableRo: !Send` as it holds a reference to `heed::RoTxn: Send + !Sync`.
+unsafe impl<T: Table> DatabaseRo<T> for HeedTableRo<'_, T> {
     #[inline]
     fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> {
         get::<T>(&self.db, self.tx_ro, key)
@@ -165,7 +160,9 @@ impl<T: Table> DatabaseRo<T> for HeedTableRo<'_, T> {
 }
 
 //---------------------------------------------------------------------------------------------------- DatabaseRw Impl
-impl<T: Table> DatabaseRo<T> for HeedTableRw<'_, '_, T> {
+// SAFETY: The `Send` bound only applies to `HeedTableRo`.
+// `HeedTableRw`'s write transaction is `!Send`.
+unsafe impl<T: Table> DatabaseRo<T> for HeedTableRw<'_, '_, T> {
     #[inline]
     fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> {
         get::<T>(&self.db, &self.tx_rw.borrow(), key)
@@ -204,55 +201,56 @@ impl<T: Table> DatabaseRw<T> for HeedTableRw<'_, '_, T> {
         Ok(())
     }
 
+    #[inline]
+    fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError> {
+        // LMDB/heed does not return the value on deletion.
+        // So, fetch it first - then delete.
+        let value = get::<T>(&self.db, &self.tx_rw.borrow(), key)?;
+        match self.db.delete(&mut self.tx_rw.borrow_mut(), key) {
+            Ok(true) => Ok(value),
+            Err(e) => Err(e.into()),
+            // We just `get()`'ed the value - it is
+            // incorrect for it to suddenly not exist.
+            Ok(false) => unreachable!(),
+        }
+    }
+
     #[inline]
     fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError> {
         let tx_rw = &mut self.tx_rw.borrow_mut();
 
-        // Get the first value first...
-        let Some(first) = self.db.first(tx_rw)? else {
+        // Get the value first...
+        let Some((key, value)) = self.db.first(tx_rw)? else {
             return Err(RuntimeError::KeyNotFound);
         };
 
         // ...then remove it.
-        //
-        // We use an iterator because we want to semantically
-        // remove the _first_ and only the first `(key, value)`.
-        // `delete()` removes all keys including duplicates which
-        // is slightly different behavior.
-        let mut iter = self.db.iter_mut(tx_rw)?;
-
-        // SAFETY:
-        // It is undefined behavior to keep a reference of
-        // a value from this database while modifying it.
-        // We are deleting the value and never accessing
-        // the iterator again so this should be safe.
-        unsafe {
-            iter.del_current()?;
+        match self.db.delete(tx_rw, &key) {
+            Ok(true) => Ok((key, value)),
+            Err(e) => Err(e.into()),
+            // We just `get()`'ed the value - it is
+            // incorrect for it to suddenly not exist.
+            Ok(false) => unreachable!(),
         }
-
-        Ok(first)
     }
 
     #[inline]
     fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError> {
         let tx_rw = &mut self.tx_rw.borrow_mut();
 
-        let Some(first) = self.db.last(tx_rw)? else {
+        // Get the value first...
+        let Some((key, value)) = self.db.last(tx_rw)? else {
             return Err(RuntimeError::KeyNotFound);
         };
 
-        let mut iter = self.db.rev_iter_mut(tx_rw)?;
-
-        // SAFETY:
-        // It is undefined behavior to keep a reference of
-        // a value from this database while modifying it.
-        // We are deleting the value and never accessing
-        // the iterator again so this should be safe.
-        unsafe {
-            iter.del_current()?;
+        // ...then remove it.
+        match self.db.delete(tx_rw, &key) {
+            Ok(true) => Ok((key, value)),
+            Err(e) => Err(e.into()),
+            // We just `get()`'ed the value - it is
+            // incorrect for it to suddenly not exist.
+            Ok(false) => unreachable!(),
         }
-
-        Ok(first)
     }
 }
 
diff --git a/database/src/backend/heed/env.rs b/database/src/backend/heed/env.rs
index d9e3fdc2..56064849 100644
--- a/database/src/backend/heed/env.rs
+++ b/database/src/backend/heed/env.rs
@@ -3,9 +3,8 @@
 //---------------------------------------------------------------------------------------------------- Import
 use std::{
     cell::RefCell,
-    fmt::Debug,
-    ops::Deref,
-    sync::{RwLock, RwLockReadGuard, RwLockWriteGuard},
+    num::NonZeroUsize,
+    sync::{RwLock, RwLockReadGuard},
 };
 
 use heed::{DatabaseOpenOptions, EnvFlags, EnvOpenOptions};
@@ -22,10 +21,11 @@ use crate::{
     error::{InitError, RuntimeError},
     resize::ResizeAlgorithm,
     table::Table,
+    tables::call_fn_on_all_tables_or_early_return,
 };
 
 //---------------------------------------------------------------------------------------------------- Consts
-/// TODO
+/// Panic message when there's a table missing.
 const PANIC_MSG_MISSING_TABLE: &str =
     "cuprate_database::Env should uphold the invariant that all tables are already created";
 
@@ -48,7 +48,7 @@ pub struct ConcreteEnv {
     /// `reader_count` would be spinned on until 0, at which point
     /// we are safe to resize.
     ///
-    /// Although, 3 atomic operations (check atomic bool, reader_count++, reader_count--)
+    /// Although, 3 atomic operations (check atomic bool, `reader_count++`, `reader_count--`)
     /// turns out to be roughly as expensive as acquiring a non-contended `RwLock`,
     /// the CPU sleeping instead of spinning is much better too.
     ///
@@ -67,7 +67,7 @@ impl Drop for ConcreteEnv {
     fn drop(&mut self) {
         // INVARIANT: drop(ConcreteEnv) must sync.
         //
-        // TODO:
+        // SOMEDAY:
         // "if the environment has the MDB_NOSYNC flag set the flushes will be omitted,
         // and with MDB_MAPASYNC they will be asynchronous."
         // <http://www.lmdb.tech/doc/group__mdb.html#ga85e61f05aa68b520cc6c3b981dba5037>
@@ -75,7 +75,7 @@ impl Drop for ConcreteEnv {
         // We need to do `mdb_env_set_flags(&env, MDB_NOSYNC|MDB_ASYNCMAP, 0)`
         // to clear the no sync and async flags such that the below `self.sync()`
         // _actually_ synchronously syncs.
-        if let Err(e) = crate::Env::sync(self) {
+        if let Err(_e) = crate::Env::sync(self) {
             // TODO: log error?
         }
 
@@ -117,10 +117,11 @@ impl Env for ConcreteEnv {
 
     #[cold]
     #[inline(never)] // called once.
-    #[allow(clippy::items_after_statements)]
     fn open(config: Config) -> Result<Self, InitError> {
         // <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
 
+        let mut env_open_options = EnvOpenOptions::new();
+
         // Map our `Config` sync mode to the LMDB environment flags.
         //
         // <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
@@ -128,11 +129,21 @@ impl Env for ConcreteEnv {
             SyncMode::Safe => EnvFlags::empty(),
             SyncMode::Async => EnvFlags::MAP_ASYNC,
             SyncMode::Fast => EnvFlags::NO_SYNC | EnvFlags::WRITE_MAP | EnvFlags::MAP_ASYNC,
-            // TODO: dynamic syncs are not implemented.
+            // SOMEDAY: dynamic syncs are not implemented.
             SyncMode::FastThenSafe | SyncMode::Threshold(_) => unimplemented!(),
         };
 
-        let mut env_open_options = EnvOpenOptions::new();
+        // SAFETY: the flags we're setting are 'unsafe'
+        // from a data durability perspective, although,
+        // the user config wanted this.
+        //
+        // MAYBE: We may need to open/create tables with certain flags
+        // <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
+        // MAYBE: Set comparison functions for certain tables
+        // <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
+        unsafe {
+            env_open_options.flags(flags);
+        }
 
         // Set the memory map size to
         // (current disk size) + (a bit of leeway)
@@ -151,7 +162,7 @@ impl Env for ConcreteEnv {
 
         // Set the max amount of database tables.
         // We know at compile time how many tables there are.
-        // TODO: ...how many?
+        // SOMEDAY: ...how many?
         env_open_options.max_dbs(32);
 
         // LMDB documentation:
@@ -166,38 +177,33 @@ impl Env for ConcreteEnv {
         // - Use at least 126 reader threads
         // - Add 16 extra reader threads if <126
         //
-        // TODO: This behavior is from `monerod`:
+        // FIXME: This behavior is from `monerod`:
         // <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
         // I believe this could be adjusted percentage-wise so very high
         // thread PCs can benefit from something like (cuprated + anything that uses the DB in the future).
         // For now:
         // - No other program using our DB exists
         // - Almost no-one has a 126+ thread CPU
-        #[allow(clippy::cast_possible_truncation)] // no-one has `u32::MAX`+ threads
-        let reader_threads = config.reader_threads.as_threads().get() as u32;
+        let reader_threads =
+            u32::try_from(config.reader_threads.as_threads().get()).unwrap_or(u32::MAX);
         env_open_options.max_readers(if reader_threads < 110 {
             126
         } else {
-            reader_threads + 16
+            reader_threads.saturating_add(16)
         });
 
         // Create the database directory if it doesn't exist.
         std::fs::create_dir_all(config.db_directory())?;
         // Open the environment in the user's PATH.
-        let env = env_open_options.open(config.db_directory())?;
-
-        // TODO: Open/create tables with certain flags
-        // <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
-        // `heed` creates the database if it didn't exist.
-        // <https://docs.rs/heed/0.20.0-alpha.9/src/heed/env.rs.html#223-229>
+        // SAFETY: LMDB uses a memory-map backed file.
+        // <https://docs.rs/heed/0.20.0/heed/struct.EnvOpenOptions.html#method.open>
+        let env = unsafe { env_open_options.open(config.db_directory())? };
 
         /// Function that creates the tables based off the passed `T: Table`.
         fn create_table<T: Table>(
             env: &heed::Env,
             tx_rw: &mut heed::RwTxn<'_>,
         ) -> Result<(), InitError> {
-            println!("create_table(): {}", T::NAME); // TODO: use tracing.
-
             DatabaseOpenOptions::new(env)
                 .name(<T as Table>::NAME)
                 .types::<StorableHeed<<T as Table>::Key>, StorableHeed<<T as Table>::Value>>()
@@ -205,31 +211,17 @@ impl Env for ConcreteEnv {
             Ok(())
         }
 
-        use crate::tables::{
-            BlockBlobs, BlockHeights, BlockInfoV1s, BlockInfoV2s, BlockInfoV3s, KeyImages,
-            NumOutputs, Outputs, PrunableHashes, PrunableTxBlobs, PrunedTxBlobs, RctOutputs,
-            TxHeights, TxIds, TxUnlockTime,
-        };
-
         let mut tx_rw = env.write_txn()?;
-        create_table::<BlockBlobs>(&env, &mut tx_rw)?;
-        create_table::<BlockHeights>(&env, &mut tx_rw)?;
-        create_table::<BlockInfoV1s>(&env, &mut tx_rw)?;
-        create_table::<BlockInfoV2s>(&env, &mut tx_rw)?;
-        create_table::<BlockInfoV3s>(&env, &mut tx_rw)?;
-        create_table::<KeyImages>(&env, &mut tx_rw)?;
-        create_table::<NumOutputs>(&env, &mut tx_rw)?;
-        create_table::<Outputs>(&env, &mut tx_rw)?;
-        create_table::<PrunableHashes>(&env, &mut tx_rw)?;
-        create_table::<PrunableTxBlobs>(&env, &mut tx_rw)?;
-        create_table::<PrunedTxBlobs>(&env, &mut tx_rw)?;
-        create_table::<RctOutputs>(&env, &mut tx_rw)?;
-        create_table::<TxHeights>(&env, &mut tx_rw)?;
-        create_table::<TxIds>(&env, &mut tx_rw)?;
-        create_table::<TxUnlockTime>(&env, &mut tx_rw)?;
-
-        // TODO: Set dupsort and comparison functions for certain tables
-        // <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1324>
+        // Create all tables.
+        // FIXME: this macro is kinda awkward.
+        {
+            let env = &env;
+            let tx_rw = &mut tx_rw;
+            match call_fn_on_all_tables_or_early_return!(create_table(env, tx_rw)) {
+                Ok(_) => (),
+                Err(e) => return Err(e),
+            }
+        }
 
         // INVARIANT: this should never return `ResizeNeeded` due to adding
         // some tables since we added some leeway to the memory map above.
@@ -249,11 +241,11 @@ impl Env for ConcreteEnv {
         Ok(self.env.read().unwrap().force_sync()?)
     }
 
-    fn resize_map(&self, resize_algorithm: Option<ResizeAlgorithm>) {
+    fn resize_map(&self, resize_algorithm: Option<ResizeAlgorithm>) -> NonZeroUsize {
         let resize_algorithm = resize_algorithm.unwrap_or_else(|| self.config().resize_algorithm);
 
         let current_size_bytes = self.current_map_size();
-        let new_size_bytes = resize_algorithm.resize(current_size_bytes).get();
+        let new_size_bytes = resize_algorithm.resize(current_size_bytes);
 
         // SAFETY:
         // Resizing requires that we have
@@ -264,8 +256,14 @@ impl Env for ConcreteEnv {
         // <http://www.lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5>
         unsafe {
             // INVARIANT: `resize()` returns a valid `usize` to resize to.
-            self.env.write().unwrap().resize(new_size_bytes).unwrap();
+            self.env
+                .write()
+                .unwrap()
+                .resize(new_size_bytes.get())
+                .unwrap();
         }
+
+        new_size_bytes
     }
 
     #[inline]
diff --git a/database/src/backend/heed/error.rs b/database/src/backend/heed/error.rs
index 65b781b2..c47bd908 100644
--- a/database/src/backend/heed/error.rs
+++ b/database/src/backend/heed/error.rs
@@ -20,7 +20,6 @@ impl From<heed::Error> for crate::InitError {
             E1::Mdb(mdb_error) => match mdb_error {
                 E2::Invalid => Self::Invalid,
                 E2::VersionMismatch => Self::InvalidVersion,
-                E2::Other(c_int) => Self::Unknown(Box::new(mdb_error)),
 
                 // "Located page was wrong type".
                 // <https://docs.rs/heed/latest/heed/enum.MdbError.html#variant.Corrupted>
@@ -31,6 +30,7 @@ impl From<heed::Error> for crate::InitError {
 
                 // These errors shouldn't be returned on database init.
                 E2::Incompatible
+                | E2::Other(_)
                 | E2::BadTxn
                 | E2::Problem
                 | E2::KeyExist
@@ -49,10 +49,9 @@ impl From<heed::Error> for crate::InitError {
                 | E2::Panic => Self::Unknown(Box::new(mdb_error)),
             },
 
-            E1::InvalidDatabaseTyping
-            | E1::BadOpenOptions { .. }
-            | E1::Encoding(_)
-            | E1::Decoding(_) => Self::Unknown(Box::new(error)),
+            E1::BadOpenOptions { .. } | E1::Encoding(_) | E1::Decoding(_) => {
+                Self::Unknown(Box::new(error))
+            }
         }
     }
 }
@@ -109,7 +108,7 @@ impl From<heed::Error> for crate::RuntimeError {
                 // occurring indicates we did _not_ do that, which is a bug
                 // and we should panic.
                 //
-                // TODO: This can also mean _another_ process wrote to our
+                // FIXME: This can also mean _another_ process wrote to our
                 // LMDB file and increased the size. I don't think we need to accommodate for this.
                 // <http://www.lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5>
                 // Although `monerod` reacts to that instead of `MDB_MAP_FULL`
@@ -139,11 +138,9 @@ impl From<heed::Error> for crate::RuntimeError {
             },
 
             // Only if we write incorrect code.
-            E1::InvalidDatabaseTyping
-            | E1::DatabaseClosing
-            | E1::BadOpenOptions { .. }
-            | E1::Encoding(_)
-            | E1::Decoding(_) => panic!("fix the database code! {error:#?}"),
+            E1::DatabaseClosing | E1::BadOpenOptions { .. } | E1::Encoding(_) | E1::Decoding(_) => {
+                panic!("fix the database code! {error:#?}")
+            }
         }
     }
 }
diff --git a/database/src/backend/heed/storable.rs b/database/src/backend/heed/storable.rs
index 0d180c29..83442212 100644
--- a/database/src/backend/heed/storable.rs
+++ b/database/src/backend/heed/storable.rs
@@ -1,11 +1,11 @@
 //! `cuprate_database::Storable` <-> `heed` serde trait compatibility layer.
 
 //---------------------------------------------------------------------------------------------------- Use
-use std::{borrow::Cow, fmt::Debug, marker::PhantomData};
+use std::{borrow::Cow, marker::PhantomData};
 
-use heed::{types::Bytes, BoxedError, BytesDecode, BytesEncode, Database};
+use heed::{BoxedError, BytesDecode, BytesEncode};
 
-use crate::{storable::Storable, storable::StorableVec};
+use crate::storable::Storable;
 
 //---------------------------------------------------------------------------------------------------- StorableHeed
 /// The glue struct that implements `heed`'s (de)serialization
@@ -47,6 +47,8 @@ where
 //---------------------------------------------------------------------------------------------------- Tests
 #[cfg(test)]
 mod test {
+    use std::fmt::Debug;
+
     use super::*;
     use crate::{StorableBytes, StorableVec};
 
diff --git a/database/src/backend/heed/transaction.rs b/database/src/backend/heed/transaction.rs
index 43096980..d32f3707 100644
--- a/database/src/backend/heed/transaction.rs
+++ b/database/src/backend/heed/transaction.rs
@@ -1,6 +1,6 @@
 //! Implementation of `trait TxRo/TxRw` for `heed`.
 
-use std::{cell::RefCell, ops::Deref, sync::RwLockReadGuard};
+use std::cell::RefCell;
 
 //---------------------------------------------------------------------------------------------------- Import
 use crate::{
diff --git a/database/src/backend/mod.rs b/database/src/backend/mod.rs
index 2d6800e1..11ae40b8 100644
--- a/database/src/backend/mod.rs
+++ b/database/src/backend/mod.rs
@@ -1,13 +1,4 @@
 //! Database backends.
-//!
-//! TODO:
-//! Create a test backend backed by `std::collections::HashMap`.
-//!
-//! The full type could be something like `HashMap<&'static str, HashMap<K, V>>`.
-//! where the `str` is the table name, and the containing hashmap are are the
-//! key and values.
-//!
-//! Not sure how duplicate keys will work.
 
 cfg_if::cfg_if! {
     // If both backends are enabled, fallback to `heed`.
diff --git a/database/src/backend/redb/database.rs b/database/src/backend/redb/database.rs
index 43f1a3ac..cd9a0be9 100644
--- a/database/src/backend/redb/database.rs
+++ b/database/src/backend/redb/database.rs
@@ -1,12 +1,7 @@
 //! Implementation of `trait DatabaseR{o,w}` for `redb`.
 
 //---------------------------------------------------------------------------------------------------- Import
-use std::{
-    borrow::{Borrow, Cow},
-    fmt::Debug,
-    marker::PhantomData,
-    ops::{Bound, Deref, RangeBounds},
-};
+use std::ops::RangeBounds;
 
 use redb::ReadableTable;
 
@@ -17,7 +12,6 @@ use crate::{
     },
     database::{DatabaseIter, DatabaseRo, DatabaseRw},
     error::RuntimeError,
-    storable::Storable,
     table::Table,
 };
 
@@ -118,7 +112,8 @@ impl<T: Table + 'static> DatabaseIter<T> for RedbTableRo<T::Key, T::Value> {
 }
 
 //---------------------------------------------------------------------------------------------------- DatabaseRo
-impl<T: Table + 'static> DatabaseRo<T> for RedbTableRo<T::Key, T::Value> {
+// SAFETY: Both `redb`'s transaction and table types are `Send + Sync`.
+unsafe impl<T: Table + 'static> DatabaseRo<T> for RedbTableRo<T::Key, T::Value> {
     #[inline]
     fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> {
         get::<T>(self, key)
@@ -146,7 +141,8 @@ impl<T: Table + 'static> DatabaseRo<T> for RedbTableRo<T::Key, T::Value> {
 }
 
 //---------------------------------------------------------------------------------------------------- DatabaseRw
-impl<T: Table + 'static> DatabaseRo<T> for RedbTableRw<'_, T::Key, T::Value> {
+// SAFETY: Both `redb`'s transaction and table types are `Send + Sync`.
+unsafe impl<T: Table + 'static> DatabaseRo<T> for RedbTableRw<'_, T::Key, T::Value> {
     #[inline]
     fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError> {
         get::<T>(self, key)
@@ -188,6 +184,15 @@ impl<T: Table + 'static> DatabaseRw<T> for RedbTableRw<'_, T::Key, T::Value> {
         Ok(())
     }
 
+    #[inline]
+    fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError> {
+        if let Some(value) = redb::Table::remove(self, key)? {
+            Ok(value.value())
+        } else {
+            Err(RuntimeError::KeyNotFound)
+        }
+    }
+
     #[inline]
     fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError> {
         let (key, value) = redb::Table::pop_first(self)?.ok_or(RuntimeError::KeyNotFound)?;
diff --git a/database/src/backend/redb/env.rs b/database/src/backend/redb/env.rs
index 6525de62..e552d454 100644
--- a/database/src/backend/redb/env.rs
+++ b/database/src/backend/redb/env.rs
@@ -1,18 +1,14 @@
 //! Implementation of `trait Env` for `redb`.
 
 //---------------------------------------------------------------------------------------------------- Import
-use std::{fmt::Debug, ops::Deref, path::Path, sync::Arc};
-
 use crate::{
-    backend::redb::{
-        storable::StorableRedb,
-        types::{RedbTableRo, RedbTableRw},
-    },
+    backend::redb::storable::StorableRedb,
     config::{Config, SyncMode},
     database::{DatabaseIter, DatabaseRo, DatabaseRw},
     env::{Env, EnvInner},
     error::{InitError, RuntimeError},
     table::Table,
+    tables::call_fn_on_all_tables_or_early_return,
     TxRw,
 };
 
@@ -36,7 +32,8 @@ impl Drop for ConcreteEnv {
     fn drop(&mut self) {
         // INVARIANT: drop(ConcreteEnv) must sync.
         if let Err(e) = self.sync() {
-            // TODO: log error?
+            // TODO: use tracing
+            println!("{e:#?}");
         }
 
         // TODO: log that we are dropping the database.
@@ -53,23 +50,22 @@ impl Env for ConcreteEnv {
 
     #[cold]
     #[inline(never)] // called once.
-    #[allow(clippy::items_after_statements)]
     fn open(config: Config) -> Result<Self, InitError> {
-        // TODO: dynamic syncs are not implemented.
+        // SOMEDAY: dynamic syncs are not implemented.
         let durability = match config.sync_mode {
-            // TODO: There's also `redb::Durability::Paranoid`:
+            // FIXME: There's also `redb::Durability::Paranoid`:
             // <https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Paranoid>
             // should we use that instead of Immediate?
             SyncMode::Safe => redb::Durability::Immediate,
             SyncMode::Async => redb::Durability::Eventual,
             SyncMode::Fast => redb::Durability::None,
-            // TODO: dynamic syncs are not implemented.
+            // SOMEDAY: dynamic syncs are not implemented.
             SyncMode::FastThenSafe | SyncMode::Threshold(_) => unimplemented!(),
         };
 
         let env_builder = redb::Builder::new();
 
-        // TODO: we can set cache sizes with:
+        // FIXME: we can set cache sizes with:
         // env_builder.set_cache(bytes);
 
         // Use the in-memory backend if the feature is enabled.
@@ -84,6 +80,7 @@ impl Env for ConcreteEnv {
                 .read(true)
                 .write(true)
                 .create(true)
+                .truncate(false)
                 .open(config.db_file())?;
 
             env_builder.create_file(db_file)?
@@ -95,8 +92,6 @@ impl Env for ConcreteEnv {
 
         /// Function that creates the tables based off the passed `T: Table`.
         fn create_table<T: Table>(tx_rw: &redb::WriteTransaction) -> Result<(), InitError> {
-            println!("create_table(): {}", T::NAME); // TODO: use tracing.
-
             let table: redb::TableDefinition<
                 'static,
                 StorableRedb<<T as Table>::Key>,
@@ -108,32 +103,20 @@ impl Env for ConcreteEnv {
             Ok(())
         }
 
-        use crate::tables::{
-            BlockBlobs, BlockHeights, BlockInfoV1s, BlockInfoV2s, BlockInfoV3s, KeyImages,
-            NumOutputs, Outputs, PrunableHashes, PrunableTxBlobs, PrunedTxBlobs, RctOutputs,
-            TxHeights, TxIds, TxUnlockTime,
-        };
-
-        let tx_rw = env.begin_write()?;
-        create_table::<BlockBlobs>(&tx_rw)?;
-        create_table::<BlockHeights>(&tx_rw)?;
-        create_table::<BlockInfoV1s>(&tx_rw)?;
-        create_table::<BlockInfoV2s>(&tx_rw)?;
-        create_table::<BlockInfoV3s>(&tx_rw)?;
-        create_table::<KeyImages>(&tx_rw)?;
-        create_table::<NumOutputs>(&tx_rw)?;
-        create_table::<Outputs>(&tx_rw)?;
-        create_table::<PrunableHashes>(&tx_rw)?;
-        create_table::<PrunableTxBlobs>(&tx_rw)?;
-        create_table::<PrunedTxBlobs>(&tx_rw)?;
-        create_table::<RctOutputs>(&tx_rw)?;
-        create_table::<TxHeights>(&tx_rw)?;
-        create_table::<TxIds>(&tx_rw)?;
-        create_table::<TxUnlockTime>(&tx_rw)?;
+        // Create all tables.
+        // FIXME: this macro is kinda awkward.
+        let mut tx_rw = env.begin_write()?;
+        {
+            let tx_rw = &mut tx_rw;
+            match call_fn_on_all_tables_or_early_return!(create_table(tx_rw)) {
+                Ok(_) => (),
+                Err(e) => return Err(e),
+            }
+        }
         tx_rw.commit()?;
 
         // Check for file integrity.
-        // TODO: should we do this? is it slow?
+        // FIXME: should we do this? is it slow?
         env.check_integrity()?;
 
         Ok(Self {
diff --git a/database/src/backend/redb/error.rs b/database/src/backend/redb/error.rs
index 28cd6b7e..4d40dbd9 100644
--- a/database/src/backend/redb/error.rs
+++ b/database/src/backend/redb/error.rs
@@ -45,7 +45,7 @@ impl From<redb::StorageError> for InitError {
 
         match error {
             E::Io(e) => Self::Io(e),
-            E::Corrupted(s) => Self::Corrupt,
+            E::Corrupted(_) => Self::Corrupt,
             // HACK: Handle new errors as `redb` adds them.
             _ => Self::Unknown(Box::new(error)),
         }
@@ -56,8 +56,6 @@ impl From<redb::TransactionError> for InitError {
     /// Created by `redb` in:
     /// - [`redb::Database::begin_write`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.begin_write)
     fn from(error: redb::TransactionError) -> Self {
-        use redb::StorageError as E;
-
         match error {
             redb::TransactionError::Storage(error) => error.into(),
             // HACK: Handle new errors as `redb` adds them.
@@ -70,7 +68,6 @@ impl From<redb::TableError> for InitError {
     /// Created by `redb` in:
     /// - [`redb::WriteTransaction::open_table`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.open_table)
     fn from(error: redb::TableError) -> Self {
-        use redb::StorageError as E2;
         use redb::TableError as E;
 
         match error {
@@ -85,8 +82,6 @@ impl From<redb::CommitError> for InitError {
     /// Created by `redb` in:
     /// - [`redb::WriteTransaction::commit`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.commit)
     fn from(error: redb::CommitError) -> Self {
-        use redb::StorageError as E;
-
         match error {
             redb::CommitError::Storage(error) => error.into(),
             // HACK: Handle new errors as `redb` adds them.
@@ -102,8 +97,6 @@ impl From<redb::TransactionError> for RuntimeError {
     /// - [`redb::Database::begin_write`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.begin_write)
     /// - [`redb::Database::begin_read`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.begin_read)
     fn from(error: redb::TransactionError) -> Self {
-        use redb::StorageError as E;
-
         match error {
             redb::TransactionError::Storage(error) => error.into(),
 
@@ -118,8 +111,6 @@ impl From<redb::CommitError> for RuntimeError {
     /// Created by `redb` in:
     /// - [`redb::WriteTransaction::commit`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.commit)
     fn from(error: redb::CommitError) -> Self {
-        use redb::StorageError as E;
-
         match error {
             redb::CommitError::Storage(error) => error.into(),
 
@@ -135,7 +126,6 @@ impl From<redb::TableError> for RuntimeError {
     /// - [`redb::WriteTransaction::open_table`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.open_table)
     /// - [`redb::ReadTransaction::open_table`](https://docs.rs/redb/1.5.0/redb/struct.ReadTransaction.html#method.open_table)
     fn from(error: redb::TableError) -> Self {
-        use redb::StorageError as E2;
         use redb::TableError as E;
 
         match error {
diff --git a/database/src/backend/redb/storable.rs b/database/src/backend/redb/storable.rs
index 64e7d062..6735fec0 100644
--- a/database/src/backend/redb/storable.rs
+++ b/database/src/backend/redb/storable.rs
@@ -1,7 +1,7 @@
 //! `cuprate_database::Storable` <-> `redb` serde trait compatibility layer.
 
 //---------------------------------------------------------------------------------------------------- Use
-use std::{any::Any, borrow::Cow, cmp::Ordering, fmt::Debug, marker::PhantomData};
+use std::{cmp::Ordering, fmt::Debug, marker::PhantomData};
 
 use redb::TypeName;
 
diff --git a/database/src/backend/redb/transaction.rs b/database/src/backend/redb/transaction.rs
index 21a22ec4..5048851d 100644
--- a/database/src/backend/redb/transaction.rs
+++ b/database/src/backend/redb/transaction.rs
@@ -2,8 +2,6 @@
 
 //---------------------------------------------------------------------------------------------------- Import
 use crate::{
-    config::SyncMode,
-    env::Env,
     error::RuntimeError,
     transaction::{TxRo, TxRw},
 };
diff --git a/database/src/backend/redb/types.rs b/database/src/backend/redb/types.rs
index 1890af17..f3534c55 100644
--- a/database/src/backend/redb/types.rs
+++ b/database/src/backend/redb/types.rs
@@ -1,7 +1,7 @@
 //! `redb` type aliases.
 
 //---------------------------------------------------------------------------------------------------- Types
-use crate::{backend::redb::storable::StorableRedb, table::Table};
+use crate::backend::redb::storable::StorableRedb;
 
 //---------------------------------------------------------------------------------------------------- Types
 /// The concrete type for readable `redb` tables.
diff --git a/database/src/backend/tests.rs b/database/src/backend/tests.rs
index 50f92644..03d06c69 100644
--- a/database/src/backend/tests.rs
+++ b/database/src/backend/tests.rs
@@ -13,50 +13,31 @@
 //!
 //! `redb`, and it only must be enabled for it to be tested.
 
-#![allow(
-    clippy::items_after_statements,
-    clippy::significant_drop_tightening,
-    clippy::cast_possible_truncation
-)]
-
 //---------------------------------------------------------------------------------------------------- Import
-use std::borrow::{Borrow, Cow};
 
 use crate::{
-    config::{Config, SyncMode},
     database::{DatabaseIter, DatabaseRo, DatabaseRw},
     env::{Env, EnvInner},
-    error::{InitError, RuntimeError},
+    error::RuntimeError,
     resize::ResizeAlgorithm,
     storable::StorableVec,
-    table::Table,
     tables::{
-        BlockBlobs, BlockHeights, BlockInfoV1s, BlockInfoV2s, BlockInfoV3s, KeyImages, NumOutputs,
-        Outputs, PrunableHashes, PrunableTxBlobs, PrunedTxBlobs, RctOutputs, TxHeights, TxIds,
+        BlockBlobs, BlockHeights, BlockInfos, KeyImages, NumOutputs, Outputs, PrunableHashes,
+        PrunableTxBlobs, PrunedTxBlobs, RctOutputs, TxBlobs, TxHeights, TxIds, TxOutputs,
         TxUnlockTime,
     },
+    tables::{TablesIter, TablesMut},
+    tests::tmp_concrete_env,
     transaction::{TxRo, TxRw},
     types::{
-        Amount, AmountIndex, AmountIndices, BlockBlob, BlockHash, BlockHeight, BlockInfoV1,
-        BlockInfoV2, BlockInfoV3, KeyImage, Output, PreRctOutputId, PrunableBlob, PrunableHash,
-        PrunedBlob, RctOutput, TxHash, TxId, UnlockTime,
+        Amount, AmountIndex, AmountIndices, BlockBlob, BlockHash, BlockHeight, BlockInfo, KeyImage,
+        Output, OutputFlags, PreRctOutputId, PrunableBlob, PrunableHash, PrunedBlob, RctOutput,
+        TxBlob, TxHash, TxId, UnlockTime,
     },
     ConcreteEnv,
 };
 
 //---------------------------------------------------------------------------------------------------- Tests
-/// Create an `Env` in a temporarily directory.
-/// The directory is automatically removed after the `TempDir` is dropped.
-///
-/// TODO: changing this to `-> impl Env` causes lifetime errors...
-fn tmp_concrete_env() -> (ConcreteEnv, tempfile::TempDir) {
-    let tempdir = tempfile::tempdir().unwrap();
-    let config = Config::low_power(Some(tempdir.path().into()));
-    let env = ConcreteEnv::open(config).unwrap();
-
-    (env, tempdir)
-}
-
 /// Simply call [`Env::open`]. If this fails, something is really wrong.
 #[test]
 fn open() {
@@ -87,9 +68,7 @@ fn open_db() {
     // This should be updated when tables are modified.
     env_inner.open_db_ro::<BlockBlobs>(&tx_ro).unwrap();
     env_inner.open_db_ro::<BlockHeights>(&tx_ro).unwrap();
-    env_inner.open_db_ro::<BlockInfoV1s>(&tx_ro).unwrap();
-    env_inner.open_db_ro::<BlockInfoV2s>(&tx_ro).unwrap();
-    env_inner.open_db_ro::<BlockInfoV3s>(&tx_ro).unwrap();
+    env_inner.open_db_ro::<BlockInfos>(&tx_ro).unwrap();
     env_inner.open_db_ro::<KeyImages>(&tx_ro).unwrap();
     env_inner.open_db_ro::<NumOutputs>(&tx_ro).unwrap();
     env_inner.open_db_ro::<Outputs>(&tx_ro).unwrap();
@@ -97,17 +76,17 @@ fn open_db() {
     env_inner.open_db_ro::<PrunableTxBlobs>(&tx_ro).unwrap();
     env_inner.open_db_ro::<PrunedTxBlobs>(&tx_ro).unwrap();
     env_inner.open_db_ro::<RctOutputs>(&tx_ro).unwrap();
+    env_inner.open_db_ro::<TxBlobs>(&tx_ro).unwrap();
     env_inner.open_db_ro::<TxHeights>(&tx_ro).unwrap();
     env_inner.open_db_ro::<TxIds>(&tx_ro).unwrap();
+    env_inner.open_db_ro::<TxOutputs>(&tx_ro).unwrap();
     env_inner.open_db_ro::<TxUnlockTime>(&tx_ro).unwrap();
     TxRo::commit(tx_ro).unwrap();
 
     // Open all tables in read/write mode.
     env_inner.open_db_rw::<BlockBlobs>(&tx_rw).unwrap();
     env_inner.open_db_rw::<BlockHeights>(&tx_rw).unwrap();
-    env_inner.open_db_rw::<BlockInfoV1s>(&tx_rw).unwrap();
-    env_inner.open_db_rw::<BlockInfoV2s>(&tx_rw).unwrap();
-    env_inner.open_db_rw::<BlockInfoV3s>(&tx_rw).unwrap();
+    env_inner.open_db_rw::<BlockInfos>(&tx_rw).unwrap();
     env_inner.open_db_rw::<KeyImages>(&tx_rw).unwrap();
     env_inner.open_db_rw::<NumOutputs>(&tx_rw).unwrap();
     env_inner.open_db_rw::<Outputs>(&tx_rw).unwrap();
@@ -115,8 +94,10 @@ fn open_db() {
     env_inner.open_db_rw::<PrunableTxBlobs>(&tx_rw).unwrap();
     env_inner.open_db_rw::<PrunedTxBlobs>(&tx_rw).unwrap();
     env_inner.open_db_rw::<RctOutputs>(&tx_rw).unwrap();
+    env_inner.open_db_rw::<TxBlobs>(&tx_rw).unwrap();
     env_inner.open_db_rw::<TxHeights>(&tx_rw).unwrap();
     env_inner.open_db_rw::<TxIds>(&tx_rw).unwrap();
+    env_inner.open_db_rw::<TxOutputs>(&tx_rw).unwrap();
     env_inner.open_db_rw::<TxUnlockTime>(&tx_rw).unwrap();
     TxRw::commit(tx_rw).unwrap();
 }
@@ -166,7 +147,6 @@ fn non_manual_resize_2() {
 
 /// Test all `DatabaseR{o,w}` operations.
 #[test]
-#[allow(clippy::too_many_lines)]
 fn db_read_write() {
     let (env, _tempdir) = tmp_concrete_env();
     let env_inner = env.env_inner();
@@ -182,7 +162,7 @@ fn db_read_write() {
     const VALUE: Output = Output {
         key: [35; 32],
         height: 45_761_798,
-        output_flags: 0,
+        output_flags: OutputFlags::empty(),
         tx_idx: 2_353_487,
     };
     /// How many `(key, value)` pairs will be inserted.
@@ -202,7 +182,7 @@ fn db_read_write() {
 
     // Insert keys.
     let mut key = KEY;
-    for i in 0..N {
+    for _ in 0..N {
         table.put(&key, &VALUE).unwrap();
         key.amount += 1;
     }
@@ -271,6 +251,22 @@ fn db_read_write() {
         }
     }
 
+    // Assert `update()` works.
+    {
+        const HEIGHT: u32 = 999;
+
+        assert_ne!(table.get(&KEY).unwrap().height, HEIGHT);
+
+        table
+            .update(&KEY, |mut value| {
+                value.height = HEIGHT;
+                Some(value)
+            })
+            .unwrap();
+
+        assert_eq!(table.get(&KEY).unwrap().height, HEIGHT);
+    }
+
     // Assert deleting works.
     {
         table.delete(&KEY).unwrap();
@@ -284,6 +280,23 @@ fn db_read_write() {
         assert_same(value);
     }
 
+    // Assert `take()` works.
+    {
+        let mut key = KEY;
+        key.amount += 1;
+        let value = table.take(&key).unwrap();
+        assert_eq!(value, VALUE);
+
+        let get = table.get(&KEY);
+        assert!(!table.contains(&key).unwrap());
+        assert!(matches!(get, Err(RuntimeError::KeyNotFound)));
+
+        // Assert the other `(key, value)` pairs are still there.
+        key.amount += 1;
+        let value = table.get(&key).unwrap();
+        assert_same(value);
+    }
+
     drop(table);
     TxRw::commit(tx_rw).unwrap();
 
@@ -309,6 +322,60 @@ fn db_read_write() {
     }
 }
 
+/// Assert that `key`'s in database tables are sorted in
+/// an ordered B-Tree fashion, i.e. `min_value -> max_value`.
+#[test]
+fn tables_are_sorted() {
+    let (env, _tmp) = tmp_concrete_env();
+    let env_inner = env.env_inner();
+    let tx_rw = env_inner.tx_rw().unwrap();
+    let mut tables_mut = env_inner.open_tables_mut(&tx_rw).unwrap();
+
+    // Insert `{5, 4, 3, 2, 1, 0}`, assert each new
+    // number inserted is the minimum `first()` value.
+    for key in (0..6).rev() {
+        tables_mut.num_outputs_mut().put(&key, &123).unwrap();
+        let (first, _) = tables_mut.num_outputs_mut().first().unwrap();
+        assert_eq!(first, key);
+    }
+
+    drop(tables_mut);
+    TxRw::commit(tx_rw).unwrap();
+    let tx_rw = env_inner.tx_rw().unwrap();
+
+    // Assert iterators are ordered.
+    {
+        let tx_ro = env_inner.tx_ro().unwrap();
+        let tables = env_inner.open_tables(&tx_ro).unwrap();
+        let t = tables.num_outputs_iter();
+        let iter = t.iter().unwrap();
+        let keys = t.keys().unwrap();
+        for ((i, iter), key) in (0..6).zip(iter).zip(keys) {
+            let (iter, _) = iter.unwrap();
+            let key = key.unwrap();
+            assert_eq!(i, iter);
+            assert_eq!(iter, key);
+        }
+    }
+
+    let mut tables_mut = env_inner.open_tables_mut(&tx_rw).unwrap();
+    let t = tables_mut.num_outputs_mut();
+
+    // Assert the `first()` values are the minimum, i.e. `{0, 1, 2}`
+    for key in 0..3 {
+        let (first, _) = t.first().unwrap();
+        assert_eq!(first, key);
+        t.delete(&key).unwrap();
+    }
+
+    // Assert the `last()` values are the maximum, i.e. `{5, 4, 3}`
+    for key in (3..6).rev() {
+        let (last, _) = tables_mut.num_outputs_mut().last().unwrap();
+        assert_eq!(last, key);
+        tables_mut.num_outputs_mut().delete(&key).unwrap();
+    }
+}
+
 //---------------------------------------------------------------------------------------------------- Table Tests
 /// Test multiple tables and their key + values.
 ///
@@ -406,35 +473,14 @@ test_tables! {
     BlockHash => BlockHeight,
     [32; 32] => 123,
 
-    BlockInfoV1s,
-    BlockHeight => BlockInfoV1,
-    123 => BlockInfoV1 {
+    BlockInfos,
+    BlockHeight => BlockInfo,
+    123 => BlockInfo {
         timestamp: 1,
-        total_generated_coins: 123,
-        weight: 321,
-        cumulative_difficulty: 111,
-        block_hash: [54; 32],
-    },
-
-    BlockInfoV2s,
-    BlockHeight => BlockInfoV2,
-    123 => BlockInfoV2 {
-        timestamp: 1,
-        total_generated_coins: 123,
-        weight: 321,
-        cumulative_difficulty: 111,
-        cumulative_rct_outs: 2389,
-        block_hash: [54; 32],
-    },
-
-    BlockInfoV3s,
-    BlockHeight => BlockInfoV3,
-    123 => BlockInfoV3 {
-        timestamp: 1,
-        total_generated_coins: 123,
+        cumulative_generated_coins: 123,
         weight: 321,
         cumulative_difficulty_low: 111,
-        cumulative_difficulty_high: 112,
+        cumulative_difficulty_high: 111,
         block_hash: [54; 32],
         cumulative_rct_outs: 2389,
         long_term_weight: 2389,
@@ -448,6 +494,10 @@ test_tables! {
     Amount => AmountIndex,
     123 => 123,
 
+    TxBlobs,
+    TxId => TxBlob,
+    123 => StorableVec(vec![1,2,3,4,5,6,7,8]),
+
     TxIds,
     TxHash => TxId,
     [32; 32] => 123,
@@ -456,6 +506,10 @@ test_tables! {
     TxId => BlockHeight,
     123 => 123,
 
+    TxOutputs,
+    TxId => AmountIndices,
+    123 => StorableVec(vec![1,2,3,4,5,6,7,8]),
+
     TxUnlockTime,
     TxId => UnlockTime,
     123 => 123,
@@ -468,7 +522,7 @@ test_tables! {
     } => Output {
         key: [1; 32],
         height: 1,
-        output_flags: 0,
+        output_flags: OutputFlags::empty(),
         tx_idx: 3,
     },
 
@@ -489,7 +543,7 @@ test_tables! {
     123 => RctOutput {
         key: [1; 32],
         height: 1,
-        output_flags: 0,
+        output_flags: OutputFlags::empty(),
         tx_idx: 3,
         commitment: [3; 32],
     },
diff --git a/database/src/config/backend.rs b/database/src/config/backend.rs
index ed826344..4bbb12ca 100644
--- a/database/src/config/backend.rs
+++ b/database/src/config/backend.rs
@@ -1,4 +1,4 @@
-//! TODO
+//! SOMEDAY
 
 //---------------------------------------------------------------------------------------------------- Import
 use std::{
@@ -19,13 +19,13 @@ use crate::{
 };
 
 //---------------------------------------------------------------------------------------------------- Backend
-/// TODO
+/// SOMEDAY: allow runtime hot-swappable backends.
 #[derive(Copy, Clone, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub enum Backend {
     #[default]
-    /// TODO
+    /// SOMEDAY
     Heed,
-    /// TODO
+    /// SOMEDAY
     Redb,
 }
diff --git a/database/src/config/config.rs b/database/src/config/config.rs
index 1791a540..d712cb69 100644
--- a/database/src/config/config.rs
+++ b/database/src/config/config.rs
@@ -1,17 +1,8 @@
-//! Database [`Env`](crate::Env) configuration.
-//!
-//! This module contains the main [`Config`]uration struct
-//! for the database [`Env`](crate::Env)ironment, and data
-//! structures related to any configuration setting.
-//!
-//! These configurations are processed at runtime, meaning
-//! the `Env` can/will dynamically adjust its behavior
-//! based on these values.
+//! The main [`Config`] struct, holding all configurable values.
 
 //---------------------------------------------------------------------------------------------------- Import
 use std::{
     borrow::Cow,
-    num::NonZeroUsize,
     path::{Path, PathBuf},
 };
 
@@ -26,13 +17,143 @@ use crate::{
     resize::ResizeAlgorithm,
 };
 
+//---------------------------------------------------------------------------------------------------- ConfigBuilder
+/// Builder for [`Config`].
+///
+// SOMEDAY: there's are many more options to add in the future.
+#[derive(Debug, Clone, PartialEq, PartialOrd)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct ConfigBuilder {
+    /// [`Config::db_directory`].
+    db_directory: Option<Cow<'static, Path>>,
+
+    /// [`Config::sync_mode`].
+    sync_mode: Option<SyncMode>,
+
+    /// [`Config::reader_threads`].
+    reader_threads: Option<ReaderThreads>,
+
+    /// [`Config::resize_algorithm`].
+    resize_algorithm: Option<ResizeAlgorithm>,
+}
+
+impl ConfigBuilder {
+    /// Create a new [`ConfigBuilder`].
+    ///
+    /// [`ConfigBuilder::build`] can be called immediately
+    /// after this function to use default values.
+    pub const fn new() -> Self {
+        Self {
+            db_directory: None,
+            sync_mode: None,
+            reader_threads: None,
+            resize_algorithm: None,
+        }
+    }
+
+    /// Build into a [`Config`].
+    ///
+    /// # Default values
+    /// If [`ConfigBuilder::db_directory`] was not called,
+    /// the default [`cuprate_database_dir`] will be used.
+    ///
+    /// For all other values, [`Default::default`] is used.
+    pub fn build(self) -> Config {
+        // INVARIANT: all PATH safety checks are done
+        // in `helper::fs`. No need to do them here.
+        let db_directory = self
+            .db_directory
+            .unwrap_or_else(|| Cow::Borrowed(cuprate_database_dir()));
+
+        // Add the database filename to the directory.
+        let db_file = {
+            let mut db_file = db_directory.to_path_buf();
+            db_file.push(DATABASE_DATA_FILENAME);
+            Cow::Owned(db_file)
+        };
+
+        Config {
+            db_directory,
+            db_file,
+            sync_mode: self.sync_mode.unwrap_or_default(),
+            reader_threads: self.reader_threads.unwrap_or_default(),
+            resize_algorithm: self.resize_algorithm.unwrap_or_default(),
+        }
+    }
+
+    /// Set a custom database directory (and file) [`Path`].
+    #[must_use]
+    pub fn db_directory(mut self, db_directory: PathBuf) -> Self {
+        self.db_directory = Some(Cow::Owned(db_directory));
+        self
+    }
+
+    /// Tune the [`ConfigBuilder`] for the highest performing,
+    /// but also most resource-intensive & maybe risky settings.
+    ///
+    /// Good default for testing, and resource-available machines.
+    #[must_use]
+    pub fn fast(mut self) -> Self {
+        self.sync_mode = Some(SyncMode::Fast);
+        self.reader_threads = Some(ReaderThreads::OnePerThread);
+        self.resize_algorithm = Some(ResizeAlgorithm::default());
+        self
+    }
+
+    /// Tune the [`ConfigBuilder`] for the lowest performing,
+    /// but also least resource-intensive settings.
+    ///
+    /// Good default for resource-limited machines, e.g. a cheap VPS.
+    #[must_use]
+    pub fn low_power(mut self) -> Self {
+        self.sync_mode = Some(SyncMode::default());
+        self.reader_threads = Some(ReaderThreads::One);
+        self.resize_algorithm = Some(ResizeAlgorithm::default());
+        self
+    }
+
+    /// Set a custom [`SyncMode`].
+    #[must_use]
+    pub const fn sync_mode(mut self, sync_mode: SyncMode) -> Self {
+        self.sync_mode = Some(sync_mode);
+        self
+    }
+
+    /// Set a custom [`ReaderThreads`].
+    #[must_use]
+    pub const fn reader_threads(mut self, reader_threads: ReaderThreads) -> Self {
+        self.reader_threads = Some(reader_threads);
+        self
+    }
+
+    /// Set a custom [`ResizeAlgorithm`].
+    #[must_use]
+    pub const fn resize_algorithm(mut self, resize_algorithm: ResizeAlgorithm) -> Self {
+        self.resize_algorithm = Some(resize_algorithm);
+        self
+    }
+}
+
+impl Default for ConfigBuilder {
+    fn default() -> Self {
+        Self {
+            db_directory: Some(Cow::Borrowed(cuprate_database_dir())),
+            sync_mode: Some(SyncMode::default()),
+            reader_threads: Some(ReaderThreads::default()),
+            resize_algorithm: Some(ResizeAlgorithm::default()),
+        }
+    }
+}
+
 //---------------------------------------------------------------------------------------------------- Config
 /// Database [`Env`](crate::Env) configuration.
 ///
 /// This is the struct passed to [`Env::open`](crate::Env::open) that
 /// allows the database to be configured in various ways.
 ///
-/// TODO: there's probably more options to add.
+/// For construction, either use [`ConfigBuilder`] or [`Config::default`].
+///
+// SOMEDAY: there's are many more options to add in the future.
 #[derive(Debug, Clone, PartialEq, PartialOrd)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub struct Config {
@@ -44,8 +165,8 @@ pub struct Config {
     /// By default, if no value is provided in the [`Config`]
     /// constructor functions, this will be [`cuprate_database_dir`].
     ///
-    /// TODO: we should also support `/etc/cuprated.conf`.
-    /// This could be represented with an `enum DbPath { Default, Custom, Etc, }`
+    // SOMEDAY: we should also support `/etc/cuprated.conf`.
+    // This could be represented with an `enum DbPath { Default, Custom, Etc, }`
     pub(crate) db_directory: Cow<'static, Path>,
     /// The actual database data file.
     ///
@@ -67,111 +188,50 @@ pub struct Config {
 }
 
 impl Config {
-    /// Private function to acquire [`Config::db_file`]
-    /// from the user provided (or default) [`Config::db_directory`].
-    ///
-    /// As the database data file PATH is just the directory + the filename,
-    /// we only need the directory from the user/Config, and can add it here.
-    fn return_db_dir_and_file(
-        db_directory: Option<PathBuf>,
-    ) -> (Cow<'static, Path>, Cow<'static, Path>) {
-        // INVARIANT: all PATH safety checks are done
-        // in `helper::fs`. No need to do them here.
-        let db_directory =
-            db_directory.map_or_else(|| Cow::Borrowed(cuprate_database_dir()), Cow::Owned);
-
-        // Add the database filename to the directory.
-        let mut db_file = db_directory.to_path_buf();
-        db_file.push(DATABASE_DATA_FILENAME);
-
-        (db_directory, Cow::Owned(db_file))
-    }
-
     /// Create a new [`Config`] with sane default settings.
     ///
-    /// # `db_directory`
-    /// If this is `Some`, it will be used as the
-    /// directory that contains all database files.
+    /// The [`Config::db_directory`] will be [`cuprate_database_dir`].
     ///
-    /// If `None`, it will use the default directory [`cuprate_database_dir`].
-    pub fn new(db_directory: Option<PathBuf>) -> Self {
-        let (db_directory, db_file) = Self::return_db_dir_and_file(db_directory);
-        Self {
-            db_directory,
-            db_file,
-            sync_mode: SyncMode::default(),
-            reader_threads: ReaderThreads::OnePerThread,
-            resize_algorithm: ResizeAlgorithm::default(),
-        }
-    }
-
-    /// Create a [`Config`] with the highest performing,
-    /// but also most resource-intensive & maybe risky settings.
+    /// All other values will be [`Default::default`].
     ///
-    /// Good default for testing, and resource-available machines.
+    /// Same as [`Config::default`].
     ///
-    /// # `db_directory`
-    /// If this is `Some`, it will be used as the
-    /// directory that contains all database files.
+    /// ```rust
+    /// use cuprate_database::{config::*, resize::*, DATABASE_DATA_FILENAME};
+    /// use cuprate_helper::fs::*;
     ///
-    /// If `None`, it will use the default directory [`cuprate_database_dir`].
-    pub fn fast(db_directory: Option<PathBuf>) -> Self {
-        let (db_directory, db_file) = Self::return_db_dir_and_file(db_directory);
-        Self {
-            db_directory,
-            db_file,
-            sync_mode: SyncMode::Fast,
-            reader_threads: ReaderThreads::OnePerThread,
-            resize_algorithm: ResizeAlgorithm::default(),
-        }
-    }
-
-    /// Create a [`Config`] with the lowest performing,
-    /// but also least resource-intensive settings.
+    /// let config = Config::new();
     ///
-    /// Good default for resource-limited machines, e.g. a cheap VPS.
-    ///
-    /// # `db_directory`
-    /// If this is `Some`, it will be used as the
-    /// directory that contains all database files.
-    ///
-    /// If `None`, it will use the default directory [`cuprate_database_dir`].
-    pub fn low_power(db_directory: Option<PathBuf>) -> Self {
-        let (db_directory, db_file) = Self::return_db_dir_and_file(db_directory);
-        Self {
-            db_directory,
-            db_file,
-            sync_mode: SyncMode::default(),
-            reader_threads: ReaderThreads::One,
-            resize_algorithm: ResizeAlgorithm::default(),
-        }
+    /// assert_eq!(config.db_directory(), cuprate_database_dir());
+    /// assert!(config.db_file().starts_with(cuprate_database_dir()));
+    /// assert!(config.db_file().ends_with(DATABASE_DATA_FILENAME));
+    /// assert_eq!(config.sync_mode, SyncMode::default());
+    /// assert_eq!(config.reader_threads, ReaderThreads::default());
+    /// assert_eq!(config.resize_algorithm, ResizeAlgorithm::default());
+    /// ```
+    pub fn new() -> Self {
+        ConfigBuilder::default().build()
     }
 
     /// Return the absolute [`Path`] to the database directory.
-    ///
-    /// This will be the `db_directory` given
-    /// (or default) during [`Config`] construction.
     pub const fn db_directory(&self) -> &Cow<'_, Path> {
         &self.db_directory
     }
 
     /// Return the absolute [`Path`] to the database data file.
-    ///
-    /// This will be based off the `db_directory` given
-    /// (or default) during [`Config`] construction.
     pub const fn db_file(&self) -> &Cow<'_, Path> {
         &self.db_file
     }
 }
 
 impl Default for Config {
-    /// Same as `Self::new(None)`.
+    /// Same as [`Config::new`].
     ///
     /// ```rust
     /// # use cuprate_database::config::*;
-    /// assert_eq!(Config::default(), Config::new(None));
+    /// assert_eq!(Config::default(), Config::new());
     /// ```
     fn default() -> Self {
-        Self::new(None)
+        Self::new()
     }
 }
diff --git a/database/src/config/mod.rs b/database/src/config/mod.rs
index a8da828c..dfa4f674 100644
--- a/database/src/config/mod.rs
+++ b/database/src/config/mod.rs
@@ -1,7 +1,44 @@
-//! TODO
+//! Database [`Env`](crate::Env) configuration.
+//!
+//! This module contains the main [`Config`]uration struct
+//! for the database [`Env`](crate::Env)ironment, and types
+//! related to configuration settings.
+//!
+//! The main constructor is the [`ConfigBuilder`].
+//!
+//! These configurations are processed at runtime, meaning
+//! the `Env` can/will dynamically adjust its behavior
+//! based on these values.
+//!
+//! # Example
+//! ```rust
+//! use cuprate_database::{
+//!     Env,
+//!     config::{ConfigBuilder, ReaderThreads, SyncMode}
+//! };
+//!
+//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! let db_dir = tempfile::tempdir()?;
+//!
+//! let config = ConfigBuilder::new()
+//!     // Use a custom database directory.
+//!     .db_directory(db_dir.path().to_path_buf())
+//!     // Use as many reader threads as possible (when using `service`).
+//!     .reader_threads(ReaderThreads::OnePerThread)
+//!     // Use the fastest sync mode.
+//!     .sync_mode(SyncMode::Fast)
+//!     // Build into `Config`
+//!     .build();
+//!
+//! // Start a database `service` using this configuration.
+//! let (reader_handle, _) = cuprate_database::service::init(config.clone())?;
+//! // It's using the config we provided.
+//! assert_eq!(reader_handle.env().config(), &config);
+//! # Ok(()) }
+//! ```
 
 mod config;
-pub use config::Config;
+pub use config::{Config, ConfigBuilder};
 
 mod reader_threads;
 pub use reader_threads::ReaderThreads;
diff --git a/database/src/config/reader_threads.rs b/database/src/config/reader_threads.rs
index 0dc35581..34b20a88 100644
--- a/database/src/config/reader_threads.rs
+++ b/database/src/config/reader_threads.rs
@@ -9,25 +9,19 @@
 //! based on these values.
 
 //---------------------------------------------------------------------------------------------------- Import
-use std::{
-    borrow::Cow,
-    num::NonZeroUsize,
-    path::{Path, PathBuf},
-};
+use std::num::NonZeroUsize;
 
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 
-use cuprate_helper::fs::cuprate_database_dir;
-
-use crate::{constants::DATABASE_DATA_FILENAME, resize::ResizeAlgorithm};
-
 //---------------------------------------------------------------------------------------------------- ReaderThreads
-/// Amount of database reader threads to spawn.
+/// Amount of database reader threads to spawn when using [`service`](crate::service).
 ///
-/// This controls how many reader thread [`crate::service`]'s
+/// This controls how many reader thread `service`'s
 /// thread-pool will spawn to receive and send requests/responses.
 ///
+/// It does nothing outside of `service`.
+///
 /// It will always be at least 1, up until the amount of threads on the machine.
 ///
 /// The main function used to extract an actual
@@ -38,8 +32,8 @@ pub enum ReaderThreads {
     #[default]
     /// Spawn 1 reader thread per available thread on the machine.
     ///
-    /// For example, a `16-core, 32-thread` Ryzen 5950x will
-    /// spawn `32` reader threads using this setting.
+    /// For example, a `32-thread` system will spawn
+    /// `32` reader threads using this setting.
     OnePerThread,
 
     /// Only spawn 1 reader thread.
diff --git a/database/src/config/sync_mode.rs b/database/src/config/sync_mode.rs
index 7dba062a..1d203396 100644
--- a/database/src/config/sync_mode.rs
+++ b/database/src/config/sync_mode.rs
@@ -9,19 +9,10 @@
 //! based on these values.
 
 //---------------------------------------------------------------------------------------------------- Import
-use std::{
-    borrow::Cow,
-    num::NonZeroUsize,
-    path::{Path, PathBuf},
-};
 
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 
-use cuprate_helper::fs::cuprate_database_dir;
-
-use crate::{constants::DATABASE_DATA_FILENAME, resize::ResizeAlgorithm};
-
 //---------------------------------------------------------------------------------------------------- SyncMode
 /// Disk synchronization mode.
 ///
@@ -48,7 +39,7 @@ use crate::{constants::DATABASE_DATA_FILENAME, resize::ResizeAlgorithm};
 /// ```
 /// will be fine, most likely pulling from memory instead of disk.
 ///
-/// # TODO
+/// # SOMEDAY
 /// Dynamic sync's are not yet supported.
 ///
 /// Only:
@@ -64,24 +55,24 @@ pub enum SyncMode {
     /// Use [`SyncMode::Fast`] until fully synced,
     /// then use [`SyncMode::Safe`].
     ///
-    /// # TODO: how to implement this?
-    /// ref: <https://github.com/monero-project/monero/issues/1463>
-    /// monerod-solution: <https://github.com/monero-project/monero/pull/1506>
-    /// cuprate-issue: <https://github.com/Cuprate/cuprate/issues/78>
-    ///
-    /// We could:
-    /// ```rust,ignore
-    /// if current_db_block <= top_block.saturating_sub(N) {
-    ///     // don't sync()
-    /// } else {
-    ///     // sync()
-    /// }
-    /// ```
-    /// where N is some threshold we pick that is _close_ enough
-    /// to being synced where we want to start being safer.
-    ///
-    /// Essentially, when we are in a certain % range of being finished,
-    /// switch to safe mode, until then, go fast.
+    // # SOMEDAY: how to implement this?
+    // ref: <https://github.com/monero-project/monero/issues/1463>
+    // monerod-solution: <https://github.com/monero-project/monero/pull/1506>
+    // cuprate-issue: <https://github.com/Cuprate/cuprate/issues/78>
+    //
+    // We could:
+    // ```rust,ignore
+    // if current_db_block <= top_block.saturating_sub(N) {
+    //     // don't sync()
+    // } else {
+    //     // sync()
+    // }
+    // ```
+    // where N is some threshold we pick that is _close_ enough
+    // to being synced where we want to start being safer.
+    //
+    // Essentially, when we are in a certain % range of being finished,
+    // switch to safe mode, until then, go fast.
     FastThenSafe,
 
     #[default]
@@ -136,7 +127,7 @@ pub enum SyncMode {
     /// In the case of a system crash, the database
     /// may become corrupted when using this option.
     //
-    // TODO: we could call this `unsafe`
+    // FIXME: we could call this `unsafe`
     // and use that terminology in the config file
     // so users know exactly what they are getting
     // themselves into.
diff --git a/database/src/constants.rs b/database/src/constants.rs
index 37a3efaf..667e36cb 100644
--- a/database/src/constants.rs
+++ b/database/src/constants.rs
@@ -3,6 +3,18 @@
 //---------------------------------------------------------------------------------------------------- Import
 use cfg_if::cfg_if;
 
+//---------------------------------------------------------------------------------------------------- Version
+/// Current major version of the database.
+///
+/// Returned by [`crate::ops::property::db_version`].
+///
+/// This is incremented by 1 when `cuprate_database`'s
+/// structure/schema/tables change.
+///
+/// This is akin to `VERSION` in `monerod`:
+/// <https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/blockchain_db/lmdb/db_lmdb.cpp#L57>
+pub const DATABASE_VERSION: u64 = 0;
+
 //---------------------------------------------------------------------------------------------------- Error Messages
 /// Corrupt database error message.
 ///
@@ -23,8 +35,8 @@ TODO: instructions on:
 ///
 /// | Backend | Value |
 /// |---------|-------|
-/// | `heed`  | "heed"
-/// | `redb`  | "redb"
+/// | `heed`  | `"heed"`
+/// | `redb`  | `"redb"`
 pub const DATABASE_BACKEND: &str = {
     cfg_if! {
         if #[cfg(all(feature = "redb", not(feature = "heed")))] {
@@ -41,8 +53,8 @@ pub const DATABASE_BACKEND: &str = {
 ///
 /// | Backend | Value |
 /// |---------|-------|
-/// | `heed`  | "data.mdb"
-/// | `redb`  | "data.redb"
+/// | `heed`  | `"data.mdb"`
+/// | `redb`  | `"data.redb"`
 pub const DATABASE_DATA_FILENAME: &str = {
     cfg_if! {
         if #[cfg(all(feature = "redb", not(feature = "heed")))] {
@@ -57,8 +69,8 @@ pub const DATABASE_DATA_FILENAME: &str = {
 ///
 /// | Backend | Value |
 /// |---------|-------|
-/// | `heed`  | Some("lock.mdb")
-/// | `redb`  | None (redb doesn't use a file lock)
+/// | `heed`  | `Some("lock.mdb")`
+/// | `redb`  | `None` (redb doesn't use a file lock)
 pub const DATABASE_LOCK_FILENAME: Option<&str> = {
     cfg_if! {
         if #[cfg(all(feature = "redb", not(feature = "heed")))] {
diff --git a/database/src/database.rs b/database/src/database.rs
index df62414b..4a45f7cc 100644
--- a/database/src/database.rs
+++ b/database/src/database.rs
@@ -1,33 +1,38 @@
-//! Abstracted database; `trait DatabaseRo` & `trait DatabaseRw`.
+//! Abstracted database table operations; `trait DatabaseRo` & `trait DatabaseRw`.
 
 //---------------------------------------------------------------------------------------------------- Import
-use std::{
-    borrow::{Borrow, Cow},
-    fmt::Debug,
-    ops::{Deref, RangeBounds},
-};
+use std::ops::RangeBounds;
 
-use crate::{
-    error::RuntimeError,
-    table::Table,
-    transaction::{TxRo, TxRw},
-};
+use crate::{error::RuntimeError, table::Table};
+
+//---------------------------------------------------------------------------------------------------- DatabaseIter
+/// Generic post-fix documentation for `DatabaseIter` methods.
+macro_rules! doc_iter {
+    () => {
+        r"Although the returned iterator itself is tied to the lifetime
+of `&self`, the returned values from the iterator are _owned_.
+
+# Errors
+The construction of the iterator itself may error.
+
+Each iteration of the iterator has the potential to error as well."
+    };
+}
 
-//---------------------------------------------------------------------------------------------------- DatabaseRoIter
 /// Database (key-value store) read-only iteration abstraction.
 ///
 /// These are read-only iteration-related operations that
 /// can only be called from [`DatabaseRo`] objects.
 ///
 /// # Hack
-/// This is a HACK to get around the fact our read/write tables
+/// This is a HACK to get around the fact [`DatabaseRw`] tables
 /// cannot safely return values returning lifetimes, as such,
 /// only read-only tables implement this trait.
 ///
 /// - <https://github.com/Cuprate/cuprate/pull/102#discussion_r1548695610>
 /// - <https://github.com/Cuprate/cuprate/pull/104>
 pub trait DatabaseIter<T: Table> {
-    /// Get an iterator of value's corresponding to a range of keys.
+    /// Get an [`Iterator`] of value's corresponding to a range of keys.
     ///
     /// For example:
     /// ```rust,ignore
@@ -39,12 +44,7 @@ pub trait DatabaseIter<T: Table> {
     /// Although the returned iterator itself is tied to the lifetime
     /// of `&'a self`, the returned values from the iterator are _owned_.
     ///
-    /// # Errors
-    /// Each key in the `range` has the potential to error, for example,
-    /// if a particular key in the `range` does not exist,
-    /// [`RuntimeError::KeyNotFound`] wrapped in [`Err`] will be returned
-    /// from the iterator.
-    #[allow(clippy::iter_not_returning_iterator)]
+    #[doc = doc_iter!()]
     fn get_range<'a, Range>(
         &'a self,
         range: Range,
@@ -52,51 +52,74 @@ pub trait DatabaseIter<T: Table> {
     where
         Range: RangeBounds<T::Key> + 'a;
 
-    /// TODO
-    ///
-    /// # Errors
-    /// TODO
+    /// Get an [`Iterator`] that returns the `(key, value)` types for this database.
+    #[doc = doc_iter!()]
     #[allow(clippy::iter_not_returning_iterator)]
     fn iter(
         &self,
     ) -> Result<impl Iterator<Item = Result<(T::Key, T::Value), RuntimeError>> + '_, RuntimeError>;
 
-    /// TODO
-    ///
-    /// # Errors
-    /// TODO
+    /// Get an [`Iterator`] that returns _only_ the `key` type for this database.
+    #[doc = doc_iter!()]
     fn keys(&self)
         -> Result<impl Iterator<Item = Result<T::Key, RuntimeError>> + '_, RuntimeError>;
 
-    /// TODO
-    ///
-    /// # Errors
-    /// TODO
+    /// Get an [`Iterator`] that returns _only_ the `value` type for this database.
+    #[doc = doc_iter!()]
     fn values(
         &self,
     ) -> Result<impl Iterator<Item = Result<T::Value, RuntimeError>> + '_, RuntimeError>;
 }
 
 //---------------------------------------------------------------------------------------------------- DatabaseRo
+/// Generic post-fix documentation for `DatabaseR{o,w}` methods.
+macro_rules! doc_database {
+    () => {
+        r"# Errors
+This will return [`RuntimeError::KeyNotFound`] if:
+- Input does not exist OR
+- Database is empty"
+    };
+}
+
 /// Database (key-value store) read abstraction.
 ///
 /// This is a read-only database table,
 /// write operations are defined in [`DatabaseRw`].
-pub trait DatabaseRo<T: Table> {
+///
+/// # Safety
+/// The table type that implements this MUST be `Send`.
+///
+/// However if the table holds a reference to a transaction:
+/// - only the transaction has to be `Send`
+/// - the table cannot implement `Send`
+///
+/// For example:
+///
+/// `heed`'s transactions are `Send` but `HeedTableRo` contains a `&`
+/// to the transaction, as such, if `Send` were implemented on `HeedTableRo`
+/// then 1 transaction could be used to open multiple tables, then sent to
+/// other threads - this would be a soundness hole against `HeedTableRo`.
+///
+/// `&T` is only `Send` if `T: Sync`.
+///
+/// `heed::RoTxn: !Sync`, therefore our table
+/// holding `&heed::RoTxn` must NOT be `Send`.
+///
+/// - <https://doc.rust-lang.org/std/marker/trait.Sync.html>
+/// - <https://doc.rust-lang.org/nomicon/send-and-sync.html>
+pub unsafe trait DatabaseRo<T: Table> {
     /// Get the value corresponding to a key.
-    ///
-    /// The returned value is _owned_.
-    ///
-    /// # Errors
-    /// This will return [`RuntimeError::KeyNotFound`] wrapped in [`Err`] if `key` does not exist.
-    ///
-    /// It will return other [`RuntimeError`]'s on things like IO errors as well.
+    #[doc = doc_database!()]
     fn get(&self, key: &T::Key) -> Result<T::Value, RuntimeError>;
 
-    /// TODO
+    /// Returns `true` if the database contains a value for the specified key.
     ///
     /// # Errors
-    /// TODO
+    /// Note that this will _never_ return `Err(RuntimeError::KeyNotFound)`,
+    /// as in that case, `Ok(false)` will be returned.
+    ///
+    /// Other errors may still occur.
     fn contains(&self, key: &T::Key) -> Result<bool, RuntimeError> {
         match self.get(key) {
             Ok(_) => Ok(true),
@@ -105,28 +128,24 @@ pub trait DatabaseRo<T: Table> {
         }
     }
 
-    /// TODO
+    /// Returns the number of `(key, value)` pairs in the database.
     ///
     /// # Errors
-    /// TODO
+    /// This will never return [`RuntimeError::KeyNotFound`].
     fn len(&self) -> Result<u64, RuntimeError>;
 
-    /// TODO
-    ///
-    /// # Errors
-    /// TODO
+    /// Returns the first `(key, value)` pair in the database.
+    #[doc = doc_database!()]
     fn first(&self) -> Result<(T::Key, T::Value), RuntimeError>;
 
-    /// TODO
-    ///
-    /// # Errors
-    /// TODO
+    /// Returns the last `(key, value)` pair in the database.
+    #[doc = doc_database!()]
     fn last(&self) -> Result<(T::Key, T::Value), RuntimeError>;
 
-    /// TODO
+    /// Returns `true` if the database contains no `(key, value)` pairs.
     ///
     /// # Errors
-    /// TODO
+    /// This can only return [`RuntimeError::Io`] on errors.
     fn is_empty(&self) -> Result<bool, RuntimeError>;
 }
 
@@ -139,25 +158,59 @@ pub trait DatabaseRw<T: Table>: DatabaseRo<T> {
     ///
     /// This will overwrite any existing key-value pairs.
     ///
-    /// # Errors
-    /// This will not return [`RuntimeError::KeyExists`].
+    #[doc = doc_database!()]
+    ///
+    /// This will never return [`RuntimeError::KeyExists`].
     fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError>;
 
     /// Delete a key-value pair in the database.
     ///
-    /// # Errors
-    /// This will return [`RuntimeError::KeyNotFound`] wrapped in [`Err`] if `key` does not exist.
+    /// This will return `Ok(())` if the key does not exist.
+    ///
+    #[doc = doc_database!()]
+    ///
+    /// This will never return [`RuntimeError::KeyExists`].
     fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError>;
 
-    /// TODO
+    /// Delete and return a key-value pair in the database.
     ///
-    /// # Errors
-    /// TODO
+    /// This is the same as [`DatabaseRw::delete`], however,
+    /// it will deserialize the `T::Value` and return it.
+    ///
+    #[doc = doc_database!()]
+    fn take(&mut self, key: &T::Key) -> Result<T::Value, RuntimeError>;
+
+    /// Fetch the value, and apply a function to it - or delete the entry.
+    ///
+    /// This will call [`DatabaseRo::get`] and call your provided function `f` on it.
+    ///
+    /// The [`Option`] `f` returns will dictate whether `update()`:
+    /// - Updates the current value OR
+    /// - Deletes the `(key, value)` pair
+    ///
+    /// - If `f` returns `Some(value)`, that will be [`DatabaseRw::put`] as the new value
+    /// - If `f` returns `None`, the entry will be [`DatabaseRw::delete`]d
+    ///
+    #[doc = doc_database!()]
+    fn update<F>(&mut self, key: &T::Key, mut f: F) -> Result<(), RuntimeError>
+    where
+        F: FnMut(T::Value) -> Option<T::Value>,
+    {
+        let value = DatabaseRo::get(self, key)?;
+
+        match f(value) {
+            Some(value) => DatabaseRw::put(self, key, &value),
+            None => DatabaseRw::delete(self, key),
+        }
+    }
+
+    /// Removes and returns the first `(key, value)` pair in the database.
+    ///
+    #[doc = doc_database!()]
     fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError>;
 
-    /// TODO
+    /// Removes and returns the last `(key, value)` pair in the database.
     ///
-    /// # Errors
-    /// TODO
+    #[doc = doc_database!()]
     fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError>;
 }
diff --git a/database/src/env.rs b/database/src/env.rs
index 26adc975..3a32666b 100644
--- a/database/src/env.rs
+++ b/database/src/env.rs
@@ -1,7 +1,7 @@
 //! Abstracted database environment; `trait Env`.
 
 //---------------------------------------------------------------------------------------------------- Import
-use std::{fmt::Debug, ops::Deref};
+use std::num::NonZeroUsize;
 
 use crate::{
     config::Config,
@@ -9,6 +9,7 @@ use crate::{
     error::{InitError, RuntimeError},
     resize::ResizeAlgorithm,
     table::Table,
+    tables::{call_fn_on_all_tables_or_early_return, TablesIter, TablesMut},
     transaction::{TxRo, TxRw},
 };
 
@@ -23,8 +24,16 @@ use crate::{
 /// although, no invariant relies on this (yet).
 ///
 /// # Lifetimes
-/// TODO: Explain the very sequential lifetime pipeline:
-/// - `ConcreteEnv` -> `'env` -> `'tx` -> `impl DatabaseR{o,w}`
+/// The lifetimes associated with `Env` have a sequential flow:
+/// 1. `ConcreteEnv`
+/// 2. `'env`
+/// 3. `'tx`
+/// 4. `'db`
+///
+/// As in:
+/// - open database tables only live as long as...
+/// - transactions which only live as long as the...
+/// - environment ([`EnvInner`])
 pub trait Env: Sized {
     //------------------------------------------------ Constants
     /// Does the database backend need to be manually
@@ -32,7 +41,7 @@ pub trait Env: Sized {
     ///
     /// # Invariant
     /// If this is `false`, that means this [`Env`]
-    /// can _never_ return a [`RuntimeError::ResizeNeeded`].
+    /// must _never_ return a [`RuntimeError::ResizeNeeded`].
     ///
     /// If this is `true`, [`Env::resize_map`] & [`Env::current_map_size`]
     /// _must_ be re-implemented, as it just panics by default.
@@ -50,10 +59,10 @@ pub trait Env: Sized {
     /// This is used as the `self` in [`EnvInner`] functions, so whatever
     /// this type is, is what will be accessible from those functions.
     ///
-    /// # Explanation (not needed for practical use)
-    /// For `heed`, this is just `heed::Env`, for `redb` this is
-    /// `(redb::Database, redb::Durability)` as each transaction
-    /// needs the sync mode set during creation.
+    // # HACK
+    // For `heed`, this is just `heed::Env`, for `redb` this is
+    // `(redb::Database, redb::Durability)` as each transaction
+    // needs the sync mode set during creation.
     type EnvInner<'env>: EnvInner<'env, Self::TxRo<'env>, Self::TxRw<'env>>
     where
         Self: 'env;
@@ -95,11 +104,11 @@ pub trait Env: Sized {
     /// I.e., after this function returns, there must be no doubts
     /// that the data isn't synced yet, it _must_ be synced.
     ///
-    /// TODO: either this invariant or `sync()` itself will most
-    /// likely be removed/changed after `SyncMode` is finalized.
+    // FIXME: either this invariant or `sync()` itself will most
+    // likely be removed/changed after `SyncMode` is finalized.
     ///
     /// # Errors
-    /// TODO
+    /// If there is a synchronization error, this should return an error.
     fn sync(&self) -> Result<(), RuntimeError>;
 
     /// Resize the database's memory map to a
@@ -109,11 +118,14 @@ pub trait Env: Sized {
     ///
     /// If `resize_algorithm` is `Some`, that will be used instead.
     ///
+    /// This function returns the _new_ memory map size in bytes.
+    ///
     /// # Invariant
     /// This function _must_ be re-implemented if [`Env::MANUAL_RESIZE`] is `true`.
     ///
     /// Otherwise, this function will panic with `unreachable!()`.
-    fn resize_map(&self, resize_algorithm: Option<ResizeAlgorithm>) {
+    #[allow(unused_variables)]
+    fn resize_map(&self, resize_algorithm: Option<ResizeAlgorithm>) -> NonZeroUsize {
         unreachable!()
     }
 
@@ -164,7 +176,26 @@ pub trait Env: Sized {
 }
 
 //---------------------------------------------------------------------------------------------------- DatabaseRo
-/// TODO
+/// Document errors when opening tables in [`EnvInner`].
+macro_rules! doc_table_error {
+    () => {
+        r"# Errors
+This will only return [`RuntimeError::Io`] if it errors.
+
+As all tables are created upon [`Env::open`],
+this function will never error because a table doesn't exist."
+    };
+}
+
+/// The inner [`Env`] type.
+///
+/// This type is created with [`Env::env_inner`] and represents
+/// the type able to generate transactions and open tables.
+///
+/// # Locking behavior
+/// As noted in `Env::env_inner`, this is a `RwLockReadGuard`
+/// when using the `heed` backend, be aware of this and do
+/// not hold onto an `EnvInner` for a long time.
 pub trait EnvInner<'env, Ro, Rw>
 where
     Self: 'env,
@@ -185,6 +216,9 @@ where
 
     /// Open a database in read-only mode.
     ///
+    /// The returned value can have [`DatabaseRo`]
+    /// & [`DatabaseIter`] functions called on it.
+    ///
     /// This will open the database [`Table`]
     /// passed as a generic to this function.
     ///
@@ -195,12 +229,7 @@ where
     /// //                      (name, key/value type)
     /// ```
     ///
-    /// # Errors
-    /// This function errors upon internal database/IO errors.
-    ///
-    /// As [`Table`] is `Sealed`, and all tables are created
-    /// upon [`Env::open`], this function will never error because
-    /// a table doesn't exist.
+    #[doc = doc_table_error!()]
     fn open_db_ro<T: Table>(
         &self,
         tx_ro: &Ro,
@@ -211,17 +240,39 @@ where
     /// All [`DatabaseRo`] functions are also callable
     /// with the returned [`DatabaseRw`] structure.
     ///
+    /// Note that [`DatabaseIter`] functions are _not_
+    /// available to [`DatabaseRw`] structures.
+    ///
     /// This will open the database [`Table`]
     /// passed as a generic to this function.
     ///
-    /// # Errors
-    /// This function errors upon internal database/IO errors.
-    ///
-    /// As [`Table`] is `Sealed`, and all tables are created
-    /// upon [`Env::open`], this function will never error because
-    /// a table doesn't exist.
+    #[doc = doc_table_error!()]
     fn open_db_rw<T: Table>(&self, tx_rw: &Rw) -> Result<impl DatabaseRw<T>, RuntimeError>;
 
+    /// Open all tables in read/iter mode.
+    ///
+    /// This calls [`EnvInner::open_db_ro`] on all database tables
+    /// and returns a structure that allows access to all tables.
+    ///
+    #[doc = doc_table_error!()]
+    fn open_tables(&self, tx_ro: &Ro) -> Result<impl TablesIter, RuntimeError> {
+        call_fn_on_all_tables_or_early_return! {
+            Self::open_db_ro(self, tx_ro)
+        }
+    }
+
+    /// Open all tables in read-write mode.
+    ///
+    /// This calls [`EnvInner::open_db_rw`] on all database tables
+    /// and returns a structure that allows access to all tables.
+    ///
+    #[doc = doc_table_error!()]
+    fn open_tables_mut(&self, tx_rw: &Rw) -> Result<impl TablesMut, RuntimeError> {
+        call_fn_on_all_tables_or_early_return! {
+            Self::open_db_rw(self, tx_rw)
+        }
+    }
+
     /// Clear all `(key, value)`'s from a database table.
     ///
     /// This will delete all key and values in the passed
@@ -230,11 +281,6 @@ where
     /// Note that this operation is tied to `tx_rw`, as such this
     /// function's effects can be aborted using [`TxRw::abort`].
     ///
-    /// # Errors
-    /// This function errors upon internal database/IO errors.
-    ///
-    /// As [`Table`] is `Sealed`, and all tables are created
-    /// upon [`Env::open`], this function will never error because
-    /// a table doesn't exist.
+    #[doc = doc_table_error!()]
     fn clear_db<T: Table>(&self, tx_rw: &mut Rw) -> Result<(), RuntimeError>;
 }
diff --git a/database/src/error.rs b/database/src/error.rs
index 33c64346..e47634f6 100644
--- a/database/src/error.rs
+++ b/database/src/error.rs
@@ -1,5 +1,4 @@
 //! Database error types.
-//! TODO: `InitError/RuntimeError` are maybe bad names.
 
 //---------------------------------------------------------------------------------------------------- Import
 use std::fmt::Debug;
@@ -42,8 +41,12 @@ pub enum InitError {
     /// The database is currently in the process
     /// of shutting down and cannot respond.
     ///
-    /// TODO: This might happen if we try to open
-    /// while we are shutting down, `unreachable!()`?
+    /// # Notes
+    /// This error can only occur with the `heed` backend when
+    /// the database environment is opened _right_ at the same time
+    /// another thread/process is closing it.
+    ///
+    /// This will never occur with other backends.
     #[error("database is shutting down")]
     ShuttingDown,
 
diff --git a/database/src/free.rs b/database/src/free.rs
index a9b93855..7e145a28 100644
--- a/database/src/free.rs
+++ b/database/src/free.rs
@@ -1,6 +1,4 @@
 //! General free functions (related to the database).
-//!
-//! TODO.
 
 //---------------------------------------------------------------------------------------------------- Import
 
diff --git a/database/src/key.rs b/database/src/key.rs
index f88455ca..13f7cede 100644
--- a/database/src/key.rs
+++ b/database/src/key.rs
@@ -1,40 +1,22 @@
 //! Database key abstraction; `trait Key`.
 
 //---------------------------------------------------------------------------------------------------- Import
-use std::{cmp::Ordering, fmt::Debug};
+use std::cmp::Ordering;
 
-use bytemuck::Pod;
-
-use crate::storable::{self, Storable};
+use crate::storable::Storable;
 
 //---------------------------------------------------------------------------------------------------- Table
 /// Database [`Table`](crate::table::Table) key metadata.
 ///
-/// Purely compile time information for database table keys, supporting duplicate keys.
+/// Purely compile time information for database table keys.
+//
+// FIXME: this doesn't need to exist right now but
+// may be used if we implement getting values using ranges.
+// <https://github.com/Cuprate/cuprate/pull/117#discussion_r1589378104>
 pub trait Key: Storable + Sized {
-    /// Does this [`Key`] require multiple keys to reach a value?
-    ///
-    /// # Invariant
-    /// - If [`Key::DUPLICATE`] is `true`, [`Key::primary_secondary`] MUST be re-implemented.
-    /// - If [`Key::DUPLICATE`] is `true`, [`Key::new_with_max_secondary`] MUST be re-implemented.
-    const DUPLICATE: bool;
-
-    /// Does this [`Key`] have a custom comparison function?
-    ///
-    /// # Invariant
-    /// If [`Key::CUSTOM_COMPARE`] is `true`, [`Key::compare`] MUST be re-implemented.
-    const CUSTOM_COMPARE: bool;
-
     /// The primary key type.
     type Primary: Storable;
 
-    /// Acquire [`Self::Primary`] and the secondary key.
-    ///
-    /// # TODO: doc test
-    fn primary_secondary(self) -> (Self::Primary, u64) {
-        unreachable!()
-    }
-
     /// Compare 2 [`Key`]'s against each other.
     ///
     /// By default, this does a straight _byte_ comparison,
@@ -55,67 +37,17 @@ pub trait Key: Storable + Sized {
     ///     std::cmp::Ordering::Greater,
     /// );
     /// ```
+    #[inline]
     fn compare(left: &[u8], right: &[u8]) -> Ordering {
         left.cmp(right)
     }
-
-    /// Create a new [`Key`] from the [`Key::Primary`] type,
-    /// with the secondary key type set to the maximum value.
-    ///
-    /// # Invariant
-    /// Secondary key must be the max value of the type.
-    ///
-    /// # TODO: doc test
-    fn new_with_max_secondary(primary: Self::Primary) -> Self {
-        unreachable!()
-    }
 }
 
 //---------------------------------------------------------------------------------------------------- Impl
-/// TODO: remove after we finalize tables.
-///
-/// Implement `Key` on most primitive types.
-///
-/// - `Key::DUPLICATE` is always `false`.
-/// - `Key::CUSTOM_COMPARE` is always `false`.
-macro_rules! impl_key {
-    (
-        $(
-            $t:ident // Key type.
-        ),* $(,)?
-    ) => {
-        $(
-            impl Key for $t {
-                const DUPLICATE: bool = false;
-                const CUSTOM_COMPARE: bool = false;
-
-                type Primary = $t;
-            }
-        )*
-    };
-}
-// Implement `Key` for primitives.
-impl_key! {
-    u8,
-    u16,
-    u32,
-    u64,
-    i8,
-    i16,
-    i32,
-    i64,
-}
-
-impl<T: Key + Pod, const N: usize> Key for [T; N] {
-    const DUPLICATE: bool = false;
-    const CUSTOM_COMPARE: bool = false;
-    type Primary = Self;
-}
-
-// TODO: temporary for now for `Key` bound, remove later.
-impl Key for crate::types::PreRctOutputId {
-    const DUPLICATE: bool = false;
-    const CUSTOM_COMPARE: bool = false;
+impl<T> Key for T
+where
+    T: Storable + Sized,
+{
     type Primary = Self;
 }
 
diff --git a/database/src/lib.rs b/database/src/lib.rs
index a80647f0..f1d2b2eb 100644
--- a/database/src/lib.rs
+++ b/database/src/lib.rs
@@ -1,4 +1,4 @@
-//! Database abstraction and utilities.
+//! Cuprate's database abstraction.
 //!
 //! This documentation is mostly for practical usage of `cuprate_database`.
 //!
@@ -8,28 +8,33 @@
 //! # Purpose
 //! This crate does 3 things:
 //! 1. Abstracts various database backends with traits
-//! 2. Implements various `Monero` related [functions](ops) & [tables] & [types]
+//! 2. Implements various `Monero` related [operations](ops), [tables], and [types]
 //! 3. Exposes a [`tower::Service`] backed by a thread-pool
 //!
+//! Each layer builds on-top of the previous.
+//!
+//! As a user of `cuprate_database`, consider using the higher-level [`service`] module,
+//! or at the very least the [`ops`] module instead of interacting with the database traits directly.
+//!
+//! With that said, many database traits and internals (like [`DatabaseRo::get`]) are exposed.
+//!
 //! # Terminology
 //! To be more clear on some terms used in this crate:
 //!
-//! | Term          | Meaning                              |
-//! |---------------|--------------------------------------|
-//! | `Env`         | The 1 database environment, the "whole" thing
-//! | `DatabaseRo`  | A read-only `key/value` store
-//! | `DatabaseRw`  | A readable/writable `key/value` store
-//! | `Table`       | Solely the metadata of a `Database` (the `key` and `value` types, and the name)
-//! | `TxRo`        | Read only transaction
-//! | `TxRw`        | Read/write transaction
-//! | `Storable`    | A data that type can be stored in the database
+//! | Term             | Meaning                              |
+//! |------------------|--------------------------------------|
+//! | `Env`            | The 1 database environment, the "whole" thing
+//! | `DatabaseR{o,w}` | An _actively open_ readable/writable `key/value` store
+//! | `Table`          | Solely the metadata of a `Database` (the `key` and `value` types, and the name)
+//! | `TxR{o,w}`       | A read/write transaction
+//! | `Storable`       | A data type that can be stored in the database
 //!
 //! The dataflow is `Env` -> `Tx` -> `Database`
 //!
 //! Which reads as:
 //! 1. You have a database `Environment`
 //! 1. You open up a `Transaction`
-//! 1. You get a particular `Database` from that `Environment`
+//! 1. You open a particular `Table` from that `Environment`, getting a `Database`
 //! 1. You can now read/write data from/to that `Database`
 //!
 //! # `ConcreteEnv`
@@ -58,14 +63,10 @@
 //! Note that `ConcreteEnv` itself is not a clonable type,
 //! it should be wrapped in [`std::sync::Arc`].
 //!
-//! TODO: we could also expose `ConcreteDatabase` if we're
-//! going to be storing any databases in structs, to lessen
-//! the generic `<D: Database>` pain.
-//!
-//! TODO: we could replace `ConcreteEnv` with `fn Env::open() -> impl Env`/
+//! <!-- SOMEDAY: replace `ConcreteEnv` with `fn Env::open() -> impl Env`/
 //! and use `<E: Env>` everywhere it is stored instead. This would allow
 //! generic-backed dynamic runtime selection of the database backend, i.e.
-//! the user can select which database backend they use.
+//! the user can select which database backend they use. -->
 //!
 //! # Feature flags
 //! The `service` module requires the `service` feature to be enabled.
@@ -77,43 +78,66 @@
 //!
 //! The default is `heed`.
 //!
+//! `tracing` is always enabled and cannot be disabled via feature-flag.
+//! <!-- FIXME: tracing should be behind a feature flag -->
+//!
 //! # Invariants when not using `service`
 //! `cuprate_database` can be used without the `service` feature enabled but
-//! there are some things that must be kept in mind when doing so:
+//! there are some things that must be kept in mind when doing so.
 //!
-//! TODO: make pretty. these will need to be updated
-//! as things change and as more backends are added.
+//! Failing to uphold these invariants may cause panics.
 //!
-//! 1. Memory map resizing (must resize as needed)
-//! 1. Must not exceed `Config`'s maximum reader count
-//! 1. Avoid many nested transactions
-//! 1. `heed::MdbError::BadValSize`
-//! 1. `heed::Error::InvalidDatabaseTyping`
-//! 1. `heed::Error::BadOpenOptions`
-//! 1. Encoding/decoding into `[u8]`
+//! 1. `LMDB` requires the user to resize the memory map as needed (see [`RuntimeError::ResizeNeeded`])
+//! 1. `LMDB` has a maximum reader transaction count, currently it is set to `128`
+//! 1. `LMDB` has [maximum key/value byte size](http://www.lmdb.tech/doc/group__internal.html#gac929399f5d93cef85f874b9e9b1d09e0) which must not be exceeded
 //!
-//! # Example
-//! Simple usage of this crate.
+//! # Examples
+//! The below is an example of using `cuprate_database`'s
+//! lowest API, i.e. using the database directly.
+//!
+//! For examples of the higher-level APIs, see:
+//! - [`ops`]
+//! - [`service`]
 //!
 //! ```rust
 //! use cuprate_database::{
-//!     config::Config,
 //!     ConcreteEnv,
-//!     Env, Key, TxRo, TxRw,
-//!     service::{ReadRequest, WriteRequest, Response},
+//!     config::ConfigBuilder,
+//!     Env, EnvInner,
+//!     tables::{Tables, TablesMut},
+//!     DatabaseRo, DatabaseRw, TxRo, TxRw,
 //! };
 //!
+//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
 //! // Create a configuration for the database environment.
-//! let db_dir = tempfile::tempdir().unwrap();
-//! let config = Config::new(Some(db_dir.path().to_path_buf()));
+//! let db_dir = tempfile::tempdir()?;
+//! let config = ConfigBuilder::new()
+//!     .db_directory(db_dir.path().to_path_buf())
+//!     .build();
 //!
-//! // Initialize the database thread-pool.
+//! // Initialize the database environment.
+//! let env = ConcreteEnv::open(config)?;
 //!
-//! // TODO:
-//! // 1. let (read_handle, write_handle) = cuprate_database::service::init(config).unwrap();
-//! // 2. Send write/read requests
-//! // 3. Use some other `Env` functions
-//! // 4. Shutdown
+//! // Open up a transaction + tables for writing.
+//! let env_inner = env.env_inner();
+//! let tx_rw = env_inner.tx_rw()?;
+//! let mut tables = env_inner.open_tables_mut(&tx_rw)?;
+//!
+//! // ⚠️ Write data to the tables directly.
+//! // (not recommended, use `ops` or `service`).
+//! const KEY_IMAGE: [u8; 32] = [88; 32];
+//! tables.key_images_mut().put(&KEY_IMAGE, &())?;
+//!
+//! // Commit the data written.
+//! drop(tables);
+//! TxRw::commit(tx_rw)?;
+//!
+//! // Read the data, assert it is correct.
+//! let tx_ro = env_inner.tx_ro()?;
+//! let tables = env_inner.open_tables(&tx_ro)?;
+//! let (key_image, _) = tables.key_images().first()?;
+//! assert_eq!(key_image, KEY_IMAGE);
+//! # Ok(()) }
 //! ```
 
 //---------------------------------------------------------------------------------------------------- Lints
@@ -136,7 +160,6 @@
 	unconditional_recursion,
 	for_loops_over_fallibles,
 	unused_braces,
-	unused_doc_comments,
 	unused_labels,
 	keyword_idents,
 	non_ascii_idents,
@@ -167,13 +190,13 @@
     clippy::pedantic,
     clippy::nursery,
     clippy::cargo,
+    unused_doc_comments,
     unused_mut,
     missing_docs,
     deprecated,
     unused_comparisons,
     nonstandard_style
 )]
-#![allow(unreachable_code, unused_variables, dead_code, unused_imports)] // TODO: remove
 #![allow(
 	// FIXME: this lint affects crates outside of
 	// `database/` for some reason, allow for now.
@@ -184,8 +207,12 @@
 	// although it is sometimes nice.
 	clippy::must_use_candidate,
 
-	// TODO: should be removed after all `todo!()`'s are gone.
-	clippy::diverging_sub_expression,
+	// FIXME: good lint but too many false positives
+	// with our `Env` + `RwLock` setup.
+	clippy::significant_drop_tightening,
+
+	// FIXME: good lint but is less clear in most cases.
+	clippy::items_after_statements,
 
 	clippy::module_name_repetitions,
 	clippy::module_inception,
@@ -194,7 +221,16 @@
 )]
 // Allow some lints when running in debug mode.
 #![cfg_attr(debug_assertions, allow(clippy::todo, clippy::multiple_crate_versions))]
-
+// Allow some lints in tests.
+#![cfg_attr(
+    test,
+    allow(
+        clippy::cognitive_complexity,
+        clippy::needless_pass_by_value,
+        clippy::cast_possible_truncation,
+        clippy::too_many_lines
+    )
+)]
 // Only allow building 64-bit targets.
 //
 // This allows us to assume 64-bit
@@ -219,6 +255,7 @@ pub mod config;
 mod constants;
 pub use constants::{
     DATABASE_BACKEND, DATABASE_CORRUPT_MSG, DATABASE_DATA_FILENAME, DATABASE_LOCK_FILENAME,
+    DATABASE_VERSION,
 };
 
 mod database;
@@ -230,15 +267,13 @@ pub use env::{Env, EnvInner};
 mod error;
 pub use error::{InitError, RuntimeError};
 
-mod free;
+pub(crate) mod free;
 
 pub mod resize;
 
 mod key;
 pub use key::Key;
 
-mod macros;
-
 mod storable;
 pub use storable::{Storable, StorableBytes, StorableVec};
 
@@ -259,3 +294,8 @@ pub use transaction::{TxRo, TxRw};
 pub mod service;
 
 //---------------------------------------------------------------------------------------------------- Private
+#[cfg(test)]
+pub(crate) mod tests;
+
+#[cfg(feature = "service")] // only needed in `service` for now
+pub(crate) mod unsafe_sendable;
diff --git a/database/src/macros.rs b/database/src/macros.rs
deleted file mode 100644
index 7d4c2047..00000000
--- a/database/src/macros.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-//! General macros used throughout `cuprate-database`.
-
-//---------------------------------------------------------------------------------------------------- Import
-
-//---------------------------------------------------------------------------------------------------- Constants
-
-//---------------------------------------------------------------------------------------------------- TYPE
-
-//---------------------------------------------------------------------------------------------------- IMPL
-
-//---------------------------------------------------------------------------------------------------- Trait Impl
-
-//---------------------------------------------------------------------------------------------------- Tests
-#[cfg(test)]
-mod test {
-    // use super::*;
-}
diff --git a/database/src/ops/alt_block.rs b/database/src/ops/alt_block.rs
deleted file mode 100644
index 82f33aaf..00000000
--- a/database/src/ops/alt_block.rs
+++ /dev/null
@@ -1,29 +0,0 @@
-//! Alternative blocks.
-
-//---------------------------------------------------------------------------------------------------- Import
-
-//---------------------------------------------------------------------------------------------------- Free Functions
-/// TODO
-pub fn add_alt_block() {
-    todo!()
-}
-
-/// TODO
-pub fn get_alt_block() {
-    todo!()
-}
-
-/// TODO
-pub fn remove_alt_block() {
-    todo!()
-}
-
-/// TODO
-pub fn get_alt_block_count() {
-    todo!()
-}
-
-/// TODO
-pub fn drop_alt_blocks() {
-    todo!()
-}
diff --git a/database/src/ops/block.rs b/database/src/ops/block.rs
index 361b391b..4f16cfde 100644
--- a/database/src/ops/block.rs
+++ b/database/src/ops/block.rs
@@ -1,89 +1,472 @@
-//! Blocks.
+//! Blocks functions.
 
 //---------------------------------------------------------------------------------------------------- Import
+use bytemuck::TransparentWrapper;
+use monero_serai::block::Block;
 
-//---------------------------------------------------------------------------------------------------- Free Functions
-/// TODO
-pub fn add_block() {
-    todo!()
+use cuprate_helper::map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits};
+use cuprate_types::{ExtendedBlockHeader, VerifiedBlockInformation};
+
+use crate::{
+    database::{DatabaseRo, DatabaseRw},
+    error::RuntimeError,
+    ops::{
+        blockchain::{chain_height, cumulative_generated_coins},
+        macros::doc_error,
+        output::get_rct_num_outputs,
+        tx::{add_tx, remove_tx},
+    },
+    tables::{BlockHeights, BlockInfos, Tables, TablesMut},
+    types::{BlockHash, BlockHeight, BlockInfo},
+    StorableVec,
+};
+
+//---------------------------------------------------------------------------------------------------- `add_block_*`
+/// Add a [`VerifiedBlockInformation`] to the database.
+///
+/// This extracts all the data from the input block and
+/// maps/adds them to the appropriate database tables.
+///
+#[doc = doc_error!()]
+///
+/// # Panics
+/// This function will panic if:
+/// - `block.height > u32::MAX` (not normally possible)
+/// - `block.height` != [`chain_height`]
+///
+/// # Already exists
+/// This function will operate normally even if `block` already
+/// exists, i.e., this function will not return `Err` even if you
+/// call this function infinitely with the same block.
+// no inline, too big.
+pub fn add_block(
+    block: &VerifiedBlockInformation,
+    tables: &mut impl TablesMut,
+) -> Result<(), RuntimeError> {
+    //------------------------------------------------------ Check preconditions first
+
+    // Cast height to `u32` for storage (handled at top of function).
+    // Panic (should never happen) instead of allowing DB corruption.
+    // <https://github.com/Cuprate/cuprate/pull/102#discussion_r1560020991>
+    assert!(
+        u32::try_from(block.height).is_ok(),
+        "block.height ({}) > u32::MAX",
+        block.height,
+    );
+
+    let chain_height = chain_height(tables.block_heights())?;
+    assert_eq!(
+        block.height, chain_height,
+        "block.height ({}) != chain_height ({})",
+        block.height, chain_height,
+    );
+
+    // Expensive checks - debug only.
+    #[cfg(debug_assertions)]
+    {
+        assert_eq!(block.block.serialize(), block.block_blob);
+        assert_eq!(block.block.txs.len(), block.txs.len());
+        for (i, tx) in block.txs.iter().enumerate() {
+            assert_eq!(tx.tx_blob, tx.tx.serialize());
+            assert_eq!(tx.tx_hash, block.block.txs[i]);
+        }
+    }
+
+    //------------------------------------------------------ Transaction / Outputs / Key Images
+    // Add the miner transaction first.
+    {
+        let tx = &block.block.miner_tx;
+        add_tx(tx, &tx.serialize(), &tx.hash(), &chain_height, tables)?;
+    }
+
+    for tx in &block.txs {
+        add_tx(&tx.tx, &tx.tx_blob, &tx.tx_hash, &chain_height, tables)?;
+    }
+
+    //------------------------------------------------------ Block Info
+
+    // INVARIANT: must be below the above transaction loop since this
+    // RCT output count needs to account for _this_ block's outputs.
+    let cumulative_rct_outs = get_rct_num_outputs(tables.rct_outputs())?;
+
+    let cumulative_generated_coins =
+        cumulative_generated_coins(&block.height.saturating_sub(1), tables.block_infos())?
+            + block.generated_coins;
+
+    let (cumulative_difficulty_low, cumulative_difficulty_high) =
+        split_u128_into_low_high_bits(block.cumulative_difficulty);
+
+    // Block Info.
+    tables.block_infos_mut().put(
+        &block.height,
+        &BlockInfo {
+            cumulative_difficulty_low,
+            cumulative_difficulty_high,
+            cumulative_generated_coins,
+            cumulative_rct_outs,
+            timestamp: block.block.header.timestamp,
+            block_hash: block.block_hash,
+            // INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`
+            weight: block.weight as u64,
+            long_term_weight: block.long_term_weight as u64,
+        },
+    )?;
+
+    // Block blobs.
+    tables
+        .block_blobs_mut()
+        .put(&block.height, StorableVec::wrap_ref(&block.block_blob))?;
+
+    // Block heights.
+    tables
+        .block_heights_mut()
+        .put(&block.block_hash, &block.height)?;
+
+    Ok(())
 }
 
-/// TODO
-pub fn add_block_data() {
-    todo!()
+//---------------------------------------------------------------------------------------------------- `pop_block`
+/// Remove the top/latest block from the database.
+///
+/// The removed block's data is returned.
+#[doc = doc_error!()]
+///
+/// In `pop_block()`'s case, [`RuntimeError::KeyNotFound`]
+/// will be returned if there are no blocks left.
+// no inline, too big
+pub fn pop_block(
+    tables: &mut impl TablesMut,
+) -> Result<(BlockHeight, BlockHash, Block), RuntimeError> {
+    //------------------------------------------------------ Block Info
+    // Remove block data from tables.
+    let (block_height, block_hash) = {
+        let (block_height, block_info) = tables.block_infos_mut().pop_last()?;
+        (block_height, block_info.block_hash)
+    };
+
+    // Block heights.
+    tables.block_heights_mut().delete(&block_hash)?;
+
+    // Block blobs.
+    // We deserialize the block blob into a `Block`, such
+    // that we can remove the associated transactions later.
+    let block_blob = tables.block_blobs_mut().take(&block_height)?.0;
+    let block = Block::read(&mut block_blob.as_slice())?;
+
+    //------------------------------------------------------ Transaction / Outputs / Key Images
+    remove_tx(&block.miner_tx.hash(), tables)?;
+    for tx_hash in &block.txs {
+        remove_tx(tx_hash, tables)?;
+    }
+
+    Ok((block_height, block_hash, block))
 }
 
-/// TODO
-pub fn pop_block() {
-    todo!()
+//---------------------------------------------------------------------------------------------------- `get_block_extended_header_*`
+/// Retrieve a [`ExtendedBlockHeader`] from the database.
+///
+/// This extracts all the data from the database tables
+/// needed to create a full `ExtendedBlockHeader`.
+///
+/// # Notes
+/// This is slightly more expensive than [`get_block_extended_header_from_height`]
+/// (1 more database lookup).
+#[doc = doc_error!()]
+#[inline]
+pub fn get_block_extended_header(
+    block_hash: &BlockHash,
+    tables: &impl Tables,
+) -> Result<ExtendedBlockHeader, RuntimeError> {
+    get_block_extended_header_from_height(&tables.block_heights().get(block_hash)?, tables)
 }
 
-/// TODO
-pub fn block_exists() {
-    todo!()
+/// Same as [`get_block_extended_header`] but with a [`BlockHeight`].
+#[doc = doc_error!()]
+#[inline]
+pub fn get_block_extended_header_from_height(
+    block_height: &BlockHeight,
+    tables: &impl Tables,
+) -> Result<ExtendedBlockHeader, RuntimeError> {
+    let block_info = tables.block_infos().get(block_height)?;
+    let block_blob = tables.block_blobs().get(block_height)?.0;
+    let block = Block::read(&mut block_blob.as_slice())?;
+
+    let cumulative_difficulty = combine_low_high_bits_to_u128(
+        block_info.cumulative_difficulty_low,
+        block_info.cumulative_difficulty_high,
+    );
+
+    // INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`
+    #[allow(clippy::cast_possible_truncation)]
+    Ok(ExtendedBlockHeader {
+        cumulative_difficulty,
+        version: block.header.major_version,
+        vote: block.header.minor_version,
+        timestamp: block.header.timestamp,
+        block_weight: block_info.weight as usize,
+        long_term_weight: block_info.long_term_weight as usize,
+    })
 }
 
-/// TODO
-pub fn get_block_hash() {
-    todo!()
+/// Return the top/latest [`ExtendedBlockHeader`] from the database.
+#[doc = doc_error!()]
+#[inline]
+pub fn get_block_extended_header_top(
+    tables: &impl Tables,
+) -> Result<(ExtendedBlockHeader, BlockHeight), RuntimeError> {
+    let height = chain_height(tables.block_heights())?.saturating_sub(1);
+    let header = get_block_extended_header_from_height(&height, tables)?;
+    Ok((header, height))
 }
 
-/// TODO
-pub fn get_block_height() {
-    todo!()
+//---------------------------------------------------------------------------------------------------- Misc
+/// Retrieve a [`BlockInfo`] via its [`BlockHeight`].
+#[doc = doc_error!()]
+#[inline]
+pub fn get_block_info(
+    block_height: &BlockHeight,
+    table_block_infos: &impl DatabaseRo<BlockInfos>,
+) -> Result<BlockInfo, RuntimeError> {
+    table_block_infos.get(block_height)
 }
 
-/// TODO
-pub fn get_block_weight() {
-    todo!()
+/// Retrieve a [`BlockHeight`] via its [`BlockHash`].
+#[doc = doc_error!()]
+#[inline]
+pub fn get_block_height(
+    block_hash: &BlockHash,
+    table_block_heights: &impl DatabaseRo<BlockHeights>,
+) -> Result<BlockHeight, RuntimeError> {
+    table_block_heights.get(block_hash)
 }
 
-/// TODO
-pub fn get_block_already_generated_coins() {
-    todo!()
+/// Check if a block exists in the database.
+///
+/// # Errors
+/// Note that this will never return `Err(RuntimeError::KeyNotFound)`,
+/// as in that case, `Ok(false)` will be returned.
+///
+/// Other errors may still occur.
+#[inline]
+pub fn block_exists(
+    block_hash: &BlockHash,
+    table_block_heights: &impl DatabaseRo<BlockHeights>,
+) -> Result<bool, RuntimeError> {
+    table_block_heights.contains(block_hash)
 }
 
-/// TODO
-pub fn get_block_long_term_weight() {
-    todo!()
-}
+//---------------------------------------------------------------------------------------------------- Tests
+#[cfg(test)]
+#[allow(
+    clippy::significant_drop_tightening,
+    clippy::cognitive_complexity,
+    clippy::too_many_lines
+)]
+mod test {
+    use pretty_assertions::assert_eq;
 
-/// TODO
-pub fn get_block_timestamp() {
-    todo!()
-}
+    use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3};
 
-/// TODO
-pub fn get_block_cumulative_rct_outputs() {
-    todo!()
-}
+    use super::*;
+    use crate::{
+        ops::tx::{get_tx, tx_exists},
+        tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
+        transaction::TxRw,
+        Env, EnvInner,
+    };
 
-/// TODO
-pub fn get_block() {
-    todo!()
-}
+    /// Tests all above block functions.
+    ///
+    /// Note that this doesn't test the correctness of values added, as the
+    /// functions have a pre-condition that the caller handles this.
+    ///
+    /// It simply tests if the proper tables are mutated, and if the data
+    /// stored and retrieved is the same.
+    #[test]
+    fn all_block_functions() {
+        let (env, _tmp) = tmp_concrete_env();
+        let env_inner = env.env_inner();
+        assert_all_tables_are_empty(&env);
 
-/// TODO
-pub fn get_block_from_height() {
-    todo!()
-}
+        let mut blocks = [
+            block_v1_tx2().clone(),
+            block_v9_tx3().clone(),
+            block_v16_tx0().clone(),
+        ];
+        // HACK: `add_block()` asserts blocks with non-sequential heights
+        // cannot be added, to get around this, manually edit the block height.
+        for (height, block) in blocks.iter_mut().enumerate() {
+            block.height = height as u64;
+            assert_eq!(block.block.serialize(), block.block_blob);
+        }
+        let generated_coins_sum = blocks
+            .iter()
+            .map(|block| block.generated_coins)
+            .sum::<u64>();
 
-/// TODO
-pub fn get_block_header() {
-    todo!()
-}
+        // Add blocks.
+        {
+            let tx_rw = env_inner.tx_rw().unwrap();
+            let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
 
-/// TODO
-pub fn get_block_header_from_height() {
-    todo!()
-}
+            for block in &blocks {
+                // println!("add_block: {block:#?}");
+                add_block(block, &mut tables).unwrap();
+            }
 
-/// TODO
-pub fn get_top_block() {
-    todo!()
-}
+            drop(tables);
+            TxRw::commit(tx_rw).unwrap();
+        }
 
-/// TODO
-pub fn get_top_block_hash() {
-    todo!()
+        // Assert all reads are OK.
+        let block_hashes = {
+            let tx_ro = env_inner.tx_ro().unwrap();
+            let tables = env_inner.open_tables(&tx_ro).unwrap();
+
+            // Assert only the proper tables were added to.
+            AssertTableLen {
+                block_infos: 3,
+                block_blobs: 3,
+                block_heights: 3,
+                key_images: 69,
+                num_outputs: 41,
+                pruned_tx_blobs: 0,
+                prunable_hashes: 0,
+                outputs: 111,
+                prunable_tx_blobs: 0,
+                rct_outputs: 8,
+                tx_blobs: 8,
+                tx_ids: 8,
+                tx_heights: 8,
+                tx_unlock_time: 3,
+            }
+            .assert(&tables);
+
+            // Check `cumulative` functions work.
+            assert_eq!(
+                cumulative_generated_coins(&2, tables.block_infos()).unwrap(),
+                generated_coins_sum,
+            );
+
+            // Both height and hash should result in getting the same data.
+            let mut block_hashes = vec![];
+            for block in &blocks {
+                println!("blocks.iter(): hash: {}", hex::encode(block.block_hash));
+
+                let height = get_block_height(&block.block_hash, tables.block_heights()).unwrap();
+
+                println!("blocks.iter(): height: {height}");
+
+                assert!(block_exists(&block.block_hash, tables.block_heights()).unwrap());
+
+                let block_header_from_height =
+                    get_block_extended_header_from_height(&height, &tables).unwrap();
+                let block_header_from_hash =
+                    get_block_extended_header(&block.block_hash, &tables).unwrap();
+
+                // Just an alias, these names are long.
+                let b1 = block_header_from_hash;
+                let b2 = block;
+                assert_eq!(b1, block_header_from_height);
+                assert_eq!(b1.version, b2.block.header.major_version);
+                assert_eq!(b1.vote, b2.block.header.minor_version);
+                assert_eq!(b1.timestamp, b2.block.header.timestamp);
+                assert_eq!(b1.cumulative_difficulty, b2.cumulative_difficulty);
+                assert_eq!(b1.block_weight, b2.weight);
+                assert_eq!(b1.long_term_weight, b2.long_term_weight);
+
+                block_hashes.push(block.block_hash);
+
+                // Assert transaction reads are OK.
+                for (i, tx) in block.txs.iter().enumerate() {
+                    println!("tx_hash: {:?}", hex::encode(tx.tx_hash));
+
+                    assert!(tx_exists(&tx.tx_hash, tables.tx_ids()).unwrap());
+
+                    let tx2 = get_tx(&tx.tx_hash, tables.tx_ids(), tables.tx_blobs()).unwrap();
+
+                    assert_eq!(tx.tx_blob, tx2.serialize());
+                    assert_eq!(tx.tx_weight, tx2.weight());
+                    assert_eq!(tx.tx_hash, block.block.txs[i]);
+                    assert_eq!(tx.tx_hash, tx2.hash());
+                }
+            }
+
+            block_hashes
+        };
+
+        {
+            let len = block_hashes.len();
+            let hashes: Vec<String> = block_hashes.iter().map(hex::encode).collect();
+            println!("block_hashes: len: {len}, hashes: {hashes:?}");
+        }
+
+        // Remove the blocks.
+        {
+            let tx_rw = env_inner.tx_rw().unwrap();
+            let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
+
+            for block_hash in block_hashes.into_iter().rev() {
+                println!("pop_block(): block_hash: {}", hex::encode(block_hash));
+
+                let (_popped_height, popped_hash, _popped_block) = pop_block(&mut tables).unwrap();
+
+                assert_eq!(block_hash, popped_hash);
+
+                assert!(matches!(
+                    get_block_extended_header(&block_hash, &tables),
+                    Err(RuntimeError::KeyNotFound)
+                ));
+            }
+
+            drop(tables);
+            TxRw::commit(tx_rw).unwrap();
+        }
+
+        assert_all_tables_are_empty(&env);
+    }
+
+    /// We should panic if: `block.height` > `u32::MAX`
+    #[test]
+    #[should_panic(expected = "block.height (4294967296) > u32::MAX")]
+    fn block_height_gt_u32_max() {
+        let (env, _tmp) = tmp_concrete_env();
+        let env_inner = env.env_inner();
+        assert_all_tables_are_empty(&env);
+
+        let tx_rw = env_inner.tx_rw().unwrap();
+        let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
+
+        let mut block = block_v9_tx3().clone();
+
+        block.height = u64::from(u32::MAX) + 1;
+        add_block(&block, &mut tables).unwrap();
+    }
+
+    /// We should panic if: `block.height` != the chain height
+    #[test]
+    #[should_panic(
+        expected = "assertion `left == right` failed: block.height (123) != chain_height (1)\n  left: 123\n right: 1"
+    )]
+    fn block_height_not_chain_height() {
+        let (env, _tmp) = tmp_concrete_env();
+        let env_inner = env.env_inner();
+        assert_all_tables_are_empty(&env);
+
+        let tx_rw = env_inner.tx_rw().unwrap();
+        let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
+
+        let mut block = block_v9_tx3().clone();
+        // HACK: `add_block()` asserts blocks with non-sequential heights
+        // cannot be added, to get around this, manually edit the block height.
+        block.height = 0;
+
+        // OK, `0 == 0`
+        assert_eq!(block.height, 0);
+        add_block(&block, &mut tables).unwrap();
+
+        // FAIL, `123 != 1`
+        block.height = 123;
+        add_block(&block, &mut tables).unwrap();
+    }
 }
diff --git a/database/src/ops/blockchain.rs b/database/src/ops/blockchain.rs
index 28c2284c..ce9cd69d 100644
--- a/database/src/ops/blockchain.rs
+++ b/database/src/ops/blockchain.rs
@@ -1,9 +1,182 @@
-//! Blockchain.
+//! Blockchain functions - chain height, generated coins, etc.
 
 //---------------------------------------------------------------------------------------------------- Import
+use crate::{
+    database::DatabaseRo,
+    error::RuntimeError,
+    ops::macros::doc_error,
+    tables::{BlockHeights, BlockInfos},
+    types::BlockHeight,
+};
 
 //---------------------------------------------------------------------------------------------------- Free Functions
-/// TODO
-pub fn height() {
-    todo!()
+/// Retrieve the height of the chain.
+///
+/// This returns the chain-tip, not the [`top_block_height`].
+///
+/// For example:
+/// - The blockchain has 0 blocks => this returns `0`
+/// - The blockchain has 1 block (height 0) => this returns `1`
+/// - The blockchain has 2 blocks (height 1) => this returns `2`
+///
+/// So the height of a new block would be `chain_height()`.
+#[doc = doc_error!()]
+#[inline]
+pub fn chain_height(
+    table_block_heights: &impl DatabaseRo<BlockHeights>,
+) -> Result<BlockHeight, RuntimeError> {
+    table_block_heights.len()
+}
+
+/// Retrieve the height of the top block.
+///
+/// This returns the height of the top block, not the [`chain_height`].
+///
+/// For example:
+/// - The blockchain has 0 blocks => this returns `Err(RuntimeError::KeyNotFound)`
+/// - The blockchain has 1 block (height 0) => this returns `Ok(0)`
+/// - The blockchain has 2 blocks (height 1) => this returns `Ok(1)`
+///
+/// Note that in cases where no blocks have been written to the
+/// database yet, an error is returned: `Err(RuntimeError::KeyNotFound)`.
+///
+#[doc = doc_error!()]
+#[inline]
+pub fn top_block_height(
+    table_block_heights: &impl DatabaseRo<BlockHeights>,
+) -> Result<BlockHeight, RuntimeError> {
+    match table_block_heights.len()? {
+        0 => Err(RuntimeError::KeyNotFound),
+        height => Ok(height - 1),
+    }
+}
+
+/// Retrieve the cumulative number of generated coins up until a certain [`BlockHeight`].
+///
+/// This returns the total amount of Monero generated up to `block_height`
+/// (including the block itself) in atomic units.
+///
+/// For example:
+/// - on the genesis block `0`, this returns the amount block `0` generated
+/// - on the next block `1`, this returns the amount block `0` and `1` generated
+///
+/// If no blocks have been added and `block_height == 0`
+/// (i.e., the cumulative generated coins before genesis block is being calculated),
+/// this returns `Ok(0)`.
+#[doc = doc_error!()]
+#[inline]
+pub fn cumulative_generated_coins(
+    block_height: &BlockHeight,
+    table_block_infos: &impl DatabaseRo<BlockInfos>,
+) -> Result<u64, RuntimeError> {
+    match table_block_infos.get(block_height) {
+        Ok(block_info) => Ok(block_info.cumulative_generated_coins),
+        Err(RuntimeError::KeyNotFound) if block_height == &0 => Ok(0),
+        Err(e) => Err(e),
+    }
+}
+
+//---------------------------------------------------------------------------------------------------- Tests
+#[cfg(test)]
+mod test {
+    use pretty_assertions::assert_eq;
+
+    use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3};
+
+    use super::*;
+    use crate::{
+        ops::block::add_block,
+        tables::Tables,
+        tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
+        transaction::TxRw,
+        Env, EnvInner,
+    };
+
+    /// Tests all above functions.
+    ///
+    /// Note that this doesn't test the correctness of values added, as the
+    /// functions have a pre-condition that the caller handles this.
+    ///
+    /// It simply tests if the proper tables are mutated, and if the data
+    /// stored and retrieved is the same.
+    #[test]
+    fn all_blockchain_functions() {
+        let (env, _tmp) = tmp_concrete_env();
+        let env_inner = env.env_inner();
+        assert_all_tables_are_empty(&env);
+
+        let mut blocks = [
+            block_v1_tx2().clone(),
+            block_v9_tx3().clone(),
+            block_v16_tx0().clone(),
+        ];
+        let blocks_len = u64::try_from(blocks.len()).unwrap();
+
+        // Add blocks.
+        {
+            let tx_rw = env_inner.tx_rw().unwrap();
+            let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
+
+            assert!(matches!(
+                top_block_height(tables.block_heights()),
+                Err(RuntimeError::KeyNotFound),
+            ));
+            assert_eq!(
+                0,
+                cumulative_generated_coins(&0, tables.block_infos()).unwrap()
+            );
+
+            for (i, block) in blocks.iter_mut().enumerate() {
+                let i = u64::try_from(i).unwrap();
+                // HACK: `add_block()` asserts blocks with non-sequential heights
+                // cannot be added, to get around this, manually edit the block height.
+                block.height = i;
+                add_block(block, &mut tables).unwrap();
+            }
+
+            // Assert reads are correct.
+            AssertTableLen {
+                block_infos: 3,
+                block_blobs: 3,
+                block_heights: 3,
+                key_images: 69,
+                num_outputs: 41,
+                pruned_tx_blobs: 0,
+                prunable_hashes: 0,
+                outputs: 111,
+                prunable_tx_blobs: 0,
+                rct_outputs: 8,
+                tx_blobs: 8,
+                tx_ids: 8,
+                tx_heights: 8,
+                tx_unlock_time: 3,
+            }
+            .assert(&tables);
+
+            assert_eq!(blocks_len, chain_height(tables.block_heights()).unwrap());
+            assert_eq!(
+                blocks_len - 1,
+                top_block_height(tables.block_heights()).unwrap()
+            );
+            assert_eq!(
+                cumulative_generated_coins(&0, tables.block_infos()).unwrap(),
+                14_535_350_982_449,
+            );
+            assert_eq!(
+                cumulative_generated_coins(&1, tables.block_infos()).unwrap(),
+                17_939_125_004_612,
+            );
+            assert_eq!(
+                cumulative_generated_coins(&2, tables.block_infos()).unwrap(),
+                18_539_125_004_612,
+            );
+            assert!(matches!(
+                cumulative_generated_coins(&3, tables.block_infos()),
+                Err(RuntimeError::KeyNotFound),
+            ));
+
+            drop(tables);
+            TxRw::commit(tx_rw).unwrap();
+        }
+    }
 }
diff --git a/database/src/ops/key_image.rs b/database/src/ops/key_image.rs
new file mode 100644
index 00000000..5d0786c3
--- /dev/null
+++ b/database/src/ops/key_image.rs
@@ -0,0 +1,127 @@
+//! Key image functions.
+
+//---------------------------------------------------------------------------------------------------- Import
+use crate::{
+    database::{DatabaseRo, DatabaseRw},
+    error::RuntimeError,
+    ops::macros::{doc_add_block_inner_invariant, doc_error},
+    tables::KeyImages,
+    types::KeyImage,
+};
+
+//---------------------------------------------------------------------------------------------------- Key image functions
+/// Add a [`KeyImage`] to the "spent" set in the database.
+#[doc = doc_add_block_inner_invariant!()]
+#[doc = doc_error!()]
+#[inline]
+pub fn add_key_image(
+    key_image: &KeyImage,
+    table_key_images: &mut impl DatabaseRw<KeyImages>,
+) -> Result<(), RuntimeError> {
+    table_key_images.put(key_image, &())
+}
+
+/// Remove a [`KeyImage`] from the "spent" set in the database.
+#[doc = doc_add_block_inner_invariant!()]
+#[doc = doc_error!()]
+#[inline]
+pub fn remove_key_image(
+    key_image: &KeyImage,
+    table_key_images: &mut impl DatabaseRw<KeyImages>,
+) -> Result<(), RuntimeError> {
+    table_key_images.delete(key_image)
+}
+
+/// Check if a [`KeyImage`] exists - i.e. if it is "spent".
+#[doc = doc_error!()]
+#[inline]
+pub fn key_image_exists(
+    key_image: &KeyImage,
+    table_key_images: &impl DatabaseRo<KeyImages>,
+) -> Result<bool, RuntimeError> {
+    table_key_images.contains(key_image)
+}
+
+//---------------------------------------------------------------------------------------------------- Tests
+#[cfg(test)]
+mod test {
+    use hex_literal::hex;
+
+    use super::*;
+    use crate::{
+        tables::{Tables, TablesMut},
+        tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
+        transaction::TxRw,
+        Env, EnvInner,
+    };
+
+    /// Tests all above key-image functions.
+    ///
+    /// Note that this doesn't test the correctness of values added, as the
+    /// functions have a pre-condition that the caller handles this.
+    ///
+    /// It simply tests if the proper tables are mutated, and if the data
+    /// stored and retrieved is the same.
+    #[test]
+    fn all_key_image_functions() {
+        let (env, _tmp) = tmp_concrete_env();
+        let env_inner = env.env_inner();
+        assert_all_tables_are_empty(&env);
+
+        let key_images = [
+            hex!("be1c87fc8f958f68fbe346a18dfb314204dca7573f61aae14840b8037da5c286"),
+            hex!("c5e4a592c11f34a12e13516ab2883b7c580d47b286b8fe8b15d57d2a18ade275"),
+            hex!("93288b646f858edfb0997ae08d7c76f4599b04c127f108e8e69a0696ae7ba334"),
+            hex!("726e9e3d8f826d24811183f94ff53aeba766c9efe6274eb80806f69b06bfa3fc"),
+        ];
+
+        // Add.
+        {
+            let tx_rw = env_inner.tx_rw().unwrap();
+            let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
+
+            for key_image in &key_images {
+                println!("add_key_image(): {}", hex::encode(key_image));
+                add_key_image(key_image, tables.key_images_mut()).unwrap();
+            }
+
+            drop(tables);
+            TxRw::commit(tx_rw).unwrap();
+        }
+
+        // Assert all reads are OK.
+        {
+            let tx_ro = env_inner.tx_ro().unwrap();
+            let tables = env_inner.open_tables(&tx_ro).unwrap();
+
+            // Assert only the proper tables were added to.
+            AssertTableLen {
+                key_images: tables.key_images().len().unwrap(),
+                ..Default::default()
+            }
+            .assert(&tables);
+
+            for key_image in &key_images {
+                println!("key_image_exists(): {}", hex::encode(key_image));
+                key_image_exists(key_image, tables.key_images()).unwrap();
+            }
+        }
+
+        // Remove.
+        {
+            let tx_rw = env_inner.tx_rw().unwrap();
+            let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
+
+            for key_image in key_images {
+                println!("remove_key_image(): {}", hex::encode(key_image));
+                remove_key_image(&key_image, tables.key_images_mut()).unwrap();
+                assert!(!key_image_exists(&key_image, tables.key_images()).unwrap());
+            }
+
+            drop(tables);
+            TxRw::commit(tx_rw).unwrap();
+        }
+
+        assert_all_tables_are_empty(&env);
+    }
+}
diff --git a/database/src/ops/macros.rs b/database/src/ops/macros.rs
new file mode 100644
index 00000000..b7cdba47
--- /dev/null
+++ b/database/src/ops/macros.rs
@@ -0,0 +1,33 @@
+//! Macros.
+//!
+//! These generate repetitive documentation
+//! for all the functions defined in `ops/`.
+
+//---------------------------------------------------------------------------------------------------- Documentation macros
+/// Generate documentation for the required `# Error` section.
+macro_rules! doc_error {
+    () => {
+        r#"# Errors
+This function returns [`RuntimeError::KeyNotFound`] if the input (if applicable) doesn't exist or other `RuntimeError`'s on database errors."#
+    };
+}
+pub(super) use doc_error;
+
+/// Generate `# Invariant` documentation for internal `fn`'s
+/// that should be called directly with caution.
+macro_rules! doc_add_block_inner_invariant {
+    () => {
+            r#"# ⚠️ Invariant ⚠️
+This function mainly exists to be used internally by the parent function [`crate::ops::block::add_block`].
+
+`add_block()` makes sure all data related to the input is mutated, while
+this function _does not_, it specifically mutates _particular_ tables.
+
+This is usually undesired - although this function is still available to call directly.
+
+When calling this function, ensure that either:
+1. This effect (incomplete database mutation) is what is desired, or that...
+2. ...the other tables will also be mutated to a correct state"#
+    };
+}
+pub(super) use doc_add_block_inner_invariant;
diff --git a/database/src/ops/mod.rs b/database/src/ops/mod.rs
index c4468e92..9f48bd65 100644
--- a/database/src/ops/mod.rs
+++ b/database/src/ops/mod.rs
@@ -4,18 +4,107 @@
 //! traits in this crate to generically call Monero-related
 //! database operations.
 //!
-//! # TODO
-//! TODO: These functions should pretty much map 1-1 to the `Request` enum.
+//! # `impl Table`
+//! `ops/` functions take [`Tables`](crate::tables::Tables) and
+//! [`TablesMut`](crate::tables::TablesMut) directly - these are
+//! _already opened_ database tables.
 //!
-//! TODO: These are function names from `old_database/` for now.
-//! The actual underlying functions (e.g `get()`) aren't implemented.
+//! As such, the function puts the responsibility
+//! of transactions, tables, etc on the caller.
 //!
-//! TODO: All of these functions need to take in generic
-//! database trait parameters (and their actual inputs).
+//! This does mean these functions are mostly as lean
+//! as possible, so calling them in a loop should be okay.
+//!
+//! # Atomicity
+//! As transactions are handled by the _caller_ of these functions,
+//! it is up to the caller to decide what happens if one of them
+//! returns an error.
+//!
+//! To maintain atomicity, transactions should be [`abort`](crate::transaction::TxRw::abort)ed
+//! if one of the functions failed.
+//!
+//! For example, if [`add_block()`](block::add_block) is called and returns an [`Err`],
+//! `abort`ing the transaction that opened the input `TablesMut` would reverse all tables
+//! mutated by `add_block()` up until the error, leaving it in the state it was in before
+//! `add_block()` was called.
+//!
+//! # Sub-functions
+//! The main functions within this module are mostly within the [`block`] module.
+//!
+//! Practically speaking, you should only be using 2 functions for mutation:
+//! - [`add_block`](block::add_block)
+//! - [`pop_block`](block::pop_block)
+//!
+//! The `block` functions are "parent" functions, calling other
+//! sub-functions such as [`add_output()`](output::add_output).
+//!
+//! `add_output()` itself only modifies output-related tables, while the `block` "parent"
+//! functions (like `add_block` and `pop_block`) modify all tables required.
+//!
+//! `add_block()` makes sure all data related to the input is mutated, while
+//! these sub-functions _do not_; they specifically mutate _particular_ tables.
+//!
+//! When calling these sub-functions, ensure that either:
+//! 1. This effect (incomplete database mutation) is what is desired, or that...
+//! 2. ...the other tables will also be mutated to a correct state
+//!
+//! # Example
+//! Simple usage of `ops`.
+//!
+//! ```rust
+//! use hex_literal::hex;
+//!
+//! use cuprate_test_utils::data::block_v16_tx0;
+//!
+//! use cuprate_database::{
+//!     ConcreteEnv,
+//!     config::ConfigBuilder,
+//!     Env, EnvInner,
+//!     tables::{Tables, TablesMut},
+//!     DatabaseRo, DatabaseRw, TxRo, TxRw,
+//!     ops::block::{add_block, pop_block},
+//! };
+//!
+//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! // Create a configuration for the database environment.
+//! let db_dir = tempfile::tempdir()?;
+//! let config = ConfigBuilder::new()
+//!     .db_directory(db_dir.path().to_path_buf())
+//!     .build();
+//!
+//! // Initialize the database environment.
+//! let env = ConcreteEnv::open(config)?;
+//!
+//! // Open up a transaction + tables for writing.
+//! let env_inner = env.env_inner();
+//! let tx_rw = env_inner.tx_rw()?;
+//! let mut tables = env_inner.open_tables_mut(&tx_rw)?;
+//!
+//! // Write a block to the database.
+//! let mut block = block_v16_tx0().clone();
+//! # block.height = 0;
+//! add_block(&block, &mut tables)?;
+//!
+//! // Commit the data written.
+//! drop(tables);
+//! TxRw::commit(tx_rw)?;
+//!
+//! // Read the data, assert it is correct.
+//! let tx_rw = env_inner.tx_rw()?;
+//! let mut tables = env_inner.open_tables_mut(&tx_rw)?;
+//! let (height, hash, serai_block) = pop_block(&mut tables)?;
+//!
+//! assert_eq!(height, 0);
+//! assert_eq!(serai_block, block.block);
+//! assert_eq!(hash, hex!("43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428"));
+//! # Ok(()) }
+//! ```
 
-pub mod alt_block;
 pub mod block;
+pub mod blockchain;
+pub mod key_image;
 pub mod output;
 pub mod property;
-pub mod spent_key;
 pub mod tx;
+
+mod macros;
diff --git a/database/src/ops/output.rs b/database/src/ops/output.rs
index e0db143f..5b7620e4 100644
--- a/database/src/ops/output.rs
+++ b/database/src/ops/output.rs
@@ -1,34 +1,371 @@
-//! Outputs.
+//! Output functions.
 
 //---------------------------------------------------------------------------------------------------- Import
+use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, Scalar};
+use monero_serai::{transaction::Timelock, H};
 
-//---------------------------------------------------------------------------------------------------- Free Functions
-/// TODO
-pub fn add_output() {
-    todo!()
+use cuprate_helper::map::u64_to_timelock;
+use cuprate_types::OutputOnChain;
+
+use crate::{
+    database::{DatabaseRo, DatabaseRw},
+    error::RuntimeError,
+    ops::macros::{doc_add_block_inner_invariant, doc_error},
+    tables::{Outputs, RctOutputs, Tables, TablesMut, TxUnlockTime},
+    types::{Amount, AmountIndex, Output, OutputFlags, PreRctOutputId, RctOutput},
+};
+
+//---------------------------------------------------------------------------------------------------- Pre-RCT Outputs
+/// Add a Pre-RCT [`Output`] to the database.
+///
+/// Upon [`Ok`], this function returns the [`PreRctOutputId`] that
+/// can be used to lookup the `Output` in [`get_output()`].
+///
+#[doc = doc_add_block_inner_invariant!()]
+#[doc = doc_error!()]
+#[inline]
+pub fn add_output(
+    amount: Amount,
+    output: &Output,
+    tables: &mut impl TablesMut,
+) -> Result<PreRctOutputId, RuntimeError> {
+    // FIXME: this would be much better expressed with a
+    // `btree_map::Entry`-like API, fix `trait DatabaseRw`.
+    let num_outputs = match tables.num_outputs().get(&amount) {
+        // Entry with `amount` already exists.
+        Ok(num_outputs) => num_outputs,
+        // Entry with `amount` didn't exist, this is
+        // the 1st output with this amount.
+        Err(RuntimeError::KeyNotFound) => 0,
+        Err(e) => return Err(e),
+    };
+    // Update the amount of outputs.
+    tables.num_outputs_mut().put(&amount, &(num_outputs + 1))?;
+
+    let pre_rct_output_id = PreRctOutputId {
+        amount,
+        // The new `amount_index` is the current count of outputs with the same amount.
+        amount_index: num_outputs,
+    };
+
+    tables.outputs_mut().put(&pre_rct_output_id, output)?;
+    Ok(pre_rct_output_id)
 }
 
-/// TODO
-pub fn remove_output() {
-    todo!()
+/// Remove a Pre-RCT [`Output`] from the database.
+#[doc = doc_add_block_inner_invariant!()]
+#[doc = doc_error!()]
+#[inline]
+pub fn remove_output(
+    pre_rct_output_id: &PreRctOutputId,
+    tables: &mut impl TablesMut,
+) -> Result<(), RuntimeError> {
+    // Decrement the output count by 1, or delete the entry outright.
+    // FIXME: this would be much better expressed with a
+    // `btree_map::Entry`-like API, fix `trait DatabaseRw`.
+    tables
+        .num_outputs_mut()
+        .update(&pre_rct_output_id.amount, |num_outputs| {
+            // INVARIANT: Should never be 0.
+            if num_outputs == 1 {
+                None
+            } else {
+                Some(num_outputs - 1)
+            }
+        })?;
+
+    // Delete the output data itself.
+    tables.outputs_mut().delete(pre_rct_output_id)
 }
 
-/// TODO
-pub fn get_output() {
-    todo!()
+/// Retrieve a Pre-RCT [`Output`] from the database.
+#[doc = doc_error!()]
+#[inline]
+pub fn get_output(
+    pre_rct_output_id: &PreRctOutputId,
+    table_outputs: &impl DatabaseRo<Outputs>,
+) -> Result<Output, RuntimeError> {
+    table_outputs.get(pre_rct_output_id)
 }
 
-/// TODO
-pub fn get_output_list() {
-    todo!()
+/// How many pre-RCT [`Output`]s are there?
+///
+/// This returns the amount of pre-RCT outputs currently stored.
+#[doc = doc_error!()]
+#[inline]
+pub fn get_num_outputs(table_outputs: &impl DatabaseRo<Outputs>) -> Result<u64, RuntimeError> {
+    table_outputs.len()
 }
 
-/// TODO
-pub fn get_rct_num_outputs() {
-    todo!()
+//---------------------------------------------------------------------------------------------------- RCT Outputs
+/// Add an [`RctOutput`] to the database.
+///
+/// Upon [`Ok`], this function returns the [`AmountIndex`] that
+/// can be used to lookup the `RctOutput` in [`get_rct_output()`].
+#[doc = doc_add_block_inner_invariant!()]
+#[doc = doc_error!()]
+#[inline]
+pub fn add_rct_output(
+    rct_output: &RctOutput,
+    table_rct_outputs: &mut impl DatabaseRw<RctOutputs>,
+) -> Result<AmountIndex, RuntimeError> {
+    let amount_index = get_rct_num_outputs(table_rct_outputs)?;
+    table_rct_outputs.put(&amount_index, rct_output)?;
+    Ok(amount_index)
 }
 
-/// TODO
-pub fn get_pre_rct_num_outputs() {
-    todo!()
+/// Remove an [`RctOutput`] from the database.
+#[doc = doc_add_block_inner_invariant!()]
+#[doc = doc_error!()]
+#[inline]
+pub fn remove_rct_output(
+    amount_index: &AmountIndex,
+    table_rct_outputs: &mut impl DatabaseRw<RctOutputs>,
+) -> Result<(), RuntimeError> {
+    table_rct_outputs.delete(amount_index)
+}
+
+/// Retrieve an [`RctOutput`] from the database.
+#[doc = doc_error!()]
+#[inline]
+pub fn get_rct_output(
+    amount_index: &AmountIndex,
+    table_rct_outputs: &impl DatabaseRo<RctOutputs>,
+) -> Result<RctOutput, RuntimeError> {
+    table_rct_outputs.get(amount_index)
+}
+
+/// How many [`RctOutput`]s are there?
+///
+/// This returns the amount of RCT outputs currently stored.
+#[doc = doc_error!()]
+#[inline]
+pub fn get_rct_num_outputs(
+    table_rct_outputs: &impl DatabaseRo<RctOutputs>,
+) -> Result<u64, RuntimeError> {
+    table_rct_outputs.len()
+}
+
+//---------------------------------------------------------------------------------------------------- Mapping functions
+/// Map an [`Output`] to a [`cuprate_types::OutputOnChain`].
+#[doc = doc_error!()]
+pub fn output_to_output_on_chain(
+    output: &Output,
+    amount: Amount,
+    table_tx_unlock_time: &impl DatabaseRo<TxUnlockTime>,
+) -> Result<OutputOnChain, RuntimeError> {
+    // FIXME: implement lookup table for common values:
+    // <https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/ringct/rctOps.cpp#L322>
+    let commitment = ED25519_BASEPOINT_POINT + H() * Scalar::from(amount);
+
+    let time_lock = if output
+        .output_flags
+        .contains(OutputFlags::NON_ZERO_UNLOCK_TIME)
+    {
+        u64_to_timelock(table_tx_unlock_time.get(&output.tx_idx)?)
+    } else {
+        Timelock::None
+    };
+
+    let key = CompressedEdwardsY::from_slice(&output.key)
+        .map(|y| y.decompress())
+        .unwrap_or(None);
+
+    Ok(OutputOnChain {
+        height: u64::from(output.height),
+        time_lock,
+        key,
+        commitment,
+    })
+}
+
+/// Map an [`RctOutput`] to a [`cuprate_types::OutputOnChain`].
+///
+/// # Panics
+/// This function will panic if `rct_output`'s `commitment` fails to decompress
+/// into a valid [`EdwardsPoint`](curve25519_dalek::edwards::EdwardsPoint).
+///
+/// This should normally not happen as commitments that
+/// are stored in the database should always be valid.
+#[doc = doc_error!()]
+pub fn rct_output_to_output_on_chain(
+    rct_output: &RctOutput,
+    table_tx_unlock_time: &impl DatabaseRo<TxUnlockTime>,
+) -> Result<OutputOnChain, RuntimeError> {
+    // INVARIANT: Commitments stored are valid when stored by the database.
+    let commitment = CompressedEdwardsY::from_slice(&rct_output.commitment)
+        .unwrap()
+        .decompress()
+        .unwrap();
+
+    let time_lock = if rct_output
+        .output_flags
+        .contains(OutputFlags::NON_ZERO_UNLOCK_TIME)
+    {
+        u64_to_timelock(table_tx_unlock_time.get(&rct_output.tx_idx)?)
+    } else {
+        Timelock::None
+    };
+
+    let key = CompressedEdwardsY::from_slice(&rct_output.key)
+        .map(|y| y.decompress())
+        .unwrap_or(None);
+
+    Ok(OutputOnChain {
+        height: u64::from(rct_output.height),
+        time_lock,
+        key,
+        commitment,
+    })
+}
+
+/// Map an [`PreRctOutputId`] to an [`OutputOnChain`].
+///
+/// Note that this still supports RCT outputs, in that case, [`PreRctOutputId::amount`] should be `0`.
+#[doc = doc_error!()]
+pub fn id_to_output_on_chain(
+    id: &PreRctOutputId,
+    tables: &impl Tables,
+) -> Result<OutputOnChain, RuntimeError> {
+    // v2 transactions.
+    if id.amount == 0 {
+        let rct_output = get_rct_output(&id.amount_index, tables.rct_outputs())?;
+        let output_on_chain = rct_output_to_output_on_chain(&rct_output, tables.tx_unlock_time())?;
+
+        Ok(output_on_chain)
+    } else {
+        // v1 transactions.
+        let output = get_output(id, tables.outputs())?;
+        let output_on_chain =
+            output_to_output_on_chain(&output, id.amount, tables.tx_unlock_time())?;
+
+        Ok(output_on_chain)
+    }
+}
+
+//---------------------------------------------------------------------------------------------------- Tests
+#[cfg(test)]
+mod test {
+    use super::*;
+    use crate::{
+        tables::{Tables, TablesMut},
+        tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
+        types::OutputFlags,
+        Env, EnvInner,
+    };
+
+    use pretty_assertions::assert_eq;
+
+    /// Dummy `Output`.
+    const OUTPUT: Output = Output {
+        key: [44; 32],
+        height: 0,
+        output_flags: OutputFlags::NON_ZERO_UNLOCK_TIME,
+        tx_idx: 0,
+    };
+
+    /// Dummy `RctOutput`.
+    const RCT_OUTPUT: RctOutput = RctOutput {
+        key: [88; 32],
+        height: 1,
+        output_flags: OutputFlags::empty(),
+        tx_idx: 1,
+        commitment: [100; 32],
+    };
+
+    /// Dummy `Amount`
+    const AMOUNT: Amount = 22;
+
+    /// Tests all above output functions when only inputting `Output` data (no Block).
+    ///
+    /// Note that this doesn't test the correctness of values added, as the
+    /// functions have a pre-condition that the caller handles this.
+    ///
+    /// It simply tests if the proper tables are mutated, and if the data
+    /// stored and retrieved is the same.
+    #[test]
+    fn all_output_functions() {
+        let (env, _tmp) = tmp_concrete_env();
+        let env_inner = env.env_inner();
+        assert_all_tables_are_empty(&env);
+
+        let tx_rw = env_inner.tx_rw().unwrap();
+        let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
+
+        // Assert length is correct.
+        assert_eq!(get_num_outputs(tables.outputs()).unwrap(), 0);
+        assert_eq!(get_rct_num_outputs(tables.rct_outputs()).unwrap(), 0);
+
+        // Add outputs.
+        let pre_rct_output_id = add_output(AMOUNT, &OUTPUT, &mut tables).unwrap();
+        let amount_index = add_rct_output(&RCT_OUTPUT, tables.rct_outputs_mut()).unwrap();
+
+        assert_eq!(
+            pre_rct_output_id,
+            PreRctOutputId {
+                amount: AMOUNT,
+                amount_index: 0,
+            }
+        );
+
+        // Assert all reads of the outputs are OK.
+        {
+            // Assert proper tables were added to.
+            AssertTableLen {
+                block_infos: 0,
+                block_blobs: 0,
+                block_heights: 0,
+                key_images: 0,
+                num_outputs: 1,
+                pruned_tx_blobs: 0,
+                prunable_hashes: 0,
+                outputs: 1,
+                prunable_tx_blobs: 0,
+                rct_outputs: 1,
+                tx_blobs: 0,
+                tx_ids: 0,
+                tx_heights: 0,
+                tx_unlock_time: 0,
+            }
+            .assert(&tables);
+
+            // Assert length is correct.
+            assert_eq!(get_num_outputs(tables.outputs()).unwrap(), 1);
+            assert_eq!(get_rct_num_outputs(tables.rct_outputs()).unwrap(), 1);
+            assert_eq!(1, tables.num_outputs().get(&AMOUNT).unwrap());
+
+            // Assert value is the same after retrieval.
+            assert_eq!(
+                OUTPUT,
+                get_output(&pre_rct_output_id, tables.outputs()).unwrap(),
+            );
+
+            assert_eq!(
+                RCT_OUTPUT,
+                get_rct_output(&amount_index, tables.rct_outputs()).unwrap(),
+            );
+        }
+
+        // Remove the outputs.
+        {
+            remove_output(&pre_rct_output_id, &mut tables).unwrap();
+            remove_rct_output(&amount_index, tables.rct_outputs_mut()).unwrap();
+
+            // Assert value no longer exists.
+            assert!(matches!(
+                get_output(&pre_rct_output_id, tables.outputs()),
+                Err(RuntimeError::KeyNotFound)
+            ));
+            assert!(matches!(
+                get_rct_output(&amount_index, tables.rct_outputs()),
+                Err(RuntimeError::KeyNotFound)
+            ));
+
+            // Assert length is correct.
+            assert_eq!(get_num_outputs(tables.outputs()).unwrap(), 0);
+            assert_eq!(get_rct_num_outputs(tables.rct_outputs()).unwrap(), 0);
+        }
+
+        assert_all_tables_are_empty(&env);
+    }
 }
diff --git a/database/src/ops/property.rs b/database/src/ops/property.rs
index 8801ae8f..279c3552 100644
--- a/database/src/ops/property.rs
+++ b/database/src/ops/property.rs
@@ -1,9 +1,39 @@
-//! Properties.
+//! Database properties functions - version, pruning, etc.
+//!
+//! SOMEDAY: the database `properties` table is not yet implemented.
 
 //---------------------------------------------------------------------------------------------------- Import
+use monero_pruning::PruningSeed;
 
+use crate::{error::RuntimeError, ops::macros::doc_error};
 //---------------------------------------------------------------------------------------------------- Free Functions
-/// TODO
-pub fn get_blockchain_pruning_seed() {
-    todo!()
+/// SOMEDAY
+///
+#[doc = doc_error!()]
+///
+/// # Example
+/// ```rust
+/// # use cuprate_database::{*, tables::*, ops::block::*, ops::tx::*};
+/// // SOMEDAY
+/// ```
+#[inline]
+pub const fn get_blockchain_pruning_seed() -> Result<PruningSeed, RuntimeError> {
+    // SOMEDAY: impl pruning.
+    // We need a DB properties table.
+    Ok(PruningSeed::NotPruned)
+}
+
+/// SOMEDAY
+///
+#[doc = doc_error!()]
+///
+/// # Example
+/// ```rust
+/// # use cuprate_database::{*, tables::*, ops::block::*, ops::tx::*};
+/// // SOMEDAY
+/// ```
+#[inline]
+pub const fn db_version() -> Result<u64, RuntimeError> {
+    // SOMEDAY: We need a DB properties table.
+    Ok(crate::constants::DATABASE_VERSION)
 }
diff --git a/database/src/ops/spent_key.rs b/database/src/ops/spent_key.rs
deleted file mode 100644
index a8e6fe02..00000000
--- a/database/src/ops/spent_key.rs
+++ /dev/null
@@ -1,19 +0,0 @@
-//! Spent keys.
-
-//---------------------------------------------------------------------------------------------------- Import
-
-//---------------------------------------------------------------------------------------------------- Free Functions
-/// TODO
-pub fn add_spent_key() {
-    todo!()
-}
-
-/// TODO
-pub fn remove_spent_key() {
-    todo!()
-}
-
-/// TODO
-pub fn is_spent_key_recorded() {
-    todo!()
-}
diff --git a/database/src/ops/tx.rs b/database/src/ops/tx.rs
index 9acafc9c..b4f2984b 100644
--- a/database/src/ops/tx.rs
+++ b/database/src/ops/tx.rs
@@ -1,64 +1,434 @@
-//! Transactions.
+//! Transaction functions.
 
 //---------------------------------------------------------------------------------------------------- Import
+use bytemuck::TransparentWrapper;
+use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, Scalar};
+use monero_serai::transaction::{Input, Timelock, Transaction};
 
-//---------------------------------------------------------------------------------------------------- Free Functions
-/// TODO
-pub fn add_transaction() {
-    todo!()
+use crate::{
+    database::{DatabaseRo, DatabaseRw},
+    error::RuntimeError,
+    ops::{
+        key_image::{add_key_image, remove_key_image},
+        macros::{doc_add_block_inner_invariant, doc_error},
+        output::{
+            add_output, add_rct_output, get_rct_num_outputs, remove_output, remove_rct_output,
+        },
+    },
+    tables::{TablesMut, TxBlobs, TxIds},
+    types::{BlockHeight, Output, OutputFlags, PreRctOutputId, RctOutput, TxHash, TxId},
+    StorableVec,
+};
+
+//---------------------------------------------------------------------------------------------------- Private
+/// Add a [`Transaction`] (and related data) to the database.
+///
+/// The `block_height` is the block that this `tx` belongs to.
+///
+/// Note that the caller's input is trusted implicitly and no checks
+/// are done (in this function) whether the `block_height` is correct or not.
+///
+#[doc = doc_add_block_inner_invariant!()]
+///
+/// # Notes
+/// This function is different from other sub-functions and slightly more similar to
+/// [`add_block()`](crate::ops::block::add_block) in that it calls other sub-functions.
+///
+/// This function calls:
+/// - [`add_output()`]
+/// - [`add_rct_output()`]
+/// - [`add_key_image()`]
+///
+/// Thus, after [`add_tx`], those values (outputs and key images)
+/// will be added to database tables as well.
+///
+/// # Panics
+/// This function will panic if:
+/// - `block.height > u32::MAX` (not normally possible)
+#[doc = doc_error!()]
+#[inline]
+pub fn add_tx(
+    tx: &Transaction,
+    tx_blob: &Vec<u8>,
+    tx_hash: &TxHash,
+    block_height: &BlockHeight,
+    tables: &mut impl TablesMut,
+) -> Result<TxId, RuntimeError> {
+    let tx_id = get_num_tx(tables.tx_ids_mut())?;
+
+    //------------------------------------------------------ Transaction data
+    tables.tx_ids_mut().put(tx_hash, &tx_id)?;
+    tables.tx_heights_mut().put(&tx_id, block_height)?;
+    tables
+        .tx_blobs_mut()
+        .put(&tx_id, StorableVec::wrap_ref(tx_blob))?;
+
+    //------------------------------------------------------ Timelocks
+    // Height/time is not differentiated via type, but rather:
+    // "height is any value less than 500_000_000 and timestamp is any value above"
+    // so the `u64/usize` is stored without any tag.
+    //
+    // <https://github.com/Cuprate/cuprate/pull/102#discussion_r1558504285>
+    match tx.prefix.timelock {
+        Timelock::None => (),
+        Timelock::Block(height) => tables.tx_unlock_time_mut().put(&tx_id, &(height as u64))?,
+        Timelock::Time(time) => tables.tx_unlock_time_mut().put(&tx_id, &time)?,
+    }
+
+    //------------------------------------------------------ Pruning
+    // SOMEDAY: implement pruning after `monero-serai` does.
+    // if let PruningSeed::Pruned(decompressed_pruning_seed) = get_blockchain_pruning_seed()? {
+    // SOMEDAY: what to store here? which table?
+    // }
+
+    //------------------------------------------------------
+    let Ok(height) = u32::try_from(*block_height) else {
+        panic!("add_tx(): block_height ({block_height}) > u32::MAX");
+    };
+
+    //------------------------------------------------------ Key Images
+    // Is this a miner transaction?
+    // Which table we add the output data to depends on this.
+    // <https://github.com/monero-project/monero/blob/eac1b86bb2818ac552457380c9dd421fb8935e5b/src/blockchain_db/blockchain_db.cpp#L212-L216>
+    let mut miner_tx = false;
+
+    // Key images.
+    for inputs in &tx.prefix.inputs {
+        match inputs {
+            // Key images.
+            Input::ToKey { key_image, .. } => {
+                add_key_image(key_image.compress().as_bytes(), tables.key_images_mut())?;
+            }
+            // This is a miner transaction, set it for later use.
+            Input::Gen(_) => miner_tx = true,
+        }
+    }
+
+    //------------------------------------------------------ Outputs
+    // Output bit flags.
+    // Set to a non-zero bit value if the unlock time is non-zero.
+    let output_flags = match tx.prefix.timelock {
+        Timelock::None => OutputFlags::empty(),
+        Timelock::Block(_) | Timelock::Time(_) => OutputFlags::NON_ZERO_UNLOCK_TIME,
+    };
+
+    let mut amount_indices = Vec::with_capacity(tx.prefix.outputs.len());
+
+    for (i, output) in tx.prefix.outputs.iter().enumerate() {
+        let key = *output.key.as_bytes();
+
+        // Outputs with clear amounts.
+        let amount_index = if let Some(amount) = output.amount {
+            // RingCT (v2 transaction) miner outputs.
+            if miner_tx && tx.prefix.version == 2 {
+                // Create commitment.
+                // <https://github.com/Cuprate/cuprate/pull/102#discussion_r1559489302>
+                // FIXME: implement lookup table for common values:
+                // <https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/ringct/rctOps.cpp#L322>
+                let commitment = (ED25519_BASEPOINT_POINT
+                    + monero_serai::H() * Scalar::from(amount))
+                .compress()
+                .to_bytes();
+
+                add_rct_output(
+                    &RctOutput {
+                        key,
+                        height,
+                        output_flags,
+                        tx_idx: tx_id,
+                        commitment,
+                    },
+                    tables.rct_outputs_mut(),
+                )?
+            // Pre-RingCT outputs.
+            } else {
+                add_output(
+                    amount,
+                    &Output {
+                        key,
+                        height,
+                        output_flags,
+                        tx_idx: tx_id,
+                    },
+                    tables,
+                )?
+                .amount_index
+            }
+        // RingCT outputs.
+        } else {
+            let commitment = tx.rct_signatures.base.commitments[i].compress().to_bytes();
+            add_rct_output(
+                &RctOutput {
+                    key,
+                    height,
+                    output_flags,
+                    tx_idx: tx_id,
+                    commitment,
+                },
+                tables.rct_outputs_mut(),
+            )?
+        };
+
+        amount_indices.push(amount_index);
+    } // for each output
+
+    tables
+        .tx_outputs_mut()
+        .put(&tx_id, &StorableVec(amount_indices))?;
+
+    Ok(tx_id)
 }
 
-/// TODO
-pub fn add_transaction_data() {
-    todo!()
+/// Remove a transaction from the database with its [`TxHash`].
+///
+/// This returns the [`TxId`] and [`TxBlob`](crate::types::TxBlob) of the removed transaction.
+///
+#[doc = doc_add_block_inner_invariant!()]
+///
+/// # Notes
+/// As mentioned in [`add_tx`], this function will call other sub-functions:
+/// - [`remove_output()`]
+/// - [`remove_rct_output()`]
+/// - [`remove_key_image()`]
+///
+/// Thus, after [`remove_tx`], those values (outputs and key images)
+/// will be removed from database tables as well.
+///
+#[doc = doc_error!()]
+#[inline]
+pub fn remove_tx(
+    tx_hash: &TxHash,
+    tables: &mut impl TablesMut,
+) -> Result<(TxId, Transaction), RuntimeError> {
+    //------------------------------------------------------ Transaction data
+    let tx_id = tables.tx_ids_mut().take(tx_hash)?;
+    let tx_blob = tables.tx_blobs_mut().take(&tx_id)?;
+    tables.tx_heights_mut().delete(&tx_id)?;
+    tables.tx_outputs_mut().delete(&tx_id)?;
+
+    //------------------------------------------------------ Pruning
+    // SOMEDAY: implement pruning after `monero-serai` does.
+    // table_prunable_hashes.delete(&tx_id)?;
+    // table_prunable_tx_blobs.delete(&tx_id)?;
+    // if let PruningSeed::Pruned(decompressed_pruning_seed) = get_blockchain_pruning_seed()? {
+    // SOMEDAY: what to remove here? which table?
+    // }
+
+    //------------------------------------------------------ Unlock Time
+    match tables.tx_unlock_time_mut().delete(&tx_id) {
+        Ok(()) | Err(RuntimeError::KeyNotFound) => (),
+        // An actual error occurred, return.
+        Err(e) => return Err(e),
+    }
+
+    //------------------------------------------------------
+    // Refer to the inner transaction type from now on.
+    let tx = Transaction::read(&mut tx_blob.0.as_slice())?;
+
+    //------------------------------------------------------ Key Images
+    // Is this a miner transaction?
+    let mut miner_tx = false;
+    for inputs in &tx.prefix.inputs {
+        match inputs {
+            // Key images.
+            Input::ToKey { key_image, .. } => {
+                remove_key_image(key_image.compress().as_bytes(), tables.key_images_mut())?;
+            }
+            // This is a miner transaction, set it for later use.
+            Input::Gen(_) => miner_tx = true,
+        }
+    } // for each input
+
+    //------------------------------------------------------ Outputs
+    // Remove each output in the transaction.
+    for output in &tx.prefix.outputs {
+        // Outputs with clear amounts.
+        if let Some(amount) = output.amount {
+            // RingCT miner outputs.
+            if miner_tx && tx.prefix.version == 2 {
+                let amount_index = get_rct_num_outputs(tables.rct_outputs())? - 1;
+                remove_rct_output(&amount_index, tables.rct_outputs_mut())?;
+            // Pre-RingCT outputs.
+            } else {
+                let amount_index = tables.num_outputs_mut().get(&amount)? - 1;
+                remove_output(
+                    &PreRctOutputId {
+                        amount,
+                        amount_index,
+                    },
+                    tables,
+                )?;
+            }
+        // RingCT outputs.
+        } else {
+            let amount_index = get_rct_num_outputs(tables.rct_outputs())? - 1;
+            remove_rct_output(&amount_index, tables.rct_outputs_mut())?;
+        }
+    } // for each output
+
+    Ok((tx_id, tx))
 }
 
-/// TODO
-pub fn remove_transaction() {
-    todo!()
+//---------------------------------------------------------------------------------------------------- `get_tx_*`
+/// Retrieve a [`Transaction`] from the database with its [`TxHash`].
+#[doc = doc_error!()]
+#[inline]
+pub fn get_tx(
+    tx_hash: &TxHash,
+    table_tx_ids: &impl DatabaseRo<TxIds>,
+    table_tx_blobs: &impl DatabaseRo<TxBlobs>,
+) -> Result<Transaction, RuntimeError> {
+    get_tx_from_id(&table_tx_ids.get(tx_hash)?, table_tx_blobs)
 }
 
-/// TODO
-pub fn remove_transaction_data() {
-    todo!()
+/// Retrieve a [`Transaction`] from the database with its [`TxId`].
+#[doc = doc_error!()]
+#[inline]
+pub fn get_tx_from_id(
+    tx_id: &TxId,
+    table_tx_blobs: &impl DatabaseRo<TxBlobs>,
+) -> Result<Transaction, RuntimeError> {
+    let tx_blob = table_tx_blobs.get(tx_id)?.0;
+    Ok(Transaction::read(&mut tx_blob.as_slice())?)
 }
 
-/// TODO
-pub fn remove_tx_outputs() {
-    todo!()
+//----------------------------------------------------------------------------------------------------
+/// How many [`Transaction`]s are there?
+///
+/// This returns the amount of transactions currently stored.
+///
+/// For example:
+/// - 0 transactions exist => returns 0
+/// - 1 transaction exists => returns 1
+/// - 5 transactions exist => returns 5
+/// - etc
+#[doc = doc_error!()]
+#[inline]
+pub fn get_num_tx(table_tx_ids: &impl DatabaseRo<TxIds>) -> Result<u64, RuntimeError> {
+    table_tx_ids.len()
 }
 
-/// TODO
-pub fn get_num_tx() {
-    todo!()
+//----------------------------------------------------------------------------------------------------
+/// Check if a transaction exists in the database.
+///
+/// Returns `true` if it does, else `false`.
+#[doc = doc_error!()]
+#[inline]
+pub fn tx_exists(
+    tx_hash: &TxHash,
+    table_tx_ids: &impl DatabaseRo<TxIds>,
+) -> Result<bool, RuntimeError> {
+    table_tx_ids.contains(tx_hash)
 }
 
-/// TODO
-pub fn tx_exists() {
-    todo!()
-}
+//---------------------------------------------------------------------------------------------------- Tests
+#[cfg(test)]
+mod test {
+    use super::*;
+    use crate::{
+        tables::Tables,
+        tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen},
+        transaction::TxRw,
+        Env, EnvInner,
+    };
+    use cuprate_test_utils::data::{tx_v1_sig0, tx_v1_sig2, tx_v2_rct3};
+    use pretty_assertions::assert_eq;
 
-/// TODO
-pub fn get_tx_unlock_time() {
-    todo!()
-}
+    /// Tests all above tx functions when only inputting `Transaction` data (no Block).
+    #[test]
+    fn all_tx_functions() {
+        let (env, _tmp) = tmp_concrete_env();
+        let env_inner = env.env_inner();
+        assert_all_tables_are_empty(&env);
 
-/// TODO
-pub fn get_tx() {
-    todo!()
-}
+        // Monero `Transaction`, not database tx.
+        let txs = [tx_v1_sig0(), tx_v1_sig2(), tx_v2_rct3()];
 
-/// TODO
-pub fn get_tx_list() {
-    todo!()
-}
+        // Add transactions.
+        let tx_ids = {
+            let tx_rw = env_inner.tx_rw().unwrap();
+            let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
 
-/// TODO
-pub fn get_pruned_tx() {
-    todo!()
-}
+            let tx_ids = txs
+                .iter()
+                .map(|tx| {
+                    println!("add_tx(): {tx:#?}");
+                    add_tx(&tx.tx, &tx.tx_blob, &tx.tx_hash, &0, &mut tables).unwrap()
+                })
+                .collect::<Vec<TxId>>();
 
-/// TODO
-pub fn get_tx_block_height() {
-    todo!()
+            drop(tables);
+            TxRw::commit(tx_rw).unwrap();
+
+            tx_ids
+        };
+
+        // Assert all reads of the transactions are OK.
+        let tx_hashes = {
+            let tx_ro = env_inner.tx_ro().unwrap();
+            let tables = env_inner.open_tables(&tx_ro).unwrap();
+
+            // Assert only the proper tables were added to.
+            AssertTableLen {
+                block_infos: 0,
+                block_blobs: 0,
+                block_heights: 0,
+                key_images: 4, // added to key images
+                pruned_tx_blobs: 0,
+                prunable_hashes: 0,
+                num_outputs: 9,
+                outputs: 10, // added to outputs
+                prunable_tx_blobs: 0,
+                rct_outputs: 2,
+                tx_blobs: 3,
+                tx_ids: 3,
+                tx_heights: 3,
+                tx_unlock_time: 1, // only 1 has a timelock
+            }
+            .assert(&tables);
+
+            // Both from ID and hash should result in getting the same transaction.
+            let mut tx_hashes = vec![];
+            for (i, tx_id) in tx_ids.iter().enumerate() {
+                println!("tx_ids.iter(): i: {i}, tx_id: {tx_id}");
+
+                let tx_get_from_id = get_tx_from_id(tx_id, tables.tx_blobs()).unwrap();
+                let tx_hash = tx_get_from_id.hash();
+                let tx_get = get_tx(&tx_hash, tables.tx_ids(), tables.tx_blobs()).unwrap();
+
+                println!("tx_ids.iter(): tx_get_from_id: {tx_get_from_id:#?}, tx_get: {tx_get:#?}");
+
+                assert_eq!(tx_get_from_id.hash(), tx_get.hash());
+                assert_eq!(tx_get_from_id.hash(), txs[i].tx_hash);
+                assert_eq!(tx_get_from_id, tx_get);
+                assert_eq!(tx_get, txs[i].tx);
+                assert!(tx_exists(&tx_hash, tables.tx_ids()).unwrap());
+
+                tx_hashes.push(tx_hash);
+            }
+
+            tx_hashes
+        };
+
+        // Remove the transactions.
+        {
+            let tx_rw = env_inner.tx_rw().unwrap();
+            let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap();
+
+            for tx_hash in tx_hashes {
+                println!("remove_tx(): tx_hash: {tx_hash:?}");
+
+                let (tx_id, _) = remove_tx(&tx_hash, &mut tables).unwrap();
+                assert!(matches!(
+                    get_tx_from_id(&tx_id, tables.tx_blobs()),
+                    Err(RuntimeError::KeyNotFound)
+                ));
+            }
+
+            drop(tables);
+            TxRw::commit(tx_rw).unwrap();
+        }
+
+        assert_all_tables_are_empty(&env);
+    }
 }
diff --git a/database/src/resize.rs b/database/src/resize.rs
index 62ecf5e7..cf185029 100644
--- a/database/src/resize.rs
+++ b/database/src/resize.rs
@@ -1,7 +1,7 @@
 //! Database memory map resizing algorithms.
 //!
 //! This modules contains [`ResizeAlgorithm`] which determines how the
-//! [`ConcreteEnv`](crate::ConcreteEnv) resizes it's memory map when needing more space.
+//! [`ConcreteEnv`](crate::ConcreteEnv) resizes its memory map when needing more space.
 //! This value is in [`Config`](crate::config::Config) and can be selected at runtime.
 //!
 //! Although, it is only used by `ConcreteEnv` if [`Env::MANUAL_RESIZE`](crate::env::Env::MANUAL_RESIZE) is `true`.
@@ -27,12 +27,12 @@ use std::{num::NonZeroUsize, sync::OnceLock};
 /// The function/algorithm used by the
 /// database when resizing the memory map.
 ///
-/// # TODO
-/// We could test around with different algorithms.
-/// Calling `heed::Env::resize` is surprisingly fast,
-/// around `0.0000082s` on my machine. We could probably
-/// get away with smaller and more frequent resizes.
-/// **With the caveat being we are taking a `WriteGuard` to a `RwLock`.**
+// # SOMEDAY
+// We could test around with different algorithms.
+// Calling `heed::Env::resize` is surprisingly fast,
+// around `0.0000082s` on my machine. We could probably
+// get away with smaller and more frequent resizes.
+// **With the caveat being we are taking a `WriteGuard` to a `RwLock`.**
 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd)]
 #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
 pub enum ResizeAlgorithm {
@@ -59,6 +59,8 @@ impl ResizeAlgorithm {
     }
 
     /// Maps the `self` variant to the free functions in [`crate::resize`].
+    ///
+    /// This function returns the _new_ memory map size in bytes.
     #[inline]
     pub fn resize(&self, current_size_bytes: usize) -> NonZeroUsize {
         match self {
diff --git a/database/src/service/free.rs b/database/src/service/free.rs
index 53d0d095..fb40a065 100644
--- a/database/src/service/free.rs
+++ b/database/src/service/free.rs
@@ -6,7 +6,7 @@ use std::sync::Arc;
 use crate::{
     config::Config,
     error::InitError,
-    service::{write::DatabaseWriter, DatabaseReadHandle, DatabaseWriteHandle},
+    service::{DatabaseReadHandle, DatabaseWriteHandle},
     ConcreteEnv, Env,
 };
 
@@ -20,21 +20,11 @@ use crate::{
 ///
 /// # Errors
 /// This will forward the error if [`Env::open`] failed.
-//
-// INVARIANT:
-// `cuprate_database` depends on the fact that this is the only
-// function that hands out the handles. After that, they can be
-// cloned, however they must eventually be dropped and shouldn't
-// be leaked.
-//
-// As the reader thread-pool and writer thread both rely on the
-// disconnection (drop) of these channels for shutdown behavior,
-// leaking these handles could cause data to not get flushed to disk.
 pub fn init(config: Config) -> Result<(DatabaseReadHandle, DatabaseWriteHandle), InitError> {
     let reader_threads = config.reader_threads;
 
     // Initialize the database itself.
-    let db: Arc<ConcreteEnv> = Arc::new(ConcreteEnv::open(config)?);
+    let db = Arc::new(ConcreteEnv::open(config)?);
 
     // Spawn the Reader thread pool and Writer.
     let readers = DatabaseReadHandle::init(&db, reader_threads);
diff --git a/database/src/service/mod.rs b/database/src/service/mod.rs
index 83f8088a..ca5c9e6f 100644
--- a/database/src/service/mod.rs
+++ b/database/src/service/mod.rs
@@ -5,7 +5,7 @@
 //! along with the reader/writer thread-pool system.
 //!
 //! The thread-pool allows outside crates to communicate with it by
-//! sending database [`Request`](ReadRequest)s and receiving [`Response`]s `async`hronously -
+//! sending database [`Request`][req_r]s and receiving [`Response`][resp]s `async`hronously -
 //! without having to actually worry and handle the database themselves.
 //!
 //! The system is managed by this crate, and only requires [`init`] by the user.
@@ -17,9 +17,9 @@
 //! - [`DatabaseReadHandle`]
 //! - [`DatabaseWriteHandle`]
 //!
-//! The 1st allows any caller to send [`ReadRequest`]s.
+//! The 1st allows any caller to send [`ReadRequest`][req_r]s.
 //!
-//! The 2nd allows any caller to send [`WriteRequest`]s.
+//! The 2nd allows any caller to send [`WriteRequest`][req_w]s.
 //!
 //! The `DatabaseReadHandle` can be shared as it is cheaply [`Clone`]able, however,
 //! the `DatabaseWriteHandle` cannot be cloned. There is only 1 place in Cuprate that
@@ -49,6 +49,70 @@
 //! An `async`hronous channel will be returned from the call.
 //! This channel can be `.await`ed upon to (eventually) receive
 //! the corresponding `Response` to your `Request`.
+//!
+//! [req_r]: cuprate_types::service::ReadRequest
+//!
+//! [req_w]: cuprate_types::service::WriteRequest
+//!
+//! [resp]: cuprate_types::service::Response
+//!
+//! # Example
+//! Simple usage of `service`.
+//!
+//! ```rust
+//! use hex_literal::hex;
+//! use tower::{Service, ServiceExt};
+//!
+//! use cuprate_types::service::{ReadRequest, WriteRequest, Response};
+//! use cuprate_test_utils::data::block_v16_tx0;
+//!
+//! use cuprate_database::{ConcreteEnv, config::ConfigBuilder, Env};
+//!
+//! # #[tokio::main]
+//! # async fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! // Create a configuration for the database environment.
+//! let db_dir = tempfile::tempdir()?;
+//! let config = ConfigBuilder::new()
+//!     .db_directory(db_dir.path().to_path_buf())
+//!     .build();
+//!
+//! // Initialize the database thread-pool.
+//! let (mut read_handle, mut write_handle) = cuprate_database::service::init(config)?;
+//!
+//! // Prepare a request to write block.
+//! let mut block = block_v16_tx0().clone();
+//! # block.height = 0 as u64; // must be 0th height or panic in `add_block()`
+//! let request = WriteRequest::WriteBlock(block);
+//!
+//! // Send the request.
+//! // We receive back an `async` channel that will
+//! // eventually yield the result when `service`
+//! // is done writing the block.
+//! let response_channel = write_handle.ready().await?.call(request);
+//!
+//! // Block write was OK.
+//! let response = response_channel.await?;
+//! assert_eq!(response, Response::WriteBlockOk);
+//!
+//! // Now, let's try getting the block hash
+//! // of the block we just wrote.
+//! let request = ReadRequest::BlockHash(0);
+//! let response_channel = read_handle.ready().await?.call(request);
+//! let response = response_channel.await?;
+//! assert_eq!(
+//!     response,
+//!     Response::BlockHash(
+//!         hex!("43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428")
+//!     )
+//! );
+//!
+//! // This causes the writer thread on the
+//! // other side of this handle to exit...
+//! drop(write_handle);
+//! // ...and this causes the reader thread-pool to exit.
+//! drop(read_handle);
+//! # Ok(()) }
+//! ```
 
 mod read;
 pub use read::DatabaseReadHandle;
@@ -59,11 +123,8 @@ pub use write::DatabaseWriteHandle;
 mod free;
 pub use free::init;
 
-mod request;
-pub use request::{ReadRequest, WriteRequest};
-
-mod response;
-pub use response::Response;
+// Internal type aliases for `service`.
+mod types;
 
 #[cfg(test)]
 mod tests;
diff --git a/database/src/service/read.rs b/database/src/service/read.rs
index 7361ce72..e53c7f88 100644
--- a/database/src/service/read.rs
+++ b/database/src/service/read.rs
@@ -2,47 +2,38 @@
 
 //---------------------------------------------------------------------------------------------------- Import
 use std::{
+    collections::{HashMap, HashSet},
     sync::Arc,
     task::{Context, Poll},
 };
 
-use crossbeam::channel::Receiver;
-
 use futures::{channel::oneshot, ready};
-
+use rayon::iter::{IntoParallelIterator, ParallelIterator};
+use thread_local::ThreadLocal;
 use tokio::sync::{OwnedSemaphorePermit, Semaphore};
 use tokio_util::sync::PollSemaphore;
 
 use cuprate_helper::asynch::InfallibleOneshotReceiver;
+use cuprate_types::{
+    service::{ReadRequest, Response},
+    ExtendedBlockHeader, OutputOnChain,
+};
 
 use crate::{
     config::ReaderThreads,
     error::RuntimeError,
-    service::{request::ReadRequest, response::Response},
-    ConcreteEnv,
+    ops::{
+        block::{get_block_extended_header_from_height, get_block_info},
+        blockchain::{cumulative_generated_coins, top_block_height},
+        key_image::key_image_exists,
+        output::id_to_output_on_chain,
+    },
+    service::types::{ResponseReceiver, ResponseResult, ResponseSender},
+    tables::{BlockHeights, BlockInfos, Tables},
+    types::{Amount, AmountIndex, BlockHeight, KeyImage, PreRctOutputId},
+    ConcreteEnv, DatabaseRo, Env, EnvInner,
 };
 
-//---------------------------------------------------------------------------------------------------- Types
-/// The actual type of the response.
-///
-/// Either our [`Response`], or a database error occurred.
-type ResponseResult = Result<Response, RuntimeError>;
-
-/// The `Receiver` channel that receives the read response.
-///
-/// This is owned by the caller (the reader)
-/// who `.await`'s for the response.
-///
-/// The channel itself should never fail,
-/// but the actual database operation might.
-type ResponseReceiver = InfallibleOneshotReceiver<ResponseResult>;
-
-/// The `Sender` channel for the response.
-///
-/// The database reader thread uses this to send
-/// the database result to the caller.
-type ResponseSender = oneshot::Sender<ResponseResult>;
-
 //---------------------------------------------------------------------------------------------------- DatabaseReadHandle
 /// Read handle to the database.
 ///
@@ -82,10 +73,10 @@ pub struct DatabaseReadHandle {
 impl Clone for DatabaseReadHandle {
     fn clone(&self) -> Self {
         Self {
-            pool: self.pool.clone(),
+            pool: Arc::clone(&self.pool),
             semaphore: self.semaphore.clone(),
             permit: None,
-            env: self.env.clone(),
+            env: Arc::clone(&self.env),
         }
     }
 }
@@ -123,23 +114,21 @@ impl DatabaseReadHandle {
         }
     }
 
-    /// TODO
+    /// Access to the actual database environment.
+    ///
+    /// # ⚠️ Warning
+    /// This function gives you access to the actual
+    /// underlying database connected to by `self`.
+    ///
+    /// I.e. it allows you to read/write data _directly_
+    /// instead of going through a request.
+    ///
+    /// Be warned that using the database directly
+    /// in this manner has not been tested.
     #[inline]
     pub const fn env(&self) -> &Arc<ConcreteEnv> {
         &self.env
     }
-
-    /// TODO
-    #[inline]
-    pub const fn semaphore(&self) -> &PollSemaphore {
-        &self.semaphore
-    }
-
-    /// TODO
-    #[inline]
-    pub const fn permit(&self) -> &Option<OwnedSemaphorePermit> {
-        &self.permit
-    }
 }
 
 impl tower::Service<ReadRequest> for DatabaseReadHandle {
@@ -155,15 +144,14 @@ impl tower::Service<ReadRequest> for DatabaseReadHandle {
         }
 
         // Acquire a permit before returning `Ready`.
-        let Some(permit) = ready!(self.semaphore.poll_acquire(cx)) else {
-            // `self` itself owns the backing semaphore, so it can't be closed.
-            unreachable!();
-        };
+        let permit =
+            ready!(self.semaphore.poll_acquire(cx)).expect("this semaphore is never closed");
 
         self.permit = Some(permit);
         Poll::Ready(Ok(()))
     }
 
+    #[inline]
     fn call(&mut self, request: ReadRequest) -> Self::Future {
         let permit = self
             .permit
@@ -181,9 +169,11 @@ impl tower::Service<ReadRequest> for DatabaseReadHandle {
         //
         // INVARIANT:
         // The below `DatabaseReader` function impl block relies on this behavior.
-        let env = Arc::clone(self.env());
-        self.pool
-            .spawn(move || map_request(permit, env, request, response_sender));
+        let env = Arc::clone(&self.env);
+        self.pool.spawn(move || {
+            let _permit: OwnedSemaphorePermit = permit;
+            map_request(&env, request, response_sender);
+        }); // drop(permit/env);
 
         InfallibleOneshotReceiver::from(receiver)
     }
@@ -193,36 +183,98 @@ impl tower::Service<ReadRequest> for DatabaseReadHandle {
 // This function maps [`Request`]s to function calls
 // executed by the rayon DB reader threadpool.
 
-#[inline]
-#[allow(clippy::needless_pass_by_value)]
 /// Map [`Request`]'s to specific database handler functions.
 ///
 /// This is the main entrance into all `Request` handler functions.
 /// The basic structure is:
-///
 /// 1. `Request` is mapped to a handler function
 /// 2. Handler function is called
 /// 3. [`Response`] is sent
 fn map_request(
-    _permit: OwnedSemaphorePermit,   // Permit for this request
-    env: Arc<ConcreteEnv>,           // Access to the database
+    env: &ConcreteEnv,               // Access to the database
     request: ReadRequest,            // The request we must fulfill
     response_sender: ResponseSender, // The channel we must send the response back to
 ) {
-    /* TODO: pre-request handling, run some code for each request? */
+    use ReadRequest as R;
 
-    match request {
-        ReadRequest::Example1 => example_handler_1(env, response_sender),
-        ReadRequest::Example2(x) => example_handler_2(env, response_sender, x),
-        ReadRequest::Example3(x) => example_handler_3(env, response_sender, x),
+    /* SOMEDAY: pre-request handling, run some code for each request? */
+
+    let response = match request {
+        R::BlockExtendedHeader(block) => block_extended_header(env, block),
+        R::BlockHash(block) => block_hash(env, block),
+        R::BlockExtendedHeaderInRange(range) => block_extended_header_in_range(env, range),
+        R::ChainHeight => chain_height(env),
+        R::GeneratedCoins => generated_coins(env),
+        R::Outputs(map) => outputs(env, map),
+        R::NumberOutputsWithAmount(vec) => number_outputs_with_amount(env, vec),
+        R::CheckKIsNotSpent(set) => check_k_is_not_spent(env, set),
+    };
+
+    if let Err(e) = response_sender.send(response) {
+        // TODO: use tracing.
+        println!("database reader failed to send response: {e:?}");
     }
 
-    /* TODO: post-request handling, run some code for each request? */
+    /* SOMEDAY: post-request handling, run some code for each request? */
+}
+
+//---------------------------------------------------------------------------------------------------- Thread Local
+/// Q: Why does this exist?
+///
+/// A1: `heed`'s transactions and tables are not `Sync`, so we cannot use
+/// them with rayon, however, we set a feature such that they are `Send`.
+///
+/// A2: When sending to rayon, we want to ensure each read transaction
+/// is only ever used by 1 thread at a time, to scale reads
+///
+/// <https://github.com/Cuprate/cuprate/pull/113#discussion_r1576762346>
+#[inline]
+fn thread_local<T: Send>(env: &impl Env) -> ThreadLocal<T> {
+    ThreadLocal::with_capacity(env.config().reader_threads.as_threads().get())
+}
+
+/// Take in a `ThreadLocal<impl Tables>` and return an `&impl Tables + Send`.
+///
+/// # Safety
+/// See [`DatabaseRo`] docs.
+///
+/// We are safely using `UnsafeSendable` in `service`'s reader thread-pool
+/// as we are pairing our usage with `ThreadLocal` - only 1 thread
+/// will ever access a transaction at a time. This is an INVARIANT.
+///
+/// A `Mutex` was considered but:
+/// - It is less performant
+/// - It isn't technically needed for safety in our use-case
+/// - It causes `DatabaseIter` function return issues as there is a `MutexGuard` object
+///
+/// <https://github.com/Cuprate/cuprate/pull/113#discussion_r1581684698>
+///
+/// # Notes
+/// This is used for other backends as well instead of branching with `cfg_if`.
+/// The other backends (as of current) are `Send + Sync` so this is fine.
+/// <https://github.com/Cuprate/cuprate/pull/113#discussion_r1585618374>
+macro_rules! get_tables {
+    ($env_inner:ident, $tx_ro:ident, $tables:ident) => {{
+        $tables.get_or_try(|| {
+            #[allow(clippy::significant_drop_in_scrutinee)]
+            match $env_inner.open_tables($tx_ro) {
+                // SAFETY: see above macro doc comment.
+                Ok(tables) => Ok(unsafe { crate::unsafe_sendable::UnsafeSendable::new(tables) }),
+                Err(e) => Err(e),
+            }
+        })
+    }};
 }
 
 //---------------------------------------------------------------------------------------------------- Handler functions
 // These are the actual functions that do stuff according to the incoming [`Request`].
 //
+// Each function name is a 1-1 mapping (from CamelCase -> snake_case) to
+// the enum variant name, e.g: `BlockExtendedHeader` -> `block_extended_header`.
+//
+// Each function will return the [`Response`] that we
+// should send back to the caller in [`map_request()`].
+//
 // INVARIANT:
 // These functions are called above in `tower::Service::call()`
 // using a custom threadpool which means any call to `par_*()` functions
@@ -231,26 +283,211 @@ fn map_request(
 // All functions below assume that this is the case, such that
 // `par_*()` functions will not block the _global_ rayon thread-pool.
 
-/// TODO
+// FIXME: implement multi-transaction read atomicity.
+// <https://github.com/Cuprate/cuprate/pull/113#discussion_r1576874589>.
+
+/// [`ReadRequest::BlockExtendedHeader`].
 #[inline]
-#[allow(clippy::needless_pass_by_value)] // TODO: remove me
-fn example_handler_1(env: Arc<ConcreteEnv>, response_sender: ResponseSender) {
-    let db_result = Ok(Response::Example1);
-    response_sender.send(db_result).unwrap();
+fn block_extended_header(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult {
+    // Single-threaded, no `ThreadLocal` required.
+    let env_inner = env.env_inner();
+    let tx_ro = env_inner.tx_ro()?;
+    let tables = env_inner.open_tables(&tx_ro)?;
+
+    Ok(Response::BlockExtendedHeader(
+        get_block_extended_header_from_height(&block_height, &tables)?,
+    ))
 }
 
-/// TODO
+/// [`ReadRequest::BlockHash`].
 #[inline]
-#[allow(clippy::needless_pass_by_value)] // TODO: remove me
-fn example_handler_2(env: Arc<ConcreteEnv>, response_sender: ResponseSender, x: usize) {
-    let db_result = Ok(Response::Example2(x));
-    response_sender.send(db_result).unwrap();
+fn block_hash(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult {
+    // Single-threaded, no `ThreadLocal` required.
+    let env_inner = env.env_inner();
+    let tx_ro = env_inner.tx_ro()?;
+    let table_block_infos = env_inner.open_db_ro::<BlockInfos>(&tx_ro)?;
+
+    Ok(Response::BlockHash(
+        get_block_info(&block_height, &table_block_infos)?.block_hash,
+    ))
 }
 
-/// TODO
+/// [`ReadRequest::BlockExtendedHeaderInRange`].
 #[inline]
-#[allow(clippy::needless_pass_by_value)] // TODO: remove me
-fn example_handler_3(env: Arc<ConcreteEnv>, response_sender: ResponseSender, x: String) {
-    let db_result = Ok(Response::Example3(x));
-    response_sender.send(db_result).unwrap();
+fn block_extended_header_in_range(
+    env: &ConcreteEnv,
+    range: std::ops::Range<BlockHeight>,
+) -> ResponseResult {
+    // Prepare tx/tables in `ThreadLocal`.
+    let env_inner = env.env_inner();
+    let tx_ro = thread_local(env);
+    let tables = thread_local(env);
+
+    // Collect results using `rayon`.
+    let vec = range
+        .into_par_iter()
+        .map(|block_height| {
+            let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
+            let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
+            get_block_extended_header_from_height(&block_height, tables)
+        })
+        .collect::<Result<Vec<ExtendedBlockHeader>, RuntimeError>>()?;
+
+    Ok(Response::BlockExtendedHeaderInRange(vec))
+}
+
+/// [`ReadRequest::ChainHeight`].
+#[inline]
+fn chain_height(env: &ConcreteEnv) -> ResponseResult {
+    // Single-threaded, no `ThreadLocal` required.
+    let env_inner = env.env_inner();
+    let tx_ro = env_inner.tx_ro()?;
+    let table_block_heights = env_inner.open_db_ro::<BlockHeights>(&tx_ro)?;
+    let table_block_infos = env_inner.open_db_ro::<BlockInfos>(&tx_ro)?;
+
+    let chain_height = crate::ops::blockchain::chain_height(&table_block_heights)?;
+    let block_hash =
+        get_block_info(&chain_height.saturating_sub(1), &table_block_infos)?.block_hash;
+
+    Ok(Response::ChainHeight(chain_height, block_hash))
+}
+
+/// [`ReadRequest::GeneratedCoins`].
+#[inline]
+fn generated_coins(env: &ConcreteEnv) -> ResponseResult {
+    // Single-threaded, no `ThreadLocal` required.
+    let env_inner = env.env_inner();
+    let tx_ro = env_inner.tx_ro()?;
+    let table_block_heights = env_inner.open_db_ro::<BlockHeights>(&tx_ro)?;
+    let table_block_infos = env_inner.open_db_ro::<BlockInfos>(&tx_ro)?;
+
+    let top_height = top_block_height(&table_block_heights)?;
+
+    Ok(Response::GeneratedCoins(cumulative_generated_coins(
+        &top_height,
+        &table_block_infos,
+    )?))
+}
+
+/// [`ReadRequest::Outputs`].
+#[inline]
+fn outputs(env: &ConcreteEnv, outputs: HashMap<Amount, HashSet<AmountIndex>>) -> ResponseResult {
+    // Prepare tx/tables in `ThreadLocal`.
+    let env_inner = env.env_inner();
+    let tx_ro = thread_local(env);
+    let tables = thread_local(env);
+
+    // The 2nd mapping function.
+    // This is pulled out from the below `map()` for readability.
+    let inner_map = |amount, amount_index| -> Result<(AmountIndex, OutputOnChain), RuntimeError> {
+        let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
+        let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
+
+        let id = PreRctOutputId {
+            amount,
+            amount_index,
+        };
+
+        let output_on_chain = id_to_output_on_chain(&id, tables)?;
+
+        Ok((amount_index, output_on_chain))
+    };
+
+    // Collect results using `rayon`.
+    let map = outputs
+        .into_par_iter()
+        .map(|(amount, amount_index_set)| {
+            Ok((
+                amount,
+                amount_index_set
+                    .into_par_iter()
+                    .map(|amount_index| inner_map(amount, amount_index))
+                    .collect::<Result<HashMap<AmountIndex, OutputOnChain>, RuntimeError>>()?,
+            ))
+        })
+        .collect::<Result<HashMap<Amount, HashMap<AmountIndex, OutputOnChain>>, RuntimeError>>()?;
+
+    Ok(Response::Outputs(map))
+}
+
+/// [`ReadRequest::NumberOutputsWithAmount`].
+#[inline]
+fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec<Amount>) -> ResponseResult {
+    // Prepare tx/tables in `ThreadLocal`.
+    let env_inner = env.env_inner();
+    let tx_ro = thread_local(env);
+    let tables = thread_local(env);
+
+    // Cache the amount of RCT outputs once.
+    // INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`
+    #[allow(clippy::cast_possible_truncation)]
+    let num_rct_outputs = {
+        let tx_ro = env_inner.tx_ro()?;
+        let tables = env_inner.open_tables(&tx_ro)?;
+        tables.rct_outputs().len()? as usize
+    };
+
+    // Collect results using `rayon`.
+    let map = amounts
+        .into_par_iter()
+        .map(|amount| {
+            let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
+            let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
+
+            if amount == 0 {
+                // v2 transactions.
+                Ok((amount, num_rct_outputs))
+            } else {
+                // v1 transactions.
+                match tables.num_outputs().get(&amount) {
+                    // INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`
+                    #[allow(clippy::cast_possible_truncation)]
+                    Ok(count) => Ok((amount, count as usize)),
+                    // If we get a request for an `amount` that doesn't exist,
+                    // we return `0` instead of an error.
+                    Err(RuntimeError::KeyNotFound) => Ok((amount, 0)),
+                    Err(e) => Err(e),
+                }
+            }
+        })
+        .collect::<Result<HashMap<Amount, usize>, RuntimeError>>()?;
+
+    Ok(Response::NumberOutputsWithAmount(map))
+}
+
+/// [`ReadRequest::CheckKIsNotSpent`].
+#[inline]
+fn check_k_is_not_spent(env: &ConcreteEnv, key_images: HashSet<KeyImage>) -> ResponseResult {
+    // Prepare tx/tables in `ThreadLocal`.
+    let env_inner = env.env_inner();
+    let tx_ro = thread_local(env);
+    let tables = thread_local(env);
+
+    // Key image check function.
+    let key_image_exists = |key_image| {
+        let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
+        let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
+        key_image_exists(&key_image, tables.key_images())
+    };
+
+    // FIXME:
+    // Create/use `enum cuprate_types::Exist { Does, DoesNot }`
+    // or similar instead of `bool` for clarity.
+    // <https://github.com/Cuprate/cuprate/pull/113#discussion_r1581536526>
+    //
+    // Collect results using `rayon`.
+    match key_images
+        .into_par_iter()
+        .map(key_image_exists)
+        // If the result is either:
+        // `Ok(true)` => a key image was found, return early
+        // `Err` => an error was found, return early
+        //
+        // Else, `Ok(false)` will continue the iterator.
+        .find_any(|result| !matches!(result, Ok(false)))
+    {
+        None | Some(Ok(false)) => Ok(Response::CheckKIsNotSpent(true)), // Key image was NOT found.
+        Some(Ok(true)) => Ok(Response::CheckKIsNotSpent(false)),        // Key image was found.
+        Some(Err(e)) => Err(e), // A database error occurred.
+    }
 }
diff --git a/database/src/service/request.rs b/database/src/service/request.rs
deleted file mode 100644
index 93877ecd..00000000
--- a/database/src/service/request.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-//! Read/write `Request`s to the database.
-//!
-//! TODO: could add `strum` derives.
-
-//---------------------------------------------------------------------------------------------------- Import
-
-//---------------------------------------------------------------------------------------------------- Constants
-
-//---------------------------------------------------------------------------------------------------- ReadRequest
-#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-/// A read request to the database.
-pub enum ReadRequest {
-    /// TODO
-    Example1,
-    /// TODO
-    Example2(usize),
-    /// TODO
-    Example3(String),
-}
-
-//---------------------------------------------------------------------------------------------------- WriteRequest
-#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-/// A write request to the database.
-pub enum WriteRequest {
-    /// TODO
-    Example1,
-    /// TODO
-    Example2(usize),
-    /// TODO
-    Example3(String),
-}
-
-//---------------------------------------------------------------------------------------------------- IMPL
-
-//---------------------------------------------------------------------------------------------------- Trait Impl
-
-//---------------------------------------------------------------------------------------------------- Tests
-#[cfg(test)]
-mod test {
-    // use super::*;
-}
diff --git a/database/src/service/response.rs b/database/src/service/response.rs
deleted file mode 100644
index 6977efdb..00000000
--- a/database/src/service/response.rs
+++ /dev/null
@@ -1,38 +0,0 @@
-//! Read/write `Response`'s from the database.
-//!
-//! TODO: could add `strum` derives.
-
-//---------------------------------------------------------------------------------------------------- Import
-
-//---------------------------------------------------------------------------------------------------- Constants
-
-//---------------------------------------------------------------------------------------------------- Response
-#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-/// A response from the database.
-///
-/// TODO
-pub enum Response {
-    //-------------------------------------------------------- Read responses
-    /// TODO
-    Example1,
-    /// TODO
-    Example2(usize),
-    /// TODO
-    Example3(String),
-
-    //-------------------------------------------------------- Write responses
-    /// The response
-    ///
-    /// TODO
-    ExampleWriteResponse, // Probably will be just `Ok`
-}
-
-//---------------------------------------------------------------------------------------------------- IMPL
-
-//---------------------------------------------------------------------------------------------------- Trait Impl
-
-//---------------------------------------------------------------------------------------------------- Tests
-#[cfg(test)]
-mod test {
-    // use super::*;
-}
diff --git a/database/src/service/tests.rs b/database/src/service/tests.rs
index a01f67a5..77c10cdd 100644
--- a/database/src/service/tests.rs
+++ b/database/src/service/tests.rs
@@ -1,76 +1,377 @@
 //! `crate::service` tests.
 //!
 //! This module contains general tests for the `service` implementation.
-//!
-//! Testing a thread-pool is slightly more complicated,
-//! so this file provides TODO.
 
 // This is only imported on `#[cfg(test)]` in `mod.rs`.
-
-#![allow(unused_mut, clippy::significant_drop_tightening)]
+#![allow(clippy::await_holding_lock, clippy::too_many_lines)]
 
 //---------------------------------------------------------------------------------------------------- Use
-use tower::{Service, ServiceExt};
-
-use crate::{
-    config::Config,
-    service::{init, DatabaseReadHandle, DatabaseWriteHandle, ReadRequest, Response, WriteRequest},
+use std::{
+    collections::{HashMap, HashSet},
+    sync::Arc,
 };
 
-//---------------------------------------------------------------------------------------------------- Tests
+use pretty_assertions::assert_eq;
+use tower::{Service, ServiceExt};
+
+use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3};
+use cuprate_types::{
+    service::{ReadRequest, Response, WriteRequest},
+    OutputOnChain, VerifiedBlockInformation,
+};
+
+use crate::{
+    config::ConfigBuilder,
+    ops::{
+        block::{get_block_extended_header_from_height, get_block_info},
+        blockchain::chain_height,
+        output::id_to_output_on_chain,
+    },
+    service::{init, DatabaseReadHandle, DatabaseWriteHandle},
+    tables::{Tables, TablesIter},
+    tests::AssertTableLen,
+    types::{Amount, AmountIndex, PreRctOutputId},
+    ConcreteEnv, DatabaseIter, DatabaseRo, Env, EnvInner, RuntimeError,
+};
+
+//---------------------------------------------------------------------------------------------------- Helper functions
 /// Initialize the `service`.
-fn init_service() -> (DatabaseReadHandle, DatabaseWriteHandle, tempfile::TempDir) {
+fn init_service() -> (
+    DatabaseReadHandle,
+    DatabaseWriteHandle,
+    Arc<ConcreteEnv>,
+    tempfile::TempDir,
+) {
     let tempdir = tempfile::tempdir().unwrap();
-    let config = Config::low_power(Some(tempdir.path().into()));
+    let config = ConfigBuilder::new()
+        .db_directory(tempdir.path().into())
+        .low_power()
+        .build();
     let (reader, writer) = init(config).unwrap();
-    (reader, writer, tempdir)
+    let env = reader.env().clone();
+    (reader, writer, env, tempdir)
 }
 
+/// This is the template used in the actual test functions below.
+///
+/// - Send write request(s)
+/// - Receive response(s)
+/// - Assert proper tables were mutated
+/// - Assert read requests lead to expected responses
+#[allow(clippy::future_not_send)] // INVARIANT: tests are using a single threaded runtime
+async fn test_template(
+    // Which block(s) to add?
+    block_fns: &[fn() -> &'static VerifiedBlockInformation],
+    // Total amount of generated coins after the block(s) have been added.
+    cumulative_generated_coins: u64,
+    // What should the table lengths be after the block(s) have been added?
+    assert_table_len: AssertTableLen,
+) {
+    //----------------------------------------------------------------------- Write requests
+    let (reader, mut writer, env, _tempdir) = init_service();
+
+    let env_inner = env.env_inner();
+    let tx_ro = env_inner.tx_ro().unwrap();
+    let tables = env_inner.open_tables(&tx_ro).unwrap();
+
+    // HACK: `add_block()` asserts blocks with non-sequential heights
+    // cannot be added, to get around this, manually edit the block height.
+    for (i, block_fn) in block_fns.iter().enumerate() {
+        let mut block = block_fn().clone();
+        block.height = i as u64;
+
+        // Request a block to be written, assert it was written.
+        let request = WriteRequest::WriteBlock(block);
+        let response_channel = writer.call(request);
+        let response = response_channel.await.unwrap();
+        assert_eq!(response, Response::WriteBlockOk);
+    }
+
+    //----------------------------------------------------------------------- Reset the transaction
+    drop(tables);
+    drop(tx_ro);
+    let tx_ro = env_inner.tx_ro().unwrap();
+    let tables = env_inner.open_tables(&tx_ro).unwrap();
+
+    //----------------------------------------------------------------------- Assert all table lengths are correct
+    assert_table_len.assert(&tables);
+
+    //----------------------------------------------------------------------- Read request prep
+    // Next few lines are just for preparing the expected responses,
+    // see further below for usage.
+
+    let extended_block_header_0 = Ok(Response::BlockExtendedHeader(
+        get_block_extended_header_from_height(&0, &tables).unwrap(),
+    ));
+
+    let extended_block_header_1 = if block_fns.len() > 1 {
+        Ok(Response::BlockExtendedHeader(
+            get_block_extended_header_from_height(&1, &tables).unwrap(),
+        ))
+    } else {
+        Err(RuntimeError::KeyNotFound)
+    };
+
+    let block_hash_0 = Ok(Response::BlockHash(
+        get_block_info(&0, tables.block_infos()).unwrap().block_hash,
+    ));
+
+    let block_hash_1 = if block_fns.len() > 1 {
+        Ok(Response::BlockHash(
+            get_block_info(&1, tables.block_infos()).unwrap().block_hash,
+        ))
+    } else {
+        Err(RuntimeError::KeyNotFound)
+    };
+
+    let range_0_1 = Ok(Response::BlockExtendedHeaderInRange(vec![
+        get_block_extended_header_from_height(&0, &tables).unwrap(),
+    ]));
+
+    let range_0_2 = if block_fns.len() >= 2 {
+        Ok(Response::BlockExtendedHeaderInRange(vec![
+            get_block_extended_header_from_height(&0, &tables).unwrap(),
+            get_block_extended_header_from_height(&1, &tables).unwrap(),
+        ]))
+    } else {
+        Err(RuntimeError::KeyNotFound)
+    };
+
+    let chain_height = {
+        let height = chain_height(tables.block_heights()).unwrap();
+        let block_info = get_block_info(&height.saturating_sub(1), tables.block_infos()).unwrap();
+        Ok(Response::ChainHeight(height, block_info.block_hash))
+    };
+
+    let cumulative_generated_coins = Ok(Response::GeneratedCoins(cumulative_generated_coins));
+
+    let num_req = tables
+        .outputs_iter()
+        .keys()
+        .unwrap()
+        .map(Result::unwrap)
+        .map(|key| key.amount)
+        .collect::<Vec<Amount>>();
+
+    let num_resp = Ok(Response::NumberOutputsWithAmount(
+        num_req
+            .iter()
+            .map(|amount| match tables.num_outputs().get(amount) {
+                // INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`
+                #[allow(clippy::cast_possible_truncation)]
+                Ok(count) => (*amount, count as usize),
+                Err(RuntimeError::KeyNotFound) => (*amount, 0),
+                Err(e) => panic!("{e:?}"),
+            })
+            .collect::<HashMap<Amount, usize>>(),
+    ));
+
+    // Contains a fake non-spent key-image.
+    let ki_req = HashSet::from([[0; 32]]);
+    let ki_resp = Ok(Response::CheckKIsNotSpent(true));
+
+    //----------------------------------------------------------------------- Assert expected response
+    // Assert read requests lead to the expected responses.
+    for (request, expected_response) in [
+        (ReadRequest::BlockExtendedHeader(0), extended_block_header_0),
+        (ReadRequest::BlockExtendedHeader(1), extended_block_header_1),
+        (ReadRequest::BlockHash(0), block_hash_0),
+        (ReadRequest::BlockHash(1), block_hash_1),
+        (ReadRequest::BlockExtendedHeaderInRange(0..1), range_0_1),
+        (ReadRequest::BlockExtendedHeaderInRange(0..2), range_0_2),
+        (ReadRequest::ChainHeight, chain_height),
+        (ReadRequest::GeneratedCoins, cumulative_generated_coins),
+        (ReadRequest::NumberOutputsWithAmount(num_req), num_resp),
+        (ReadRequest::CheckKIsNotSpent(ki_req), ki_resp),
+    ] {
+        let response = reader.clone().oneshot(request).await;
+        println!("response: {response:#?}, expected_response: {expected_response:#?}");
+        match response {
+            Ok(resp) => assert_eq!(resp, expected_response.unwrap()),
+            Err(_) => assert!(expected_response.is_err()),
+        }
+    }
+
+    //----------------------------------------------------------------------- Key image checks
+    // Assert each key image we inserted comes back as "spent".
+    for key_image in tables.key_images_iter().keys().unwrap() {
+        let key_image = key_image.unwrap();
+        let request = ReadRequest::CheckKIsNotSpent(HashSet::from([key_image]));
+        let response = reader.clone().oneshot(request).await;
+        println!("response: {response:#?}, key_image: {key_image:#?}");
+        assert_eq!(response.unwrap(), Response::CheckKIsNotSpent(false));
+    }
+
+    //----------------------------------------------------------------------- Output checks
+    // Create the map of amounts and amount indices.
+    //
+    // FIXME: There's definitely a better way to map
+    // `Vec<PreRctOutputId>` -> `HashMap<u64, HashSet<u64>>`
+    let (map, output_count) = {
+        let mut ids = tables
+            .outputs_iter()
+            .keys()
+            .unwrap()
+            .map(Result::unwrap)
+            .collect::<Vec<PreRctOutputId>>();
+
+        ids.extend(
+            tables
+                .rct_outputs_iter()
+                .keys()
+                .unwrap()
+                .map(Result::unwrap)
+                .map(|amount_index| PreRctOutputId {
+                    amount: 0,
+                    amount_index,
+                }),
+        );
+
+        // Used later to compare the amount of Outputs
+        // returned in the Response is equal to the amount
+        // we asked for.
+        let output_count = ids.len();
+
+        let mut map = HashMap::<Amount, HashSet<AmountIndex>>::new();
+        for id in ids {
+            map.entry(id.amount)
+                .and_modify(|set| {
+                    set.insert(id.amount_index);
+                })
+                .or_insert_with(|| HashSet::from([id.amount_index]));
+        }
+
+        (map, output_count)
+    };
+
+    // Map `Output` -> `OutputOnChain`
+    // This is the expected output from the `Response`.
+    let outputs_on_chain = map
+        .iter()
+        .flat_map(|(amount, amount_index_set)| {
+            amount_index_set.iter().map(|amount_index| {
+                let id = PreRctOutputId {
+                    amount: *amount,
+                    amount_index: *amount_index,
+                };
+                id_to_output_on_chain(&id, &tables).unwrap()
+            })
+        })
+        .collect::<Vec<OutputOnChain>>();
+
+    // Send a request for every output we inserted before.
+    let request = ReadRequest::Outputs(map.clone());
+    let response = reader.clone().oneshot(request).await;
+    println!("Response::Outputs response: {response:#?}");
+    let Ok(Response::Outputs(response)) = response else {
+        panic!("{response:#?}")
+    };
+
+    // Assert amount of `Amount`'s are the same.
+    assert_eq!(map.len(), response.len());
+
+    // Assert we get back the same map of
+    // `Amount`'s and `AmountIndex`'s.
+    let mut response_output_count = 0;
+    for (amount, output_map) in response {
+        let amount_index_set = map.get(&amount).unwrap();
+
+        for (amount_index, output) in output_map {
+            response_output_count += 1;
+            assert!(amount_index_set.contains(&amount_index));
+            assert!(outputs_on_chain.contains(&output));
+        }
+    }
+
+    // Assert the amount of `Output`'s returned is as expected.
+    let table_output_len = tables.outputs().len().unwrap() + tables.rct_outputs().len().unwrap();
+    assert_eq!(output_count as u64, table_output_len);
+    assert_eq!(output_count, response_output_count);
+}
+
+//---------------------------------------------------------------------------------------------------- Tests
 /// Simply `init()` the service and then drop it.
 ///
 /// If this test fails, something is very wrong.
 #[test]
 fn init_drop() {
-    let (reader, writer, _tempdir) = init_service();
+    let (_reader, _writer, _env, _tempdir) = init_service();
 }
 
-/// Send a read request, and receive a response,
-/// asserting the response the expected value.
+/// Assert write/read correctness of [`block_v1_tx2`].
 #[tokio::test]
-async fn read_request() {
-    let (reader, writer, _tempdir) = init_service();
-
-    for (request, expected_response) in [
-        (ReadRequest::Example1, Response::Example1),
-        (ReadRequest::Example2(123), Response::Example2(123)),
-        (
-            ReadRequest::Example3("hello".into()),
-            Response::Example3("hello".into()),
-        ),
-    ] {
-        // This calls `poll_ready()` asserting we have a permit before `call()`.
-        let response_channel = reader.clone().oneshot(request);
-        let response = response_channel.await.unwrap();
-        assert_eq!(response, expected_response);
-    }
+async fn v1_tx2() {
+    test_template(
+        &[block_v1_tx2],
+        14_535_350_982_449,
+        AssertTableLen {
+            block_infos: 1,
+            block_blobs: 1,
+            block_heights: 1,
+            key_images: 65,
+            num_outputs: 41,
+            pruned_tx_blobs: 0,
+            prunable_hashes: 0,
+            outputs: 111,
+            prunable_tx_blobs: 0,
+            rct_outputs: 0,
+            tx_blobs: 3,
+            tx_ids: 3,
+            tx_heights: 3,
+            tx_unlock_time: 1,
+        },
+    )
+    .await;
 }
 
-/// Send a write request, and receive a response,
-/// asserting the response the expected value.
+/// Assert write/read correctness of [`block_v9_tx3`].
 #[tokio::test]
-async fn write_request() {
-    let (reader, mut writer, _tempdir) = init_service();
-
-    for (request, expected_response) in [
-        (WriteRequest::Example1, Response::Example1),
-        (WriteRequest::Example2(123), Response::Example2(123)),
-        (
-            WriteRequest::Example3("hello".into()),
-            Response::Example3("hello".into()),
-        ),
-    ] {
-        let response_channel = writer.call(request);
-        let response = response_channel.await.unwrap();
-        assert_eq!(response, expected_response);
-    }
+async fn v9_tx3() {
+    test_template(
+        &[block_v9_tx3],
+        3_403_774_022_163,
+        AssertTableLen {
+            block_infos: 1,
+            block_blobs: 1,
+            block_heights: 1,
+            key_images: 4,
+            num_outputs: 0,
+            pruned_tx_blobs: 0,
+            prunable_hashes: 0,
+            outputs: 0,
+            prunable_tx_blobs: 0,
+            rct_outputs: 7,
+            tx_blobs: 4,
+            tx_ids: 4,
+            tx_heights: 4,
+            tx_unlock_time: 1,
+        },
+    )
+    .await;
+}
+
+/// Assert write/read correctness of [`block_v16_tx0`].
+#[tokio::test]
+async fn v16_tx0() {
+    test_template(
+        &[block_v16_tx0],
+        600_000_000_000,
+        AssertTableLen {
+            block_infos: 1,
+            block_blobs: 1,
+            block_heights: 1,
+            key_images: 0,
+            num_outputs: 0,
+            pruned_tx_blobs: 0,
+            prunable_hashes: 0,
+            outputs: 0,
+            prunable_tx_blobs: 0,
+            rct_outputs: 1,
+            tx_blobs: 1,
+            tx_ids: 1,
+            tx_heights: 1,
+            tx_unlock_time: 1,
+        },
+    )
+    .await;
 }
diff --git a/database/src/service/types.rs b/database/src/service/types.rs
new file mode 100644
index 00000000..265bf42c
--- /dev/null
+++ b/database/src/service/types.rs
@@ -0,0 +1,31 @@
+//! Database service type aliases.
+//!
+//! Only used internally for our `tower::Service` impls.
+
+//---------------------------------------------------------------------------------------------------- Use
+use futures::channel::oneshot::Sender;
+
+use cuprate_helper::asynch::InfallibleOneshotReceiver;
+use cuprate_types::service::Response;
+
+use crate::error::RuntimeError;
+
+//---------------------------------------------------------------------------------------------------- Types
+/// The actual type of the response.
+///
+/// Either our [`Response`], or a database error occurred.
+pub(super) type ResponseResult = Result<Response, RuntimeError>;
+
+/// The `Receiver` channel that receives the read response.
+///
+/// This is owned by the caller (the reader/writer thread)
+/// who `.await`'s for the response.
+///
+/// The channel itself should never fail,
+/// but the actual database operation might.
+pub(super) type ResponseReceiver = InfallibleOneshotReceiver<ResponseResult>;
+
+/// The `Sender` channel for the response.
+///
+/// The database reader/writer thread uses this to send the database result to the caller.
+pub(super) type ResponseSender = Sender<ResponseResult>;
diff --git a/database/src/service/write.rs b/database/src/service/write.rs
index 13e6f979..d6747e97 100644
--- a/database/src/service/write.rs
+++ b/database/src/service/write.rs
@@ -9,31 +9,22 @@ use std::{
 use futures::channel::oneshot;
 
 use cuprate_helper::asynch::InfallibleOneshotReceiver;
+use cuprate_types::{
+    service::{Response, WriteRequest},
+    VerifiedBlockInformation,
+};
 
 use crate::{
+    env::{Env, EnvInner},
     error::RuntimeError,
-    service::{request::WriteRequest, response::Response},
-    ConcreteEnv, Env,
+    service::types::{ResponseReceiver, ResponseResult, ResponseSender},
+    transaction::TxRw,
+    ConcreteEnv,
 };
 
 //---------------------------------------------------------------------------------------------------- Constants
 /// Name of the writer thread.
-const WRITER_THREAD_NAME: &str = "cuprate_helper::service::read::DatabaseWriter";
-
-//---------------------------------------------------------------------------------------------------- Types
-/// The actual type of the response.
-///
-/// Either our [Response], or a database error occurred.
-type ResponseResult = Result<Response, RuntimeError>;
-
-/// The `Receiver` channel that receives the write response.
-///
-/// The channel itself should never fail,
-/// but the actual database operation might.
-type ResponseReceiver = InfallibleOneshotReceiver<ResponseResult>;
-
-/// The `Sender` channel for the response.
-type ResponseSender = oneshot::Sender<ResponseResult>;
+const WRITER_THREAD_NAME: &str = concat!(module_path!(), "::DatabaseWriter");
 
 //---------------------------------------------------------------------------------------------------- DatabaseWriteHandle
 /// Write handle to the database.
@@ -57,7 +48,7 @@ impl DatabaseWriteHandle {
     /// Initialize the single `DatabaseWriter` thread.
     #[cold]
     #[inline(never)] // Only called once.
-    pub(super) fn init(db: Arc<ConcreteEnv>) -> Self {
+    pub(super) fn init(env: Arc<ConcreteEnv>) -> Self {
         // Initialize `Request/Response` channels.
         let (sender, receiver) = crossbeam::channel::unbounded();
 
@@ -65,7 +56,7 @@ impl DatabaseWriteHandle {
         std::thread::Builder::new()
             .name(WRITER_THREAD_NAME.into())
             .spawn(move || {
-                let this = DatabaseWriter { receiver, db };
+                let this = DatabaseWriter { receiver, env };
                 DatabaseWriter::main(this);
             })
             .unwrap();
@@ -107,7 +98,7 @@ pub(super) struct DatabaseWriter {
     receiver: crossbeam::channel::Receiver<(WriteRequest, ResponseSender)>,
 
     /// Access to the database.
-    db: Arc<ConcreteEnv>,
+    env: Arc<ConcreteEnv>,
 }
 
 impl Drop for DatabaseWriter {
@@ -119,7 +110,8 @@ impl Drop for DatabaseWriter {
 impl DatabaseWriter {
     /// The `DatabaseWriter`'s main function.
     ///
-    /// The writer just loops in this function.
+    /// The writer just loops in this function, handling requests forever
+    /// until the request channel is dropped or a panic occurs.
     #[cold]
     #[inline(never)] // Only called once.
     fn main(self) {
@@ -127,7 +119,7 @@ impl DatabaseWriter {
         // 2. Map request to some database function
         // 3. Execute that function, get the result
         // 4. Return the result via channel
-        loop {
+        'main: loop {
             let Ok((request, response_sender)) = self.receiver.recv() else {
                 // If this receive errors, it means that the channel is empty
                 // and disconnected, meaning the other side (all senders) have
@@ -140,60 +132,114 @@ impl DatabaseWriter {
                 return;
             };
 
+            /// How many times should we retry handling the request on resize errors?
+            ///
+            /// This is 1 on automatically resizing databases, meaning there is only 1 iteration.
+            const REQUEST_RETRY_LIMIT: usize = if ConcreteEnv::MANUAL_RESIZE { 3 } else { 1 };
+
             // Map [`Request`]'s to specific database functions.
-            match request {
-                WriteRequest::Example1 => self.example_handler_1(response_sender),
-                WriteRequest::Example2(x) => self.example_handler_2(response_sender, x),
-                WriteRequest::Example3(x) => self.example_handler_3(response_sender, x),
+            //
+            // Both the manual-resize and auto-resize paths will:
+            // 1. Map the request to a function
+            // 2. Call the function
+            // 3. (manual resize only) If resize is needed, resize and retry
+            // 4. (manual resize only) Redo step {1, 2}
+            // 5. Send the function's `Result` back to the requester
+            //
+            // FIXME: there's probably a more elegant way
+            // to represent this retry logic with recursive
+            // functions instead of a loop.
+            'retry: for retry in 0..REQUEST_RETRY_LIMIT {
+                // FIXME: will there be more than 1 write request?
+                // this won't have to be an enum.
+                let response = match &request {
+                    WriteRequest::WriteBlock(block) => write_block(&self.env, block),
+                };
+
+                // If the database needs to resize, do so.
+                if ConcreteEnv::MANUAL_RESIZE && matches!(response, Err(RuntimeError::ResizeNeeded))
+                {
+                    // If this is the last iteration of the outer `for` loop and we
+                    // encounter a resize error _again_, it means something is wrong.
+                    assert_ne!(
+                        retry, REQUEST_RETRY_LIMIT,
+                        "database resize failed maximum of {REQUEST_RETRY_LIMIT} times"
+                    );
+
+                    // Resize the map, and retry the request handling loop.
+                    //
+                    // FIXME:
+                    // We could pass in custom resizes to account for
+                    // batches, i.e., we're about to add ~5GB of data,
+                    // add that much instead of the default 1GB.
+                    // <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L665-L695>
+                    let old = self.env.current_map_size();
+                    let new = self.env.resize_map(None);
+
+                    // TODO: use tracing.
+                    println!("resizing database memory map, old: {old}B, new: {new}B");
+
+                    // Try handling the request again.
+                    continue 'retry;
+                }
+
+                // Automatically resizing databases should not be returning a resize error.
+                #[cfg(debug_assertions)]
+                if !ConcreteEnv::MANUAL_RESIZE {
+                    assert!(
+                        !matches!(response, Err(RuntimeError::ResizeNeeded)),
+                        "auto-resizing database returned a ResizeNeeded error"
+                    );
+                }
+
+                // Send the response back, whether it's an `Ok` or `Err`.
+                if let Err(e) = response_sender.send(response) {
+                    // TODO: use tracing.
+                    println!("database writer failed to send response: {e:?}");
+                }
+
+                continue 'main;
             }
+
+            // Above retry loop should either:
+            // - continue to the next `'main` loop iteration, or...
+            // - ...retry until panic
+            unreachable!();
+        }
+    }
+}
+
+//---------------------------------------------------------------------------------------------------- Handler functions
+// These are the actual functions that do stuff according to the incoming [`Request`].
+//
+// Each function name is a 1-1 mapping (from CamelCase -> snake_case) to
+// the enum variant name, e.g: `BlockExtendedHeader` -> `block_extended_header`.
+//
+// Each function will return the [`Response`] that we
+// should send back to the caller in [`map_request()`].
+
+/// [`WriteRequest::WriteBlock`].
+#[inline]
+fn write_block(env: &ConcreteEnv, block: &VerifiedBlockInformation) -> ResponseResult {
+    let env_inner = env.env_inner();
+    let tx_rw = env_inner.tx_rw()?;
+
+    let result = {
+        let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?;
+        crate::ops::block::add_block(block, &mut tables_mut)
+    };
+
+    match result {
+        Ok(()) => {
+            TxRw::commit(tx_rw)?;
+            Ok(Response::WriteBlockOk)
+        }
+        Err(e) => {
+            // INVARIANT: ensure database atomicity by aborting
+            // the transaction on `add_block()` failures.
+            TxRw::abort(tx_rw)
+                .expect("could not maintain database atomicity by aborting write transaction");
+            Err(e)
         }
     }
-
-    /// Resize the database's memory map.
-    fn resize_map(&self) {
-        // The compiler most likely optimizes out this
-        // entire function call if this returns here.
-        if !ConcreteEnv::MANUAL_RESIZE {
-            return;
-        }
-
-        // INVARIANT:
-        // [`Env`]'s that are `MANUAL_RESIZE` are expected to implement
-        // their internals such that we have exclusive access when calling
-        // this function. We do not handle the exclusion part, `resize_map()`
-        // itself does. The `heed` backend does this with `RwLock`.
-        //
-        // We need mutual exclusion due to:
-        // <http://www.lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5>
-        self.db.resize_map(None);
-        // TODO:
-        // We could pass in custom resizes to account for
-        // batch transactions, i.e., we're about to add ~5GB
-        // of data, add that much instead of the default 1GB.
-        // <https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L665-L695>
-    }
-
-    /// TODO
-    #[inline]
-    #[allow(clippy::unused_self)] // TODO: remove me
-    fn example_handler_1(&self, response_sender: ResponseSender) {
-        let db_result = Ok(Response::Example1);
-        response_sender.send(db_result).unwrap();
-    }
-
-    /// TODO
-    #[inline]
-    #[allow(clippy::unused_self)] // TODO: remove me
-    fn example_handler_2(&self, response_sender: ResponseSender, x: usize) {
-        let db_result = Ok(Response::Example2(x));
-        response_sender.send(db_result).unwrap();
-    }
-
-    /// TODO
-    #[inline]
-    #[allow(clippy::unused_self)] // TODO: remove me
-    fn example_handler_3(&self, response_sender: ResponseSender, x: String) {
-        let db_result = Ok(Response::Example3(x));
-        response_sender.send(db_result).unwrap();
-    }
 }
diff --git a/database/src/storable.rs b/database/src/storable.rs
index ac3f263a..f259523f 100644
--- a/database/src/storable.rs
+++ b/database/src/storable.rs
@@ -1,15 +1,9 @@
 //! (De)serialization for table keys & values.
 
 //---------------------------------------------------------------------------------------------------- Import
-use std::{
-    borrow::{Borrow, Cow},
-    char::ToLowercase,
-    fmt::Debug,
-    io::{Read, Write},
-    sync::Arc,
-};
+use std::{borrow::Borrow, fmt::Debug};
 
-use bytemuck::{Pod, Zeroable};
+use bytemuck::Pod;
 use bytes::Bytes;
 
 //---------------------------------------------------------------------------------------------------- Storable
@@ -25,16 +19,14 @@ use bytes::Bytes;
 /// Any type that implements:
 /// - [`bytemuck::Pod`]
 /// - [`Debug`]
-/// - [`ToOwned`]
 ///
 /// will automatically implement [`Storable`].
 ///
 /// This includes:
 /// - Most primitive types
 /// - All types in [`tables`](crate::tables)
-/// - Slices, e.g, `[T] where T: Storable`
 ///
-/// See [`StorableVec`] for storing slices of `T: Storable`.
+/// See [`StorableVec`] & [`StorableBytes`] for storing slices of `T: Storable`.
 ///
 /// ```rust
 /// # use cuprate_database::*;
@@ -142,6 +134,7 @@ where
 ///
 /// This is needed as `impl Storable for Vec<T>` runs into impl conflicts.
 ///
+/// # Example
 /// ```rust
 /// # use cuprate_database::*;
 /// //---------------------------------------------------- u8
@@ -284,7 +277,7 @@ mod test {
             println!("serialized: {se:?}, deserialized: {de:?}\n");
 
             // Assert we wrote correct amount of bytes.
-            if let Some(len) = T::BYTE_LENGTH {
+            if T::BYTE_LENGTH.is_some() {
                 assert_eq!(se.len(), expected_bytes.len());
             }
             // Assert the data is the same.
diff --git a/database/src/table.rs b/database/src/table.rs
index e117dc15..966a9873 100644
--- a/database/src/table.rs
+++ b/database/src/table.rs
@@ -1,7 +1,6 @@
 //! Database table abstraction; `trait Table`.
 
 //---------------------------------------------------------------------------------------------------- Import
-use std::fmt::Debug;
 
 use crate::{key::Key, storable::Storable};
 
@@ -13,7 +12,7 @@ use crate::{key::Key, storable::Storable};
 /// ## Sealed
 /// This trait is [`Sealed`](https://rust-lang.github.io/api-guidelines/future-proofing.html#sealed-traits-protect-against-downstream-implementations-c-sealed).
 ///
-/// It is, and can only be implemented on the types inside [`tables`][crate::tables].
+/// It is only implemented on the types inside [`tables`][crate::tables].
 pub trait Table: crate::tables::private::Sealed + 'static {
     /// Name of the database table.
     const NAME: &'static str;
diff --git a/database/src/tables.rs b/database/src/tables.rs
index 7944b3ce..0056b0bd 100644
--- a/database/src/tables.rs
+++ b/database/src/tables.rs
@@ -1,22 +1,35 @@
 //! Database tables.
 //!
-//! This module contains all the table definitions used by `cuprate-database`.
+//! # Table marker structs
+//! This module contains all the table definitions used by `cuprate_database`.
+//!
+//! The zero-sized structs here represents the table type;
+//! they all are essentially marker types that implement [`Table`].
+//!
+//! Table structs are `CamelCase`, and their static string
+//! names used by the actual database backend are `snake_case`.
+//!
+//! For example: [`BlockBlobs`] -> `block_blobs`.
+//!
+//! # Traits
+//! This module also contains a set of traits for
+//! accessing _all_ tables defined here at once.
+//!
+//! For example, this is the object returned by [`EnvInner::open_tables`](crate::EnvInner::open_tables).
 
 //---------------------------------------------------------------------------------------------------- Import
 use crate::{
+    database::{DatabaseIter, DatabaseRo, DatabaseRw},
     table::Table,
     types::{
-        Amount, AmountIndex, AmountIndices, BlockBlob, BlockHash, BlockHeight, BlockInfoV1,
-        BlockInfoV2, BlockInfoV3, KeyImage, Output, PreRctOutputId, PrunableBlob, PrunableHash,
-        PrunedBlob, RctOutput, TxHash, TxId, UnlockTime,
+        Amount, AmountIndex, AmountIndices, BlockBlob, BlockHash, BlockHeight, BlockInfo, KeyImage,
+        Output, PreRctOutputId, PrunableBlob, PrunableHash, PrunedBlob, RctOutput, TxBlob, TxHash,
+        TxId, UnlockTime,
     },
 };
 
-//---------------------------------------------------------------------------------------------------- Tables
+//---------------------------------------------------------------------------------------------------- Sealed
 /// Private module, should not be accessible outside this crate.
-///
-/// Used to block outsiders implementing [`Table`].
-/// All [`Table`] types must also implement [`Sealed`].
 pub(super) mod private {
     /// Private sealed trait.
     ///
@@ -24,6 +37,272 @@ pub(super) mod private {
     pub trait Sealed {}
 }
 
+//---------------------------------------------------------------------------------------------------- `trait Tables[Mut]`
+/// Creates:
+/// - `pub trait Tables`
+/// - `pub trait TablesIter`
+/// - `pub trait TablesMut`
+/// - Blanket implementation for `(tuples, containing, all, open, database, tables, ...)`
+///
+/// For why this exists, see: <https://github.com/Cuprate/cuprate/pull/102#pullrequestreview-1978348871>.
+macro_rules! define_trait_tables {
+    ($(
+        // The `T: Table` type     The index in a tuple
+        // |                       containing all tables
+        // v                         v
+        $table:ident => $index:literal
+    ),* $(,)?) => { paste::paste! {
+        /// Object containing all opened [`Table`]s in read-only mode.
+        ///
+        /// This is an encapsulated object that contains all
+        /// available [`Table`]'s in read-only mode.
+        ///
+        /// It is a `Sealed` trait and is only implemented on a
+        /// `(tuple, containing, all, table, types, ...)`.
+        ///
+        /// This is used to return a _single_ object from functions like
+        /// [`EnvInner::open_tables`](crate::EnvInner::open_tables) rather
+        /// than the tuple containing the tables itself.
+        ///
+        /// To replace `tuple.0` style indexing, `field_accessor_functions()`
+        /// are provided on this trait, which essentially map the object to
+        /// fields containing the particular database table, for example:
+        /// ```rust,ignore
+        /// let tables = open_tables();
+        ///
+        /// // The accessor function `block_infos()` returns the field
+        /// // containing an open database table for `BlockInfos`.
+        /// let _ = tables.block_infos();
+        /// ```
+        ///
+        /// See also:
+        /// - [`TablesMut`]
+        /// - [`TablesIter`]
+        pub trait Tables: private::Sealed {
+            // This expands to creating `fn field_accessor_functions()`
+            // for each passed `$table` type.
+            //
+            // It is essentially a mapping to the field
+            // containing the proper opened database table.
+            //
+            // The name of each generated function is
+            // the table type in `snake_case`, e.g., `block_infos()`.
+            $(
+                /// Access an opened
+                #[doc = concat!("[`", stringify!($table), "`]")]
+                /// database.
+                fn [<$table:snake>](&self) -> &impl DatabaseRo<$table>;
+            )*
+
+            /// This returns `true` if all tables are empty.
+            ///
+            /// # Errors
+            /// This returns errors on regular database errors.
+            fn all_tables_empty(&self) -> Result<bool, $crate::error::RuntimeError>;
+        }
+
+        /// Object containing all opened [`Table`]s in read + iter mode.
+        ///
+        /// This is the same as [`Tables`] but includes `_iter()` variants.
+        ///
+        /// Note that this trait is a supertrait of `Tables`,
+        /// as in it can use all of its functions as well.
+        ///
+        /// See [`Tables`] for documentation - this trait exists for the same reasons.
+        pub trait TablesIter: private::Sealed + Tables {
+            $(
+                /// Access an opened read-only + iterable
+                #[doc = concat!("[`", stringify!($table), "`]")]
+                /// database.
+                fn [<$table:snake _iter>](&self) -> &(impl DatabaseRo<$table> + DatabaseIter<$table>);
+            )*
+        }
+
+        /// Object containing all opened [`Table`]s in write mode.
+        ///
+        /// This is the same as [`Tables`] but for mutable accesses.
+        ///
+        /// Note that this trait is a supertrait of `Tables`,
+        /// as in it can use all of its functions as well.
+        ///
+        /// See [`Tables`] for documentation - this trait exists for the same reasons.
+        pub trait TablesMut: private::Sealed + Tables {
+            $(
+                /// Access an opened
+                #[doc = concat!("[`", stringify!($table), "`]")]
+                /// database.
+                fn [<$table:snake _mut>](&mut self) -> &mut impl DatabaseRw<$table>;
+            )*
+        }
+
+        // Implement `Sealed` for all table types.
+        impl<$([<$table:upper>]),*> private::Sealed for ($([<$table:upper>]),*) {}
+
+        // This creates a blanket-implementation for
+        // `(tuple, containing, all, table, types)`.
+        //
+        // There is a generic defined here _for each_ `$table` input.
+        // Specifically, the generic letters are just the table types in UPPERCASE.
+        // Concretely, this expands to something like:
+        // ```rust
+        // impl<BLOCKINFOS, BLOCKBLOBS, BLOCKHEIGHTS, [...]>
+        // ```
+        impl<$([<$table:upper>]),*> Tables
+            // We are implementing `Tables` on a tuple that
+            // contains all those generics specified, i.e.,
+            // a tuple containing all open table types.
+            //
+            // Concretely, this expands to something like:
+            // ```rust
+            // (BLOCKINFOS, BLOCKBLOBS, BLOCKHEIGHTS, [...])
+            // ```
+            // which is just a tuple of the generics defined above.
+            for ($([<$table:upper>]),*)
+        where
+            // This expands to a where bound that asserts each element
+            // in the tuple implements some database table type.
+            //
+            // Concretely, this expands to something like:
+            // ```rust
+            // BLOCKINFOS: DatabaseRo<BlockInfos>,
+            // BLOCKBLOBS: DatabaseRo<BlockBlobs>,
+            // [...]
+            // ```
+            $(
+                [<$table:upper>]: DatabaseRo<$table>,
+            )*
+        {
+            $(
+                // The name of the accessor function is
+                // the table type in `snake_case`, e.g., `block_infos()`.
+                #[inline]
+                fn [<$table:snake>](&self) -> &impl DatabaseRo<$table> {
+                    // The index of the database table in
+                    // the tuple implements the table trait.
+                    &self.$index
+                }
+            )*
+
+            fn all_tables_empty(&self) -> Result<bool, $crate::error::RuntimeError> {
+                $(
+                     if !DatabaseRo::is_empty(&self.$index)? {
+                        return Ok(false);
+                     }
+                )*
+                Ok(true)
+            }
+        }
+
+        // This is the same as the above
+        // `Tables`, but for `TablesIter`.
+        impl<$([<$table:upper>]),*> TablesIter
+            for ($([<$table:upper>]),*)
+        where
+            $(
+                [<$table:upper>]: DatabaseRo<$table> + DatabaseIter<$table>,
+            )*
+        {
+            $(
+                // The name of the accessor function is
+                // the table type in `snake_case` + `_iter`, e.g., `block_infos_iter()`.
+                #[inline]
+                fn [<$table:snake _iter>](&self) -> &(impl DatabaseRo<$table> + DatabaseIter<$table>) {
+                    &self.$index
+                }
+            )*
+        }
+
+        // This is the same as the above
+        // `Tables`, but for `TablesMut`.
+        impl<$([<$table:upper>]),*> TablesMut
+            for ($([<$table:upper>]),*)
+        where
+            $(
+                [<$table:upper>]: DatabaseRw<$table>,
+            )*
+        {
+            $(
+                // The name of the mutable accessor function is
+                // the table type in `snake_case` + `_mut`, e.g., `block_infos_mut()`.
+                #[inline]
+                fn [<$table:snake _mut>](&mut self) -> &mut impl DatabaseRw<$table> {
+                    &mut self.$index
+                }
+            )*
+        }
+    }};
+}
+
+// Input format: $table_type => $index
+//
+// The $index:
+// - Simply increments by 1 for each table
+// - Must be 0..
+// - Must end at the total amount of table types - 1
+//
+// Compile errors will occur if these aren't satisfied.
+//
+// $index is just the `tuple.$index`, as the above [`define_trait_tables`]
+// macro has a blanket impl for `(all, table, types, ...)` and we must map
+// each type to a tuple index explicitly.
+//
+// FIXME: there's definitely an automatic way to do this :)
+define_trait_tables! {
+    BlockInfos => 0,
+    BlockBlobs => 1,
+    BlockHeights => 2,
+    KeyImages => 3,
+    NumOutputs => 4,
+    PrunedTxBlobs => 5,
+    PrunableHashes => 6,
+    Outputs => 7,
+    PrunableTxBlobs => 8,
+    RctOutputs => 9,
+    TxBlobs => 10,
+    TxIds => 11,
+    TxHeights => 12,
+    TxOutputs => 13,
+    TxUnlockTime => 14,
+}
+
+//---------------------------------------------------------------------------------------------------- Table function macro
+/// `crate`-private macro for callings functions on all tables.
+///
+/// This calls the function `$fn` with the optional
+/// arguments `$args` on all tables - returning early
+/// (within whatever scope this is called) if any
+/// of the function calls error.
+///
+/// Else, it evaluates to an `Ok((tuple, of, all, table, types, ...))`,
+/// i.e., an `impl Table[Mut]` wrapped in `Ok`.
+macro_rules! call_fn_on_all_tables_or_early_return {
+    (
+        $($fn:ident $(::)?)*
+        (
+            $($arg:ident),* $(,)?
+        )
+    ) => {{
+        Ok((
+            $($fn ::)*<$crate::tables::BlockInfos>($($arg),*)?,
+            $($fn ::)*<$crate::tables::BlockBlobs>($($arg),*)?,
+            $($fn ::)*<$crate::tables::BlockHeights>($($arg),*)?,
+            $($fn ::)*<$crate::tables::KeyImages>($($arg),*)?,
+            $($fn ::)*<$crate::tables::NumOutputs>($($arg),*)?,
+            $($fn ::)*<$crate::tables::PrunedTxBlobs>($($arg),*)?,
+            $($fn ::)*<$crate::tables::PrunableHashes>($($arg),*)?,
+            $($fn ::)*<$crate::tables::Outputs>($($arg),*)?,
+            $($fn ::)*<$crate::tables::PrunableTxBlobs>($($arg),*)?,
+            $($fn ::)*<$crate::tables::RctOutputs>($($arg),*)?,
+            $($fn ::)*<$crate::tables::TxBlobs>($($arg),*)?,
+            $($fn ::)*<$crate::tables::TxIds>($($arg),*)?,
+            $($fn ::)*<$crate::tables::TxHeights>($($arg),*)?,
+            $($fn ::)*<$crate::tables::TxOutputs>($($arg),*)?,
+            $($fn ::)*<$crate::tables::TxUnlockTime>($($arg),*)?,
+        ))
+    }};
+}
+pub(crate) use call_fn_on_all_tables_or_early_return;
+
 //---------------------------------------------------------------------------------------------------- Table macro
 /// Create all tables, should be used _once_.
 ///
@@ -47,6 +326,9 @@ macro_rules! tables {
             // Table struct.
             $(#[$attr])*
             // The below test show the `snake_case` table name in cargo docs.
+            #[doc = concat!("- Key: [`", stringify!($key), "`]")]
+            #[doc = concat!("- Value: [`", stringify!($value), "`]")]
+            ///
             /// ## Table Name
             /// ```rust
             /// # use cuprate_database::{*,tables::*};
@@ -80,66 +362,109 @@ macro_rules! tables {
 // Notes:
 // - Keep this sorted A-Z (by table name)
 // - Tables are defined in plural to avoid name conflicts with types
-// - If adding/changing a table, also edit the tests in `src/backend/tests.rs`
-//   and edit `Env::open` to make sure it creates the table
+// - If adding/changing a table also edit:
+//   a) the tests in `src/backend/tests.rs`
+//   b) `Env::open` to make sure it creates the table (for all backends)
+//   c) `call_fn_on_all_tables_or_early_return!()` macro defined in this file
 tables! {
-    /// TODO
+    /// Serialized block blobs (bytes).
+    ///
+    /// Contains the serialized version of all blocks.
     BlockBlobs,
     BlockHeight => BlockBlob,
 
-    /// TODO
+    /// Block heights.
+    ///
+    /// Contains the height of all blocks.
     BlockHeights,
     BlockHash => BlockHeight,
 
-    /// TODO
-    BlockInfoV1s,
-    BlockHeight => BlockInfoV1,
+    /// Block information.
+    ///
+    /// Contains metadata of all blocks.
+    BlockInfos,
+    BlockHeight => BlockInfo,
 
-    /// TODO
-    BlockInfoV2s,
-    BlockHeight => BlockInfoV2,
-
-    /// TODO
-    BlockInfoV3s,
-    BlockHeight => BlockInfoV3,
-
-    /// TODO
+    /// Set of key images.
+    ///
+    /// Contains all the key images known to be spent.
+    ///
+    /// This table has `()` as the value type, as in,
+    /// it is a set of key images.
     KeyImages,
     KeyImage => (),
 
-    /// TODO
+    /// Maps an output's amount to the number of outputs with that amount.
+    ///
+    /// For example, if there are 5 outputs with `amount = 123`
+    /// then calling `get(123)` on this table will return 5.
     NumOutputs,
-    Amount => AmountIndex,
+    Amount => u64,
 
-    /// TODO
-    PrunedTxBlobs,
-    TxId => PrunedBlob,
-
-    /// TODO
+    /// Pre-RCT output data.
     Outputs,
     PreRctOutputId => Output,
 
-    /// TODO
+    /// Pruned transaction blobs (bytes).
+    ///
+    /// Contains the pruned portion of serialized transaction data.
+    PrunedTxBlobs,
+    TxId => PrunedBlob,
+
+    /// Prunable transaction blobs (bytes).
+    ///
+    /// Contains the prunable portion of serialized transaction data.
+    // SOMEDAY: impl when `monero-serai` supports pruning
     PrunableTxBlobs,
     TxId => PrunableBlob,
 
-    /// TODO
+    /// Prunable transaction hashes.
+    ///
+    /// Contains the prunable portion of transaction hashes.
+    // SOMEDAY: impl when `monero-serai` supports pruning
     PrunableHashes,
     TxId => PrunableHash,
 
-    /// TODO
+    // SOMEDAY: impl a properties table:
+    // - db version
+    // - pruning seed
+    // Properties,
+    // StorableString => StorableVec,
+
+    /// RCT output data.
     RctOutputs,
     AmountIndex => RctOutput,
 
-    /// TODO
+    /// Transaction blobs (bytes).
+    ///
+    /// Contains the serialized version of all transactions.
+    // SOMEDAY: remove when `monero-serai` supports pruning
+    TxBlobs,
+    TxId => TxBlob,
+
+    /// Transaction indices.
+    ///
+    /// Contains the indices of all transactions.
     TxIds,
     TxHash => TxId,
 
-    /// TODO
+    /// Transaction heights.
+    ///
+    /// Contains the block height associated with all transactions.
     TxHeights,
     TxId => BlockHeight,
 
-    /// TODO
+    /// Transaction outputs.
+    ///
+    /// Contains the list of `AmountIndex`'s of the
+    /// outputs associated with all transactions.
+    TxOutputs,
+    TxId => AmountIndices,
+
+    /// Transaction unlock time.
+    ///
+    /// Contains the unlock time of transactions IF they have one.
+    /// Transactions without unlock times will not exist in this table.
     TxUnlockTime,
     TxId => UnlockTime,
 }
diff --git a/database/src/tests.rs b/database/src/tests.rs
new file mode 100644
index 00000000..ba5e8550
--- /dev/null
+++ b/database/src/tests.rs
@@ -0,0 +1,85 @@
+//! Utilities for `cuprate_database` testing.
+//!
+//! These types/fn's are only:
+//! - enabled on #[cfg(test)]
+//! - only used internally
+
+//---------------------------------------------------------------------------------------------------- Import
+use std::fmt::Debug;
+
+use pretty_assertions::assert_eq;
+
+use crate::{config::ConfigBuilder, tables::Tables, ConcreteEnv, DatabaseRo, Env, EnvInner};
+
+//---------------------------------------------------------------------------------------------------- Struct
+/// Named struct to assert the length of all tables.
+///
+/// This is a struct with fields instead of a function
+/// so that callers can name arguments, otherwise the call-site
+/// is a little confusing, i.e. `assert_table_len(0, 25, 1, 123)`.
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub(crate) struct AssertTableLen {
+    pub(crate) block_infos: u64,
+    pub(crate) block_blobs: u64,
+    pub(crate) block_heights: u64,
+    pub(crate) key_images: u64,
+    pub(crate) num_outputs: u64,
+    pub(crate) pruned_tx_blobs: u64,
+    pub(crate) prunable_hashes: u64,
+    pub(crate) outputs: u64,
+    pub(crate) prunable_tx_blobs: u64,
+    pub(crate) rct_outputs: u64,
+    pub(crate) tx_blobs: u64,
+    pub(crate) tx_ids: u64,
+    pub(crate) tx_heights: u64,
+    pub(crate) tx_unlock_time: u64,
+}
+
+impl AssertTableLen {
+    /// Assert the length of all tables.
+    pub(crate) fn assert(self, tables: &impl Tables) {
+        let other = Self {
+            block_infos: tables.block_infos().len().unwrap(),
+            block_blobs: tables.block_blobs().len().unwrap(),
+            block_heights: tables.block_heights().len().unwrap(),
+            key_images: tables.key_images().len().unwrap(),
+            num_outputs: tables.num_outputs().len().unwrap(),
+            pruned_tx_blobs: tables.pruned_tx_blobs().len().unwrap(),
+            prunable_hashes: tables.prunable_hashes().len().unwrap(),
+            outputs: tables.outputs().len().unwrap(),
+            prunable_tx_blobs: tables.prunable_tx_blobs().len().unwrap(),
+            rct_outputs: tables.rct_outputs().len().unwrap(),
+            tx_blobs: tables.tx_blobs().len().unwrap(),
+            tx_ids: tables.tx_ids().len().unwrap(),
+            tx_heights: tables.tx_heights().len().unwrap(),
+            tx_unlock_time: tables.tx_unlock_time().len().unwrap(),
+        };
+
+        assert_eq!(self, other);
+    }
+}
+
+//---------------------------------------------------------------------------------------------------- fn
+/// Create an `Env` in a temporarily directory.
+/// The directory is automatically removed after the `TempDir` is dropped.
+///
+/// FIXME: changing this to `-> impl Env` causes lifetime errors...
+pub(crate) fn tmp_concrete_env() -> (ConcreteEnv, tempfile::TempDir) {
+    let tempdir = tempfile::tempdir().unwrap();
+    let config = ConfigBuilder::new()
+        .db_directory(tempdir.path().into())
+        .low_power()
+        .build();
+    let env = ConcreteEnv::open(config).unwrap();
+
+    (env, tempdir)
+}
+
+/// Assert all the tables in the environment are empty.
+pub(crate) fn assert_all_tables_are_empty(env: &ConcreteEnv) {
+    let env_inner = env.env_inner();
+    let tx_ro = env_inner.tx_ro().unwrap();
+    let tables = env_inner.open_tables(&tx_ro).unwrap();
+    assert!(tables.all_tables_empty().unwrap());
+    assert_eq!(crate::ops::tx::get_num_tx(tables.tx_ids()).unwrap(), 0);
+}
diff --git a/database/src/transaction.rs b/database/src/transaction.rs
index 6dffae27..e4c310a0 100644
--- a/database/src/transaction.rs
+++ b/database/src/transaction.rs
@@ -1,21 +1,21 @@
 //! Database transaction abstraction; `trait TxRo`, `trait TxRw`.
 
 //---------------------------------------------------------------------------------------------------- Import
-use crate::{config::SyncMode, env::Env, error::RuntimeError};
+use crate::error::RuntimeError;
 
 //---------------------------------------------------------------------------------------------------- TxRo
 /// Read-only database transaction.
 ///
 /// Returned from [`EnvInner::tx_ro`](crate::EnvInner::tx_ro).
 ///
-/// # TODO
-/// I don't think we need this, we can just drop the `tx_ro`?
-/// <https://docs.rs/heed/0.20.0-alpha.9/heed/struct.RoTxn.html#method.commit>
+/// # Commit
+/// It's recommended but may not be necessary to call [`TxRo::commit`] in certain cases:
+/// - <https://docs.rs/heed/0.20.0-alpha.9/heed/struct.RoTxn.html#method.commit>
 pub trait TxRo<'env> {
     /// Commit the read-only transaction.
     ///
     /// # Errors
-    /// This operation is infallible (will always return `Ok(())`) with the `redb` backend.
+    /// This operation will always return `Ok(())` with the `redb` backend.
     fn commit(self) -> Result<(), RuntimeError>;
 }
 
@@ -29,20 +29,15 @@ pub trait TxRw<'env> {
     /// Note that this doesn't necessarily sync the database caches to disk.
     ///
     /// # Errors
-    /// This operation is infallible (will always return `Ok(())`) with the `redb` backend.
+    /// This operation will always return `Ok(())` with the `redb` backend.
     ///
-    /// Else, this will only return:
-    /// - [`RuntimeError::ResizeNeeded`] (if `Env::MANUAL_RESIZE == true`)
-    /// - [`RuntimeError::Io`]
+    /// If `Env::MANUAL_RESIZE == true`,
+    /// [`RuntimeError::ResizeNeeded`] may be returned.
     fn commit(self) -> Result<(), RuntimeError>;
 
     /// Abort the transaction, erasing any writes that have occurred.
     ///
     /// # Errors
-    /// This operation is infallible (will always return `Ok(())`) with the `heed` backend.
-    ///
-    /// Else, this will only return:
-    /// - [`RuntimeError::ResizeNeeded`] (if `Env::MANUAL_RESIZE == true`)
-    /// - [`RuntimeError::Io`]
+    /// This operation will always return `Ok(())` with the `heed` backend.
     fn abort(self) -> Result<(), RuntimeError>;
 }
diff --git a/database/src/types.rs b/database/src/types.rs
index 9ca9c598..5d89d4c4 100644
--- a/database/src/types.rs
+++ b/database/src/types.rs
@@ -1,8 +1,10 @@
 //! Database [table](crate::tables) types.
 //!
-//! This module contains all types used by the database tables.
+//! This module contains all types used by the database tables,
+//! and aliases for common Monero-related types that use the
+//! same underlying primitive type.
 //!
-//! TODO: Add schema here or a link to it.
+//! <!-- FIXME: Add schema here or a link to it when complete -->
 
 /*
  * <============================================> VERY BIG SCARY SAFETY MESSAGE <============================================>
@@ -39,7 +41,7 @@
 #![forbid(unsafe_code)] // if you remove this line i will steal your monero
 
 //---------------------------------------------------------------------------------------------------- Import
-use bytemuck::{AnyBitPattern, NoUninit, Pod, Zeroable};
+use bytemuck::{Pod, Zeroable};
 
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
@@ -47,52 +49,59 @@ use serde::{Deserialize, Serialize};
 use crate::storable::StorableVec;
 
 //---------------------------------------------------------------------------------------------------- Aliases
-// TODO: document these, why they exist, and their purpose.
-//
-// Notes:
-// - Keep this sorted A-Z
+// These type aliases exist as many Monero-related types are the exact same.
+// For clarity, they're given type aliases as to not confuse them.
 
-/// TODO
+/// An output's amount.
 pub type Amount = u64;
 
-/// TODO
+/// The index of an [`Amount`] in a list of duplicate `Amount`s.
 pub type AmountIndex = u64;
 
-/// TODO
+/// A list of [`AmountIndex`]s.
 pub type AmountIndices = StorableVec<AmountIndex>;
 
-/// TODO
+/// A serialized block.
 pub type BlockBlob = StorableVec<u8>;
 
-/// TODO
+/// A block's hash.
 pub type BlockHash = [u8; 32];
 
-/// TODO
+/// A block's height.
 pub type BlockHeight = u64;
 
-/// TODO
+/// A key image.
 pub type KeyImage = [u8; 32];
 
-/// TODO
+/// Pruned serialized bytes.
 pub type PrunedBlob = StorableVec<u8>;
 
-/// TODO
+/// Prunable serialized bytes.
 pub type PrunableBlob = StorableVec<u8>;
 
-/// TODO
+/// A prunable hash.
 pub type PrunableHash = [u8; 32];
 
-/// TODO
+/// A serialized transaction.
+pub type TxBlob = StorableVec<u8>;
+
+/// A transaction's global index, or ID.
 pub type TxId = u64;
 
-/// TODO
+/// A transaction's hash.
 pub type TxHash = [u8; 32];
 
-/// TODO
+/// The unlock time value of an output.
 pub type UnlockTime = u64;
 
 //---------------------------------------------------------------------------------------------------- BlockInfoV1
-/// TODO
+/// An identifier for a pre-RCT [`Output`].
+///
+/// This can also serve as an identifier for [`RctOutput`]'s
+/// when [`PreRctOutputId::amount`] is set to `0`, although,
+/// in that case, only [`AmountIndex`] needs to be known.
+///
+/// This is the key to the [`Outputs`](crate::tables::Outputs) table.
 ///
 /// ```rust
 /// # use std::borrow::*;
@@ -118,121 +127,41 @@ pub type UnlockTime = u64;
 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
 #[repr(C)]
 pub struct PreRctOutputId {
-    /// TODO
+    /// Amount of the output.
+    ///
+    /// This should be `0` if the output is an [`RctOutput`].
     pub amount: Amount,
-    /// TODO
+    /// The index of the output with the same `amount`.
+    ///
+    /// In the case of [`Output`]'s, this is the index of the list
+    /// of outputs with the same clear amount.
+    ///
+    /// In the case of [`RctOutput`]'s, this is the
+    /// global index of _all_ `RctOutput`s.
     pub amount_index: AmountIndex,
 }
 
-//---------------------------------------------------------------------------------------------------- BlockInfoV1
-/// TODO
-///
-/// ```rust
-/// # use std::borrow::*;
-/// # use cuprate_database::{*, types::*};
-/// // Assert Storable is correct.
-/// let a = BlockInfoV1 {
-///     timestamp: 1,
-///     total_generated_coins: 123,
-///     weight: 321,
-///     cumulative_difficulty: 111,
-///     block_hash: [54; 32],
-/// };
-/// let b = Storable::as_bytes(&a);
-/// let c: BlockInfoV1 = Storable::from_bytes(b);
-/// assert_eq!(a, c);
-/// ```
-///
-/// # Size & Alignment
-/// ```rust
-/// # use cuprate_database::types::*;
-/// # use std::mem::*;
-/// assert_eq!(size_of::<BlockInfoV1>(), 64);
-/// assert_eq!(align_of::<BlockInfoV1>(), 8);
-/// ```
-#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
-#[repr(C)]
-pub struct BlockInfoV1 {
-    /// TODO
-    pub timestamp: u64,
-    /// TODO
-    pub total_generated_coins: u64,
-    /// TODO
-    pub weight: u64,
-    /// TODO
-    pub cumulative_difficulty: u64,
-    /// TODO
-    pub block_hash: [u8; 32],
-}
-
-//---------------------------------------------------------------------------------------------------- BlockInfoV2
-/// TODO
-///
-/// ```rust
-/// # use std::borrow::*;
-/// # use cuprate_database::{*, types::*};
-/// // Assert Storable is correct.
-/// let a = BlockInfoV2 {
-///     timestamp: 1,
-///     total_generated_coins: 123,
-///     weight: 321,
-///     block_hash: [54; 32],
-///     cumulative_difficulty: 111,
-///     cumulative_rct_outs: 2389,
-/// };
-/// let b = Storable::as_bytes(&a);
-/// let c: BlockInfoV2 = Storable::from_bytes(b);
-/// assert_eq!(a, c);
-/// ```
-///
-/// # Size & Alignment
-/// ```rust
-/// # use cuprate_database::types::*;
-/// # use std::mem::*;
-/// assert_eq!(size_of::<BlockInfoV2>(), 72);
-/// assert_eq!(align_of::<BlockInfoV2>(), 8);
-/// ```
-#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
-#[repr(C)]
-pub struct BlockInfoV2 {
-    /// TODO
-    pub timestamp: u64,
-    /// TODO
-    pub total_generated_coins: u64,
-    /// TODO
-    pub weight: u64,
-    /// TODO
-    pub block_hash: [u8; 32],
-    /// TODO
-    pub cumulative_difficulty: u64,
-    /// TODO
-    ///
-    /// TODO: note that this is originally u32,
-    /// but is u64 here for padding reasons.
-    pub cumulative_rct_outs: u64,
-}
-
 //---------------------------------------------------------------------------------------------------- BlockInfoV3
-/// TODO
+/// Block information.
+///
+/// This is the value in the [`BlockInfos`](crate::tables::BlockInfos) table.
 ///
 /// ```rust
 /// # use std::borrow::*;
 /// # use cuprate_database::{*, types::*};
 /// // Assert Storable is correct.
-/// let a = BlockInfoV3 {
+/// let a = BlockInfo {
 ///     timestamp: 1,
-///     total_generated_coins: 123,
+///     cumulative_generated_coins: 123,
 ///     weight: 321,
-///     cumulative_difficulty_low: 111,
+///     cumulative_difficulty_low: 112,
 ///     cumulative_difficulty_high: 112,
 ///     block_hash: [54; 32],
 ///     cumulative_rct_outs: 2389,
 ///     long_term_weight: 2389,
 /// };
 /// let b = Storable::as_bytes(&a);
-/// let c: BlockInfoV3 = Storable::from_bytes(b);
+/// let c: BlockInfo = Storable::from_bytes(b);
 /// assert_eq!(a, c);
 /// ```
 ///
@@ -240,34 +169,70 @@ pub struct BlockInfoV2 {
 /// ```rust
 /// # use cuprate_database::types::*;
 /// # use std::mem::*;
-/// assert_eq!(size_of::<BlockInfoV3>(), 88);
-/// assert_eq!(align_of::<BlockInfoV3>(), 8);
+/// assert_eq!(size_of::<BlockInfo>(), 88);
+/// assert_eq!(align_of::<BlockInfo>(), 8);
 /// ```
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
 #[repr(C)]
-pub struct BlockInfoV3 {
-    /// TODO
+pub struct BlockInfo {
+    /// The UNIX time at which the block was mined.
     pub timestamp: u64,
-    /// TODO
-    pub total_generated_coins: u64,
-    /// TODO
+    /// The total amount of coins mined in all blocks so far, including this block's.
+    pub cumulative_generated_coins: u64,
+    /// The adjusted block size, in bytes.
+    ///
+    /// See [`block_weight`](https://monero-book.cuprate.org/consensus_rules/blocks/weights.html#blocks-weight).
     pub weight: u64,
-    // Maintain 8 byte alignment.
-    /// TODO
+    /// Least-significant 64 bits of the 128-bit cumulative difficulty.
     pub cumulative_difficulty_low: u64,
-    /// TODO
+    /// Most-significant 64 bits of the 128-bit cumulative difficulty.
     pub cumulative_difficulty_high: u64,
-    /// TODO
+    /// The block's hash.
     pub block_hash: [u8; 32],
-    /// TODO
+    /// The total amount of RCT outputs so far, including this block's.
     pub cumulative_rct_outs: u64,
-    /// TODO
+    /// The long term block weight, based on the median weight of the preceding `100_000` blocks.
+    ///
+    /// See [`long_term_weight`](https://monero-book.cuprate.org/consensus_rules/blocks/weights.html#long-term-block-weight).
     pub long_term_weight: u64,
 }
 
+//---------------------------------------------------------------------------------------------------- OutputFlags
+bitflags::bitflags! {
+    /// Bit flags for [`Output`]s and [`RctOutput`]s.
+    ///
+    /// Currently only the first bit is used and, if set,
+    /// it means this output has a non-zero unlock time.
+    ///
+    /// ```rust
+    /// # use std::borrow::*;
+    /// # use cuprate_database::{*, types::*};
+    /// // Assert Storable is correct.
+    /// let a = OutputFlags::NON_ZERO_UNLOCK_TIME;
+    /// let b = Storable::as_bytes(&a);
+    /// let c: OutputFlags = Storable::from_bytes(b);
+    /// assert_eq!(a, c);
+    /// ```
+    ///
+    /// # Size & Alignment
+    /// ```rust
+    /// # use cuprate_database::types::*;
+    /// # use std::mem::*;
+    /// assert_eq!(size_of::<OutputFlags>(), 4);
+    /// assert_eq!(align_of::<OutputFlags>(), 4);
+    /// ```
+    #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+    #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
+    #[repr(transparent)]
+    pub struct OutputFlags: u32 {
+        /// This output has a non-zero unlock time.
+        const NON_ZERO_UNLOCK_TIME = 0b0000_0001;
+    }
+}
+
 //---------------------------------------------------------------------------------------------------- Output
-/// TODO
+/// A pre-RCT (v1) output's data.
 ///
 /// ```rust
 /// # use std::borrow::*;
@@ -276,7 +241,7 @@ pub struct BlockInfoV3 {
 /// let a = Output {
 ///     key: [1; 32],
 ///     height: 1,
-///     output_flags: 0,
+///     output_flags: OutputFlags::empty(),
 ///     tx_idx: 3,
 /// };
 /// let b = Storable::as_bytes(&a);
@@ -295,18 +260,20 @@ pub struct BlockInfoV3 {
 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
 #[repr(C)]
 pub struct Output {
-    /// TODO
+    /// The public key of the output.
     pub key: [u8; 32],
-    /// We could get this from the tx_idx with the Tx Heights table but that would require another look up per out.
+    /// The block height this output belongs to.
+    // PERF: We could get this from the tx_idx with the `TxHeights`
+    // table but that would require another look up per out.
     pub height: u32,
-    /// Bit flags for this output, currently only the first bit is used and, if set, it means this output has a non-zero unlock time.
-    pub output_flags: u32,
-    /// TODO
+    /// Bit flags for this output.
+    pub output_flags: OutputFlags,
+    /// The index of the transaction this output belongs to.
     pub tx_idx: u64,
 }
 
 //---------------------------------------------------------------------------------------------------- RctOutput
-/// TODO
+/// An RCT (v2+) output's data.
 ///
 /// ```rust
 /// # use std::borrow::*;
@@ -315,7 +282,7 @@ pub struct Output {
 /// let a = RctOutput {
 ///     key: [1; 32],
 ///     height: 1,
-///     output_flags: 0,
+///     output_flags: OutputFlags::empty(),
 ///     tx_idx: 3,
 ///     commitment: [3; 32],
 /// };
@@ -335,13 +302,15 @@ pub struct Output {
 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
 #[repr(C)]
 pub struct RctOutput {
-    /// TODO
+    /// The public key of the output.
     pub key: [u8; 32],
-    /// We could get this from the tx_idx with the Tx Heights table but that would require another look up per out.
+    /// The block height this output belongs to.
+    // PERF: We could get this from the tx_idx with the `TxHeights`
+    // table but that would require another look up per out.
     pub height: u32,
     /// Bit flags for this output, currently only the first bit is used and, if set, it means this output has a non-zero unlock time.
-    pub output_flags: u32,
-    /// TODO
+    pub output_flags: OutputFlags,
+    /// The index of the transaction this output belongs to.
     pub tx_idx: u64,
     /// The amount commitment of this output.
     pub commitment: [u8; 32],
diff --git a/database/src/unsafe_sendable.rs b/database/src/unsafe_sendable.rs
new file mode 100644
index 00000000..94472933
--- /dev/null
+++ b/database/src/unsafe_sendable.rs
@@ -0,0 +1,85 @@
+//! Wrapper type for partially-`unsafe` usage of `T: !Send`.
+
+//---------------------------------------------------------------------------------------------------- Import
+use std::{
+    borrow::Borrow,
+    ops::{Deref, DerefMut},
+};
+
+use bytemuck::TransparentWrapper;
+
+//---------------------------------------------------------------------------------------------------- Aliases
+#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, TransparentWrapper)]
+#[repr(transparent)]
+/// A wrapper type that `unsafe`ly implements `Send` for any `T`.
+///
+/// This is a marker/wrapper type that allows wrapping
+/// any type `T` such that it implements `Send`.
+///
+/// This is to be used when `T` is `Send`, but only in certain
+/// situations not provable to the compiler, or is otherwise
+/// a pain to prove and/or less efficient.
+///
+/// It is up to the users of this type to ensure their
+/// usage of `UnsafeSendable` are actually safe.
+///
+/// Notably, `heed`'s table type uses this inside `service`.
+pub(crate) struct UnsafeSendable<T>(T);
+
+#[allow(clippy::non_send_fields_in_send_ty)]
+// SAFETY: Users ensure that their usage of this type is safe.
+unsafe impl<T> Send for UnsafeSendable<T> {}
+
+impl<T> UnsafeSendable<T> {
+    /// Create a new [`UnsafeSendable`].
+    ///
+    /// # Safety
+    /// By constructing this type, you must ensure the usage
+    /// of the resulting `Self` follows all the [`Send`] rules.
+    pub(crate) const unsafe fn new(t: T) -> Self {
+        Self(t)
+    }
+
+    /// Extract the inner `T`.
+    #[allow(dead_code)]
+    pub(crate) fn into_inner(self) -> T {
+        self.0
+    }
+}
+
+impl<T> Borrow<T> for UnsafeSendable<T> {
+    fn borrow(&self) -> &T {
+        &self.0
+    }
+}
+
+impl<T> AsRef<T> for UnsafeSendable<T> {
+    fn as_ref(&self) -> &T {
+        &self.0
+    }
+}
+
+impl<T> AsMut<T> for UnsafeSendable<T> {
+    fn as_mut(&mut self) -> &mut T {
+        &mut self.0
+    }
+}
+
+impl<T> Deref for UnsafeSendable<T> {
+    type Target = T;
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl<T> DerefMut for UnsafeSendable<T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
+//---------------------------------------------------------------------------------------------------- Tests
+#[cfg(test)]
+mod test {
+    // use super::*;
+}
diff --git a/helper/Cargo.toml b/helper/Cargo.toml
index 306143a3..59e4e71d 100644
--- a/helper/Cargo.toml
+++ b/helper/Cargo.toml
@@ -10,22 +10,24 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/consensus"
 
 [features]
 # All features on by default.
-default   = ["std", "atomic", "asynch", "fs", "num", "time", "thread", "constants"]
+default   = ["std", "atomic", "asynch", "fs", "num", "map", "time", "thread", "constants"]
 std       = []
 atomic    = ["dep:crossbeam"]
 asynch    = ["dep:futures", "dep:rayon"]
 constants = []
 fs        = ["dep:dirs"]
 num       = []
+map       = ["dep:monero-serai"]
 time      = ["dep:chrono", "std"]
 thread    = ["std", "dep:target_os_lib"]
 
 [dependencies]
-crossbeam = { workspace = true, optional = true }
-chrono    = { workspace = true, optional = true, features = ["std", "clock"] }
-dirs      = { workspace = true, optional = true }
-futures   = { workspace = true, optional = true, features = ["std"] }
-rayon     = { workspace = true, optional = true }
+crossbeam    = { workspace = true, optional = true }
+chrono       = { workspace = true, optional = true, features = ["std", "clock"] }
+dirs         = { workspace = true, optional = true }
+futures      = { workspace = true, optional = true, features = ["std"] }
+monero-serai = { workspace = true, optional = true }
+rayon        = { workspace = true, optional = true }
 
 # This is kinda a stupid work around.
 # [thread] needs to activate one of these libs (windows|libc)
diff --git a/helper/src/fs.rs b/helper/src/fs.rs
index 9a08b3c7..2e859229 100644
--- a/helper/src/fs.rs
+++ b/helper/src/fs.rs
@@ -1,11 +1,31 @@
 //! Cuprate directories and filenames.
 //!
-//! # TODO
-//! Document how environment variables can change these.
+//! # Environment variables on Linux
+//! Note that this module's functions use [`dirs`],
+//! which adheres to the XDG standard on Linux.
 //!
-//! # Reference
-//! <https://github.com/Cuprate/cuprate/issues/46>
-//! <https://docs.rs/dirs>
+//! This means that the values returned by these functions
+//! may change at runtime depending on environment variables,
+//! for example:
+//!
+//! By default the config directory is `~/.config`, however
+//! if `$XDG_CONFIG_HOME` is set to something, that will be
+//! used instead.
+//!
+//! ```rust
+//! # use cuprate_helper::fs::*;
+//! # if cfg!(target_os = "linux") {
+//! std::env::set_var("XDG_CONFIG_HOME", "/custom/path");
+//! assert_eq!(
+//!     cuprate_config_dir().to_string_lossy(),
+//!     "/custom/path/cuprate"
+//! );
+//! # }
+//! ```
+//!
+//! Reference:
+//! - <https://github.com/Cuprate/cuprate/issues/46>
+//! - <https://docs.rs/dirs>
 
 //---------------------------------------------------------------------------------------------------- Use
 use std::{
diff --git a/helper/src/lib.rs b/helper/src/lib.rs
index fc947a65..90f420d6 100644
--- a/helper/src/lib.rs
+++ b/helper/src/lib.rs
@@ -51,6 +51,9 @@ pub mod network;
 #[cfg(feature = "num")]
 pub mod num;
 
+#[cfg(feature = "map")]
+pub mod map;
+
 #[cfg(feature = "thread")]
 pub mod thread;
 
diff --git a/helper/src/map.rs b/helper/src/map.rs
new file mode 100644
index 00000000..96d9f615
--- /dev/null
+++ b/helper/src/map.rs
@@ -0,0 +1,107 @@
+//! Mapping of data types.
+//!
+//! This module provides functions solely for mapping data types into others, mostly similar ones.
+//!
+//! `#[no_std]` compatible.
+
+//---------------------------------------------------------------------------------------------------- Use
+use monero_serai::transaction::Timelock;
+
+//---------------------------------------------------------------------------------------------------- `(u64, u64) <-> u128`
+/// Split a [`u128`] value into 2 64-bit values.
+///
+/// The tuple returned is `(low, high)` where `low` is the least significant
+/// 64-bits of `number`, and `high` is the most significant.
+///
+/// Note that the output of this function are `u64` representations of _bits_, not numerical values.
+///
+/// See [`combine_low_high_bits_to_u128`] for the inverse function.
+///
+/// ```rust
+/// # use cuprate_helper::map::*;
+/// let value = u128::MAX - 1;
+/// let low = u64::MAX - 1;
+/// let high = u64::MAX;
+///
+/// assert_eq!(split_u128_into_low_high_bits(value), (low, high));
+/// ```
+#[inline]
+pub const fn split_u128_into_low_high_bits(value: u128) -> (u64, u64) {
+    (value as u64, (value >> 64) as u64)
+}
+
+/// Combine 2 64-bit values into a single [`u128`] value.
+///
+/// The inputs:
+/// - `low_bits` are the _least_ significant 64-bits of `cumulative_difficulty`
+/// - `high_bits` are the _most_ significant 64-bits of `cumulative_difficulty`
+///
+/// Note that `low_bits` & `high_bits` should be `u64` representation of _bits_, not numerical values.
+///
+/// See [`split_u128_into_low_high_bits`] for the inverse function.
+///
+/// ```rust
+/// # use cuprate_helper::map::*;
+/// let value = u128::MAX - 1;
+/// let low = u64::MAX - 1;
+/// let high = u64::MAX;
+///
+/// assert_eq!(combine_low_high_bits_to_u128(low, high), value);
+/// ```
+#[inline]
+pub const fn combine_low_high_bits_to_u128(low_bits: u64, high_bits: u64) -> u128 {
+    let res = (high_bits as u128) << 64;
+    res | (low_bits as u128)
+}
+
+//---------------------------------------------------------------------------------------------------- Timelock
+/// Map a [`u64`] to a [`Timelock`].
+///
+/// Height/time is not differentiated via type, but rather:
+/// "height is any value less than 500_000_000 and timestamp is any value above"
+/// so the `u64/usize` is stored without any tag.
+///
+/// See [`timelock_to_u64`] for the inverse function.
+///
+/// - <https://github.com/Cuprate/cuprate/pull/102#discussion_r1558504285>
+/// - <https://github.com/serai-dex/serai/blob/bc1dec79917d37d326ac3d9bc571a64131b0424a/coins/monero/src/transaction.rs#L139>
+///
+/// ```rust
+/// # use cuprate_helper::map::*;
+/// # use monero_serai::transaction::*;
+/// assert_eq!(u64_to_timelock(0), Timelock::None);
+/// assert_eq!(u64_to_timelock(499_999_999), Timelock::Block(499_999_999));
+/// assert_eq!(u64_to_timelock(500_000_000), Timelock::Time(500_000_000));
+/// ```
+pub fn u64_to_timelock(u: u64) -> Timelock {
+    if u == 0 {
+        Timelock::None
+    } else if u < 500_000_000 {
+        Timelock::Block(usize::try_from(u).unwrap())
+    } else {
+        Timelock::Time(u)
+    }
+}
+
+/// Map [`Timelock`] to a [`u64`].
+///
+/// See [`u64_to_timelock`] for the inverse function and more documentation.
+///
+/// ```rust
+/// # use cuprate_helper::map::*;
+/// # use monero_serai::transaction::*;
+/// assert_eq!(timelock_to_u64(Timelock::None), 0);
+/// assert_eq!(timelock_to_u64(Timelock::Block(499_999_999)), 499_999_999);
+/// assert_eq!(timelock_to_u64(Timelock::Time(500_000_000)), 500_000_000);
+/// ```
+pub fn timelock_to_u64(timelock: Timelock) -> u64 {
+    match timelock {
+        Timelock::None => 0,
+        Timelock::Block(u) => u64::try_from(u).unwrap(),
+        Timelock::Time(u) => u,
+    }
+}
+
+//---------------------------------------------------------------------------------------------------- Tests
+#[cfg(test)]
+mod test {}
diff --git a/misc/gpg_keys/syntheticbird.asc b/misc/gpg_keys/syntheticbird.asc
new file mode 100644
index 00000000..963b7f73
--- /dev/null
+++ b/misc/gpg_keys/syntheticbird.asc
@@ -0,0 +1,14 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mDMEZb0y4RYJKwYBBAHaRw8BAQdAvMid+QsSxLULIkKPLf0XWgPxaoG89qPNiQ4S
+fXH0BfW0VlN5bnRoZXRpY0JpcmQ0NSAoQ3VwcmF0ZSdzIGRldmVsb3BlcikgPHNv
+bWVvbmVlbHNlLmlzX29uLmdpdGh1Yi5yaW83eEBzaW1wbGVsb2dpbi5jb20+iJME
+ExYKADsWIQQEmOfWc9FTBiAKoHnHaXP3SFIeEQUCZb0y4QIbAwULCQgHAgIiAgYV
+CgkICwIEFgIDAQIeBwIXgAAKCRDHaXP3SFIeEUx+AQDYd7t75+V4/aSTczLxMGuT
+A84qGRuYNStXUJzjV8F21wD/YVlybZcr9dDQ/+YOgh5aXBzo+oGm+XhhSbI3QdIX
+LAC4OARlvTLhEgorBgEEAZdVAQUBAQdAgRoSFUmnCqETElyry97kFwsdzlNyldk2
+ZPgH9J4fCHwDAQgHiHgEGBYKACAWIQQEmOfWc9FTBiAKoHnHaXP3SFIeEQUCZb0y
+4QIbDAAKCRDHaXP3SFIeETDSAP4k8+jUaStnjrkzN1jvRg136qNfwe8ZzjrsWJ0n
+FOS8zAEA/fwRjRyvEP28KJNiKdyhDYWYJTpyLGTiPP8b43NsHAM=
+=gqqy
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/misc/gpg_keys/syntheticbird45.asc b/misc/gpg_keys/syntheticbird45.asc
deleted file mode 100644
index 14d48cf2..00000000
--- a/misc/gpg_keys/syntheticbird45.asc
+++ /dev/null
@@ -1,15 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-
-mE8EZAt90BMFK4EEAAoCAwS8WnB3wMu+JxWm3LpuHO1jcdwIlMjndqoGCcJnFEKm
-shkx1eE21AoCGJYYAjeVLrazF5hqTzs6UpBuP7ZNaXvJtEBTeW50aGV0aWNCaXJk
-NDUgPHNvbWVvbmVlbHNlLmlzX29uLmdpdGh1Yi5yaW83eEBzaW1wbGVsb2dpbi5j
-b20+iJAEExMIADgWIQTX0AOzMdcNEMyKDV31QokN0AEPEQUCZAt90AIbAwULCQgH
-AgYVCgkICwIEFgIDAQIeAQIXgAAKCRD1QokN0AEPEWp0AQCDCOdgi3LRFLrF/rR9
-zBy6ceMgAp4Z/GJMO66je3BeIgD9HPo7OkRsKvI1kCf7X9KDV6M0+bmYpC23HYpN
-1zWnq++4UwRkC33QEgUrgQQACgIDBGfPz0WQRKwicAMkUF2InuOns4aU/1bDwidd
-wP426408APfJ7vTtKOVFjfHzKLLiw1Z0texwhBL0y76nggkzVbMDAQgHiHgEGBMI
-ACAWIQTX0AOzMdcNEMyKDV31QokN0AEPEQUCZAt90AIbDAAKCRD1QokN0AEPERQg
-APsHUaCbt1BByhXpVu34C9bY6P1Sw9ARpfl9cc2kAEnQRQD+Klmx13c/WOj6euF6
-RMKtt34En+0xhP99yfEpoofta/0=
-=Pkk7
------END PGP PUBLIC KEY BLOCK-----
diff --git a/net/epee-encoding/src/container_as_blob.rs b/net/epee-encoding/src/container_as_blob.rs
index 164acb25..084b43bb 100644
--- a/net/epee-encoding/src/container_as_blob.rs
+++ b/net/epee-encoding/src/container_as_blob.rs
@@ -39,11 +39,7 @@ impl<T: Containerable + EpeeValue> EpeeValue for ContainerAsBlob<T> {
         }
 
         Ok(ContainerAsBlob(
-            bytes
-                .windows(T::SIZE)
-                .step_by(T::SIZE)
-                .map(T::from_bytes)
-                .collect(),
+            bytes.chunks(T::SIZE).map(T::from_bytes).collect(),
         ))
     }
 
diff --git a/net/epee-encoding/src/error.rs b/net/epee-encoding/src/error.rs
index e344322c..4b3c7b0d 100644
--- a/net/epee-encoding/src/error.rs
+++ b/net/epee-encoding/src/error.rs
@@ -1,5 +1,8 @@
-use core::fmt::{Debug, Formatter};
-use core::{num::TryFromIntError, str::Utf8Error};
+use core::{
+    fmt::{Debug, Formatter},
+    num::TryFromIntError,
+    str::Utf8Error,
+};
 
 pub type Result<T> = core::result::Result<T, Error>;
 
diff --git a/net/epee-encoding/src/value.rs b/net/epee-encoding/src/value.rs
index 540b1295..ef42241b 100644
--- a/net/epee-encoding/src/value.rs
+++ b/net/epee-encoding/src/value.rs
@@ -308,11 +308,7 @@ impl<const N: usize> EpeeValue for ByteArrayVec<N> {
             return Err(Error::Format("Byte array exceeded max length"));
         }
 
-        if r.remaining()
-            < usize::try_from(len)?
-                .checked_mul(N)
-                .ok_or(Error::Value("Length of field is too long".to_string()))?
-        {
+        if r.remaining() < usize::try_from(len)? {
             return Err(Error::IO("Not enough bytes to fill object"));
         }
 
diff --git a/net/fixed-bytes/src/lib.rs b/net/fixed-bytes/src/lib.rs
index c7b71151..8776d309 100644
--- a/net/fixed-bytes/src/lib.rs
+++ b/net/fixed-bytes/src/lib.rs
@@ -1,8 +1,9 @@
-use core::ops::Deref;
-use std::fmt::{Debug, Formatter};
-use std::ops::Index;
+use core::{
+    fmt::{Debug, Formatter},
+    ops::{Deref, Index},
+};
 
-use bytes::Bytes;
+use bytes::{BufMut, Bytes, BytesMut};
 
 #[cfg_attr(feature = "std", derive(thiserror::Error))]
 pub enum FixedByteError {
@@ -101,6 +102,40 @@ impl<const N: usize> ByteArrayVec<N> {
     pub fn take_bytes(self) -> Bytes {
         self.0
     }
+
+    /// Splits the byte array vec into two at the given index.
+    ///
+    /// Afterwards self contains elements [0, at), and the returned [`ByteArrayVec`] contains elements [at, len).
+    ///
+    /// This is an O(1) operation that just increases the reference count and sets a few indices.
+    ///
+    /// # Panics
+    /// Panics if at > len.
+    pub fn split_off(&mut self, at: usize) -> Self {
+        Self(self.0.split_off(at * N))
+    }
+}
+
+impl<const N: usize> From<&ByteArrayVec<N>> for Vec<[u8; N]> {
+    fn from(value: &ByteArrayVec<N>) -> Self {
+        let mut out = Vec::with_capacity(value.len());
+        for i in 0..value.len() {
+            out.push(value[i])
+        }
+
+        out
+    }
+}
+
+impl<const N: usize> From<Vec<[u8; N]>> for ByteArrayVec<N> {
+    fn from(value: Vec<[u8; N]>) -> Self {
+        let mut bytes = BytesMut::with_capacity(N * value.len());
+        for i in value.into_iter() {
+            bytes.extend_from_slice(&i)
+        }
+
+        ByteArrayVec(bytes.freeze())
+    }
 }
 
 impl<const N: usize> TryFrom<Bytes> for ByteArrayVec<N> {
@@ -115,8 +150,38 @@ impl<const N: usize> TryFrom<Bytes> for ByteArrayVec<N> {
     }
 }
 
+impl<const N: usize> From<[u8; N]> for ByteArrayVec<N> {
+    fn from(value: [u8; N]) -> Self {
+        ByteArrayVec(Bytes::copy_from_slice(value.as_slice()))
+    }
+}
+
+impl<const N: usize, const LEN: usize> From<[[u8; N]; LEN]> for ByteArrayVec<N> {
+    fn from(value: [[u8; N]; LEN]) -> Self {
+        let mut bytes = BytesMut::with_capacity(N * LEN);
+
+        for val in value.into_iter() {
+            bytes.put_slice(val.as_slice());
+        }
+
+        ByteArrayVec(bytes.freeze())
+    }
+}
+
+impl<const N: usize> TryFrom<Vec<u8>> for ByteArrayVec<N> {
+    type Error = FixedByteError;
+
+    fn try_from(value: Vec<u8>) -> Result<Self, Self::Error> {
+        if value.len() % N != 0 {
+            return Err(FixedByteError::InvalidLength);
+        }
+
+        Ok(ByteArrayVec(Bytes::from(value)))
+    }
+}
+
 impl<const N: usize> Index<usize> for ByteArrayVec<N> {
-    type Output = [u8; 32];
+    type Output = [u8; N];
 
     fn index(&self, index: usize) -> &Self::Output {
         if (index + 1) * N > self.0.len() {
diff --git a/net/monero-wire/Cargo.toml b/net/monero-wire/Cargo.toml
index 611fb080..882b3644 100644
--- a/net/monero-wire/Cargo.toml
+++ b/net/monero-wire/Cargo.toml
@@ -15,7 +15,8 @@ levin-cuprate = {path="../levin"}
 epee-encoding = { path = "../epee-encoding" }
 fixed-bytes = { path = "../fixed-bytes" }
 
-bytes = { workspace = true }
+bitflags = { workspace = true, features = ["std"] }
+bytes = { workspace = true, features = ["std"] }
 thiserror = { workspace = true }
 
 [dev-dependencies]
diff --git a/net/monero-wire/src/p2p/admin.rs b/net/monero-wire/src/p2p/admin.rs
index 95d2f1b0..95ffef2d 100644
--- a/net/monero-wire/src/p2p/admin.rs
+++ b/net/monero-wire/src/p2p/admin.rs
@@ -139,8 +139,7 @@ mod tests {
             my_port: 0,
             network_id: [
                 18, 48, 241, 113, 97, 4, 65, 97, 23, 49, 0, 130, 22, 161, 161, 16,
-            ]
-            .into(),
+            ],
             peer_id: 9671405426614699871,
             support_flags: PeerSupportFlags::from(1_u32),
             rpc_port: 0,
@@ -945,8 +944,7 @@ mod tests {
             my_port: 18080,
             network_id: [
                 18, 48, 241, 113, 97, 4, 65, 97, 23, 49, 0, 130, 22, 161, 161, 16,
-            ]
-            .into(),
+            ],
             peer_id: 6037804360359455404,
             support_flags: PeerSupportFlags::from(1_u32),
             rpc_port: 18089,
diff --git a/net/monero-wire/src/p2p/common.rs b/net/monero-wire/src/p2p/common.rs
index c05bfcfa..74babefe 100644
--- a/net/monero-wire/src/p2p/common.rs
+++ b/net/monero-wire/src/p2p/common.rs
@@ -15,7 +15,9 @@
 
 //! Common types that are used across multiple messages.
 
+use bitflags::bitflags;
 use bytes::{Buf, BufMut, Bytes};
+
 use epee_encoding::{epee_object, EpeeValue, InnerMarker};
 use fixed_bytes::ByteArray;
 
@@ -24,6 +26,13 @@ use crate::NetworkAddress;
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub struct PeerSupportFlags(u32);
 
+bitflags! {
+    impl PeerSupportFlags: u32 {
+        const FLUFFY_BLOCKS = 0b0000_0001;
+        const _ = !0;
+    }
+}
+
 impl From<u32> for PeerSupportFlags {
     fn from(value: u32) -> Self {
         PeerSupportFlags(value)
@@ -42,27 +51,14 @@ impl<'a> From<&'a PeerSupportFlags> for &'a u32 {
     }
 }
 
-impl PeerSupportFlags {
-    //const FLUFFY_BLOCKS: u32 = 0b0000_0001;
-
-    pub fn is_empty(&self) -> bool {
-        self.0 == 0
-    }
-}
-
-impl From<u8> for PeerSupportFlags {
-    fn from(value: u8) -> Self {
-        PeerSupportFlags(value.into())
-    }
-}
-
 /// Basic Node Data, information on the connected peer
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct BasicNodeData {
     /// Port
     pub my_port: u32,
     /// The Network Id
-    pub network_id: ByteArray<16>,
+    // We don't use ByteArray here to allow users to keep this data long term.
+    pub network_id: [u8; 16],
     /// Peer ID
     pub peer_id: u64,
     /// The Peers Support Flags
@@ -79,7 +75,7 @@ pub struct BasicNodeData {
 epee_object! {
     BasicNodeData,
     my_port: u32,
-    network_id: ByteArray<16>,
+    network_id: [u8; 16],
     peer_id: u64,
     support_flags: PeerSupportFlags as u32 = 0_u32,
     rpc_port: u16 = 0_u16,
@@ -101,7 +97,8 @@ pub struct CoreSyncData {
     /// (If this is not in the message the default is 0)
     pub pruning_seed: u32,
     /// Hash of the top block
-    pub top_id: ByteArray<32>,
+    // We don't use ByteArray here to allow users to keep this data long term.
+    pub top_id: [u8; 32],
     /// Version of the top block
     pub top_version: u8,
 }
@@ -112,7 +109,7 @@ epee_object! {
     cumulative_difficulty_top64: u64 = 0_u64,
     current_height: u64,
     pruning_seed: u32 = 0_u32,
-    top_id: ByteArray<32>,
+    top_id: [u8; 32],
     top_version: u8 = 0_u8,
 }
 
@@ -131,7 +128,7 @@ impl CoreSyncData {
             cumulative_difficulty_top64,
             current_height,
             pruning_seed,
-            top_id: top_id.into(),
+            top_id,
             top_version,
         }
     }
diff --git a/net/monero-wire/src/p2p/protocol.rs b/net/monero-wire/src/p2p/protocol.rs
index 1362e0ad..4dc9a928 100644
--- a/net/monero-wire/src/p2p/protocol.rs
+++ b/net/monero-wire/src/p2p/protocol.rs
@@ -114,7 +114,7 @@ pub struct ChainResponse {
     /// Total Height
     pub total_height: u64,
     /// Cumulative Difficulty Low
-    pub cumulative_difficulty: u64,
+    pub cumulative_difficulty_low64: u64,
     /// Cumulative Difficulty High
     pub cumulative_difficulty_top64: u64,
     /// Block IDs
@@ -125,11 +125,19 @@ pub struct ChainResponse {
     pub first_block: Bytes,
 }
 
+impl ChainResponse {
+    #[inline]
+    pub fn cumulative_difficulty(&self) -> u128 {
+        let cumulative_difficulty = self.cumulative_difficulty_top64 as u128;
+        cumulative_difficulty << 64 | self.cumulative_difficulty_low64 as u128
+    }
+}
+
 epee_object!(
     ChainResponse,
     start_height: u64,
     total_height: u64,
-    cumulative_difficulty: u64,
+    cumulative_difficulty_low64("cumulative_difficulty"): u64,
     cumulative_difficulty_top64: u64 = 0_u64,
     m_block_ids: ByteArrayVec<32>,
     m_block_weights: Vec<u64> as ContainerAsBlob<u64>,
diff --git a/old_database/Cargo.toml b/old_database/Cargo.toml
deleted file mode 100644
index 37c49ae4..00000000
--- a/old_database/Cargo.toml
+++ /dev/null
@@ -1,33 +0,0 @@
-[package]
-name = "cuprate-database"
-version = "0.0.1"
-edition = "2021"
-license = "AGPL-3.0-only"
-
-# All Contributors on github
-authors=[
-        "SyntheticBird45 <@someoneelse495495:matrix.org>",
-	"Boog900"
-        ]
-
-[features]
-mdbx = ["dep:libmdbx"]
-hse = []
-
-[dependencies]
-monero = {workspace = true, features = ["serde"]}
-tiny-keccak = { version = "2.0", features = ["sha3"] }
-serde = { workspace = true}
-thiserror = {workspace = true }
-bincode = { workspace = true }
-libmdbx = { version = "0.3.1", optional = true }
-
-[build]
-linker="clang"
-rustflags=[
-	"-Clink-arg=-fuse-ld=mold",
-	"-Zcf-protection=full", 
-	"-Zsanitizer=cfi", 
-	"-Crelocation-model=pie", 
-	"-Cstack-protector=all",
-]
\ No newline at end of file
diff --git a/old_database/LICENSE b/old_database/LICENSE
deleted file mode 100644
index e19903e6..00000000
--- a/old_database/LICENSE
+++ /dev/null
@@ -1,14 +0,0 @@
-  Copyright (C) 2023 Cuprate Contributors
-
-  This program is free software: you can redistribute it and/or modify
-  it under the terms of the GNU Affero General Public License as published by
-  the Free Software Foundation, either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU Affero General Public License for more details.
-
-  You should have received a copy of the GNU Affero General Public License
-  along with this program.  If not, see <https://www.gnu.org/licenses/>.
\ No newline at end of file
diff --git a/old_database/src/encoding.rs b/old_database/src/encoding.rs
deleted file mode 100644
index aa4681d6..00000000
--- a/old_database/src/encoding.rs
+++ /dev/null
@@ -1,78 +0,0 @@
-//! ### Encoding module
-//! The encoding module contains a trait that permit compatibility between `monero-rs` consensus encoding/decoding logic and `bincode` traits.
-//! The database tables only accept types that implement [`bincode::Encode`] and [`bincode::Decode`] and since we can't implement these on `monero-rs` types directly
-//! we use a wrapper struct `Compat<T>` that permit us to use `monero-rs`'s `consensus_encode`/`consensus_decode` functions under bincode traits.
-//! The choice of using `bincode` comes from performance measurement at encoding. Sometimes `bincode` implementations was 5 times faster than `monero-rs` impl.
-
-use bincode::{de::read::Reader, enc::write::Writer};
-use monero::consensus::{Decodable, Encodable};
-use std::{fmt::Debug, io::Read, ops::Deref};
-
-#[derive(Debug, Clone)]
-/// A single-tuple struct, used to contains monero-rs types that implement [`monero::consensus::Encodable`] and [`monero::consensus::Decodable`]
-pub struct Compat<T: Encodable + Decodable>(pub T);
-
-/// A wrapper around a [`bincode::de::read::Reader`] type. Permit us to use [`std::io::Read`] and feed monero-rs functions with an actual `&[u8]`
-pub struct ReaderCompat<'src, R: Reader>(pub &'src mut R);
-
-// Actual implementation of `std::io::read` for `bincode`'s `Reader` types
-impl<'src, R: Reader> Read for ReaderCompat<'src, R> {
-    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
-        self.0
-            .read(buf)
-            .map_err(|_| std::io::Error::new(std::io::ErrorKind::Other, "bincode reader Error"))?;
-        Ok(buf.len())
-    }
-}
-
-// Convenient implementation. `Deref` and `From`
-impl<T: Encodable + Decodable> Deref for Compat<T> {
-    type Target = T;
-
-    fn deref(&self) -> &Self::Target {
-        &self.0
-    }
-}
-
-impl<T: Encodable + Decodable> From<T> for Compat<T> {
-    fn from(value: T) -> Self {
-        Compat(value)
-    }
-}
-
-// TODO: Investigate specialization optimization
-// Implementation of `bincode::Decode` for monero-rs `Decodable` type
-impl<T: Encodable + Decodable + Debug> bincode::Decode for Compat<T> {
-    fn decode<D: bincode::de::Decoder>(
-        decoder: &mut D,
-    ) -> Result<Self, bincode::error::DecodeError> {
-        Ok(Compat(
-            Decodable::consensus_decode(&mut ReaderCompat(decoder.reader()))
-                .map_err(|_| bincode::error::DecodeError::Other("Monero-rs decoding failed"))?,
-        ))
-    }
-}
-
-// Implementation of `bincode::BorrowDecode` for monero-rs `Decodable` type
-impl<'de, T: Encodable + Decodable + Debug> bincode::BorrowDecode<'de> for Compat<T> {
-    fn borrow_decode<D: bincode::de::BorrowDecoder<'de>>(
-        decoder: &mut D,
-    ) -> Result<Self, bincode::error::DecodeError> {
-        Ok(Compat(
-            Decodable::consensus_decode(&mut ReaderCompat(decoder.borrow_reader()))
-                .map_err(|_| bincode::error::DecodeError::Other("Monero-rs decoding failed"))?,
-        ))
-    }
-}
-
-// Implementation of `bincode::Encode` for monero-rs `Encodable` type
-impl<T: Encodable + Decodable + Debug> bincode::Encode for Compat<T> {
-    fn encode<E: bincode::enc::Encoder>(
-        &self,
-        encoder: &mut E,
-    ) -> Result<(), bincode::error::EncodeError> {
-        let writer = encoder.writer();
-        let buf = monero::consensus::serialize(&self.0);
-        writer.write(&buf)
-    }
-}
diff --git a/old_database/src/error.rs b/old_database/src/error.rs
deleted file mode 100644
index 25b1f8d8..00000000
--- a/old_database/src/error.rs
+++ /dev/null
@@ -1,53 +0,0 @@
-//! ### Error module
-//! This module contains all errors abstraction used by the database crate. By implementing [`From<E>`] to the specific errors of storage engine crates, it let us
-//! handle more easily any type of error that can happen. This module does **NOT** contain interpretation of these errors, as these are defined for Blockchain abstraction. This is another difference
-//! from monerod which interpret these errors directly in its database functions:
-//! ```cpp
-//! /**
-//! * @brief A base class for BlockchainDB exceptions
-//! */
-//! class DB_EXCEPTION : public std::exception
-//! ```
-//! see `blockchain_db/blockchain_db.h` in monerod `src/` folder for more details.
-
-#[derive(thiserror::Error, Debug)]
-/// `DB_FAILURES` is an enum for backend-agnostic, internal database errors. The `From` Trait must be implemented to the specific backend errors to match DB_FAILURES.
-pub enum DB_FAILURES {
-    #[error("MDBX returned an error {0}")]
-    MDBX_Error(#[from] libmdbx::Error),
-
-    #[error("\n<DB_FAILURES::EncodingError> Failed to encode some data : `{0}`")]
-    SerializeIssue(DB_SERIAL),
-
-    #[error("\nObject already exist in the database : {0}")]
-    AlreadyExist(&'static str),
-
-    #[error("NotFound? {0}")]
-    NotFound(&'static str),
-
-    #[error("\n<DB_FAILURES::Other> `{0}`")]
-    Other(&'static str),
-
-    #[error(
-        "\n<DB_FAILURES::FailedToCommit> A transaction tried to commit to the db, but failed."
-    )]
-    FailedToCommit,
-}
-
-#[derive(thiserror::Error, Debug)]
-pub enum DB_SERIAL {
-    #[error("An object failed to be serialized into bytes. It is likely an issue from monero-rs library. Please report this error on cuprate's github : https://github.com/Cuprate/cuprate/issues")]
-    ConsensusEncode,
-
-    #[error("Bytes failed to be deserialized into the requested object. It is likely an issue from monero-rs library. Please report this error on cuprate's github : https://github.com/Cuprate/cuprate/issues")]
-    ConsensusDecode(Vec<u8>),
-
-    #[error("monero-rs encoding|decoding logic failed : {0}")]
-    MoneroEncode(#[from] monero::consensus::encode::Error),
-
-    #[error("Bincode failed to decode a type from the database : {0}")]
-    BincodeDecode(#[from] bincode::error::DecodeError),
-
-    #[error("Bincode failed to encode a type for the database : {0}")]
-    BincodeEncode(#[from] bincode::error::EncodeError),
-}
diff --git a/old_database/src/hse.rs b/old_database/src/hse.rs
deleted file mode 100644
index 2d07b3f7..00000000
--- a/old_database/src/hse.rs
+++ /dev/null
@@ -1,11 +0,0 @@
-/* There is nothing here as no wrapper exist for HSE yet */
-
-/* KVS supported functions :
--------------------------------------
-hse_kvs_delete
-hse_kvs_get
-hse_kvs_name_get
-hse_kvs_param_get
-hse_kvs_prefix_delete
-hse_kvs_put
-*/
\ No newline at end of file
diff --git a/old_database/src/interface.rs b/old_database/src/interface.rs
deleted file mode 100644
index cde8c0ad..00000000
--- a/old_database/src/interface.rs
+++ /dev/null
@@ -1,1036 +0,0 @@
-//! ### Interface module
-//! This module contains all the implementations of the database interface.
-//! These are all the functions that can be executed through DatabaseRequest.
-//!
-//! The following functions have been separated through 6 categories:
-//! -| Blockchain   |-
-//! -| Blocks       |-
-//! -| Transactions |-
-//! -| Outputs      |-
-//! -| SpentKeys    |-
-//! -| Categories   |-
-
-// TODO: add_transaction() not finished due to ringct zeroCommit missing function
-// TODO: in add_transaction_data() Investigate unprunable_size == 0 condition of monerod
-// TODO: Do we need correct_block_cumulative_difficulties()
-// TODO: remove_tx_outputs() can be done otherwise since we don't use global output index
-// TODO: Check all documentations
-
-use crate::{
-    database::{Database, Interface},
-    error::DB_FAILURES,
-    table::{self},
-    transaction::{self, DupCursor, DupWriteCursor, Transaction, WriteCursor, WriteTransaction},
-    types::{
-        calculate_prunable_hash, get_transaction_prunable_blob, AltBlock, BlockMetadata,
-        OutputMetadata, TransactionPruned, TxIndex, TxOutputIdx,
-    },
-    BINCODE_CONFIG,
-};
-use monero::{
-    blockdata::transaction::KeyImage, cryptonote::hash::Hashable, util::ringct::Key, Block,
-    BlockHeader, Hash, TxIn, TxOut,
-};
-
-// Implementation of Interface
-impl<'service, D: Database<'service>> Interface<'service, D> {
-    // --------------------------------| Blockchain |--------------------------------
-
-    /// `height` fetch the current blockchain height.
-    ///
-    /// Return the current blockchain height. In case of failures, a DB_FAILURES will be return.
-    ///
-    /// No parameters is required.
-    fn height(&'service self) -> Result<u64, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        ro_tx.num_entries::<table::blockhash>().map(|n| n as u64)
-    }
-
-    // ----------------------------------| Blocks |---------------------------------
-
-    /// `add_block` add the block and metadata to the db.
-    ///
-    /// In case of failures, a `DB_FAILURES`
-    ///
-    /// Parameters:
-    /// `blk`: is the block to be added
-    /// `txs`: is the collection of transactions related to this block
-    /// `block_weight`: is the weight of the block (data's total)
-    /// `long_term_block_weight`: is the long term weight of the block (data's total)
-    /// `cumulative_difficulty`: is the accumulated difficulty at this block.
-    /// `coins_generated` is the number of coins generated after this block.
-    fn add_block(
-        &'service self,
-        blk: Block,
-        txs: Vec<monero::Transaction>,
-        block_weight: u64,
-        long_term_block_weight: u64,
-        cumulative_difficulty: u128,
-        coins_generated: u64,
-    ) -> Result<(), DB_FAILURES> {
-        // *sanity*
-        if blk.tx_hashes.len() != txs.len() {
-            return Err(DB_FAILURES::Other("sanity : Inconsistent tx/hashed sizes"));
-        }
-
-        let blk_hash = blk.id();
-
-        // let parent_height = self.height()?;
-
-        let mut num_rct_outs = 0u64;
-        self.add_transaction(blk.miner_tx.clone())?;
-
-        if blk.miner_tx.prefix.version.0 == 2 {
-            num_rct_outs += blk.miner_tx.prefix.outputs.len() as u64;
-        }
-
-        // let mut tx_hash = Hash::null();
-        for tx in txs.into_iter()
-        /*.zip(0usize..)*/
-        {
-            // tx_hash = blk.tx_hashes[tx.1];
-            for out in tx.prefix.outputs.iter() {
-                if out.amount.0 == 0 {
-                    num_rct_outs += 1;
-                }
-            }
-            self.add_transaction(tx /*.0*/)?;
-        }
-
-        let blk_metadata = BlockMetadata {
-            timestamp: blk.header.timestamp.0,
-            total_coins_generated: coins_generated,
-            weight: block_weight,
-            cumulative_difficulty,
-            block_hash: blk_hash.into(),
-            cum_rct: num_rct_outs, // num_rct_outs here is the rct outs of the block only. The cumulative rct will be added in `add_block_data` fn
-            long_term_block_weight,
-        };
-
-        self.add_block_data(blk, blk_metadata)
-    }
-
-    /// `add_block_data` add the actual block's data and metadata to the db. Underlying function of `add_block`
-    ///
-    /// In case of failures, a `DB_FAILURES` will be return.
-    ///
-    /// Parameters:
-    /// `blk`: is the block to add
-    /// `blk_metadata`: is the block's metadata to add
-    fn add_block_data(
-        &'service self,
-        blk: Block,
-        mut blk_metadata: BlockMetadata,
-    ) -> Result<(), DB_FAILURES> {
-        let height = self.height()?;
-
-        let mut cursor_blockhash = self.write_cursor_dup::<table::blockhash>()?;
-        let mut cursor_blockmetadata = self.write_cursor_dup::<table::blockmetadata>()?;
-
-        if cursor_blockhash
-            .get_dup(&(), &blk_metadata.block_hash)?
-            .is_some()
-        {
-            return Err(DB_FAILURES::AlreadyExist(
-                "Attempting to insert a block already existent in the database",
-            ))?;
-        }
-
-        if height > 0 {
-            let parent_height: u64 = cursor_blockhash
-                .get_dup(&(), &blk.header.prev_id.into())?
-                .ok_or(DB_FAILURES::NotFound("Can't find parent block"))?;
-
-            if parent_height != height - 1 {
-                return Err(DB_FAILURES::Other("Top block is not a new block's parent"));
-            }
-        }
-
-        if blk.header.major_version.0 > 3 {
-            let last_height = height - 1;
-
-            let parent_cum_rct = self.get_block_cumulative_rct_outputs(last_height)?;
-            blk_metadata.cum_rct += parent_cum_rct;
-        }
-        self.put::<table::blocks>(&height, &blk.into())?;
-        cursor_blockhash.put_cursor_dup(&(), &blk_metadata.block_hash, &height)?;
-        cursor_blockmetadata.put_cursor_dup(&(), &height, &blk_metadata)
-        // blockhfversion missing but do we really need this table?
-    }
-
-    /// `pop_block` pops the top block off the blockchain.
-    ///
-    /// Return the block that was popped. In case of failures, a `DB_FAILURES` will be return.
-    ///
-    /// No parameters is required.
-    fn pop_block(&'service self) -> Result<Block, DB_FAILURES> {
-        // First we delete block from table
-        let height = self.height()?;
-        if height == 0 {
-            return Err(DB_FAILURES::Other(
-                "Attempting to remove block from an empty blockchain",
-            ));
-        }
-
-        let mut cursor_blockhash = self.write_cursor_dup::<table::blockhash>()?;
-        let mut cursor_blockmetadata = self.write_cursor_dup::<table::blockmetadata>()?;
-
-        let blk = self
-            .get::<table::blocks>(&(height - 1))?
-            .ok_or(DB_FAILURES::NotFound(
-                "Attempting to remove block that's not in the db",
-            ))?
-            .0;
-
-        let hash = cursor_blockmetadata
-            .get_dup(&(), &(height - 1))?
-            .ok_or(DB_FAILURES::NotFound("Failed to retrieve block metadata"))?
-            .block_hash;
-
-        self.delete::<table::blocks>(&(height - 1), &None)?;
-        if cursor_blockhash.get_dup(&(), &hash)?.is_some() {
-            cursor_blockhash.del()?;
-        }
-
-        cursor_blockmetadata.del()?;
-
-        // Then we delete all its relevant txs
-        for tx_hash in blk.tx_hashes.iter() {
-            // 1 more condition in monerod TODO:
-            self.remove_transaction(*tx_hash)?;
-        }
-        self.remove_transaction(blk.miner_tx.hash())?;
-        Ok(blk)
-    }
-
-    /// `blocks_exists` check if the given block exists
-    ///
-    /// Return `true` if the block exist, `false` otherwise. In case of failures, a `DB_FAILURES` will be return.
-    ///
-    /// Parameters:
-    /// `hash`: is the given hash of the requested block.
-    fn block_exists(&'service self, hash: Hash) -> Result<bool, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        let mut cursor_blockhash = ro_tx.cursor_dup::<table::blockhash>()?;
-        Ok(cursor_blockhash.get_dup(&(), &hash.into())?.is_some())
-    }
-
-    /// `get_block_hash` fetch the block's hash located at the give height.
-    ///
-    /// Return the hash of the last block. In case of failures, a DB_FAILURES will be return.
-    ///
-    /// No parameters is required
-    fn get_block_hash(&'service self, height: u64) -> Result<Hash, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        let mut cursor_blockmetadata = ro_tx.cursor_dup::<table::blockmetadata>()?;
-        let metadata = cursor_blockmetadata
-            .get_dup(&(), &height)?
-            .ok_or(DB_FAILURES::NotFound("Failed to find block's metadata"))?;
-
-        Ok(metadata.block_hash.0)
-    }
-
-    /// `get_block_height` gets the height of the block with a given hash
-    ///
-    /// Return the requested height.
-    fn get_block_height(&'service self, hash: Hash) -> Result<u64, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        let mut cursor_blockhash = ro_tx.cursor_dup::<table::blockhash>()?;
-
-        cursor_blockhash
-            .get_dup(&(), &hash.into())?
-            .ok_or(DB_FAILURES::NotFound("Failed to find block height"))
-    }
-
-    /// `get_block_weights` fetch the block's weight located at the given height.
-    ///
-    /// Return the requested block weight. In case of failures, a `DB_FAILURES` will be return.
-    ///
-    /// Parameters:
-    /// `height`: is the given height where the requested block is located.
-    fn get_block_weight(&'service self, height: u64) -> Result<u64, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        let mut cursor_blockmetadata = ro_tx.cursor_dup::<table::blockmetadata>()?;
-
-        let metadata = cursor_blockmetadata
-            .get_dup(&(), &height)?
-            .ok_or(DB_FAILURES::NotFound("Failed to find block's metadata"))?;
-
-        Ok(metadata.weight)
-    }
-
-    /// `get_block_already_generated_coins` fetch a block's already generated coins
-    ///
-    /// Return the total coins generated as of the block with the given height. In case of failures, a `DB_FAILURES` will be return.
-    ///
-    /// Parameters:
-    /// `height`: is the given height of the block to seek.
-    fn get_block_already_generated_coins(&'service self, height: u64) -> Result<u64, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        let mut cursor_blockmetadata = ro_tx.cursor_dup::<table::blockmetadata>()?;
-
-        let metadata = cursor_blockmetadata
-            .get_dup(&(), &height)?
-            .ok_or(DB_FAILURES::NotFound("Failed to find block's metadata"))?;
-
-        Ok(metadata.total_coins_generated)
-    }
-
-    /// `get_block_long_term_weight` fetch a block's long term weight.
-    ///
-    /// Should return block's long term weight. In case of failures, a DB_FAILURES will be return.
-    ///
-    /// Parameters:
-    /// `height`: is the given height where the requested block is located.
-    fn get_block_long_term_weight(&'service self, height: u64) -> Result<u64, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        let mut cursor_blockmetadata = ro_tx.cursor_dup::<table::blockmetadata>()?;
-
-        let metadata = cursor_blockmetadata
-            .get_dup(&(), &height)?
-            .ok_or(DB_FAILURES::NotFound("Failed to find block's metadata"))?;
-
-        Ok(metadata.long_term_block_weight)
-    }
-
-    /// `get_block_timestamp` fetch a block's timestamp.
-    ///
-    /// Should return the timestamp of the block with given height. In case of failures, a DB_FAILURES will be return.
-    ///
-    /// Parameters:
-    /// `height`: is the given height where the requested block to fetch timestamp is located.
-    fn get_block_timestamp(&'service self, height: u64) -> Result<u64, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        let mut cursor_blockmetadata = ro_tx.cursor_dup::<table::blockmetadata>()?;
-
-        let metadata = cursor_blockmetadata
-            .get_dup(&(), &height)?
-            .ok_or(DB_FAILURES::NotFound("Failed to find block's metadata"))?;
-
-        Ok(metadata.timestamp)
-    }
-
-    /// `get_block_cumulative_rct_outputs` fetch a blocks' cumulative number of RingCT outputs
-    ///
-    /// Should return the number of RingCT outputs in the blockchain up to the blocks located at the given heights. In case of failures, a DB_FAILURES will be return.
-    ///
-    /// Parameters:
-    /// `height`: is the height to check for RingCT distribution.
-    fn get_block_cumulative_rct_outputs(&'service self, height: u64) -> Result<u64, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        let mut cursor_blockmetadata = ro_tx.cursor_dup::<table::blockmetadata>()?;
-
-        let metadata = cursor_blockmetadata
-            .get_dup(&(), &height)?
-            .ok_or(DB_FAILURES::NotFound("Failed to find block's metadata"))?;
-
-        Ok(metadata.cum_rct)
-    }
-
-    fn get_block(&'service self, hash: Hash) -> Result<Block, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        let mut cursor_blockhash = ro_tx.cursor_dup::<table::blockhash>()?;
-
-        let blk_height: u64 = cursor_blockhash
-            .get_dup(&(), &hash.into())?
-            .ok_or(DB_FAILURES::NotFound("Can't find block"))?;
-
-        Ok(ro_tx
-            .get::<table::blocks>(&blk_height)?
-            .ok_or(DB_FAILURES::NotFound("Can't find block"))?
-            .0)
-    }
-
-    fn get_block_from_height(&'service self, height: u64) -> Result<Block, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-
-        Ok(ro_tx
-            .get::<table::blocks>(&height)?
-            .ok_or(DB_FAILURES::NotFound("Can't find block"))?
-            .0)
-    }
-
-    /// `get_block_header` fetches the block's header with the given hash.
-    ///
-    /// Return the requested block header. In case of failures, a `DB_FAILURES` will be return. Precisely, a `BLOCK_DNE`
-    /// error will be returned if the requested block can't be found.
-    ///
-    /// Parameters:
-    /// `hash`: is the given hash of the requested block.
-    fn get_block_header(&'service self, hash: Hash) -> Result<BlockHeader, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        let mut cursor_blockhash = ro_tx.cursor_dup::<table::blockhash>()?;
-
-        let blk_height: u64 = cursor_blockhash
-            .get_dup(&(), &hash.into())?
-            .ok_or(DB_FAILURES::NotFound("Can't find block"))?;
-
-        Ok(ro_tx
-            .get::<table::blocks>(&blk_height)?
-            .ok_or(DB_FAILURES::NotFound("Can't find block"))?
-            .0
-            .header)
-    }
-
-    fn get_block_header_from_height(
-        &'service self,
-        height: u64,
-    ) -> Result<BlockHeader, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-
-        Ok(ro_tx
-            .get::<table::blocks>(&(height - 1))?
-            .ok_or(DB_FAILURES::NotFound("Can't find block"))?
-            .0
-            .header)
-    }
-
-    /// `get_top_block` fetch the last/top block of the blockchain
-    ///
-    /// Return the last/top block of the blockchain. In case of failures, a DB_FAILURES, will be return.
-    ///
-    /// No parameters is required.
-    fn get_top_block(&'service self) -> Result<Block, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-
-        let blk_height = self.height()?;
-
-        Ok(ro_tx
-            .get::<table::blocks>(&blk_height)?
-            .ok_or(DB_FAILURES::NotFound("Can't find block"))?
-            .0)
-    }
-
-    /// `get_top_block_hash` fetch the block's hash located at the top of the blockchain (the last one).
-    ///
-    /// Return the hash of the last block. In case of failures, a DB_FAILURES will be return.
-    ///
-    /// No parameters is required
-    fn get_top_block_hash(&'service self) -> Result<Hash, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        let height = self.height()?;
-        let mut cursor_blockmetadata = ro_tx.cursor_dup::<table::blockmetadata>()?;
-
-        let metadata = cursor_blockmetadata
-            .get_dup(&(), &(height - 1))?
-            .ok_or(DB_FAILURES::NotFound("Failed to find block's metadata"))?;
-
-        Ok(metadata.block_hash.0)
-    }
-
-    // ------------------------------|  Transactions  |-----------------------------
-
-    /// `add_transaction` add the corresponding transaction and its hash to the specified block.
-    ///
-    /// In case of failures, a DB_FAILURES will be return. Precisely, a TX_EXISTS will be returned if the
-    /// transaction to be added already exists in the database.
-    ///
-    /// Parameters:
-    /// `blk_hash`: is the hash of the block which inherit the transaction
-    /// `tx`: is obviously the transaction to add
-    /// `tx_hash`: is the hash of the transaction.
-    /// `tx_prunable_hash_ptr`: is the hash of the prunable part of the transaction.
-    fn add_transaction(&'service self, tx: monero::Transaction) -> Result<(), DB_FAILURES> {
-        let is_coinbase: bool = tx.prefix.inputs.is_empty();
-        let tx_hash = tx.hash();
-
-        let mut tx_prunable_blob = Vec::new();
-        get_transaction_prunable_blob(&tx, &mut tx_prunable_blob).unwrap();
-
-        let tx_prunable_hash: Option<Hash> = calculate_prunable_hash(&tx, &tx_prunable_blob);
-
-        for txin in tx.prefix.inputs.iter() {
-            if let TxIn::ToKey {
-                amount: _,
-                key_offsets: _,
-                k_image,
-            } = txin
-            {
-                self.add_spent_key(k_image.clone())?;
-            } else {
-                return Err(DB_FAILURES::Other(
-                    "Unsupported input type, aborting transaction addition",
-                ));
-            }
-        }
-
-        let tx_id =
-            self.add_transaction_data(tx.clone(), tx_prunable_blob, tx_hash, tx_prunable_hash)?;
-
-        let tx_num_outputs = tx.prefix.outputs.len();
-        let amount_output_dinces: Vec<u64> = Vec::with_capacity(tx_num_outputs);
-
-        for txout in tx.prefix.outputs.iter().zip(0..tx_num_outputs) {
-            if is_coinbase && tx.prefix.version.0 == 2 {
-                let commitment: Option<Key> = None;
-                // ZeroCommit is from RingCT Module, not finishable yet
-            }
-        }
-        todo!()
-    }
-
-    /// `add_transaction_data` add the specified transaction data to its storage.
-    ///
-    /// It only add the transaction blob and tx's metadata, not the collection of outputs.
-    ///
-    /// Return the hash of the transaction added. In case of failures, a DB_FAILURES will be return.
-    ///
-    /// Parameters:
-    /// `tx`: is the transaction to add
-    /// `tx_prunable_blob`; is its prunable blob.
-    /// `tx_hash`: is the transaction's hash
-    /// `tx_prunable_hash`: is the hash of the prunable part of the transaction
-    fn add_transaction_data(
-        &'service self,
-        tx: monero::Transaction,
-        tx_prunable_blob: Vec<u8>,
-        tx_hash: Hash,
-        tx_prunable_hash: Option<Hash>,
-    ) -> Result<u64, DB_FAILURES> {
-        // Checking if the transaction already exist in the database
-        let res = self.get::<table::txsidentifier>(&tx_hash.into())?;
-        if res.is_some() {
-            return Err(DB_FAILURES::AlreadyExist(
-                "Attempting to add transaction that's already in the db",
-            ));
-        }
-
-        // Inserting tx index in table::txsindetifier
-        let height = self.height()?;
-        let tx_id = self.get_num_tx()?;
-
-        let txindex = TxIndex {
-            tx_id,
-            unlock_time: tx.prefix.unlock_time.0,
-            height,
-        };
-
-        self.put::<table::txsidentifier>(&tx_hash.into(), &txindex)?;
-
-        // TODO: Investigate unprunable_size == 0 condition
-        // Inserting tx pruned part in table::txspruned
-        let tx_pruned = TransactionPruned {
-            prefix: tx.prefix.clone(),
-            rct_signatures: tx.rct_signatures,
-        };
-        self.put::<table::txspruned>(&tx_id, &tx_pruned)?;
-
-        // Inserting tx prunable part in table::txs
-        self.put::<table::txsprunable>(&tx_id, &tx_prunable_blob)?;
-
-        // Checking to see if the database is pruned and inserting into table::txsprunabletip accordingly
-        if self.get_blockchain_pruning_seed()? > 0 {
-            self.put::<table::txsprunabletip>(&tx_id, &height)?;
-        }
-
-        // V2 Tx store hash of their prunable part
-        if let Some(tx_prunable_hash) = tx_prunable_hash {
-            self.put::<table::txsprunablehash>(&tx_id, &tx_prunable_hash.into())?;
-        }
-        Ok(tx_id)
-    }
-
-    fn remove_transaction(&'service self, tx_hash: Hash) -> Result<(), DB_FAILURES> {
-        let txpruned = self.get_pruned_tx(tx_hash)?;
-
-        for input in txpruned.prefix.inputs.iter() {
-            if let TxIn::ToKey {
-                amount: _,
-                key_offsets: _,
-                k_image,
-            } = input
-            {
-                self.remove_spent_key(k_image.clone())?;
-            }
-        }
-
-        self.remove_transaction_data(txpruned.prefix, tx_hash)
-    }
-
-    fn remove_transaction_data(
-        &'service self,
-        txprefix: monero::TransactionPrefix,
-        tx_hash: Hash,
-    ) -> Result<(), DB_FAILURES> {
-        // Checking if the transaction exist and fetching its index
-        let txindex =
-            self.get::<table::txsidentifier>(&tx_hash.into())?
-                .ok_or(DB_FAILURES::NotFound(
-                    "Attempting to remove transaction that isn't in the db",
-                ))?;
-
-        self.delete::<table::txspruned>(&txindex.tx_id, &None)?;
-        self.delete::<table::txsprunable>(&txindex.tx_id, &None)?;
-        // If Its in Tip blocks range we must delete it
-        if self.get::<table::txsprunabletip>(&txindex.tx_id)?.is_some() {
-            self.delete::<table::txsprunabletip>(&txindex.tx_id, &None)?;
-        }
-        // If v2 Tx we must delete the prunable hash
-        if txprefix.version.0 > 1 {
-            self.delete::<table::txsprunablehash>(&txindex.tx_id, &None)?;
-        }
-
-        self.remove_tx_outputs(txprefix, txindex.tx_id)?;
-
-        self.delete::<table::txsoutputs>(&txindex.tx_id, &None)?;
-        self.delete::<table::txsidentifier>(&tx_hash.into(), &None)
-    }
-
-    fn remove_tx_outputs(
-        &'service self,
-        txprefix: monero::TransactionPrefix,
-        tx_id: u64,
-    ) -> Result<(), DB_FAILURES> {
-        let amount_output_indices: TxOutputIdx = self
-            .get::<table::txsoutputs>(&tx_id)?
-            .ok_or(DB_FAILURES::NotFound("Failed to find tx's outputs indices"))?;
-
-        if amount_output_indices.0.is_empty() {
-            return Err(DB_FAILURES::Other(
-                "Attempting to remove outputs of a an empty tx",
-            ));
-        }
-
-        // Checking if the input is a coinbase input
-        #[allow(clippy::match_like_matches_macro)]
-        let is_coinbase_input: bool = match &txprefix.inputs[0] {
-            TxIn::Gen { height: _ } if txprefix.version.0 > 1 && txprefix.inputs.len() == 1 => true,
-            _ => false,
-        };
-        for o in 0..txprefix.outputs.len() {
-            let amount = match is_coinbase_input {
-                true => 0,
-                false => txprefix.outputs[o].amount.0,
-            };
-            self.remove_output(Some(amount), amount_output_indices.0[o])?;
-        }
-        Ok(())
-    }
-
-    /// `get_num_tx` fetches the total number of transactions stored in the database
-    ///
-    /// Should return the count. In case of failure, a DB_FAILURES will be return.
-    ///
-    /// No parameters is required.
-    fn get_num_tx(&'service self) -> Result<u64, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        ro_tx.num_entries::<table::txspruned>().map(|n| n as u64)
-    }
-
-    /// `tx_exists` check if a transaction exist with the given hash.
-    ///
-    /// Return `true` if the transaction exist, `false` otherwise. In case of failure, a DB_FAILURES will be return.
-    ///
-    /// Parameters :
-    /// `hash` is the given hash of transaction to check.
-    fn tx_exists(&'service self, hash: Hash) -> Result<bool, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        Ok(ro_tx.get::<table::txsidentifier>(&hash.into())?.is_some())
-    }
-
-    /// `get_tx_unlock_time` fetch a transaction's unlock time/height
-    ///
-    /// Should return the unlock time/height in u64. In case of failure, a DB_FAILURES will be return.
-    ///
-    /// Parameters:
-    /// `hash`: is the given hash of the transaction to check.
-    fn get_tx_unlock_time(&'service self, hash: Hash) -> Result<u64, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-
-        // Getting the tx index
-        let txindex =
-            ro_tx
-                .get::<table::txsidentifier>(&hash.into())?
-                .ok_or(DB_FAILURES::NotFound(
-                    "wasn't able to find a transaction in the database",
-                ))?;
-
-        Ok(txindex.unlock_time)
-    }
-
-    /// `get_tx` fetches the transaction with the given hash.
-    ///
-    /// Should return the transaction. In case of failure, a DB_FAILURES will be return.
-    ///
-    /// Parameters:
-    /// `hash`: is the given hash of transaction to fetch.
-    fn get_tx(&'service self, hash: Hash) -> Result<monero::Transaction, DB_FAILURES> {
-        // Getting the pruned tx
-        let pruned_tx = self.get_pruned_tx(hash)?;
-
-        // Getting the tx index
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        let txindex =
-            ro_tx
-                .get::<table::txsidentifier>(&hash.into())?
-                .ok_or(DB_FAILURES::NotFound(
-                    "failed to find index of a transaction",
-                ))?;
-
-        // Getting its prunable part
-        let prunable_part =
-            ro_tx
-                .get::<table::txsprunable>(&txindex.tx_id)?
-                .ok_or(DB_FAILURES::NotFound(
-                    "failed to find prunable part of a transaction",
-                ))?;
-
-        // Making it a Transaction
-        pruned_tx
-            .into_transaction(&prunable_part)
-            .map_err(|err| DB_FAILURES::SerializeIssue(err.into()))
-    }
-
-    /// `get_tx_list` fetches the transactions with given hashes.
-    ///
-    /// Should return a vector with the requested transactions. In case of failures, a DB_FAILURES will be return.
-    /// Precisely, a HASH_DNE error will be returned with the corresponding hash of transaction that is not found in the DB.
-    ///
-    /// `hlist`: is the given collection of hashes corresponding to the transactions to fetch.
-    fn get_tx_list(
-        &'service self,
-        hash_list: Vec<Hash>,
-    ) -> Result<Vec<monero::Transaction>, DB_FAILURES> {
-        let mut result: Vec<monero::Transaction> = Vec::with_capacity(hash_list.len());
-
-        for hash in hash_list {
-            result.push(self.get_tx(hash)?);
-        }
-        Ok(result)
-    }
-
-    /// `get_pruned_tx` fetches the transaction base with the given hash.
-    ///
-    /// Should return the transaction. In case of failure, a DB_FAILURES will be return.
-    ///
-    /// Parameters:
-    /// `hash`: is the given hash of transaction to fetch.
-    fn get_pruned_tx(&'service self, hash: Hash) -> Result<TransactionPruned, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-
-        let txindex =
-            ro_tx
-                .get::<table::txsidentifier>(&hash.into())?
-                .ok_or(DB_FAILURES::NotFound(
-                    "wasn't able to find a transaction in the database",
-                ))?;
-
-        ro_tx
-            .get::<table::txspruned>(&txindex.tx_id)?
-            .ok_or(DB_FAILURES::NotFound(
-                "failed to find prefix of a transaction",
-            ))
-    }
-
-    /// `get_tx_block_height` fetches the height of a transaction's block
-    ///
-    /// Should return the height of the block containing the transaction with the given hash. In case
-    /// of failures, a DB FAILURES will be return. Precisely, a TX_DNE error will be return if the transaction cannot be found.
-    ///
-    /// Parameters:
-    /// `hash`: is the fiven hash of the first transaction
-    fn get_tx_block_height(&'service self, hash: Hash) -> Result<u64, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        let txindex = ro_tx
-            .get::<table::txsidentifier>(&hash.into())?
-            .ok_or(DB_FAILURES::NotFound("txindex not found"))?;
-        Ok(txindex.height)
-    }
-
-    // --------------------------------|  Outputs  |--------------------------------
-
-    /// `add_output` add an output data to it's storage .
-    ///
-    /// It internally keep track of the global output count. The global output count is also used to index outputs based on
-    /// their order of creations.
-    ///
-    /// Should return the amount output index. In case of failures, a DB_FAILURES will be return.
-    ///
-    /// Parameters:
-    /// `tx_hash`: is the hash of the transaction where the output comes from.
-    /// `output`: is the output's publickey to store.
-    /// `index`: is the local output's index (from transaction).
-    /// `unlock_time`: is the unlock time (height) of the output.
-    /// `commitment`: is the RingCT commitment of this output.
-    fn add_output(
-        &'service self,
-        tx_hash: Hash,
-        output: TxOut,
-        local_index: u64,
-        unlock_time: u64,
-        commitment: Option<Key>,
-    ) -> Result<u64, DB_FAILURES> {
-        let height = self.height()?;
-
-        let mut cursor_outputmetadata = self.write_cursor_dup::<table::outputmetadata>()?;
-
-        let pubkey = output.target.as_one_time_key().map(Into::into);
-        let mut out_metadata = OutputMetadata {
-            tx_hash: tx_hash.into(),
-            local_index,
-            pubkey,
-            unlock_time,
-            height,
-            commitment: None,
-        };
-
-        // RingCT Outputs
-        if let Some(commitment) = commitment {
-            out_metadata.commitment = Some(commitment.into());
-
-            let amount_index = self.get_rct_num_outputs()? + 1;
-            cursor_outputmetadata.put_cursor_dup(&(), &amount_index, &out_metadata)?;
-            Ok(amount_index)
-        }
-        // Pre-RingCT Outputs
-        else {
-            let amount_index = self.get_pre_rct_num_outputs(output.amount.0)? + 1;
-            let mut cursor = self.write_cursor_dup::<table::prerctoutputmetadata>()?;
-            cursor.put_cursor_dup(&output.amount.0, &amount_index, &out_metadata)?;
-            Ok(amount_index)
-        }
-    }
-
-    fn remove_output(&'service self, amount: Option<u64>, index: u64) -> Result<(), DB_FAILURES> {
-        let mut cursor_outputmetadata = self.write_cursor_dup::<table::outputmetadata>()?;
-
-        if let Some(amount) = amount {
-            if amount == 0 {
-                cursor_outputmetadata
-                    .get_dup(&(), &index)?
-                    .ok_or(DB_FAILURES::NotFound(
-                        "Failed to find PostRCT output metadata",
-                    ))?;
-                cursor_outputmetadata.del()
-            } else {
-                let mut cursor = self.write_cursor_dup::<table::prerctoutputmetadata>()?;
-                let _ = cursor
-                    .get_dup(&amount, &index)?
-                    .ok_or(DB_FAILURES::NotFound(
-                        "Failed to find PreRCT output metadata",
-                    ))?;
-                cursor.del()
-            }
-        } else {
-            cursor_outputmetadata
-                .get_dup(&(), &index)?
-                .ok_or(DB_FAILURES::NotFound(
-                    "Failed to find PostRCT output metadata",
-                ))?;
-            cursor_outputmetadata.del()
-        }
-    }
-
-    /// `get_output` get an output's data
-    ///
-    /// Return the public key, unlock time, and block height for the output with the given amount and index, collected in a struct
-    /// In case of failures, a `DB_FAILURES` will be return. Precisely, if the output cannot be found, an `OUTPUT_DNE` error will be return.
-    /// If any of the required part for the final struct isn't found, a `DB_ERROR` will be return
-    ///
-    /// Parameters:
-    /// `amount`: is the corresponding amount of the output
-    /// `index`: is the output's index (indexed by amount)
-    /// `include_commitment` : `true` by default.
-    fn get_output(
-        &'service self,
-        amount: Option<u64>,
-        index: u64,
-    ) -> Result<OutputMetadata, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        let mut cursor_outputmetadata = ro_tx.cursor_dup::<table::outputmetadata>()?;
-
-        if let Some(amount) = amount {
-            if amount > 0 {
-                let mut cursor = ro_tx.cursor_dup::<table::prerctoutputmetadata>()?;
-                return cursor
-                    .get_dup(&amount, &index)?
-                    .ok_or(DB_FAILURES::NotFound(
-                        "Failed to find PreRCT output metadata",
-                    ));
-            }
-        }
-        cursor_outputmetadata
-            .get_dup(&(), &index)?
-            .ok_or(DB_FAILURES::NotFound(
-                "Failed to find PostRCT output metadata",
-            ))
-    }
-
-    /// `get_output_list` gets a collection of output's data from a corresponding index collection.
-    ///
-    /// Return a collection of output's data. In case of failurse, a `DB_FAILURES` will be return.
-    ///
-    /// Parameters:
-    /// `amounts`: is the collection of amounts corresponding to the requested outputs.
-    /// `offsets`: is a collection of outputs' index (indexed by amount).
-    /// `allow partial`: `false` by default.
-    fn get_output_list(
-        &'service self,
-        amounts: Option<Vec<u64>>,
-        offsets: Vec<u64>,
-    ) -> Result<Vec<OutputMetadata>, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        let mut cursor_outputmetadata = ro_tx.cursor_dup::<table::outputmetadata>()?;
-        let mut result: Vec<OutputMetadata> = Vec::new();
-
-        // Pre-RingCT output to be found.
-        if let Some(amounts) = amounts {
-            let mut cursor = ro_tx.cursor_dup::<table::prerctoutputmetadata>()?;
-
-            for ofs in amounts.into_iter().zip(offsets) {
-                if ofs.0 == 0 {
-                    let output = cursor_outputmetadata.get_dup(&(), &ofs.1)?.ok_or(
-                        DB_FAILURES::NotFound("An output hasn't been found in the database"),
-                    )?;
-                    result.push(output);
-                } else {
-                    let output = cursor
-                        .get_dup(&ofs.0, &ofs.1)?
-                        .ok_or(DB_FAILURES::NotFound(
-                            "An output hasn't been found in the database",
-                        ))?;
-                    result.push(output);
-                }
-            }
-        // No Pre-RingCT outputs to be found.
-        } else {
-            for ofs in offsets {
-                let output =
-                    cursor_outputmetadata
-                        .get_dup(&(), &ofs)?
-                        .ok_or(DB_FAILURES::NotFound(
-                            "An output hasn't been found in the database",
-                        ))?;
-                result.push(output);
-            }
-        }
-
-        Ok(result)
-    }
-
-    /// `get_rct_num_outputs` fetches the number post-RingCT output.
-    ///
-    /// Return the number of post-RingCT outputs. In case of failures a `DB_FAILURES` will be return.
-    ///
-    /// No parameters is required
-    fn get_rct_num_outputs(&'service self) -> Result<u64, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-
-        ro_tx
-            .num_entries::<table::outputmetadata>()
-            .map(|n| n as u64)
-    }
-
-    /// `get_pre_rct_num_outputs` fetches the number of preRCT outputs of a given amount.
-    ///
-    /// Return a count of outputs of the given amount. in case of failures a `DB_FAILURES` will be return.
-    ///
-    /// Parameters:
-    /// `amount`: is the output amount being looked up.
-    fn get_pre_rct_num_outputs(&'service self, amount: u64) -> Result<u64, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        let mut cursor = ro_tx.cursor_dup::<table::prerctoutputmetadata>()?;
-
-        transaction::Cursor::set(&mut cursor, &amount)?;
-        let out_metadata: Option<(u64, OutputMetadata)> =
-            transaction::DupCursor::last_dup(&mut cursor)?;
-        if let Some(out_metadata) = out_metadata {
-            return Ok(out_metadata.0);
-        }
-        Err(DB_FAILURES::Other("failed to decode the subkey and value"))
-    }
-
-    // ------------------------------| Spent Keys |------------------------------
-
-    /// `add_spent_key` add the supplied key image to the spent key image record
-    fn add_spent_key(&'service self, key_image: KeyImage) -> Result<(), DB_FAILURES> {
-        let mut cursor_spentkeys = self.write_cursor_dup::<table::spentkeys>()?;
-        cursor_spentkeys.put_cursor_dup(&(), &key_image.into(), &())
-    }
-
-    /// `remove_spent_key` remove the specified key image from the spent key image record
-    fn remove_spent_key(&'service self, key_image: KeyImage) -> Result<(), DB_FAILURES> {
-        let mut cursor_spentkeys = self.write_cursor_dup::<table::spentkeys>()?;
-        cursor_spentkeys.get_dup(&(), &key_image.into())?;
-        cursor_spentkeys.del()
-    }
-
-    /// `is_spent_key_recorded` check if the specified key image has been spent
-    fn is_spent_key_recorded(&'service self, key_image: KeyImage) -> Result<bool, DB_FAILURES> {
-        let mut cursor_spentkeys = self.write_cursor_dup::<table::spentkeys>()?;
-        Ok(cursor_spentkeys.get_dup(&(), &key_image.into())?.is_some())
-    }
-
-    // --------------------------------------------|  Alt-Block  |------------------------------------------------------------
-
-    /// `add_alt_block` add a new alternative block.
-    ///
-    /// In case of failures, a DB_FAILURES will be return.
-    ///
-    /// Parameters:
-    /// blkid: is the hash of the original block
-    /// data: is the metadata for the block
-    /// blob: is the blobdata of this alternative block.
-    fn add_alt_block(
-        &'service self,
-        altblock_hash: Hash,
-        data: AltBlock,
-    ) -> Result<(), DB_FAILURES> {
-        self.put::<table::altblock>(&altblock_hash.into(), &data)
-    }
-
-    /// `get_alt_block` gets the specified alternative block.
-    ///
-    /// Return a tuple containing the blobdata of the alternative block and its metadata. In case of failures, a DB_FAILURES will be return.
-    ///
-    /// Parameters:
-    /// `blkid`: is the hash of the requested alternative block.
-    fn get_alt_block(&'service self, altblock_hash: Hash) -> Result<AltBlock, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        ro_tx
-            .get::<table::altblock>(&altblock_hash.into())?
-            .ok_or(DB_FAILURES::NotFound(
-                "Failed to find an AltBLock in the db",
-            ))
-    }
-
-    /// `remove_alt_block` remove the specified alternative block
-    ///
-    /// In case of failures, a DB_FAILURES will be return.
-    ///
-    /// Parameters:
-    /// `blkid`: is the hash of the alternative block to remove.
-    fn remove_alt_block(&mut self, altblock_hash: Hash) -> Result<(), DB_FAILURES> {
-        self.delete::<table::altblock>(&altblock_hash.into(), &None)
-    }
-
-    /// `get_alt_block` gets the total number of alternative blocks stored
-    ///
-    /// In case of failures, a DB_FAILURES will be return.
-    ///
-    /// No parameters is required.
-    fn get_alt_block_count(&'service self) -> Result<u64, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-        ro_tx.num_entries::<table::altblock>().map(|n| n as u64)
-    }
-
-    /// `drop_alt_block` drop all alternative blocks.
-    ///
-    /// In case of failures, a DB_FAILURES will be return.
-    ///
-    /// No parameters is required.
-    fn drop_alt_blocks(&mut self) -> Result<(), DB_FAILURES> {
-        self.clear::<table::altblock>()
-    }
-
-    // --------------------------------| Properties |--------------------------------
-
-    // No pruning yet
-    fn get_blockchain_pruning_seed(&'service self) -> Result<u32, DB_FAILURES> {
-        let ro_tx = self.db.tx().map_err(Into::into)?;
-
-        ro_tx
-            .get::<table::properties>(&0)?
-            .ok_or(DB_FAILURES::NotFound("Can't find prunning seed"))
-    }
-}
diff --git a/old_database/src/lib.rs b/old_database/src/lib.rs
deleted file mode 100644
index d410b67f..00000000
--- a/old_database/src/lib.rs
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright (C) 2023 Cuprate Contributors
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-//! The cuprate-db crate implement (as its name suggests) the relations between the blockchain/txpool objects and their databases.
-//! `lib.rs` contains all the generics, trait and specification for interfaces between blockchain and a backend-agnostic database
-//! Every other files in this folder are implementation of these traits/methods to real storage engine.
-//!
-//! At the moment, the only storage engine available is MDBX.
-//! The next storage engine planned is HSE (Heteregeonous Storage Engine) from Micron.
-//!
-//! For more information, please consult this docs:
-
-#![deny(unused_attributes)]
-#![forbid(unsafe_code)]
-#![allow(non_camel_case_types)]
-#![deny(clippy::expect_used, clippy::panic)]
-#![allow(dead_code, unused_macros)] // temporary
-
-use monero::{util::ringct::RctSig, Block, BlockHeader, Hash};
-use std::ops::Range;
-use thiserror::Error;
-
-#[cfg(feature = "mdbx")]
-pub mod mdbx;
-//#[cfg(feature = "hse")]
-//pub mod hse;
-
-pub mod encoding;
-pub mod error;
-pub mod interface;
-pub mod table;
-pub mod types;
-
-const DEFAULT_BLOCKCHAIN_DATABASE_DIRECTORY: &str = "blockchain";
-const DEFAULT_TXPOOL_DATABASE_DIRECTORY: &str = "txpool_mem";
-const BINCODE_CONFIG: bincode::config::Configuration<
-    bincode::config::LittleEndian,
-    bincode::config::Fixint,
-> = bincode::config::standard().with_fixed_int_encoding();
-
-// ------------------------------------------|      Database      |------------------------------------------
-
-pub mod database {
-    //! This module contains the Database abstraction trait. Any key/value storage engine implemented need
-    //! to fulfil these associated types and functions, in order to be usable. This module also contains the
-    //! Interface struct which is used by the DB Reactor to interact with the database.
-
-    use crate::{
-        error::DB_FAILURES,
-        transaction::{Transaction, WriteTransaction},
-    };
-    use std::{ops::Deref, path::PathBuf, sync::Arc};
-
-    /// `Database` Trait implement all the methods necessary to generate transactions as well as execute specific functions. It also implement generic associated types to identify the
-    /// different transaction modes (read & write) and it's native errors.
-    pub trait Database<'a> {
-        type TX: Transaction<'a>;
-        type TXMut: WriteTransaction<'a>;
-        type Error: Into<DB_FAILURES>;
-
-        // Create a transaction from the database
-        fn tx(&'a self) -> Result<Self::TX, Self::Error>;
-
-        // Create a mutable transaction from the database
-        fn tx_mut(&'a self) -> Result<Self::TXMut, Self::Error>;
-
-        // Open a database from the specified path
-        fn open(path: PathBuf) -> Result<Self, Self::Error>
-        where
-            Self: std::marker::Sized;
-
-        // Check if the database is built.
-        fn check_all_tables_exist(&'a self) -> Result<(), Self::Error>;
-
-        // Build the database
-        fn build(&'a self) -> Result<(), Self::Error>;
-    }
-
-    /// `Interface` is a struct containing a shared pointer to the database and transaction's to be used for the implemented method of Interface.
-    pub struct Interface<'a, D: Database<'a>> {
-        pub db: Arc<D>,
-        pub tx: Option<<D as Database<'a>>::TXMut>,
-    }
-
-    // Convenient implementations for database
-    impl<'service, D: Database<'service>> Interface<'service, D> {
-        fn from(db: Arc<D>) -> Result<Self, DB_FAILURES> {
-            Ok(Self { db, tx: None })
-        }
-
-        fn open(&'service mut self) -> Result<(), DB_FAILURES> {
-            let tx = self.db.tx_mut().map_err(Into::into)?;
-            self.tx = Some(tx);
-            Ok(())
-        }
-    }
-
-    impl<'service, D: Database<'service>> Deref for Interface<'service, D> {
-        type Target = <D as Database<'service>>::TXMut;
-
-        fn deref(&self) -> &Self::Target {
-            return self.tx.as_ref().unwrap();
-        }
-    }
-}
-
-// ------------------------------------------|      DatabaseTx     |------------------------------------------
-
-pub mod transaction {
-    //! This module contains the abstractions of Transactional Key/Value database functions.
-    //! Any key/value database/storage engine can be implemented easily for Cuprate as long as
-    //! these functions or equivalent logic exist for it.
-
-    use crate::{
-        error::DB_FAILURES,
-        table::{DupTable, Table},
-    };
-
-    // Abstraction of a read-only cursor, for simple tables
-    #[allow(clippy::type_complexity)]
-    pub trait Cursor<'t, T: Table> {
-        fn first(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES>;
-
-        fn get_cursor(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES>;
-
-        fn last(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES>;
-
-        fn next(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES>;
-
-        fn prev(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES>;
-
-        fn set(&mut self, key: &T::Key) -> Result<Option<T::Value>, DB_FAILURES>;
-    }
-
-    // Abstraction of a read-only cursor with support for duplicated tables. DupCursor inherit Cursor methods as
-    // a duplicated table can be treated as a simple table.
-    #[allow(clippy::type_complexity)]
-    pub trait DupCursor<'t, T: DupTable>: Cursor<'t, T> {
-        fn first_dup(&mut self) -> Result<Option<(T::SubKey, T::Value)>, DB_FAILURES>;
-
-        fn get_dup(
-            &mut self,
-            key: &T::Key,
-            subkey: &T::SubKey,
-        ) -> Result<Option<T::Value>, DB_FAILURES>;
-
-        fn last_dup(&mut self) -> Result<Option<(T::SubKey, T::Value)>, DB_FAILURES>;
-
-        fn next_dup(&mut self) -> Result<Option<(T::Key, (T::SubKey, T::Value))>, DB_FAILURES>;
-
-        fn prev_dup(&mut self) -> Result<Option<(T::Key, (T::SubKey, T::Value))>, DB_FAILURES>;
-    }
-
-    // Abstraction of a read-write cursor, for simple tables. WriteCursor inherit Cursor methods.
-    pub trait WriteCursor<'t, T: Table>: Cursor<'t, T> {
-        fn put_cursor(&mut self, key: &T::Key, value: &T::Value) -> Result<(), DB_FAILURES>;
-
-        fn del(&mut self) -> Result<(), DB_FAILURES>;
-    }
-
-    // Abstraction of a read-write cursor with support for duplicated tables. DupWriteCursor inherit DupCursor and WriteCursor methods.
-    pub trait DupWriteCursor<'t, T: DupTable>: WriteCursor<'t, T> {
-        fn put_cursor_dup(
-            &mut self,
-            key: &T::Key,
-            subkey: &T::SubKey,
-            value: &T::Value,
-        ) -> Result<(), DB_FAILURES>;
-
-        /// Delete all data under associated to its key
-        fn del_nodup(&mut self) -> Result<(), DB_FAILURES>;
-    }
-
-    // Abstraction of a read-only transaction.
-    pub trait Transaction<'a>: Send + Sync {
-        type Cursor<T: Table>: Cursor<'a, T>;
-        type DupCursor<T: DupTable>: DupCursor<'a, T> + Cursor<'a, T>;
-
-        fn get<T: Table>(&self, key: &T::Key) -> Result<Option<T::Value>, DB_FAILURES>;
-
-        fn commit(self) -> Result<(), DB_FAILURES>;
-
-        fn cursor<T: Table>(&self) -> Result<Self::Cursor<T>, DB_FAILURES>;
-
-        fn cursor_dup<T: DupTable>(&self) -> Result<Self::DupCursor<T>, DB_FAILURES>;
-
-        fn num_entries<T: Table>(&self) -> Result<usize, DB_FAILURES>;
-    }
-
-    // Abstraction of a read-write transaction. WriteTransaction inherits Transaction methods.
-    pub trait WriteTransaction<'a>: Transaction<'a> {
-        type WriteCursor<T: Table>: WriteCursor<'a, T>;
-        type DupWriteCursor<T: DupTable>: DupWriteCursor<'a, T> + DupCursor<'a, T>;
-
-        fn put<T: Table>(&self, key: &T::Key, value: &T::Value) -> Result<(), DB_FAILURES>;
-
-        fn delete<T: Table>(
-            &self,
-            key: &T::Key,
-            value: &Option<T::Value>,
-        ) -> Result<(), DB_FAILURES>;
-
-        fn clear<T: Table>(&self) -> Result<(), DB_FAILURES>;
-
-        fn write_cursor<T: Table>(&self) -> Result<Self::WriteCursor<T>, DB_FAILURES>;
-
-        fn write_cursor_dup<T: DupTable>(&self) -> Result<Self::DupWriteCursor<T>, DB_FAILURES>;
-    }
-}
diff --git a/old_database/src/mdbx.rs b/old_database/src/mdbx.rs
deleted file mode 100644
index e44a58b8..00000000
--- a/old_database/src/mdbx.rs
+++ /dev/null
@@ -1,474 +0,0 @@
-//! ### MDBX implementation
-//! This module contains the implementation of all the database traits for the MDBX storage engine.
-//! This include basic transactions methods, cursors and errors conversion.
-
-use crate::{
-    database::Database,
-    error::{DB_FAILURES, DB_SERIAL},
-    table::{self, DupTable, Table},
-    transaction::{Transaction, WriteTransaction},
-    BINCODE_CONFIG,
-};
-use libmdbx::{
-    Cursor, DatabaseFlags, DatabaseKind, Geometry, Mode, PageSize, SyncMode, TableFlags,
-    TransactionKind, WriteFlags, RO, RW,
-};
-use std::ops::Range;
-
-// Constant used in mdbx implementation
-const MDBX_DEFAULT_SYNC_MODE: SyncMode = SyncMode::Durable;
-const MDBX_MAX_MAP_SIZE: usize = 4 * 1024usize.pow(3); // 4TB
-const MDBX_GROWTH_STEP: isize = 100 * 1024isize.pow(2); // 100MB
-const MDBX_PAGE_SIZE: Option<PageSize> = None;
-const MDBX_GEOMETRY: Geometry<Range<usize>> = Geometry {
-    size: Some(0..MDBX_MAX_MAP_SIZE),
-    growth_step: Some(MDBX_GROWTH_STEP),
-    shrink_threshold: None,
-    page_size: MDBX_PAGE_SIZE,
-};
-
-/// [`mdbx_decode`] is a function which the supplied bytes will be deserialized using `bincode::decode_from_slice(src, BINCODE_CONFIG)`
-/// function. Return `Err(DB_FAILURES::SerializeIssue(DB_SERIAL::BincodeDecode(err)))` if it failed to decode the value. It is used for clarity purpose.
-fn mdbx_decode<T: bincode::Decode>(src: &[u8]) -> Result<(T, usize), DB_FAILURES> {
-    bincode::decode_from_slice(src, BINCODE_CONFIG)
-        .map_err(|e| DB_FAILURES::SerializeIssue(DB_SERIAL::BincodeDecode(e)))
-}
-
-/// [`mdbx_encode`] is a function that serialize a given value into a vector using `bincode::encode_to_vec(src, BINCODE_CONFIG)`
-/// function. Return `Err(DB_FAILURES::SerializeIssue(DB_SERIAL::BincodeEncode(err)))` if it failed to encode the value. It is used for clarity purpose.
-fn mdbx_encode<T: bincode::Encode>(src: &T) -> Result<Vec<u8>, DB_FAILURES> {
-    bincode::encode_to_vec(src, BINCODE_CONFIG)
-        .map_err(|e| DB_FAILURES::SerializeIssue(DB_SERIAL::BincodeEncode(e)))
-}
-
-/// [`mdbx_open_table`] is a simple function used for syntax clarity. It try to open the table, and return a `DB_FAILURES` if it failed.
-fn mdbx_open_table<'db, K: TransactionKind, E: DatabaseKind, T: Table>(
-    tx: &'db libmdbx::Transaction<'db, K, E>,
-) -> Result<libmdbx::Table, DB_FAILURES> {
-    tx.open_table(Some(T::TABLE_NAME))
-        .map_err(std::convert::Into::<DB_FAILURES>::into)
-}
-
-/// [`cursor_pair_decode`] is a function defining a conditional return used in (almost) every cursor functions. If a pair of key/value effectively exist from the cursor,
-/// the two values are decoded using `mdbx_decode` function. Return `Err(DB_FAILURES::SerializeIssue(DB_SERIAL::BincodeEncode(err)))` if it failed to encode the value.
-/// It is used for clarity purpose.
-fn cursor_pair_decode<L: bincode::Decode, R: bincode::Decode>(
-    pair: Option<(Vec<u8>, Vec<u8>)>,
-) -> Result<Option<(L, R)>, DB_FAILURES> {
-    if let Some(pair) = pair {
-        let decoded_key = mdbx_decode(pair.0.as_slice())?;
-        let decoded_value = mdbx_decode(pair.1.as_slice())?;
-        Ok(Some((decoded_key.0, decoded_value.0)))
-    } else {
-        Ok(None)
-    }
-}
-
-// Implementation of the database trait with mdbx types
-impl<'a, E> Database<'a> for libmdbx::Database<E>
-where
-    E: DatabaseKind,
-{
-    type TX = libmdbx::Transaction<'a, RO, E>;
-    type TXMut = libmdbx::Transaction<'a, RW, E>;
-    type Error = libmdbx::Error;
-
-    // Open a Read-Only transaction
-    fn tx(&'a self) -> Result<Self::TX, Self::Error> {
-        self.begin_ro_txn()
-    }
-
-    // Open a Read-Write transaction
-    fn tx_mut(&'a self) -> Result<Self::TXMut, Self::Error> {
-        self.begin_rw_txn()
-    }
-
-    // Open the database with the given path
-    fn open(path: std::path::PathBuf) -> Result<Self, Self::Error> {
-        let db: libmdbx::Database<E> = libmdbx::Database::new()
-            .set_flags(DatabaseFlags::from(Mode::ReadWrite {
-                sync_mode: MDBX_DEFAULT_SYNC_MODE,
-            }))
-            .set_geometry(MDBX_GEOMETRY)
-            .set_max_readers(32)
-            .set_max_tables(15)
-            .open(path.as_path())?;
-
-        Ok(db)
-    }
-
-    // Open each tables to verify if the database is complete.
-    fn check_all_tables_exist(&'a self) -> Result<(), Self::Error> {
-        let ro_tx = self.begin_ro_txn()?;
-        // ----- BLOCKS -----
-        ro_tx.open_table(Some(table::blockhash::TABLE_NAME))?;
-        ro_tx.open_table(Some(table::blockmetadata::TABLE_NAME))?;
-        ro_tx.open_table(Some(table::blocks::TABLE_NAME))?;
-        ro_tx.open_table(Some(table::altblock::TABLE_NAME))?;
-        // ------ TXNs ------
-        ro_tx.open_table(Some(table::txspruned::TABLE_NAME))?;
-        ro_tx.open_table(Some(table::txsprunablehash::TABLE_NAME))?;
-        ro_tx.open_table(Some(table::txsprunabletip::TABLE_NAME))?;
-        ro_tx.open_table(Some(table::txsprunable::TABLE_NAME))?;
-        ro_tx.open_table(Some(table::txsoutputs::TABLE_NAME))?;
-        ro_tx.open_table(Some(table::txsidentifier::TABLE_NAME))?;
-        // ---- OUTPUTS -----
-        ro_tx.open_table(Some(table::prerctoutputmetadata::TABLE_NAME))?;
-        ro_tx.open_table(Some(table::outputmetadata::TABLE_NAME))?;
-        // ---- SPT KEYS ----
-        ro_tx.open_table(Some(table::spentkeys::TABLE_NAME))?;
-        // --- PROPERTIES ---
-        ro_tx.open_table(Some(table::properties::TABLE_NAME))?;
-
-        Ok(())
-    }
-
-    // Construct the table of the database
-    fn build(&'a self) -> Result<(), Self::Error> {
-        let rw_tx = self.begin_rw_txn()?;
-
-        // Constructing the tables
-        // ----- BLOCKS -----
-        rw_tx.create_table(
-            Some(table::blockhash::TABLE_NAME),
-            TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
-        )?;
-        rw_tx.create_table(
-            Some(table::blockmetadata::TABLE_NAME),
-            TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
-        )?;
-        rw_tx.create_table(Some(table::blocks::TABLE_NAME), TableFlags::INTEGER_KEY)?;
-        rw_tx.create_table(Some(table::altblock::TABLE_NAME), TableFlags::INTEGER_KEY)?;
-        // ------ TXNs ------
-        rw_tx.create_table(Some(table::txspruned::TABLE_NAME), TableFlags::INTEGER_KEY)?;
-        rw_tx.create_table(
-            Some(table::txsprunable::TABLE_NAME),
-            TableFlags::INTEGER_KEY,
-        )?;
-        rw_tx.create_table(
-            Some(table::txsprunablehash::TABLE_NAME),
-            TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
-        )?;
-        rw_tx.create_table(
-            Some(table::txsprunabletip::TABLE_NAME),
-            TableFlags::INTEGER_KEY,
-        )?;
-        rw_tx.create_table(
-            Some(table::txsoutputs::TABLE_NAME),
-            TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
-        )?;
-        rw_tx.create_table(
-            Some(table::txsidentifier::TABLE_NAME),
-            TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
-        )?;
-        // ---- OUTPUTS -----
-        rw_tx.create_table(
-            Some(table::prerctoutputmetadata::TABLE_NAME),
-            TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
-        )?;
-        rw_tx.create_table(
-            Some(table::outputmetadata::TABLE_NAME),
-            TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
-        )?;
-        // ---- SPT KEYS ----
-        rw_tx.create_table(
-            Some(table::spentkeys::TABLE_NAME),
-            TableFlags::INTEGER_KEY | TableFlags::DUP_FIXED | TableFlags::DUP_SORT,
-        )?;
-        // --- PROPERTIES ---
-        rw_tx.create_table(Some(table::properties::TABLE_NAME), TableFlags::INTEGER_KEY)?;
-
-        rw_tx.commit()?;
-        Ok(())
-    }
-}
-
-// Implementation of the Cursor trait for mdbx's Cursors
-impl<'a, T, R> crate::transaction::Cursor<'a, T> for Cursor<'a, R>
-where
-    T: Table,
-    R: TransactionKind,
-{
-    fn first(&mut self) -> Result<Option<(T::Key, T::Value)>, DB_FAILURES> {
-        let pair = self
-            .first::<Vec<u8>, Vec<u8>>()
-            .map_err(std::convert::Into::<DB_FAILURES>::into)?;
-
-        cursor_pair_decode(pair)
-    }
-
-    fn get_cursor(
-        &mut self,
-    ) -> Result<Option<(<T as Table>::Key, <T as Table>::Value)>, DB_FAILURES> {
-        let pair = self
-            .get_current::<Vec<u8>, Vec<u8>>()
-            .map_err(std::convert::Into::<DB_FAILURES>::into)?;
-
-        cursor_pair_decode(pair)
-    }
-
-    fn last(&mut self) -> Result<Option<(<T as Table>::Key, <T as Table>::Value)>, DB_FAILURES> {
-        let pair = self
-            .last::<Vec<u8>, Vec<u8>>()
-            .map_err(std::convert::Into::<DB_FAILURES>::into)?;
-
-        cursor_pair_decode(pair)
-    }
-
-    fn next(&mut self) -> Result<Option<(<T as Table>::Key, <T as Table>::Value)>, DB_FAILURES> {
-        let pair = self
-            .next::<Vec<u8>, Vec<u8>>()
-            .map_err(std::convert::Into::<DB_FAILURES>::into)?;
-
-        cursor_pair_decode(pair)
-    }
-
-    fn prev(&mut self) -> Result<Option<(<T as Table>::Key, <T as Table>::Value)>, DB_FAILURES> {
-        let pair = self
-            .prev::<Vec<u8>, Vec<u8>>()
-            .map_err(std::convert::Into::<DB_FAILURES>::into)?;
-
-        cursor_pair_decode(pair)
-    }
-
-    fn set(&mut self, key: &T::Key) -> Result<Option<<T as Table>::Value>, DB_FAILURES> {
-        let encoded_key = mdbx_encode(key)?;
-
-        let value = self
-            .set::<Vec<u8>>(&encoded_key)
-            .map_err(std::convert::Into::<DB_FAILURES>::into)?;
-
-        if let Some(value) = value {
-            return Ok(Some(mdbx_decode(value.as_slice())?.0));
-        }
-        Ok(None)
-    }
-}
-
-// Implementation of the DupCursor trait for mdbx's Cursors
-impl<'t, T, R> crate::transaction::DupCursor<'t, T> for Cursor<'t, R>
-where
-    R: TransactionKind,
-    T: DupTable,
-{
-    fn first_dup(&mut self) -> Result<Option<(T::SubKey, T::Value)>, DB_FAILURES> {
-        let value = self
-            .first_dup::<Vec<u8>>()
-            .map_err(std::convert::Into::<DB_FAILURES>::into)?;
-
-        if let Some(value) = value {
-            return Ok(Some(mdbx_decode(value.as_slice())?.0));
-        }
-        Ok(None)
-    }
-
-    fn get_dup(
-        &mut self,
-        key: &T::Key,
-        subkey: &T::SubKey,
-    ) -> Result<Option<<T>::Value>, DB_FAILURES> {
-        let (encoded_key, encoded_subkey) = (mdbx_encode(key)?, mdbx_encode(subkey)?);
-
-        let value = self
-            .get_both::<Vec<u8>>(&encoded_key, &encoded_subkey)
-            .map_err(std::convert::Into::<DB_FAILURES>::into)?;
-
-        if let Some(value) = value {
-            return Ok(Some(mdbx_decode(value.as_slice())?.0));
-        }
-        Ok(None)
-    }
-
-    fn last_dup(&mut self) -> Result<Option<(T::SubKey, T::Value)>, DB_FAILURES> {
-        let value = self
-            .last_dup::<Vec<u8>>()
-            .map_err(std::convert::Into::<DB_FAILURES>::into)?;
-
-        if let Some(value) = value {
-            return Ok(Some(mdbx_decode(value.as_slice())?.0));
-        }
-        Ok(None)
-    }
-
-    fn next_dup(&mut self) -> Result<Option<(T::Key, (T::SubKey, T::Value))>, DB_FAILURES> {
-        let pair = self
-            .next_dup::<Vec<u8>, Vec<u8>>()
-            .map_err(std::convert::Into::<DB_FAILURES>::into)?;
-
-        if let Some(pair) = pair {
-            let (decoded_key, decoded_value) = (
-                mdbx_decode(pair.0.as_slice())?,
-                mdbx_decode(pair.1.as_slice())?,
-            );
-            return Ok(Some((decoded_key.0, decoded_value.0)));
-        }
-        Ok(None)
-    }
-
-    fn prev_dup(&mut self) -> Result<Option<(T::Key, (T::SubKey, T::Value))>, DB_FAILURES> {
-        let pair = self
-            .prev_dup::<Vec<u8>, Vec<u8>>()
-            .map_err(std::convert::Into::<DB_FAILURES>::into)?;
-
-        if let Some(pair) = pair {
-            let (decoded_key, decoded_value) = (
-                mdbx_decode(pair.0.as_slice())?,
-                mdbx_decode(pair.1.as_slice())?,
-            );
-            return Ok(Some((decoded_key.0, decoded_value.0)));
-        }
-        Ok(None)
-    }
-}
-
-// Implementation of the WriteCursor trait for mdbx's Cursors in RW permission
-impl<'a, T> crate::transaction::WriteCursor<'a, T> for Cursor<'a, RW>
-where
-    T: Table,
-{
-    fn put_cursor(&mut self, key: &T::Key, value: &T::Value) -> Result<(), DB_FAILURES> {
-        let (encoded_key, encoded_value) = (mdbx_encode(key)?, mdbx_encode(value)?);
-
-        self.put(&encoded_key, &encoded_value, WriteFlags::empty())
-            .map_err(Into::into)
-    }
-
-    fn del(&mut self) -> Result<(), DB_FAILURES> {
-        self.del(WriteFlags::empty()).map_err(Into::into)
-    }
-}
-
-// Implementation of the DupWriteCursor trait for mdbx's Cursors in RW permission
-impl<'a, T> crate::transaction::DupWriteCursor<'a, T> for Cursor<'a, RW>
-where
-    T: DupTable,
-{
-    fn put_cursor_dup(
-        &mut self,
-        key: &<T>::Key,
-        subkey: &<T as DupTable>::SubKey,
-        value: &<T>::Value,
-    ) -> Result<(), DB_FAILURES> {
-        let (encoded_key, mut encoded_subkey, mut encoded_value) =
-            (mdbx_encode(key)?, mdbx_encode(subkey)?, mdbx_encode(value)?);
-        encoded_subkey.append(&mut encoded_value);
-
-        self.put(
-            encoded_key.as_slice(),
-            encoded_subkey.as_slice(),
-            WriteFlags::empty(),
-        )
-        .map_err(Into::into)
-    }
-
-    fn del_nodup(&mut self) -> Result<(), DB_FAILURES> {
-        self.del(WriteFlags::NO_DUP_DATA).map_err(Into::into)
-    }
-}
-
-// Implementation of the Transaction trait for mdbx's Transactions
-impl<'a, E, R: TransactionKind> Transaction<'a> for libmdbx::Transaction<'_, R, E>
-where
-    E: DatabaseKind,
-{
-    type Cursor<T: Table> = Cursor<'a, R>;
-    type DupCursor<T: DupTable> = Cursor<'a, R>;
-
-    fn get<T: Table>(&self, key: &T::Key) -> Result<Option<T::Value>, DB_FAILURES> {
-        let table = mdbx_open_table::<_, _, T>(self)?;
-
-        let encoded_key = mdbx_encode(key)?;
-
-        let value = self
-            .get::<Vec<u8>>(&table, &encoded_key)
-            .map_err(std::convert::Into::<DB_FAILURES>::into)?;
-        if let Some(value) = value {
-            return Ok(Some(mdbx_decode(value.as_slice())?.0));
-        }
-        Ok(None)
-    }
-
-    fn cursor<T: Table>(&self) -> Result<Self::Cursor<T>, DB_FAILURES> {
-        let table = mdbx_open_table::<_, _, T>(self)?;
-
-        self.cursor(&table).map_err(Into::into)
-    }
-
-    fn commit(self) -> Result<(), DB_FAILURES> {
-        let b = self
-            .commit()
-            .map_err(std::convert::Into::<DB_FAILURES>::into)?;
-
-        if b {
-            Ok(())
-        } else {
-            Err(DB_FAILURES::FailedToCommit)
-        }
-    }
-
-    fn cursor_dup<T: DupTable>(&self) -> Result<Self::DupCursor<T>, DB_FAILURES> {
-        let table = mdbx_open_table::<_, _, T>(self)?;
-
-        self.cursor(&table).map_err(Into::into)
-    }
-
-    fn num_entries<T: Table>(&self) -> Result<usize, DB_FAILURES> {
-        let table = mdbx_open_table::<_, _, T>(self)?;
-        let stat = self.table_stat(&table)?;
-
-        Ok(stat.entries())
-    }
-}
-
-// Implementation of the Transaction trait for mdbx's Transactions with RW permissions
-impl<'a, E> WriteTransaction<'a> for libmdbx::Transaction<'a, RW, E>
-where
-    E: DatabaseKind,
-{
-    type WriteCursor<T: Table> = Cursor<'a, RW>;
-    type DupWriteCursor<T: DupTable> = Cursor<'a, RW>;
-
-    fn put<T: Table>(&self, key: &T::Key, value: &T::Value) -> Result<(), DB_FAILURES> {
-        let table = mdbx_open_table::<_, _, T>(self)?;
-
-        let (encoded_key, encoded_value) = (mdbx_encode(key)?, mdbx_encode(value)?);
-
-        self.put(&table, encoded_key, encoded_value, WriteFlags::empty())
-            .map_err(Into::into)
-    }
-
-    fn delete<T: Table>(&self, key: &T::Key, value: &Option<T::Value>) -> Result<(), DB_FAILURES> {
-        let table = mdbx_open_table::<_, _, T>(self)?;
-
-        let encoded_key = mdbx_encode(key)?;
-        if let Some(value) = value {
-            let encoded_value = mdbx_encode(value)?;
-
-            return self
-                .del(&table, encoded_key, Some(encoded_value.as_slice()))
-                .map(|_| ())
-                .map_err(Into::into);
-        }
-        self.del(&table, encoded_key, None)
-            .map(|_| ())
-            .map_err(Into::into)
-    }
-
-    fn clear<T: Table>(&self) -> Result<(), DB_FAILURES> {
-        let table = mdbx_open_table::<_, _, T>(self)?;
-
-        self.clear_table(&table).map_err(Into::into)
-    }
-
-    fn write_cursor<T: Table>(&self) -> Result<Self::WriteCursor<T>, DB_FAILURES> {
-        let table = mdbx_open_table::<_, _, T>(self)?;
-
-        self.cursor(&table).map_err(Into::into)
-    }
-
-    fn write_cursor_dup<T: DupTable>(&self) -> Result<Self::DupWriteCursor<T>, DB_FAILURES> {
-        let table = mdbx_open_table::<_, _, T>(self)?;
-
-        self.cursor(&table).map_err(Into::into)
-    }
-}
diff --git a/old_database/src/table.rs b/old_database/src/table.rs
deleted file mode 100644
index 0b2f38ac..00000000
--- a/old_database/src/table.rs
+++ /dev/null
@@ -1,181 +0,0 @@
-//! ### Table module
-//! This module contains the definition of the [`Table`] and [`DupTable`] trait, and the actual tables used in the database.
-//! [`DupTable`] are just a trait used to define that they support DUPSORT|DUPFIXED operation (as of now we don't know the equivalent for HSE).
-//! All tables are defined with docs explaining its purpose, what types are the key and data.
-//! For more details please look at Cuprate's book : <link to cuprate book>
-
-use crate::{
-    encoding::Compat,
-    types::{
-        /*OutTx,*/ AltBlock, BlockMetadata, /*RctOutkey,*/ OutputMetadata,
-        TransactionPruned, TxIndex, /*OutAmountIdx,*/ /*KeyImage,*/ TxOutputIdx,
-    },
-};
-use bincode::{de::Decode, enc::Encode};
-use monero::{blockdata::transaction::KeyImage, Block, Hash};
-
-/// A trait implementing a table interaction for the database. It is implemented to an empty struct to specify the name and table's associated types. These associated
-/// types are used to simplify deserialization process.
-pub trait Table: Send + Sync + 'static + Clone {
-    // name of the table
-    const TABLE_NAME: &'static str;
-
-    // Definition of a key & value types of the database
-    type Key: Encode + Decode;
-    type Value: Encode + Decode;
-}
-
-/// A trait implementing a table with duplicated data support.
-pub trait DupTable: Table {
-    // Subkey of the table (prefix of the data)
-    type SubKey: Encode + Decode;
-}
-
-/// This declarative macro declare a new empty struct and impl the specified name, and corresponding types.
-macro_rules! impl_table {
-	( $(#[$docs:meta])* $table:ident , $key:ty , $value:ty ) => {
-        #[derive(Clone)]
-		$(#[$docs])*
-		pub(crate) struct $table;
-
-   		impl Table for $table {
-	 		const TABLE_NAME: &'static str = stringify!($table);
-        	type Key = $key;
-            type Value = $value;
-        }
-	};
-}
-
-/// This declarative macro declare extend the original impl_table! macro by implementy DupTable trait.
-macro_rules! impl_duptable {
-	($(#[$docs:meta])* $table:ident, $key:ty, $subkey:ty, $value:ty) => {
-		impl_table!($(#[$docs])* $table, $key, $value);
-
-		impl DupTable for $table {
-			type SubKey = $subkey;
-		}
-	};
-}
-
-// ------------------------------------------|      Tables definition    |------------------------------------------
-
-// ----- BLOCKS -----
-
-impl_duptable!(
-    /// `blockhash` is table defining a relation between the hash of a block and its height. Its primary use is to quickly find block's hash by its height.
-    blockhash,
-    (),
-    Compat<Hash>,
-    u64
-);
-
-impl_duptable!(
-    /// `blockmetadata` store block metadata alongside their corresponding Hash. The blocks metadata can contains the total_coins_generated, weight, long_term_block_weight & cumulative RingCT
-    blockmetadata,
-    (),
-    u64,
-    BlockMetadata
-);
-
-impl_table!(
-    /// `blockbody` store blocks' bodies along their Hash. The blocks body contains the coinbase transaction and its corresponding mined transactions' hashes.
-    blocks,
-    u64,
-    Compat<Block>
-);
-
-/*
-impl_table!(
-    /// `blockhfversion` keep track of block's hard fork version. If an outdated node continue to run after a hard fork, it needs to know, after updating, what blocks needs to be update.
-    blockhfversion, u64, u8);
-*/
-
-impl_table!(
-    /// `altblock` is a table that permits the storage of blocks from an alternative chain, which may cause a re-org. These blocks can be fetch by their corresponding hash.
-    altblock,
-    Compat<Hash>,
-    AltBlock
-);
-
-// ------- TXNs -------
-
-impl_table!(
-    /// `txspruned` is table storing TransactionPruned (or Pruned Tx). These can be fetch by the corresponding Transaction ID.
-    txspruned,
-    u64,
-    TransactionPruned
-);
-
-impl_table!(
-    /// `txsprunable` is a table storing the Prunable part of transactions (Signatures and RctSig), stored as raw bytes. These can be fetch by the corresponding Transaction ID.
-    txsprunable,
-    u64,
-    Vec<u8>
-);
-
-impl_duptable!(
-    /// `txsprunablehash` is a table storing hashes of prunable part of transactions. These hash can be fetch by the corresponding Transaction ID.
-    txsprunablehash,
-    u64,
-    (),
-    Compat<Hash>
-);
-
-impl_table!(
-    /// `txsprunabletip` is a table used for optimization purpose. It defines at which block's height this transaction belong as long as the block is with Tip blocks. These can be fetch by the corresponding Transaction ID.
-    txsprunabletip,
-    u64,
-    u64
-);
-
-impl_duptable!(
-    /// `txsoutputs` is a table storing output indices used in a transaction. These can be fetch by the corresponding Transaction ID.
-    txsoutputs,
-    u64,
-    (),
-    TxOutputIdx
-);
-
-impl_duptable!(
-    /// `txsidentifier` is a table defining a relation between the hash of a transaction and its transaction Indexes. Its primarily used to quickly find tx's ID by its hash.
-    txsidentifier,
-    Compat<Hash>,
-    (),
-    TxIndex
-);
-
-// ---- OUTPUTS ----
-
-impl_duptable!(
-    /// `prerctoutputmetadata` is a duplicated table storing Pre-RingCT output's metadata. The key is the amount of this output, and the subkey is its amount idx.
-    prerctoutputmetadata,
-    u64,
-    u64,
-    OutputMetadata
-);
-impl_duptable!(
-    /// `prerctoutputmetadata` is a table storing RingCT output's metadata. The key is the amount idx of this output since amount is always 0 for RingCT outputs.
-    outputmetadata,
-    (),
-    u64,
-    OutputMetadata
-);
-
-// ---- SPT KEYS ----
-
-impl_duptable!(
-    /// `spentkeys`is a table storing every KeyImage that have been used to create decoys input. As these KeyImage can't be re used they need to marked.
-    spentkeys,
-    (),
-    Compat<KeyImage>,
-    ()
-);
-
-// ---- PROPERTIES ----
-
-impl_table!(
-    /// `spentkeys`is a table storing every KeyImage that have been used to create decoys input. As these KeyImage can't be re used they need to marked.
-    properties,
-    u32,
-    u32
-);
diff --git a/old_database/src/types.rs b/old_database/src/types.rs
deleted file mode 100644
index f4f806ae..00000000
--- a/old_database/src/types.rs
+++ /dev/null
@@ -1,516 +0,0 @@
-//! ### Types module
-//! This module contains definition and implementations of some of the structures stored in the database.
-//! Some of these types are just Wrapper for convenience or re-definition of `monero-rs` database type (see Boog900/monero-rs, "db" branch)
-//! Since the database do not use dummy keys, these redefined structs are the same as monerod without the prefix data used as a key.
-//! All these types implement [`bincode::Encode`] and [`bincode::Decode`]. They can store `monero-rs` types in their field. In this case, these field
-//! use the [`Compat<T>`] wrapper.
-
-use crate::encoding::{Compat, ReaderCompat};
-use bincode::{enc::write::Writer, Decode, Encode};
-use monero::{
-    consensus::{encode, Decodable},
-    util::ringct::{Key, RctSig, RctSigBase, RctSigPrunable, RctType, Signature},
-    Block, Hash, PublicKey, Transaction, TransactionPrefix, TxIn,
-};
-
-// ---- BLOCKS ----
-
-#[derive(Clone, Debug, Encode, Decode)]
-/// [`BlockMetadata`] is a struct containing metadata of a block such as  the block's `timestamp`, the `total_coins_generated` at this height, its `weight`, its difficulty (`diff_lo`)
-/// and cumulative difficulty (`diff_hi`), the `block_hash`, the cumulative RingCT (`cum_rct`) and its long term weight (`long_term_block_weight`). The monerod's struct equivalent is `mdb_block_info_4`
-/// This struct is used in [`crate::table::blockmetadata`] table.
-pub struct BlockMetadata {
-    /// Block's timestamp (the time at which it started to be mined)
-    pub timestamp: u64,
-    /// Total monero supply, this block included
-    pub total_coins_generated: u64,
-    /// Block's weight (sum of all transactions weights)
-    pub weight: u64,
-    /// Block's cumulative_difficulty. In monerod this field would have been split into two `u64`, since cpp don't support *natively* `uint128_t`/`u128`
-    pub cumulative_difficulty: u128,
-    /// Block's hash
-    pub block_hash: Compat<Hash>,
-    /// Cumulative number of RingCT outputs up to this block
-    pub cum_rct: u64,
-    /// Block's long term weight
-    pub long_term_block_weight: u64,
-}
-
-#[derive(Clone, Debug, Encode, Decode)]
-/// [`AltBlock`] is a struct containing an alternative `block` (defining an alternative mainchain) and its metadata (`block_height`, `cumulative_weight`,
-/// `cumulative_difficulty_low`, `cumulative_difficulty_high`, `already_generated_coins`).
-/// This struct is used in [`crate::table::altblock`] table.
-pub struct AltBlock {
-    /// Alternative block's height.
-    pub height: u64,
-    /// Cumulative weight median at this block
-    pub cumulative_weight: u64,
-    /// Cumulative difficulty
-    pub cumulative_difficulty: u128,
-    /// Total generated coins excluding this block's coinbase reward + fees
-    pub already_generated_coins: u64,
-    /// Actual block data, with Prefix and Transactions.
-    /// It is worth noting that monerod implementation do not contain the block in its struct, but still append it at the end of metadata.
-    pub block: Compat<Block>,
-}
-
-// ---- TRANSACTIONS ----
-
-#[derive(Clone, Debug)]
-/// [`TransactionPruned`] is, as its name suggest, the pruned part of a transaction, which is the Transaction Prefix and its RingCT ring.
-/// This struct is used in the [`crate::table::txsprefix`] table.
-pub struct TransactionPruned {
-    /// The transaction prefix.
-    pub prefix: TransactionPrefix,
-    /// The RingCT ring, will only contain the 'sig' field.
-    pub rct_signatures: RctSig,
-}
-
-impl bincode::Decode for TransactionPruned {
-    fn decode<D: bincode::de::Decoder>(
-        decoder: &mut D,
-    ) -> Result<Self, bincode::error::DecodeError> {
-        let mut r = ReaderCompat(decoder.reader());
-
-        // We first decode the TransactionPrefix and get the n° of inputs/outputs
-        let prefix: TransactionPrefix = Decodable::consensus_decode(&mut r)
-            .map_err(|_| bincode::error::DecodeError::Other("Monero-rs decoding failed"))?;
-
-        let (inputs, outputs) = (prefix.inputs.len(), prefix.outputs.len());
-
-        // Handle the prefix accordingly to its version
-        match *prefix.version {
-            // First transaction format, Pre-RingCT, so the ring are None
-            1 => Ok(TransactionPruned {
-                prefix,
-                rct_signatures: RctSig { sig: None, p: None },
-            }),
-            _ => {
-                let mut rct_signatures = RctSig { sig: None, p: None };
-                // No inputs so no RingCT
-                if inputs == 0 {
-                    return Ok(TransactionPruned {
-                        prefix,
-                        rct_signatures,
-                    });
-                }
-                // Otherwise get the RingCT ring for the tx inputs
-                if let Some(sig) = RctSigBase::consensus_decode(&mut r, inputs, outputs)
-                    .map_err(|_| bincode::error::DecodeError::Other("Monero-rs decoding failed"))?
-                {
-                    rct_signatures = RctSig {
-                        sig: Some(sig),
-                        p: None,
-                    };
-                }
-                // And we return it
-                Ok(TransactionPruned {
-                    prefix,
-                    rct_signatures,
-                })
-            }
-        }
-    }
-}
-
-impl bincode::Encode for TransactionPruned {
-    fn encode<E: bincode::enc::Encoder>(
-        &self,
-        encoder: &mut E,
-    ) -> Result<(), bincode::error::EncodeError> {
-        let writer = encoder.writer();
-        // Encoding the Transaction prefix first
-        let buf = monero::consensus::serialize(&self.prefix);
-        writer.write(&buf)?;
-        match *self.prefix.version {
-            1 => {} // First transaction format, Pre-RingCT, so the there is no Rct ring to add
-            _ => {
-                if let Some(sig) = &self.rct_signatures.sig {
-                    // If there is ring then we append it at the end
-                    let buf = monero::consensus::serialize(sig);
-                    writer.write(&buf)?;
-                }
-            }
-        }
-        Ok(())
-    }
-}
-
-impl TransactionPruned {
-    /// Turns a pruned transaction to a normal transaction with the missing pruned data
-    pub fn into_transaction(self, prunable: &[u8]) -> Result<Transaction, encode::Error> {
-        let mut r = std::io::Cursor::new(prunable);
-        match *self.prefix.version {
-            // Pre-RingCT transactions
-            1 => {
-                let signatures: Result<Vec<Vec<Signature>>, encode::Error> = self
-                    .prefix
-                    .inputs
-                    .iter()
-                    .filter_map(|input| match input {
-                        TxIn::ToKey { key_offsets, .. } => {
-                            let sigs: Result<Vec<Signature>, encode::Error> = key_offsets
-                                .iter()
-                                .map(|_| Decodable::consensus_decode(&mut r))
-                                .collect();
-                            Some(sigs)
-                        }
-                        _ => None,
-                    })
-                    .collect();
-                Ok(Transaction {
-                    prefix: self.prefix,
-                    signatures: signatures?,
-                    rct_signatures: RctSig { sig: None, p: None },
-                })
-            }
-            // Post-RingCT Transactions
-            _ => {
-                let signatures = Vec::new();
-                let mut rct_signatures = RctSig { sig: None, p: None };
-                if self.prefix.inputs.is_empty() {
-                    return Ok(Transaction {
-                        prefix: self.prefix,
-                        signatures,
-                        rct_signatures: RctSig { sig: None, p: None },
-                    });
-                }
-                if let Some(sig) = self.rct_signatures.sig {
-                    let p = {
-                        if sig.rct_type != RctType::Null {
-                            let mixin_size = if !self.prefix.inputs.is_empty() {
-                                match &self.prefix.inputs[0] {
-                                    TxIn::ToKey { key_offsets, .. } => key_offsets.len() - 1,
-                                    _ => 0,
-                                }
-                            } else {
-                                0
-                            };
-                            RctSigPrunable::consensus_decode(
-                                &mut r,
-                                sig.rct_type,
-                                self.prefix.inputs.len(),
-                                self.prefix.outputs.len(),
-                                mixin_size,
-                            )?
-                        } else {
-                            None
-                        }
-                    };
-                    rct_signatures = RctSig { sig: Some(sig), p };
-                }
-                Ok(Transaction {
-                    prefix: self.prefix,
-                    signatures,
-                    rct_signatures,
-                })
-            }
-        }
-    }
-}
-
-pub fn get_transaction_prunable_blob<W: std::io::Write + ?Sized>(
-    tx: &monero::Transaction,
-    w: &mut W,
-) -> Result<usize, std::io::Error> {
-    let mut len = 0;
-    match tx.prefix.version.0 {
-        1 => {
-            for sig in tx.signatures.iter() {
-                for c in sig {
-                    len += monero::consensus::encode::Encodable::consensus_encode(c, w)?;
-                }
-            }
-        }
-        _ => {
-            if let Some(sig) = &tx.rct_signatures.sig {
-                if let Some(p) = &tx.rct_signatures.p {
-                    len += p.consensus_encode(w, sig.rct_type)?;
-                }
-            }
-        }
-    }
-    Ok(len)
-}
-
-pub fn calculate_prunable_hash(tx: &monero::Transaction, tx_prunable_blob: &[u8]) -> Option<Hash> {
-    // V1 transaction don't have prunable hash
-    if tx.prefix.version.0 == 1 {
-        return None;
-    }
-
-    // Checking if it's a miner tx
-    if let TxIn::Gen { height: _ } = &tx.prefix.inputs[0] {
-        if tx.prefix.inputs.len() == 1 {
-            // Returning miner tx's empty hash
-            return Some(Hash::from_slice(&[
-                0x70, 0xa4, 0x85, 0x5d, 0x04, 0xd8, 0xfa, 0x7b, 0x3b, 0x27, 0x82, 0xca, 0x53, 0xb6,
-                0x00, 0xe5, 0xc0, 0x03, 0xc7, 0xdc, 0xb2, 0x7d, 0x7e, 0x92, 0x3c, 0x23, 0xf7, 0x86,
-                0x01, 0x46, 0xd2, 0xc5,
-            ]));
-        }
-    };
-
-    // Calculating the hash
-    Some(Hash::new(tx_prunable_blob))
-}
-
-#[derive(Clone, Debug, Encode, Decode)]
-/// [`TxIndex`] is a struct used in the [`crate::table::txsidentifier`]. It store the `unlock_time` of a transaction, the `height` of the block
-/// whose transaction belong to and the Transaction ID (`tx_id`)
-pub struct TxIndex {
-    /// Transaction ID
-    pub tx_id: u64,
-    /// The unlock time of this transaction (the height at which it is unlocked, it is not a timestamp)
-    pub unlock_time: u64,
-    /// The height of the block whose transaction belong to
-    pub height: u64, // TODO USELESS already in txs_prunable_tip
-}
-
-#[derive(Clone, Debug, Encode, Decode)]
-/// [`TxOutputIdx`] is a single-tuple struct used to contain the indexes (amount and amount indices) of the transactions outputs. It is defined for more clarity on its role.
-/// This struct is used in [`crate::table::txsoutputs`] table.
-pub struct TxOutputIdx(pub Vec<u64>);
-
-// ---- OUTPUTS ----
-
-#[derive(Clone, Debug, Encode, Decode)]
-/// [`RctOutkey`] is a struct containing RingCT metadata and an output ID. It is equivalent to the `output_data_t` struct in monerod
-/// This struct is used in [`crate::table::outputamounts`]
-pub struct RctOutkey {
-    // /// amount_index
-    //pub amount_index: u64,
-    /// The output's ID
-    pub output_id: u64,
-    /// The output's public key (for spend verification)
-    pub pubkey: Compat<PublicKey>,
-    /// The output's unlock time (the height at which it is unlocked, it is not a timestamp)
-    pub unlock_time: u64,
-    /// The height of the block which used this output
-    pub height: u64,
-    /// The output's amount commitment (for spend verification)
-    /// For compatibility with Pre-RingCT outputs, this field is an option. In fact, monerod distinguish between `pre_rct_output_data_t` and `output_data_t` field like that :
-    /// ```cpp
-    /// // This MUST be identical to output_data_t, without the extra rct data at the end
-    /// struct pre_rct_output_data_t
-    /// ```
-    pub commitment: Option<Compat<Key>>,
-}
-
-#[derive(Clone, Debug, Encode, Decode)]
-/// [`OutputMetadata`] is a struct containing Outputs Metadata. It is used in [`crate::table::outputmetadata`]. It is a struct merging the
-/// `out_tx_index` tuple with `output_data_t` structure in monerod, without the output ID.
-pub struct OutputMetadata {
-    pub tx_hash: Compat<Hash>,
-
-    pub local_index: u64,
-
-    pub pubkey: Option<Compat<PublicKey>>,
-
-    pub unlock_time: u64,
-
-    pub height: u64,
-
-    pub commitment: Option<Compat<Key>>,
-}
-
-//#[derive(Clone, Debug, Encode, Decode)]
-//// [`OutAmountIdx`] is a struct tuple used to contain the two keys used in [`crate::table::outputamounts`] table.
-//// In monerod, the database key is the amount while the *cursor key* (the amount index) is the prefix of the actual data being returned.
-//// As we prefer to note use cursor with partial data, we prefer to concat these two into a unique key
-//pub struct OutAmountIdx(u64,u64);
-// MAYBE NOT FINALLY
-
-//#[derive(Clone, Debug, Encode, Decode)]
-// /// [`OutTx`] is a struct containing the hash of the transaction whose output belongs to, and the local index of this output.
-// /// This struct is used in [`crate::table::outputinherit`].
-/*pub struct OutTx {
-    /// Output's transaction hash
-    pub tx_hash: Compat<Hash>,
-    /// Local index of the output
-    pub local_index: u64,
-}*/
-
-#[cfg(test)]
-mod tests {
-    use monero::Hash;
-
-    use super::get_transaction_prunable_blob;
-
-    #[test]
-    fn calculate_tx_prunable_hash() {
-        let prunable_blob: Vec<u8> = vec![
-            1, 113, 10, 7, 87, 70, 119, 97, 244, 126, 155, 133, 254, 167, 60, 204, 134, 45, 71, 17,
-            87, 21, 252, 8, 218, 233, 219, 192, 84, 181, 196, 74, 213, 2, 246, 222, 66, 45, 152,
-            159, 156, 19, 224, 251, 110, 154, 188, 91, 129, 53, 251, 82, 134, 46, 93, 119, 136, 35,
-            13, 190, 235, 231, 44, 183, 134, 221, 12, 131, 222, 209, 246, 52, 14, 33, 94, 173, 251,
-            233, 18, 154, 91, 72, 229, 180, 43, 35, 152, 130, 38, 82, 56, 179, 36, 168, 54, 41, 62,
-            49, 208, 35, 245, 29, 27, 81, 72, 140, 104, 4, 59, 22, 120, 252, 67, 197, 130, 245, 93,
-            100, 129, 134, 19, 137, 228, 237, 166, 89, 5, 42, 1, 110, 139, 39, 81, 89, 159, 40,
-            239, 211, 251, 108, 82, 68, 125, 182, 75, 152, 129, 74, 73, 208, 215, 15, 63, 3, 106,
-            168, 35, 56, 126, 66, 2, 189, 53, 201, 77, 187, 102, 127, 154, 60, 209, 33, 217, 109,
-            81, 217, 183, 252, 114, 90, 245, 21, 229, 174, 254, 177, 147, 130, 74, 49, 118, 203,
-            14, 7, 118, 221, 81, 181, 78, 97, 224, 76, 160, 134, 73, 206, 204, 199, 201, 30, 201,
-            77, 4, 78, 237, 167, 76, 92, 104, 247, 247, 203, 141, 243, 72, 52, 83, 61, 35, 147,
-            231, 124, 21, 115, 81, 83, 67, 222, 61, 225, 171, 66, 243, 185, 195, 51, 72, 243, 80,
-            104, 4, 166, 54, 199, 235, 193, 175, 4, 242, 42, 146, 170, 90, 212, 101, 208, 113, 58,
-            65, 121, 55, 179, 206, 92, 50, 94, 171, 33, 67, 108, 220, 19, 193, 155, 30, 58, 46, 9,
-            227, 48, 246, 187, 82, 230, 61, 64, 95, 197, 183, 150, 62, 203, 252, 36, 157, 135, 160,
-            120, 189, 52, 94, 186, 93, 5, 36, 120, 160, 62, 254, 178, 101, 11, 228, 63, 128, 249,
-            182, 56, 100, 9, 5, 2, 81, 243, 229, 245, 43, 234, 35, 216, 212, 46, 165, 251, 183,
-            133, 10, 76, 172, 95, 106, 231, 13, 216, 222, 15, 92, 122, 103, 68, 238, 190, 108, 124,
-            138, 62, 255, 243, 22, 209, 2, 138, 45, 178, 101, 240, 18, 186, 71, 239, 137, 191, 134,
-            128, 221, 181, 173, 242, 111, 117, 45, 255, 138, 101, 79, 242, 42, 4, 144, 245, 193,
-            79, 14, 44, 201, 223, 0, 193, 123, 75, 155, 140, 248, 0, 226, 246, 230, 126, 7, 32,
-            107, 173, 193, 206, 184, 11, 33, 148, 104, 32, 79, 149, 71, 68, 150, 6, 47, 90, 231,
-            151, 14, 121, 196, 169, 249, 117, 154, 167, 139, 103, 62, 97, 250, 131, 160, 92, 239,
-            18, 236, 110, 184, 102, 30, 194, 175, 243, 145, 169, 183, 163, 141, 244, 186, 172, 251,
-            3, 78, 165, 33, 12, 2, 136, 180, 178, 83, 117, 0, 184, 170, 255, 69, 131, 123, 8, 212,
-            158, 162, 119, 137, 146, 63, 95, 133, 186, 91, 255, 152, 187, 107, 113, 147, 51, 219,
-            207, 5, 160, 169, 97, 9, 1, 202, 152, 186, 128, 160, 110, 120, 7, 176, 103, 87, 30,
-            137, 240, 67, 55, 79, 147, 223, 45, 177, 210, 101, 225, 22, 25, 129, 111, 101, 21, 213,
-            20, 254, 36, 57, 67, 70, 93, 192, 11, 180, 75, 99, 185, 77, 75, 74, 63, 182, 183, 208,
-            16, 69, 237, 96, 76, 96, 212, 242, 6, 169, 14, 250, 168, 129, 18, 141, 240, 101, 196,
-            96, 120, 88, 90, 51, 77, 12, 133, 212, 192, 107, 131, 238, 34, 237, 93, 157, 108, 13,
-            255, 187, 163, 106, 148, 108, 105, 244, 243, 174, 189, 180, 48, 102, 57, 170, 118, 211,
-            110, 126, 222, 165, 93, 36, 157, 90, 14, 135, 184, 197, 185, 7, 99, 199, 224, 225, 243,
-            212, 116, 149, 137, 186, 16, 196, 73, 23, 11, 248, 248, 67, 167, 149, 154, 64, 76, 218,
-            119, 135, 239, 34, 48, 66, 57, 109, 246, 3, 141, 169, 42, 157, 222, 21, 40, 183, 168,
-            97, 195, 106, 244, 229, 61, 122, 136, 59, 255, 120, 86, 30, 63, 226, 18, 65, 218, 188,
-            195, 217, 85, 12, 211, 221, 188, 27, 8, 98, 103, 211, 213, 217, 65, 82, 229, 145, 80,
-            147, 220, 57, 143, 20, 189, 253, 106, 13, 21, 170, 60, 24, 48, 162, 234, 0, 240, 226,
-            4, 28, 76, 93, 56, 3, 187, 223, 58, 31, 184, 58, 234, 198, 140, 223, 217, 1, 147, 94,
-            218, 199, 154, 121, 137, 44, 229, 0, 1, 10, 133, 250, 140, 64, 150, 89, 64, 112, 178,
-            221, 87, 19, 24, 104, 252, 28, 65, 207, 28, 195, 217, 73, 12, 16, 83, 55, 199, 84, 117,
-            175, 123, 13, 234, 10, 54, 63, 245, 161, 74, 235, 92, 189, 247, 47, 62, 176, 41, 159,
-            40, 250, 116, 63, 33, 193, 78, 72, 29, 215, 9, 191, 233, 243, 87, 14, 195, 7, 89, 101,
-            0, 28, 0, 234, 205, 59, 142, 119, 119, 52, 143, 80, 151, 211, 184, 235, 98, 222, 206,
-            170, 166, 4, 155, 3, 235, 26, 62, 8, 171, 19, 14, 53, 245, 77, 114, 175, 246, 170, 139,
-            227, 212, 141, 72, 223, 134, 63, 91, 26, 12, 78, 253, 198, 162, 152, 202, 207, 170,
-            254, 8, 4, 4, 175, 207, 84, 10, 108, 179, 157, 132, 110, 76, 201, 247, 227, 158, 106,
-            59, 41, 206, 229, 128, 2, 60, 203, 65, 71, 160, 232, 186, 227, 51, 12, 142, 85, 93, 89,
-            234, 236, 157, 230, 247, 167, 99, 7, 37, 146, 13, 53, 39, 255, 209, 177, 179, 17, 131,
-            59, 16, 75, 180, 21, 119, 88, 4, 12, 49, 140, 3, 110, 235, 231, 92, 13, 41, 137, 21,
-            37, 46, 138, 44, 250, 44, 161, 179, 114, 94, 63, 207, 192, 81, 234, 35, 125, 54, 2,
-            214, 10, 57, 116, 154, 150, 147, 223, 232, 36, 108, 152, 145, 157, 132, 190, 103, 233,
-            155, 141, 243, 249, 120, 72, 168, 14, 196, 35, 54, 107, 167, 218, 209, 1, 209, 197,
-            187, 242, 76, 86, 229, 114, 131, 196, 69, 171, 118, 28, 51, 192, 146, 14, 140, 84, 66,
-            155, 237, 194, 167, 121, 160, 166, 198, 166, 57, 13, 66, 162, 234, 148, 102, 133, 111,
-            18, 166, 77, 156, 75, 84, 220, 80, 35, 81, 141, 23, 197, 162, 23, 167, 187, 187, 187,
-            137, 184, 96, 140, 162, 6, 49, 63, 39, 84, 107, 85, 202, 168, 51, 194, 214, 132, 253,
-            253, 189, 231, 1, 226, 118, 104, 84, 147, 244, 58, 233, 250, 66, 26, 109, 223, 34, 2,
-            2, 112, 141, 147, 230, 134, 73, 45, 105, 180, 223, 52, 95, 40, 235, 209, 50, 67, 193,
-            22, 176, 176, 128, 140, 238, 252, 129, 220, 175, 79, 133, 12, 123, 209, 64, 5, 160, 39,
-            47, 66, 122, 245, 65, 102, 133, 58, 74, 138, 153, 217, 48, 59, 84, 135, 117, 92, 131,
-            44, 109, 40, 105, 69, 29, 14, 142, 71, 87, 112, 68, 134, 0, 14, 158, 14, 68, 15, 180,
-            150, 108, 49, 196, 94, 82, 27, 208, 163, 103, 81, 85, 124, 61, 242, 151, 29, 74, 87,
-            134, 166, 145, 186, 110, 207, 162, 99, 92, 133, 121, 137, 124, 90, 134, 5, 249, 231,
-            181, 222, 38, 170, 141, 113, 204, 172, 169, 173, 63, 81, 170, 76,
-        ];
-        let prunable_hash = Hash::from_slice(&[
-            0x5c, 0x5e, 0x69, 0xd8, 0xfc, 0x0d, 0x22, 0x6a, 0x60, 0x91, 0x47, 0xda, 0x98, 0x36,
-            0x06, 0x00, 0xf4, 0xea, 0x49, 0xcc, 0x49, 0x45, 0x2c, 0x5e, 0xf8, 0xba, 0x20, 0xf5,
-            0x93, 0xd4, 0x80, 0x7d,
-        ]);
-        assert_eq!(prunable_hash, Hash::new(prunable_blob));
-    }
-
-    #[test]
-    fn get_prunable_tx_blob() {
-        let mut pruned_p_blob: Vec<u8> = vec![
-            2, 0, 1, 2, 0, 16, 180, 149, 135, 30, 237, 231, 156, 1, 132, 145, 47, 182, 251, 153, 1,
-            225, 234, 94, 219, 134, 23, 222, 210, 30, 208, 213, 12, 136, 158, 5, 159, 148, 15, 206,
-            144, 2, 132, 63, 135, 22, 151, 8, 134, 8, 178, 26, 194, 111, 101, 192, 45, 104, 18,
-            115, 178, 194, 100, 255, 227, 10, 253, 165, 53, 62, 81, 67, 202, 169, 56, 99, 42, 146,
-            175, 137, 85, 195, 27, 151, 2, 0, 3, 207, 28, 183, 85, 7, 58, 81, 205, 53, 9, 191, 141,
-            209, 70, 58, 30, 38, 225, 212, 68, 14, 4, 216, 204, 101, 163, 66, 156, 101, 143, 255,
-            196, 134, 0, 3, 254, 66, 159, 187, 180, 41, 78, 252, 85, 255, 154, 55, 239, 222, 199,
-            37, 159, 210, 71, 186, 188, 46, 134, 181, 236, 221, 173, 43, 93, 50, 138, 249, 221, 44,
-            1, 34, 67, 111, 182, 199, 28, 219, 56, 238, 143, 188, 101, 103, 205, 139, 160, 144,
-            226, 34, 92, 235, 221, 75, 38, 7, 104, 255, 108, 208, 1, 184, 169, 2, 9, 1, 84, 62, 77,
-            107, 119, 22, 148, 222, 6, 128, 128, 211, 14, 242, 200, 16, 137, 239, 249, 55, 59, 16,
-            193, 192, 140, 240, 153, 129, 228, 115, 222, 247, 41, 128, 219, 241, 249, 198, 214, 75,
-            31, 82, 225, 1, 158, 183, 226, 220, 126, 228, 191, 211, 79, 43, 220, 95, 124, 109, 14,
-            162, 170, 68, 37, 62, 21, 139, 182, 246, 152, 36, 156, 172, 197, 20, 145, 85, 9, 8,
-            106, 237, 112, 63, 189, 172, 145, 49, 234, 68, 152, 200, 241, 0, 37,
-        ];
-        let prunable_blob: Vec<u8> = vec![
-            1, 113, 10, 7, 87, 70, 119, 97, 244, 126, 155, 133, 254, 167, 60, 204, 134, 45, 71, 17,
-            87, 21, 252, 8, 218, 233, 219, 192, 84, 181, 196, 74, 213, 2, 246, 222, 66, 45, 152,
-            159, 156, 19, 224, 251, 110, 154, 188, 91, 129, 53, 251, 82, 134, 46, 93, 119, 136, 35,
-            13, 190, 235, 231, 44, 183, 134, 221, 12, 131, 222, 209, 246, 52, 14, 33, 94, 173, 251,
-            233, 18, 154, 91, 72, 229, 180, 43, 35, 152, 130, 38, 82, 56, 179, 36, 168, 54, 41, 62,
-            49, 208, 35, 245, 29, 27, 81, 72, 140, 104, 4, 59, 22, 120, 252, 67, 197, 130, 245, 93,
-            100, 129, 134, 19, 137, 228, 237, 166, 89, 5, 42, 1, 110, 139, 39, 81, 89, 159, 40,
-            239, 211, 251, 108, 82, 68, 125, 182, 75, 152, 129, 74, 73, 208, 215, 15, 63, 3, 106,
-            168, 35, 56, 126, 66, 2, 189, 53, 201, 77, 187, 102, 127, 154, 60, 209, 33, 217, 109,
-            81, 217, 183, 252, 114, 90, 245, 21, 229, 174, 254, 177, 147, 130, 74, 49, 118, 203,
-            14, 7, 118, 221, 81, 181, 78, 97, 224, 76, 160, 134, 73, 206, 204, 199, 201, 30, 201,
-            77, 4, 78, 237, 167, 76, 92, 104, 247, 247, 203, 141, 243, 72, 52, 83, 61, 35, 147,
-            231, 124, 21, 115, 81, 83, 67, 222, 61, 225, 171, 66, 243, 185, 195, 51, 72, 243, 80,
-            104, 4, 166, 54, 199, 235, 193, 175, 4, 242, 42, 146, 170, 90, 212, 101, 208, 113, 58,
-            65, 121, 55, 179, 206, 92, 50, 94, 171, 33, 67, 108, 220, 19, 193, 155, 30, 58, 46, 9,
-            227, 48, 246, 187, 82, 230, 61, 64, 95, 197, 183, 150, 62, 203, 252, 36, 157, 135, 160,
-            120, 189, 52, 94, 186, 93, 5, 36, 120, 160, 62, 254, 178, 101, 11, 228, 63, 128, 249,
-            182, 56, 100, 9, 5, 2, 81, 243, 229, 245, 43, 234, 35, 216, 212, 46, 165, 251, 183,
-            133, 10, 76, 172, 95, 106, 231, 13, 216, 222, 15, 92, 122, 103, 68, 238, 190, 108, 124,
-            138, 62, 255, 243, 22, 209, 2, 138, 45, 178, 101, 240, 18, 186, 71, 239, 137, 191, 134,
-            128, 221, 181, 173, 242, 111, 117, 45, 255, 138, 101, 79, 242, 42, 4, 144, 245, 193,
-            79, 14, 44, 201, 223, 0, 193, 123, 75, 155, 140, 248, 0, 226, 246, 230, 126, 7, 32,
-            107, 173, 193, 206, 184, 11, 33, 148, 104, 32, 79, 149, 71, 68, 150, 6, 47, 90, 231,
-            151, 14, 121, 196, 169, 249, 117, 154, 167, 139, 103, 62, 97, 250, 131, 160, 92, 239,
-            18, 236, 110, 184, 102, 30, 194, 175, 243, 145, 169, 183, 163, 141, 244, 186, 172, 251,
-            3, 78, 165, 33, 12, 2, 136, 180, 178, 83, 117, 0, 184, 170, 255, 69, 131, 123, 8, 212,
-            158, 162, 119, 137, 146, 63, 95, 133, 186, 91, 255, 152, 187, 107, 113, 147, 51, 219,
-            207, 5, 160, 169, 97, 9, 1, 202, 152, 186, 128, 160, 110, 120, 7, 176, 103, 87, 30,
-            137, 240, 67, 55, 79, 147, 223, 45, 177, 210, 101, 225, 22, 25, 129, 111, 101, 21, 213,
-            20, 254, 36, 57, 67, 70, 93, 192, 11, 180, 75, 99, 185, 77, 75, 74, 63, 182, 183, 208,
-            16, 69, 237, 96, 76, 96, 212, 242, 6, 169, 14, 250, 168, 129, 18, 141, 240, 101, 196,
-            96, 120, 88, 90, 51, 77, 12, 133, 212, 192, 107, 131, 238, 34, 237, 93, 157, 108, 13,
-            255, 187, 163, 106, 148, 108, 105, 244, 243, 174, 189, 180, 48, 102, 57, 170, 118, 211,
-            110, 126, 222, 165, 93, 36, 157, 90, 14, 135, 184, 197, 185, 7, 99, 199, 224, 225, 243,
-            212, 116, 149, 137, 186, 16, 196, 73, 23, 11, 248, 248, 67, 167, 149, 154, 64, 76, 218,
-            119, 135, 239, 34, 48, 66, 57, 109, 246, 3, 141, 169, 42, 157, 222, 21, 40, 183, 168,
-            97, 195, 106, 244, 229, 61, 122, 136, 59, 255, 120, 86, 30, 63, 226, 18, 65, 218, 188,
-            195, 217, 85, 12, 211, 221, 188, 27, 8, 98, 103, 211, 213, 217, 65, 82, 229, 145, 80,
-            147, 220, 57, 143, 20, 189, 253, 106, 13, 21, 170, 60, 24, 48, 162, 234, 0, 240, 226,
-            4, 28, 76, 93, 56, 3, 187, 223, 58, 31, 184, 58, 234, 198, 140, 223, 217, 1, 147, 94,
-            218, 199, 154, 121, 137, 44, 229, 0, 1, 10, 133, 250, 140, 64, 150, 89, 64, 112, 178,
-            221, 87, 19, 24, 104, 252, 28, 65, 207, 28, 195, 217, 73, 12, 16, 83, 55, 199, 84, 117,
-            175, 123, 13, 234, 10, 54, 63, 245, 161, 74, 235, 92, 189, 247, 47, 62, 176, 41, 159,
-            40, 250, 116, 63, 33, 193, 78, 72, 29, 215, 9, 191, 233, 243, 87, 14, 195, 7, 89, 101,
-            0, 28, 0, 234, 205, 59, 142, 119, 119, 52, 143, 80, 151, 211, 184, 235, 98, 222, 206,
-            170, 166, 4, 155, 3, 235, 26, 62, 8, 171, 19, 14, 53, 245, 77, 114, 175, 246, 170, 139,
-            227, 212, 141, 72, 223, 134, 63, 91, 26, 12, 78, 253, 198, 162, 152, 202, 207, 170,
-            254, 8, 4, 4, 175, 207, 84, 10, 108, 179, 157, 132, 110, 76, 201, 247, 227, 158, 106,
-            59, 41, 206, 229, 128, 2, 60, 203, 65, 71, 160, 232, 186, 227, 51, 12, 142, 85, 93, 89,
-            234, 236, 157, 230, 247, 167, 99, 7, 37, 146, 13, 53, 39, 255, 209, 177, 179, 17, 131,
-            59, 16, 75, 180, 21, 119, 88, 4, 12, 49, 140, 3, 110, 235, 231, 92, 13, 41, 137, 21,
-            37, 46, 138, 44, 250, 44, 161, 179, 114, 94, 63, 207, 192, 81, 234, 35, 125, 54, 2,
-            214, 10, 57, 116, 154, 150, 147, 223, 232, 36, 108, 152, 145, 157, 132, 190, 103, 233,
-            155, 141, 243, 249, 120, 72, 168, 14, 196, 35, 54, 107, 167, 218, 209, 1, 209, 197,
-            187, 242, 76, 86, 229, 114, 131, 196, 69, 171, 118, 28, 51, 192, 146, 14, 140, 84, 66,
-            155, 237, 194, 167, 121, 160, 166, 198, 166, 57, 13, 66, 162, 234, 148, 102, 133, 111,
-            18, 166, 77, 156, 75, 84, 220, 80, 35, 81, 141, 23, 197, 162, 23, 167, 187, 187, 187,
-            137, 184, 96, 140, 162, 6, 49, 63, 39, 84, 107, 85, 202, 168, 51, 194, 214, 132, 253,
-            253, 189, 231, 1, 226, 118, 104, 84, 147, 244, 58, 233, 250, 66, 26, 109, 223, 34, 2,
-            2, 112, 141, 147, 230, 134, 73, 45, 105, 180, 223, 52, 95, 40, 235, 209, 50, 67, 193,
-            22, 176, 176, 128, 140, 238, 252, 129, 220, 175, 79, 133, 12, 123, 209, 64, 5, 160, 39,
-            47, 66, 122, 245, 65, 102, 133, 58, 74, 138, 153, 217, 48, 59, 84, 135, 117, 92, 131,
-            44, 109, 40, 105, 69, 29, 14, 142, 71, 87, 112, 68, 134, 0, 14, 158, 14, 68, 15, 180,
-            150, 108, 49, 196, 94, 82, 27, 208, 163, 103, 81, 85, 124, 61, 242, 151, 29, 74, 87,
-            134, 166, 145, 186, 110, 207, 162, 99, 92, 133, 121, 137, 124, 90, 134, 5, 249, 231,
-            181, 222, 38, 170, 141, 113, 204, 172, 169, 173, 63, 81, 170, 76,
-        ];
-        let mut tx_blob: Vec<u8> = Vec::new();
-        tx_blob.append(&mut pruned_p_blob);
-        tx_blob.append(&mut prunable_blob.clone());
-        let mut buf = Vec::new();
-        #[allow(clippy::expect_used)]
-        let tx: monero::Transaction =
-            monero::consensus::encode::deserialize(&tx_blob).expect("failed to serialize");
-        #[allow(clippy::expect_used)]
-        get_transaction_prunable_blob(&tx, &mut buf).expect("failed to get out prunable blob");
-        assert_eq!(prunable_blob, buf);
-    }
-}
diff --git a/p2p/address-book/src/book.rs b/p2p/address-book/src/book.rs
index 4c3a773d..3a49c6be 100644
--- a/p2p/address-book/src/book.rs
+++ b/p2p/address-book/src/book.rs
@@ -84,7 +84,7 @@ impl<Z: NetworkZone> AddressBook<Z> {
         let connected_peers = HashMap::new();
 
         let mut peer_save_interval = interval(cfg.peer_save_period);
-        peer_save_interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
+        peer_save_interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
 
         Self {
             white_list,
@@ -236,7 +236,9 @@ impl<Z: NetworkZone> AddressBook<Z> {
     ) {
         tracing::debug!("Received new peer list, length: {}", peer_list.len());
 
-        peer_list.retain(|peer| {
+        peer_list.retain_mut(|peer| {
+            peer.adr.make_canonical();
+
             if !peer.adr.should_add_to_peer_list() {
                 false
             } else {
@@ -259,7 +261,7 @@ impl<Z: NetworkZone> AddressBook<Z> {
     ) -> Option<ZoneSpecificPeerListEntryBase<Z::Addr>> {
         tracing::debug!("Retrieving random white peer");
         self.white_list
-            .take_random_peer(&mut rand::thread_rng(), block_needed)
+            .take_random_peer(&mut rand::thread_rng(), block_needed, &self.anchor_list)
     }
 
     fn take_random_gray_peer(
@@ -268,7 +270,7 @@ impl<Z: NetworkZone> AddressBook<Z> {
     ) -> Option<ZoneSpecificPeerListEntryBase<Z::Addr>> {
         tracing::debug!("Retrieving random gray peer");
         self.gray_list
-            .take_random_peer(&mut rand::thread_rng(), block_needed)
+            .take_random_peer(&mut rand::thread_rng(), block_needed, &HashSet::new())
     }
 
     fn get_white_peers(&self, len: usize) -> Vec<ZoneSpecificPeerListEntryBase<Z::Addr>> {
diff --git a/p2p/address-book/src/book/tests.rs b/p2p/address-book/src/book/tests.rs
index 4e1fd877..1cb0fc85 100644
--- a/p2p/address-book/src/book/tests.rs
+++ b/p2p/address-book/src/book/tests.rs
@@ -1,8 +1,7 @@
 use std::{path::PathBuf, sync::Arc, time::Duration};
 
 use futures::StreamExt;
-use tokio::sync::Semaphore;
-use tokio::time::interval;
+use tokio::{sync::Semaphore, time::interval};
 
 use monero_p2p::handles::HandleBuilder;
 use monero_pruning::PruningSeed;
diff --git a/p2p/address-book/src/lib.rs b/p2p/address-book/src/lib.rs
index ce56b4f4..a3dc0543 100644
--- a/p2p/address-book/src/lib.rs
+++ b/p2p/address-book/src/lib.rs
@@ -82,5 +82,5 @@ pub async fn init_address_book<Z: NetworkZone>(
 
     let address_book = book::AddressBook::<Z>::new(cfg, white_list, gray_list, Vec::new());
 
-    Ok(Buffer::new(address_book, 15))
+    Ok(Buffer::new(address_book, 150))
 }
diff --git a/p2p/address-book/src/peer_list.rs b/p2p/address-book/src/peer_list.rs
index f2c192f0..2aaf432a 100644
--- a/p2p/address-book/src/peer_list.rs
+++ b/p2p/address-book/src/peer_list.rs
@@ -89,28 +89,42 @@ impl<Z: NetworkZone> PeerList<Z> {
         &mut self,
         r: &mut R,
         block_needed: Option<u64>,
+        must_keep_peers: &HashSet<Z::Addr>,
     ) -> Option<ZoneSpecificPeerListEntryBase<Z::Addr>> {
-        if let Some(needed_height) = block_needed {
-            let (_, addresses_with_block) = self.pruning_seeds.iter().find(|(seed, _)| {
-                // TODO: factor in peer blockchain height?
-                seed.get_next_unpruned_block(needed_height, CRYPTONOTE_MAX_BLOCK_HEIGHT)
-                    .expect("Block needed is higher than max block allowed.")
-                    == needed_height
-            })?;
-            let n = r.gen_range(0..addresses_with_block.len());
-            let peer = addresses_with_block[n];
-            self.remove_peer(&peer)
-        } else {
-            let len = self.len();
-            if len == 0 {
-                None
-            } else {
-                let n = r.gen_range(0..len);
+        // Take a random peer and see if it's in the list of must_keep_peers, if it is try again.
+        // TODO: improve this
 
-                let (&key, _) = self.peers.get_index(n).unwrap();
-                self.remove_peer(&key)
+        for _ in 0..3 {
+            if let Some(needed_height) = block_needed {
+                let (_, addresses_with_block) = self.pruning_seeds.iter().find(|(seed, _)| {
+                    // TODO: factor in peer blockchain height?
+                    seed.get_next_unpruned_block(needed_height, CRYPTONOTE_MAX_BLOCK_HEIGHT)
+                        .expect("Block needed is higher than max block allowed.")
+                        == needed_height
+                })?;
+                let n = r.gen_range(0..addresses_with_block.len());
+                let peer = addresses_with_block[n];
+                if must_keep_peers.contains(&peer) {
+                    continue;
+                }
+
+                return self.remove_peer(&peer);
+            }
+            let len = self.len();
+
+            if len == 0 {
+                return None;
+            }
+
+            let n = r.gen_range(0..len);
+
+            let (&key, _) = self.peers.get_index(n).unwrap();
+            if !must_keep_peers.contains(&key) {
+                return self.remove_peer(&key);
             }
         }
+
+        None
     }
 
     pub fn get_random_peers<R: Rng>(
diff --git a/p2p/address-book/src/peer_list/tests.rs b/p2p/address-book/src/peer_list/tests.rs
index ed9682eb..7aba0a20 100644
--- a/p2p/address-book/src/peer_list/tests.rs
+++ b/p2p/address-book/src/peer_list/tests.rs
@@ -87,7 +87,7 @@ fn peer_list_remove_specific_peer() {
     let mut peer_list = make_fake_peer_list_with_random_pruning_seeds(100);
 
     let peer = peer_list
-        .take_random_peer(&mut rand::thread_rng(), None)
+        .take_random_peer(&mut rand::thread_rng(), None, &HashSet::new())
         .unwrap();
 
     let pruning_idxs = peer_list.pruning_seeds;
@@ -160,7 +160,7 @@ fn peer_list_get_peer_with_block() {
     peer_list.add_new_peer(make_fake_peer(101, Some(384)));
 
     let peer = peer_list
-        .take_random_peer(&mut r, Some(1))
+        .take_random_peer(&mut r, Some(1), &HashSet::new())
         .expect("We just added a peer with the correct seed");
 
     assert!(peer
@@ -173,7 +173,7 @@ fn peer_list_get_peer_with_block() {
 fn peer_list_ban_peers() {
     let mut peer_list = make_fake_peer_list_with_random_pruning_seeds(100);
     let peer = peer_list
-        .take_random_peer(&mut rand::thread_rng(), None)
+        .take_random_peer(&mut rand::thread_rng(), None, &HashSet::new())
         .unwrap();
     let ban_id = peer.adr.ban_id();
 
diff --git a/p2p/cuprate-p2p/Cargo.toml b/p2p/cuprate-p2p/Cargo.toml
new file mode 100644
index 00000000..d73684af
--- /dev/null
+++ b/p2p/cuprate-p2p/Cargo.toml
@@ -0,0 +1,36 @@
+[package]
+name = "cuprate-p2p"
+version = "0.1.0"
+edition = "2021"
+license = "MIT"
+authors = ["Boog900"]
+
+[dependencies]
+fixed-bytes = { path = "../../net/fixed-bytes" }
+monero-wire = { path = "../../net/monero-wire" }
+monero-p2p = { path = "../monero-p2p", features = ["borsh"] }
+monero-address-book = { path = "../address-book" }
+monero-pruning = { path = "../../pruning" }
+cuprate-helper = { path = "../../helper", features = ["asynch"] }
+
+monero-serai = { workspace = true, features = ["std"] }
+
+tower = { workspace = true }
+tokio = { workspace = true, features = ["rt"] }
+rayon = { workspace = true }
+tokio-util = { workspace = true }
+tokio-stream = { workspace = true, features = ["sync", "time"] }
+futures = { workspace = true, features = ["std"] }
+pin-project = { workspace = true }
+dashmap = { workspace = true }
+
+thiserror = { workspace = true }
+bytes = { workspace = true, features = ["std"] }
+indexmap = { workspace = true, features = ["std"] }
+rand = { workspace = true, features = ["std", "std_rng"] }
+rand_distr = { workspace = true, features = ["std"] }
+hex = { workspace = true, features = ["std"] }
+tracing = { workspace = true, features = ["std", "attributes"] }
+
+[dev-dependencies]
+cuprate-test-utils = { path = "../../test-utils" }
diff --git a/p2p/cuprate-p2p/src/broadcast.rs b/p2p/cuprate-p2p/src/broadcast.rs
new file mode 100644
index 00000000..cc8a3fd6
--- /dev/null
+++ b/p2p/cuprate-p2p/src/broadcast.rs
@@ -0,0 +1,540 @@
+//! # Broadcast Router
+//!
+//! This module handles broadcasting messages to multiple peers with the [`BroadcastSvc`].
+use std::{
+    future::{ready, Future, Ready},
+    pin::{pin, Pin},
+    task::{ready, Context, Poll},
+    time::Duration,
+};
+
+use bytes::Bytes;
+use futures::Stream;
+use rand::prelude::*;
+use rand_distr::Exp;
+use tokio::{
+    sync::{
+        broadcast::{self, error::TryRecvError},
+        watch,
+    },
+    time::{sleep_until, Instant, Sleep},
+};
+use tokio_stream::wrappers::WatchStream;
+use tower::Service;
+
+use monero_p2p::{client::InternalPeerID, BroadcastMessage, ConnectionDirection, NetworkZone};
+use monero_wire::{
+    common::{BlockCompleteEntry, TransactionBlobs},
+    protocol::{NewFluffyBlock, NewTransactions},
+};
+
+use crate::constants::{
+    DIFFUSION_FLUSH_AVERAGE_SECONDS_INBOUND, DIFFUSION_FLUSH_AVERAGE_SECONDS_OUTBOUND,
+    MAX_TXS_IN_BROADCAST_CHANNEL, SOFT_TX_MESSAGE_SIZE_SIZE_LIMIT,
+};
+
+/// The configuration for the [`BroadcastSvc`].
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct BroadcastConfig {
+    /// The average number of seconds between diffusion flushes for outbound connections.
+    pub diffusion_flush_average_seconds_outbound: Duration,
+    /// The average number of seconds between diffusion flushes for inbound connections.
+    pub diffusion_flush_average_seconds_inbound: Duration,
+}
+
+impl Default for BroadcastConfig {
+    fn default() -> Self {
+        Self {
+            diffusion_flush_average_seconds_inbound: DIFFUSION_FLUSH_AVERAGE_SECONDS_INBOUND,
+            diffusion_flush_average_seconds_outbound: DIFFUSION_FLUSH_AVERAGE_SECONDS_OUTBOUND,
+        }
+    }
+}
+
+/// Initialise the [`BroadcastSvc`] and the functions to produce [`BroadcastMessageStream`]s.
+///
+/// This function will return in order:
+/// - The [`BroadcastSvc`]
+/// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **outbound** peers.
+/// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **inbound** peers.
+pub fn init_broadcast_channels<N: NetworkZone>(
+    config: BroadcastConfig,
+) -> (
+    BroadcastSvc<N>,
+    impl Fn(InternalPeerID<N::Addr>) -> BroadcastMessageStream<N> + Clone + Send + 'static,
+    impl Fn(InternalPeerID<N::Addr>) -> BroadcastMessageStream<N> + Clone + Send + 'static,
+) {
+    let outbound_dist = Exp::new(
+        1.0 / config
+            .diffusion_flush_average_seconds_outbound
+            .as_secs_f64(),
+    )
+    .unwrap();
+    let inbound_dist =
+        Exp::new(1.0 / config.diffusion_flush_average_seconds_inbound.as_secs_f64()).unwrap();
+
+    // Set a default value for init - the broadcast streams given to the peer tasks will only broadcast from this channel when the value
+    // changes so no peer will get sent this.
+    let (block_watch_sender, block_watch_receiver) = watch::channel(NewBlockInfo {
+        block_bytes: Default::default(),
+        current_blockchain_height: 0,
+    });
+
+    // create the inbound/outbound broadcast channels.
+    let (tx_broadcast_channel_outbound_sender, tx_broadcast_channel_outbound_receiver) =
+        broadcast::channel(MAX_TXS_IN_BROADCAST_CHANNEL);
+    let (tx_broadcast_channel_inbound_sender, tx_broadcast_channel_inbound_receiver) =
+        broadcast::channel(MAX_TXS_IN_BROADCAST_CHANNEL);
+
+    // create the broadcast service.
+    let broadcast_svc = BroadcastSvc {
+        new_block_watch: block_watch_sender,
+        tx_broadcast_channel_outbound: tx_broadcast_channel_outbound_sender,
+        tx_broadcast_channel_inbound: tx_broadcast_channel_inbound_sender,
+    };
+
+    // wrap the tx broadcast channels in a wrapper that impls Clone so the closures later on impl clone.
+    let tx_channel_outbound_receiver_wrapped =
+        CloneableBroadcastReceiver(tx_broadcast_channel_outbound_receiver);
+    let tx_channel_inbound_receiver_wrapped =
+        CloneableBroadcastReceiver(tx_broadcast_channel_inbound_receiver);
+
+    // Create the closures that will be used to start the broadcast streams that the connection task will hold to listen
+    // for messages to broadcast.
+    let block_watch_receiver_cloned = block_watch_receiver.clone();
+    let outbound_stream_maker = move |addr| {
+        BroadcastMessageStream::new(
+            addr,
+            outbound_dist,
+            block_watch_receiver_cloned.clone(),
+            tx_channel_outbound_receiver_wrapped.clone().0,
+        )
+    };
+
+    let inbound_stream_maker = move |addr| {
+        BroadcastMessageStream::new(
+            addr,
+            inbound_dist,
+            block_watch_receiver.clone(),
+            tx_channel_inbound_receiver_wrapped.clone().0,
+        )
+    };
+
+    (broadcast_svc, outbound_stream_maker, inbound_stream_maker)
+}
+
+/// A request to broadcast some data to all connected peers or a sub-set like all inbound or all outbound.
+///
+/// Only certain P2P messages are supported here: [`NewFluffyBlock`] and [`NewTransactions`]. These are the only
+/// P2P messages that make sense to broadcast to multiple peers.
+///
+/// [`NewBlock`](monero_wire::protocol::NewBlock) has been excluded as monerod has had fluffy blocks for a while and
+/// Cuprate sets fluffy blocks as a requirement during handshakes.
+pub enum BroadcastRequest<N: NetworkZone> {
+    /// Broadcast a block to the network. The block will be broadcast as a fluffy block to all peers.
+    Block {
+        /// The block.
+        block_bytes: Bytes,
+        /// The current chain height - will be 1 more than the block's height.
+        current_blockchain_height: u64,
+    },
+    /// Broadcast transactions to the network. If a [`ConnectionDirection`] is set the transaction
+    /// will only be broadcast to that sub-set of peers, if it is [`None`] then the transaction will
+    /// be broadcast to all peers.
+    Transaction {
+        /// The serialised tx to broadcast.
+        tx_bytes: Bytes,
+        /// The direction of peers to broadcast this tx to, if [`None`] it will be sent to all peers.
+        direction: Option<ConnectionDirection>,
+        /// The peer on this network that told us about the tx.
+        received_from: Option<InternalPeerID<N::Addr>>,
+    },
+}
+
+pub struct BroadcastSvc<N: NetworkZone> {
+    new_block_watch: watch::Sender<NewBlockInfo>,
+    tx_broadcast_channel_outbound: broadcast::Sender<BroadcastTxInfo<N>>,
+    tx_broadcast_channel_inbound: broadcast::Sender<BroadcastTxInfo<N>>,
+}
+
+impl<N: NetworkZone> Service<BroadcastRequest<N>> for BroadcastSvc<N> {
+    type Response = ();
+    type Error = std::convert::Infallible;
+    type Future = Ready<Result<(), std::convert::Infallible>>;
+
+    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        Poll::Ready(Ok(()))
+    }
+
+    fn call(&mut self, req: BroadcastRequest<N>) -> Self::Future {
+        match req {
+            BroadcastRequest::Block {
+                block_bytes,
+                current_blockchain_height,
+            } => {
+                tracing::debug!(
+                    "queuing block at chain height {current_blockchain_height} for broadcast"
+                );
+
+                self.new_block_watch.send_replace(NewBlockInfo {
+                    block_bytes,
+                    current_blockchain_height,
+                });
+            }
+            BroadcastRequest::Transaction {
+                tx_bytes,
+                received_from,
+                direction,
+            } => {
+                let new_tx_info = BroadcastTxInfo {
+                    tx: tx_bytes,
+                    received_from,
+                };
+
+                // An error here means _all_ receivers were dropped which we assume will never happen.
+                let _ = match direction {
+                    Some(ConnectionDirection::InBound) => {
+                        self.tx_broadcast_channel_inbound.send(new_tx_info)
+                    }
+                    Some(ConnectionDirection::OutBound) => {
+                        self.tx_broadcast_channel_outbound.send(new_tx_info)
+                    }
+                    None => {
+                        let _ = self.tx_broadcast_channel_outbound.send(new_tx_info.clone());
+                        self.tx_broadcast_channel_inbound.send(new_tx_info)
+                    }
+                };
+            }
+        }
+
+        ready(Ok(()))
+    }
+}
+
+/// A wrapper type that impls [`Clone`] for [`broadcast::Receiver`].
+///
+/// The clone impl just calls [`Receiver::resubscribe`](broadcast::Receiver::resubscribe), which isn't _exactly_
+/// a clone but is what we need for our use case.
+struct CloneableBroadcastReceiver<T: Clone>(broadcast::Receiver<T>);
+
+impl<T: Clone> Clone for CloneableBroadcastReceiver<T> {
+    fn clone(&self) -> Self {
+        Self(self.0.resubscribe())
+    }
+}
+
+/// A new block to broadcast.
+#[derive(Clone)]
+struct NewBlockInfo {
+    /// The block.
+    block_bytes: Bytes,
+    /// The current chain height - will be 1 more than the block's height.
+    current_blockchain_height: u64,
+}
+
+/// A new transaction to broadcast.
+#[derive(Clone)]
+struct BroadcastTxInfo<N: NetworkZone> {
+    /// The tx.
+    tx: Bytes,
+    /// The peer that sent us this tx (if the peer is on this network).
+    received_from: Option<InternalPeerID<N::Addr>>,
+}
+
+/// A [`Stream`] that returns [`BroadcastMessage`] to broadcast to a peer.
+///
+/// This is given to the connection task to await on for broadcast messages.
+#[pin_project::pin_project]
+pub struct BroadcastMessageStream<N: NetworkZone> {
+    /// The peer that is holding this stream.
+    addr: InternalPeerID<N::Addr>,
+
+    /// The channel where new blocks are received.
+    #[pin]
+    new_block_watch: WatchStream<NewBlockInfo>,
+    /// The channel where txs to broadcast are received.
+    tx_broadcast_channel: broadcast::Receiver<BroadcastTxInfo<N>>,
+
+    /// The distribution to generate the wait time before the next transaction
+    /// diffusion flush.
+    diffusion_flush_dist: Exp<f64>,
+    /// A [`Sleep`] that will awake when it's time to broadcast txs.
+    #[pin]
+    next_flush: Sleep,
+}
+
+impl<N: NetworkZone> BroadcastMessageStream<N> {
+    /// Creates a new [`BroadcastMessageStream`]
+    fn new(
+        addr: InternalPeerID<N::Addr>,
+        diffusion_flush_dist: Exp<f64>,
+        new_block_watch: watch::Receiver<NewBlockInfo>,
+        tx_broadcast_channel: broadcast::Receiver<BroadcastTxInfo<N>>,
+    ) -> Self {
+        let next_flush = Instant::now()
+            + Duration::from_secs_f64(diffusion_flush_dist.sample(&mut thread_rng()));
+
+        Self {
+            addr,
+            // We don't want to broadcast the message currently in the queue.
+            new_block_watch: WatchStream::from_changes(new_block_watch),
+            tx_broadcast_channel,
+            diffusion_flush_dist,
+            next_flush: sleep_until(next_flush),
+        }
+    }
+}
+
+impl<N: NetworkZone> Stream for BroadcastMessageStream<N> {
+    type Item = BroadcastMessage;
+
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let mut this = self.project();
+
+        // Prioritise blocks.
+        if let Poll::Ready(res) = this.new_block_watch.poll_next(cx) {
+            let Some(block) = res else {
+                return Poll::Ready(None);
+            };
+
+            let block_mes = NewFluffyBlock {
+                b: BlockCompleteEntry {
+                    pruned: false,
+                    block: block.block_bytes,
+                    // This is a full fluffy block, so these values do not need to be set.
+                    block_weight: 0,
+                    txs: TransactionBlobs::None,
+                },
+                current_blockchain_height: block.current_blockchain_height,
+            };
+
+            return Poll::Ready(Some(BroadcastMessage::NewFluffyBlock(block_mes)));
+        }
+
+        ready!(this.next_flush.as_mut().poll(cx));
+
+        let (txs, more_available) = get_txs_to_broadcast::<N>(this.addr, this.tx_broadcast_channel);
+
+        let next_flush = if more_available {
+            // If there are more txs to broadcast then set the next flush for now so we get woken up straight away.
+            Instant::now()
+        } else {
+            Instant::now()
+                + Duration::from_secs_f64(this.diffusion_flush_dist.sample(&mut thread_rng()))
+        };
+
+        let next_flush = sleep_until(next_flush);
+        this.next_flush.set(next_flush);
+
+        if let Some(txs) = txs {
+            tracing::debug!(
+                "Diffusion flush timer expired, diffusing {} txs",
+                txs.txs.len()
+            );
+            // no need to poll next_flush as we are ready now.
+            Poll::Ready(Some(BroadcastMessage::NewTransaction(txs)))
+        } else {
+            tracing::trace!("Diffusion flush timer expired but no txs to diffuse");
+            // poll next_flush now to register the waker with it
+            // the waker will already be registered with the block broadcast channel.
+            let _ = this.next_flush.poll(cx);
+            Poll::Pending
+        }
+    }
+}
+
+/// Returns a list of new transactions to broadcast and a [`bool`] for if there are more txs in the queue
+/// that won't fit in the current batch.
+fn get_txs_to_broadcast<N: NetworkZone>(
+    addr: &InternalPeerID<N::Addr>,
+    broadcast_rx: &mut broadcast::Receiver<BroadcastTxInfo<N>>,
+) -> (Option<NewTransactions>, bool) {
+    let mut new_txs = NewTransactions {
+        txs: vec![],
+        dandelionpp_fluff: true,
+        padding: Bytes::new(),
+    };
+    let mut total_size = 0;
+
+    loop {
+        match broadcast_rx.try_recv() {
+            Ok(txs) => {
+                if txs.received_from.is_some_and(|from| &from == addr) {
+                    // If this peer is the one that sent us this tx, don't echo it back to them.
+                    continue;
+                }
+
+                total_size += txs.tx.len();
+
+                new_txs.txs.push(txs.tx);
+
+                if total_size > SOFT_TX_MESSAGE_SIZE_SIZE_LIMIT {
+                    return (Some(new_txs), true);
+                }
+            }
+            Err(e) => match e {
+                TryRecvError::Empty | TryRecvError::Closed => {
+                    if new_txs.txs.is_empty() {
+                        return (None, false);
+                    }
+                    return (Some(new_txs), false);
+                }
+                TryRecvError::Lagged(lag) => {
+                    tracing::debug!(
+                        "{lag} transaction broadcast messages were missed, continuing."
+                    );
+                    continue;
+                }
+            },
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::{pin::pin, time::Duration};
+
+    use bytes::Bytes;
+    use futures::StreamExt;
+    use tokio::time::timeout;
+    use tower::{Service, ServiceExt};
+
+    use cuprate_test_utils::test_netzone::TestNetZone;
+    use monero_p2p::{client::InternalPeerID, BroadcastMessage, ConnectionDirection};
+
+    use super::{init_broadcast_channels, BroadcastConfig, BroadcastRequest};
+
+    const TEST_CONFIG: BroadcastConfig = BroadcastConfig {
+        diffusion_flush_average_seconds_outbound: Duration::from_millis(100),
+        diffusion_flush_average_seconds_inbound: Duration::from_millis(200),
+    };
+
+    #[tokio::test]
+    async fn tx_broadcast_direction_correct() {
+        let (mut brcst, outbound_mkr, inbound_mkr) =
+            init_broadcast_channels::<TestNetZone<true, true, true>>(TEST_CONFIG);
+
+        let mut outbound_stream = pin!(outbound_mkr(InternalPeerID::Unknown(1)));
+        let mut inbound_stream = pin!(inbound_mkr(InternalPeerID::Unknown(1)));
+
+        // Outbound should get 1 and 3, inbound should get 2 and 3.
+
+        brcst
+            .ready()
+            .await
+            .unwrap()
+            .call(BroadcastRequest::Transaction {
+                tx_bytes: Bytes::from_static(&[1]),
+                direction: Some(ConnectionDirection::OutBound),
+                received_from: None,
+            })
+            .await
+            .unwrap();
+
+        brcst
+            .ready()
+            .await
+            .unwrap()
+            .call(BroadcastRequest::Transaction {
+                tx_bytes: Bytes::from_static(&[2]),
+                direction: Some(ConnectionDirection::InBound),
+                received_from: None,
+            })
+            .await
+            .unwrap();
+
+        brcst
+            .ready()
+            .await
+            .unwrap()
+            .call(BroadcastRequest::Transaction {
+                tx_bytes: Bytes::from_static(&[3]),
+                direction: None,
+                received_from: None,
+            })
+            .await
+            .unwrap();
+
+        let match_tx = |mes, txs| match mes {
+            BroadcastMessage::NewTransaction(tx) => assert_eq!(tx.txs.as_slice(), txs),
+            _ => panic!("Block broadcast?"),
+        };
+
+        let next = outbound_stream.next().await.unwrap();
+        let txs = [Bytes::from_static(&[1]), Bytes::from_static(&[3])];
+        match_tx(next, &txs);
+
+        let next = inbound_stream.next().await.unwrap();
+        match_tx(next, &[Bytes::from_static(&[2]), Bytes::from_static(&[3])]);
+    }
+
+    #[tokio::test]
+    async fn block_broadcast_sent_to_all() {
+        let (mut brcst, outbound_mkr, inbound_mkr) =
+            init_broadcast_channels::<TestNetZone<true, true, true>>(TEST_CONFIG);
+
+        let mut outbound_stream = pin!(outbound_mkr(InternalPeerID::Unknown(1)));
+        let mut inbound_stream = pin!(inbound_mkr(InternalPeerID::Unknown(1)));
+
+        brcst
+            .ready()
+            .await
+            .unwrap()
+            .call(BroadcastRequest::Block {
+                block_bytes: Default::default(),
+                current_blockchain_height: 0,
+            })
+            .await
+            .unwrap();
+
+        let next = outbound_stream.next().await.unwrap();
+        assert!(matches!(next, BroadcastMessage::NewFluffyBlock(_)));
+
+        let next = inbound_stream.next().await.unwrap();
+        assert!(matches!(next, BroadcastMessage::NewFluffyBlock(_)));
+    }
+
+    #[tokio::test]
+    async fn tx_broadcast_skipped_for_received_from_peer() {
+        let (mut brcst, outbound_mkr, inbound_mkr) =
+            init_broadcast_channels::<TestNetZone<true, true, true>>(TEST_CONFIG);
+
+        let mut outbound_stream = pin!(outbound_mkr(InternalPeerID::Unknown(1)));
+        let mut outbound_stream_from = pin!(outbound_mkr(InternalPeerID::Unknown(0)));
+
+        let mut inbound_stream = pin!(inbound_mkr(InternalPeerID::Unknown(1)));
+        let mut inbound_stream_from = pin!(inbound_mkr(InternalPeerID::Unknown(0)));
+
+        brcst
+            .ready()
+            .await
+            .unwrap()
+            .call(BroadcastRequest::Transaction {
+                tx_bytes: Bytes::from_static(&[1]),
+                direction: None,
+                received_from: Some(InternalPeerID::Unknown(0)),
+            })
+            .await
+            .unwrap();
+
+        let match_tx = |mes, txs| match mes {
+            BroadcastMessage::NewTransaction(tx) => assert_eq!(tx.txs.as_slice(), txs),
+            _ => panic!("Block broadcast?"),
+        };
+
+        let next = outbound_stream.next().await.unwrap();
+        let txs = [Bytes::from_static(&[1])];
+        match_tx(next, &txs);
+
+        let next = inbound_stream.next().await.unwrap();
+        match_tx(next, &[Bytes::from_static(&[1])]);
+
+        // Make sure the streams with the same id as the one we said sent the tx do not get the tx to broadcast.
+        assert!(timeout(
+            Duration::from_secs(2),
+            futures::future::select(inbound_stream_from.next(), outbound_stream_from.next())
+        )
+        .await
+        .is_err())
+    }
+}
diff --git a/p2p/cuprate-p2p/src/client_pool.rs b/p2p/cuprate-p2p/src/client_pool.rs
new file mode 100644
index 00000000..5dc7d1b9
--- /dev/null
+++ b/p2p/cuprate-p2p/src/client_pool.rs
@@ -0,0 +1,148 @@
+//! # Client Pool.
+//!
+//! The [`ClientPool`], is a pool of currently connected peers that can be pulled from.
+//! It does _not_ necessarily contain every connected peer as another place could have
+//! taken a peer from the pool.
+//!
+//! When taking peers from the pool they are wrapped in [`ClientPoolDropGuard`], which
+//! returns the peer to the pool when it is dropped.
+//!
+//! Internally the pool is a [`DashMap`] which means care should be taken in `async` code
+//! as internally this uses blocking RwLocks.
+//!
+use std::sync::Arc;
+
+use dashmap::{DashMap, DashSet};
+use tokio::sync::mpsc;
+
+use monero_p2p::{
+    client::{Client, InternalPeerID},
+    handles::ConnectionHandle,
+    ConnectionDirection, NetworkZone,
+};
+
+pub(crate) mod disconnect_monitor;
+mod drop_guard_client;
+
+pub use drop_guard_client::ClientPoolDropGuard;
+
+/// The client pool, which holds currently connected free peers.
+///
+/// See the [module docs](self) for more.
+pub struct ClientPool<N: NetworkZone> {
+    /// The connected [`Client`]s.
+    clients: DashMap<InternalPeerID<N::Addr>, Client<N>>,
+    /// A set of outbound clients, as these allow accesses/mutation from different threads,
+    /// a peer ID in here does not mean the peer is necessarily in `clients` as it could have been removed
+    /// by another thread. However, if the peer is in both here and `clients` it is definitely
+    /// an outbound peer.
+    outbound_clients: DashSet<InternalPeerID<N::Addr>>,
+
+    /// A channel to send new peer ids down to monitor for disconnect.
+    new_connection_tx: mpsc::UnboundedSender<(ConnectionHandle, InternalPeerID<N::Addr>)>,
+}
+
+impl<N: NetworkZone> ClientPool<N> {
+    /// Returns a new [`ClientPool`] wrapped in an [`Arc`].
+    pub fn new() -> Arc<ClientPool<N>> {
+        let (tx, rx) = mpsc::unbounded_channel();
+
+        let pool = Arc::new(ClientPool {
+            clients: DashMap::new(),
+            outbound_clients: DashSet::new(),
+            new_connection_tx: tx,
+        });
+
+        tokio::spawn(disconnect_monitor::disconnect_monitor(rx, pool.clone()));
+
+        pool
+    }
+
+    /// Adds a [`Client`] to the pool, the client must have previously been taken from the
+    /// pool.
+    ///
+    /// See [`ClientPool::add_new_client`] to add a [`Client`] which was not taken from the pool before.
+    ///
+    /// # Panics
+    /// This function panics if `client` already exists in the pool.
+    fn add_client(&self, client: Client<N>) {
+        let handle = client.info.handle.clone();
+        let id = client.info.id;
+
+        // Fast path: if the client is disconnected don't add it to the peer set.
+        if handle.is_closed() {
+            return;
+        }
+
+        if client.info.direction == ConnectionDirection::OutBound {
+            self.outbound_clients.insert(id);
+        }
+
+        let res = self.clients.insert(id, client);
+        assert!(res.is_none());
+
+        // We have to check this again otherwise we could have a race condition where a
+        // peer is disconnected after the first check, the disconnect monitor tries to remove it,
+        // and then it is added to the pool.
+        if handle.is_closed() {
+            self.remove_client(&id);
+        }
+    }
+
+    /// Adds a _new_ [`Client`] to the pool, this client should be a new connection, and not already
+    /// from the pool.
+    ///
+    /// # Panics
+    /// This function panics if `client` already exists in the pool.
+    pub fn add_new_client(&self, client: Client<N>) {
+        self.new_connection_tx
+            .send((client.info.handle.clone(), client.info.id))
+            .unwrap();
+
+        self.add_client(client);
+    }
+
+    /// Remove a [`Client`] from the pool.
+    ///
+    /// [`None`] is returned if the client did not exist in the pool.
+    fn remove_client(&self, peer: &InternalPeerID<N::Addr>) -> Option<Client<N>> {
+        self.outbound_clients.remove(peer);
+
+        self.clients.remove(peer).map(|(_, client)| client)
+    }
+
+    /// Borrows a [`Client`] from the pool.
+    ///
+    /// The [`Client`] is wrapped in [`ClientPoolDropGuard`] which
+    /// will return the client to the pool when it's dropped.
+    ///
+    /// See [`Self::borrow_clients`] for borrowing multiple clients.
+    pub fn borrow_client(
+        self: &Arc<Self>,
+        peer: &InternalPeerID<N::Addr>,
+    ) -> Option<ClientPoolDropGuard<N>> {
+        self.remove_client(peer).map(|client| ClientPoolDropGuard {
+            pool: Arc::clone(self),
+            client: Some(client),
+        })
+    }
+
+    /// Borrows multiple [`Client`]s from the pool.
+    ///
+    /// Note that the returned iterator is not guaranteed to contain every peer asked for.
+    ///
+    /// See [`Self::borrow_client`] for borrowing a single client.
+    #[allow(private_interfaces)] // TODO: Remove me when 2024 Rust
+    pub fn borrow_clients<'a, 'b>(
+        self: &'a Arc<Self>,
+        peers: &'b [InternalPeerID<N::Addr>],
+    ) -> impl Iterator<Item = ClientPoolDropGuard<N>> + Captures<(&'a (), &'b ())> {
+        peers.iter().filter_map(|peer| self.borrow_client(peer))
+    }
+}
+
+/// TODO: Remove me when 2024 Rust
+///
+/// <https://rust-lang.github.io/rfcs/3498-lifetime-capture-rules-2024.html#the-captures-trick>
+trait Captures<U> {}
+impl<T: ?Sized, U> Captures<U> for T {}
diff --git a/p2p/cuprate-p2p/src/client_pool/disconnect_monitor.rs b/p2p/cuprate-p2p/src/client_pool/disconnect_monitor.rs
new file mode 100644
index 00000000..4e5ec081
--- /dev/null
+++ b/p2p/cuprate-p2p/src/client_pool/disconnect_monitor.rs
@@ -0,0 +1,72 @@
+//! # Disconnect Monitor
+//!
+//! This module contains the [`disconnect_monitor`] task, which monitors connected peers for disconnection
+//! and then removes them from the [`ClientPool`] if they do.
+use std::{
+    future::Future,
+    pin::Pin,
+    sync::Arc,
+    task::{Context, Poll},
+};
+
+use futures::{stream::FuturesUnordered, StreamExt};
+use tokio::sync::mpsc;
+use tokio_util::sync::WaitForCancellationFutureOwned;
+use tracing::instrument;
+
+use monero_p2p::{client::InternalPeerID, handles::ConnectionHandle, NetworkZone};
+
+use super::ClientPool;
+
+/// The disconnect monitor task.
+#[instrument(level = "info", skip_all)]
+pub async fn disconnect_monitor<N: NetworkZone>(
+    mut new_connection_rx: mpsc::UnboundedReceiver<(ConnectionHandle, InternalPeerID<N::Addr>)>,
+    client_pool: Arc<ClientPool<N>>,
+) {
+    tracing::info!("Starting peer disconnect monitor.");
+
+    let mut futs: FuturesUnordered<PeerDisconnectFut<N>> = FuturesUnordered::new();
+
+    loop {
+        tokio::select! {
+            Some((con_handle, peer_id)) = new_connection_rx.recv() => {
+                tracing::debug!("Monitoring {peer_id} for disconnect");
+                futs.push(PeerDisconnectFut {
+                    closed_fut: con_handle.closed(),
+                    peer_id: Some(peer_id),
+                });
+            }
+            Some(peer_id) = futs.next() => {
+                tracing::debug!("{peer_id} has disconnected, removing from client pool.");
+                client_pool.remove_client(&peer_id);
+            }
+            else => {
+                tracing::info!("Peer disconnect monitor shutting down.");
+                return;
+            }
+        }
+    }
+}
+
+/// A [`Future`] that resolves when a peer disconnects.
+#[pin_project::pin_project]
+pub(crate) struct PeerDisconnectFut<N: NetworkZone> {
+    /// The inner [`Future`] that resolves when a peer disconnects.
+    #[pin]
+    pub(crate) closed_fut: WaitForCancellationFutureOwned,
+    /// The peers ID.
+    pub(crate) peer_id: Option<InternalPeerID<N::Addr>>,
+}
+
+impl<N: NetworkZone> Future for PeerDisconnectFut<N> {
+    type Output = InternalPeerID<N::Addr>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let this = self.project();
+
+        this.closed_fut
+            .poll(cx)
+            .map(|_| this.peer_id.take().unwrap())
+    }
+}
diff --git a/p2p/cuprate-p2p/src/client_pool/drop_guard_client.rs b/p2p/cuprate-p2p/src/client_pool/drop_guard_client.rs
new file mode 100644
index 00000000..5555d713
--- /dev/null
+++ b/p2p/cuprate-p2p/src/client_pool/drop_guard_client.rs
@@ -0,0 +1,41 @@
+use std::{
+    ops::{Deref, DerefMut},
+    sync::Arc,
+};
+
+use monero_p2p::{client::Client, NetworkZone};
+
+use crate::client_pool::ClientPool;
+
+/// A wrapper around [`Client`] which returns the client to the [`ClientPool`] when dropped.
+pub struct ClientPoolDropGuard<N: NetworkZone> {
+    /// The [`ClientPool`] to return the peer to.
+    pub(super) pool: Arc<ClientPool<N>>,
+    /// The [`Client`].
+    ///
+    /// This is set to [`Some`] when this guard is created, then
+    /// [`take`](Option::take)n and returned to the pool when dropped.
+    pub(super) client: Option<Client<N>>,
+}
+
+impl<N: NetworkZone> Deref for ClientPoolDropGuard<N> {
+    type Target = Client<N>;
+
+    fn deref(&self) -> &Self::Target {
+        self.client.as_ref().unwrap()
+    }
+}
+
+impl<N: NetworkZone> DerefMut for ClientPoolDropGuard<N> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        self.client.as_mut().unwrap()
+    }
+}
+
+impl<N: NetworkZone> Drop for ClientPoolDropGuard<N> {
+    fn drop(&mut self) {
+        let client = self.client.take().unwrap();
+
+        self.pool.add_client(client);
+    }
+}
diff --git a/p2p/cuprate-p2p/src/config.rs b/p2p/cuprate-p2p/src/config.rs
new file mode 100644
index 00000000..31b5ab12
--- /dev/null
+++ b/p2p/cuprate-p2p/src/config.rs
@@ -0,0 +1,12 @@
+/// P2P config.
+#[derive(Clone, Debug)]
+pub struct P2PConfig {
+    /// The number of outbound connections to make and try keep.
+    pub outbound_connections: usize,
+    /// The amount of extra connections we can make if we are under load from the rest of Cuprate.
+    pub extra_outbound_connections: usize,
+    /// The percent of outbound peers that should be gray aka never connected to before.
+    ///
+    /// Only values 0..=1 are valid.
+    pub gray_peers_percent: f64,
+}
diff --git a/p2p/cuprate-p2p/src/connection_maintainer.rs b/p2p/cuprate-p2p/src/connection_maintainer.rs
new file mode 100644
index 00000000..bff4b9d5
--- /dev/null
+++ b/p2p/cuprate-p2p/src/connection_maintainer.rs
@@ -0,0 +1,291 @@
+//! Outbound Connection Maintainer.
+//!
+//! This module handles maintaining the number of outbound connections defined in the [`P2PConfig`].
+//! It also handles making extra connections when the peer set is under load or when we need data that
+//! no connected peer has.
+use std::sync::Arc;
+
+use rand::{distributions::Bernoulli, prelude::*};
+use tokio::{
+    sync::{mpsc, OwnedSemaphorePermit, Semaphore},
+    task::JoinSet,
+    time::{sleep, timeout},
+};
+use tower::{Service, ServiceExt};
+use tracing::instrument;
+
+use monero_p2p::{
+    client::{Client, ConnectRequest, HandshakeError},
+    services::{AddressBookRequest, AddressBookResponse},
+    AddressBook, NetworkZone,
+};
+
+use crate::{
+    client_pool::ClientPool,
+    config::P2PConfig,
+    constants::{HANDSHAKE_TIMEOUT, MAX_SEED_CONNECTIONS, OUTBOUND_CONNECTION_ATTEMPT_TIMEOUT},
+};
+
+enum OutboundConnectorError {
+    MaxConnections,
+    FailedToConnectToSeeds,
+    NoAvailablePeers,
+}
+
+/// A request from the peer set to make an outbound connection.
+///
+/// This will only be sent when the peer set is under load from the rest of Cuprate or the peer
+/// set needs specific data that none of the currently connected peers have.
+pub struct MakeConnectionRequest {
+    /// The block needed that no connected peers have due to pruning.
+    block_needed: Option<u64>,
+}
+
+/// The outbound connection count keeper.
+///
+/// This handles maintaining a minimum number of connections and making extra connections when needed, up to a maximum.
+pub struct OutboundConnectionKeeper<N: NetworkZone, A, C> {
+    /// The pool of currently connected peers.
+    pub client_pool: Arc<ClientPool<N>>,
+    /// The channel that tells us to make new _extra_ outbound connections.
+    pub make_connection_rx: mpsc::Receiver<MakeConnectionRequest>,
+    /// The address book service
+    pub address_book_svc: A,
+    /// The service to connect to a specific peer.
+    pub connector_svc: C,
+    /// A semaphore to keep the amount of outbound peers constant.
+    pub outbound_semaphore: Arc<Semaphore>,
+    /// The amount of peers we connected to because we needed more peers. If the `outbound_semaphore`
+    /// is full, and we need to connect to more peers for blocks or because not enough peers are ready
+    /// we add a permit to the semaphore and keep track here, up to a value in config.
+    pub extra_peers: usize,
+    /// The p2p config.
+    pub config: P2PConfig,
+    /// The [`Bernoulli`] distribution, when sampled will return true if we should connect to a gray peer or
+    /// false if we should connect to a white peer.
+    ///
+    /// This is weighted to the percentage given in `config`.
+    pub peer_type_gen: Bernoulli,
+}
+
+impl<N, A, C> OutboundConnectionKeeper<N, A, C>
+where
+    N: NetworkZone,
+    A: AddressBook<N>,
+    C: Service<ConnectRequest<N>, Response = Client<N>, Error = HandshakeError>,
+    C::Future: Send + 'static,
+{
+    pub fn new(
+        config: P2PConfig,
+        client_pool: Arc<ClientPool<N>>,
+        make_connection_rx: mpsc::Receiver<MakeConnectionRequest>,
+        address_book_svc: A,
+        connector_svc: C,
+    ) -> Self {
+        let peer_type_gen = Bernoulli::new(config.gray_peers_percent)
+            .expect("Gray peer percent is incorrect should be 0..=1");
+
+        Self {
+            client_pool,
+            make_connection_rx,
+            address_book_svc,
+            connector_svc,
+            outbound_semaphore: Arc::new(Semaphore::new(config.outbound_connections)),
+            extra_peers: 0,
+            config,
+            peer_type_gen,
+        }
+    }
+
+    /// Connects to random seeds to get peers and immediately disconnects
+    #[instrument(level = "info", skip(self))]
+    async fn connect_to_random_seeds(&mut self) -> Result<(), OutboundConnectorError> {
+        let seeds = N::SEEDS.choose_multiple(&mut thread_rng(), MAX_SEED_CONNECTIONS);
+
+        if seeds.len() == 0 {
+            panic!("No seed nodes available to get peers from");
+        }
+
+        // This isn't really needed here to limit connections as the seed nodes will be dropped when we have got
+        // peers from them.
+        let semaphore = Arc::new(Semaphore::new(seeds.len()));
+
+        let mut allowed_errors = seeds.len();
+
+        let mut handshake_futs = JoinSet::new();
+
+        for seed in seeds {
+            tracing::info!("Getting peers from seed node: {}", seed);
+
+            let fut = timeout(
+                HANDSHAKE_TIMEOUT,
+                self.connector_svc
+                    .ready()
+                    .await
+                    .expect("Connector had an error in `poll_ready`")
+                    .call(ConnectRequest {
+                        addr: *seed,
+                        permit: semaphore
+                            .clone()
+                            .try_acquire_owned()
+                            .expect("This must have enough permits as we just set the amount."),
+                    }),
+            );
+            // Spawn the handshake on a separate task with a timeout, so we don't get stuck connecting to a peer.
+            handshake_futs.spawn(fut);
+        }
+
+        while let Some(res) = handshake_futs.join_next().await {
+            if matches!(res, Err(_) | Ok(Err(_)) | Ok(Ok(Err(_)))) {
+                allowed_errors -= 1;
+            }
+        }
+
+        if allowed_errors == 0 {
+            Err(OutboundConnectorError::FailedToConnectToSeeds)
+        } else {
+            Ok(())
+        }
+    }
+
+    /// Connects to a given outbound peer.
+    #[instrument(level = "info", skip(self, permit), fields(%addr))]
+    async fn connect_to_outbound_peer(&mut self, permit: OwnedSemaphorePermit, addr: N::Addr) {
+        let client_pool = self.client_pool.clone();
+        let connection_fut = self
+            .connector_svc
+            .ready()
+            .await
+            .expect("Connector had an error in `poll_ready`")
+            .call(ConnectRequest { addr, permit });
+
+        tokio::spawn(async move {
+            if let Ok(Ok(peer)) = timeout(HANDSHAKE_TIMEOUT, connection_fut).await {
+                client_pool.add_new_client(peer);
+            }
+        });
+    }
+
+    /// Handles a request from the peer set for more peers.
+    async fn handle_peer_request(
+        &mut self,
+        req: &MakeConnectionRequest,
+    ) -> Result<(), OutboundConnectorError> {
+        // try to get a permit.
+        let permit = self
+            .outbound_semaphore
+            .clone()
+            .try_acquire_owned()
+            .or_else(|_| {
+                // if we can't get a permit add one if we are below the max number of connections.
+                if self.extra_peers >= self.config.extra_outbound_connections {
+                    // If we can't add a permit return an error.
+                    Err(OutboundConnectorError::MaxConnections)
+                } else {
+                    self.outbound_semaphore.add_permits(1);
+                    self.extra_peers += 1;
+                    Ok(self.outbound_semaphore.clone().try_acquire_owned().unwrap())
+                }
+            })?;
+
+        // try to get a random peer on any network zone from the address book.
+        let peer = self
+            .address_book_svc
+            .ready()
+            .await
+            .expect("Error in address book!")
+            .call(AddressBookRequest::TakeRandomPeer {
+                height: req.block_needed,
+            })
+            .await;
+
+        match peer {
+            Err(_) => {
+                // TODO: We should probably send peer requests to our connected peers rather than go to seeds.
+                tracing::warn!("No peers in address book which are available and have the data we need. Getting peers from seed nodes.");
+
+                self.connect_to_random_seeds().await?;
+                Err(OutboundConnectorError::NoAvailablePeers)
+            }
+
+            Ok(AddressBookResponse::Peer(peer)) => {
+                self.connect_to_outbound_peer(permit, peer.adr).await;
+                Ok(())
+            }
+            Ok(_) => panic!("peer list sent incorrect response!"),
+        }
+    }
+
+    /// Handles a free permit, by either connecting to a new peer or by removing a permit if we are above the
+    /// minimum number of outbound connections.
+    #[instrument(level = "debug", skip(self, permit))]
+    async fn handle_free_permit(
+        &mut self,
+        permit: OwnedSemaphorePermit,
+    ) -> Result<(), OutboundConnectorError> {
+        if self.extra_peers > 0 {
+            tracing::debug!(
+                "Permit available but we are over the minimum number of peers, forgetting permit."
+            );
+            permit.forget();
+            self.extra_peers -= 1;
+            return Ok(());
+        }
+
+        tracing::debug!("Permit available, making outbound connection.");
+
+        let req = if self.peer_type_gen.sample(&mut thread_rng()) {
+            AddressBookRequest::TakeRandomGrayPeer { height: None }
+        } else {
+            // This will try white peers first then gray.
+            AddressBookRequest::TakeRandomPeer { height: None }
+        };
+
+        let Ok(AddressBookResponse::Peer(peer)) = self
+            .address_book_svc
+            .ready()
+            .await
+            .expect("Error in address book!")
+            .call(req)
+            .await
+        else {
+            tracing::warn!("No peers in peer list to make connection to.");
+            self.connect_to_random_seeds().await?;
+            return Err(OutboundConnectorError::NoAvailablePeers);
+        };
+
+        self.connect_to_outbound_peer(permit, peer.adr).await;
+        Ok(())
+    }
+
+    /// Runs the outbound connection count keeper.
+    pub async fn run(mut self) {
+        tracing::info!(
+            "Starting outbound connection maintainer, target outbound connections: {}",
+            self.config.outbound_connections
+        );
+
+        loop {
+            tokio::select! {
+                biased;
+                peer_req = self.make_connection_rx.recv() => {
+                    let Some(peer_req) = peer_req else {
+                        tracing::info!("Shutting down outbound connector, make connection channel closed.");
+                        return;
+                    };
+                    // We can't really do much about errors in this function.
+                    let _ = self.handle_peer_request(&peer_req).await;
+                },
+                // This future is not cancellation safe as you will lose your space in the queue but as we are the only place
+                // that actually requires permits that should be ok.
+                Ok(permit) = self.outbound_semaphore.clone().acquire_owned() => {
+                    if self.handle_free_permit(permit).await.is_err() {
+                        // if we got an error then we still have a permit free so to prevent this from just looping
+                        // uncontrollably add a timeout.
+                        sleep(OUTBOUND_CONNECTION_ATTEMPT_TIMEOUT).await;
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/p2p/cuprate-p2p/src/constants.rs b/p2p/cuprate-p2p/src/constants.rs
new file mode 100644
index 00000000..8ec02759
--- /dev/null
+++ b/p2p/cuprate-p2p/src/constants.rs
@@ -0,0 +1,41 @@
+use std::time::Duration;
+
+/// The timeout we set on handshakes.
+pub(crate) const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(30);
+
+/// The maximum amount of connections to make to seed nodes for when we need peers.
+pub(crate) const MAX_SEED_CONNECTIONS: usize = 3;
+
+/// The timeout for when we fail to find a peer to connect to.
+pub(crate) const OUTBOUND_CONNECTION_ATTEMPT_TIMEOUT: Duration = Duration::from_secs(5);
+
+/// The duration of a short ban.
+pub(crate) const SHORT_BAN: Duration = Duration::from_secs(60 * 10);
+
+/// The default amount of time between inbound diffusion flushes.
+pub(crate) const DIFFUSION_FLUSH_AVERAGE_SECONDS_INBOUND: Duration = Duration::from_secs(5);
+
+/// The default amount of time between outbound diffusion flushes.
+pub(crate) const DIFFUSION_FLUSH_AVERAGE_SECONDS_OUTBOUND: Duration = Duration::from_millis(2500);
+
+/// The size limit on [`NewTransactions`](monero_wire::protocol::NewTransactions) messages that we create.
+pub(crate) const SOFT_TX_MESSAGE_SIZE_SIZE_LIMIT: usize = 10 * 1024 * 1024;
+
+/// The amount of transactions in the broadcast queue. When this value is hit, old transactions will be dropped from
+/// the queue.
+///
+/// Because of internal implementation details this value is _always_ hit, i.e. a transaction will not be dropped until
+/// 50 more transactions after it are added to the queue.
+pub(crate) const MAX_TXS_IN_BROADCAST_CHANNEL: usize = 50;
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Outbound diffusion flushes should be shorter than
+    /// inbound ones as we control these connections.
+    #[test]
+    fn outbound_diffusion_flush_shorter_than_inbound() {
+        assert!(DIFFUSION_FLUSH_AVERAGE_SECONDS_OUTBOUND < DIFFUSION_FLUSH_AVERAGE_SECONDS_INBOUND);
+    }
+}
diff --git a/p2p/cuprate-p2p/src/lib.rs b/p2p/cuprate-p2p/src/lib.rs
new file mode 100644
index 00000000..afa4c93a
--- /dev/null
+++ b/p2p/cuprate-p2p/src/lib.rs
@@ -0,0 +1,17 @@
+//! Cuprate's P2P Crate.
+//!
+//! This crate contains a [`ClientPool`](client_pool::ClientPool) which holds connected peers on a single [`NetworkZone`](monero_p2p::NetworkZone).
+//!
+//! This crate also contains the different routing methods that control how messages should be sent, i.e. broadcast to all,
+//! or send to a single peer.
+//!
+#![allow(dead_code)]
+
+mod broadcast;
+pub mod client_pool;
+pub mod config;
+pub mod connection_maintainer;
+mod constants;
+mod sync_states;
+
+pub use config::P2PConfig;
diff --git a/p2p/cuprate-p2p/src/sync_states.rs b/p2p/cuprate-p2p/src/sync_states.rs
new file mode 100644
index 00000000..9b8b3bd2
--- /dev/null
+++ b/p2p/cuprate-p2p/src/sync_states.rs
@@ -0,0 +1,427 @@
+//! # Sync States
+//!
+//! This module contains a [`PeerSyncSvc`], which keeps track of the claimed chain states of connected peers.
+//! This allows checking if we are behind and getting a list of peers who claim they are ahead.
+use std::{
+    cmp::Ordering,
+    collections::{BTreeMap, HashMap, HashSet},
+    future::{ready, Ready},
+    task::{Context, Poll},
+};
+
+use futures::{stream::FuturesUnordered, StreamExt};
+use tokio::sync::watch;
+use tower::Service;
+
+use monero_p2p::{
+    client::InternalPeerID,
+    handles::ConnectionHandle,
+    services::{PeerSyncRequest, PeerSyncResponse},
+    NetworkZone,
+};
+use monero_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT};
+use monero_wire::CoreSyncData;
+
+use crate::{client_pool::disconnect_monitor::PeerDisconnectFut, constants::SHORT_BAN};
+
+/// The highest claimed sync info from our connected peers.
+#[derive(Debug)]
+pub struct NewSyncInfo {
+    /// The peer's chain height.
+    chain_height: u64,
+    /// The peer's top block's hash.
+    top_hash: [u8; 32],
+    /// The peer's cumulative difficulty.
+    cumulative_difficulty: u128,
+}
+
+/// A service that keeps track of our peers' blockchains.
+///
+/// This is the service that handles:
+/// 1. Finding out if we need to sync
+/// 1. Giving the peers that should be synced _from_, to the requester
+pub struct PeerSyncSvc<N: NetworkZone> {
+    /// A map of cumulative difficulties to peers.
+    cumulative_difficulties: BTreeMap<u128, HashSet<InternalPeerID<N::Addr>>>,
+    /// A map of peers to cumulative difficulties.
+    peers: HashMap<InternalPeerID<N::Addr>, (u128, PruningSeed)>,
+    /// A watch channel for *a* top synced peer info.
+    new_height_watcher: watch::Sender<NewSyncInfo>,
+    /// The handle to the peer that has data in `new_height_watcher`.
+    last_peer_in_watcher_handle: Option<ConnectionHandle>,
+    /// A [`FuturesUnordered`] that resolves when a peer disconnects.
+    closed_connections: FuturesUnordered<PeerDisconnectFut<N>>,
+}
+
+impl<N: NetworkZone> PeerSyncSvc<N> {
+    /// Creates a new [`PeerSyncSvc`] with a [`Receiver`](watch::Receiver) that will be updated with
+    /// the highest seen sync data. This makes no guarantees about which peer will be chosen in case of a tie.
+    pub fn new() -> (Self, watch::Receiver<NewSyncInfo>) {
+        let (watch_tx, mut watch_rx) = watch::channel(NewSyncInfo {
+            chain_height: 0,
+            top_hash: [0; 32],
+            cumulative_difficulty: 0,
+        });
+
+        watch_rx.mark_unchanged();
+
+        (
+            Self {
+                cumulative_difficulties: BTreeMap::new(),
+                peers: HashMap::new(),
+                new_height_watcher: watch_tx,
+                last_peer_in_watcher_handle: None,
+                closed_connections: FuturesUnordered::new(),
+            },
+            watch_rx,
+        )
+    }
+
+    /// This function checks if any peers have disconnected, removing them if they have.
+    fn poll_disconnected(&mut self, cx: &mut Context<'_>) {
+        while let Poll::Ready(Some(peer_id)) = self.closed_connections.poll_next_unpin(cx) {
+            tracing::trace!("Peer {peer_id} disconnected, removing from peers sync info service.");
+            let (peer_cum_diff, _) = self.peers.remove(&peer_id).unwrap();
+
+            let cum_diff_peers = self
+                .cumulative_difficulties
+                .get_mut(&peer_cum_diff)
+                .unwrap();
+            cum_diff_peers.remove(&peer_id);
+            if cum_diff_peers.is_empty() {
+                // If this was the last peer remove the whole entry for this cumulative difficulty.
+                self.cumulative_difficulties.remove(&peer_cum_diff);
+            }
+        }
+    }
+
+    /// Returns a list of peers that claim to have a higher cumulative difficulty than `current_cum_diff`.
+    fn peers_to_sync_from(
+        &self,
+        current_cum_diff: u128,
+        block_needed: Option<u64>,
+    ) -> Vec<InternalPeerID<N::Addr>> {
+        self.cumulative_difficulties
+            .range((current_cum_diff + 1)..)
+            .flat_map(|(_, peers)| peers)
+            .filter(|peer| {
+                if let Some(block_needed) = block_needed {
+                    // we just use CRYPTONOTE_MAX_BLOCK_HEIGHT as the blockchain height, this only means
+                    // we don't take into account the tip blocks which are not pruned.
+                    self.peers
+                        .get(peer)
+                        .unwrap()
+                        .1
+                        .has_full_block(block_needed, CRYPTONOTE_MAX_BLOCK_HEIGHT)
+                } else {
+                    true
+                }
+            })
+            .copied()
+            .collect()
+    }
+
+    /// Updates a peers sync state.
+    fn update_peer_sync_info(
+        &mut self,
+        peer_id: InternalPeerID<N::Addr>,
+        handle: ConnectionHandle,
+        core_sync_data: CoreSyncData,
+    ) -> Result<(), tower::BoxError> {
+        tracing::trace!(
+            "Received new core sync data from peer, top hash: {}",
+            hex::encode(core_sync_data.top_id)
+        );
+
+        let new_cumulative_difficulty = core_sync_data.cumulative_difficulty();
+
+        if let Some((old_cum_diff, _)) = self.peers.get_mut(&peer_id) {
+            match (*old_cum_diff).cmp(&new_cumulative_difficulty) {
+                Ordering::Equal => {
+                    // If the cumulative difficulty of the peers chain hasn't changed then no need to update anything.
+                    return Ok(());
+                }
+                Ordering::Greater => {
+                    // This will only happen if a peer lowers its cumulative difficulty during the connection.
+                    // This won't happen if a peer re-syncs their blockchain as then the connection would have closed.
+                    tracing::debug!(
+                        "Peer's claimed cumulative difficulty has dropped, closing connection and banning peer for: {} seconds.", SHORT_BAN.as_secs()
+                    );
+                    handle.ban_peer(SHORT_BAN);
+                    return Err("Peers cumulative difficulty dropped".into());
+                }
+                Ordering::Less => (),
+            }
+
+            // Remove the old cumulative difficulty entry for this peer
+            let old_cum_diff_peers = self.cumulative_difficulties.get_mut(old_cum_diff).unwrap();
+            old_cum_diff_peers.remove(&peer_id);
+            if old_cum_diff_peers.is_empty() {
+                // If this was the last peer remove the whole entry for this cumulative difficulty.
+                self.cumulative_difficulties.remove(old_cum_diff);
+            }
+            // update the cumulative difficulty
+            *old_cum_diff = new_cumulative_difficulty;
+        } else {
+            // The peer is new, so add it to the list of peers.
+            self.peers.insert(
+                peer_id,
+                (
+                    new_cumulative_difficulty,
+                    PruningSeed::decompress_p2p_rules(core_sync_data.pruning_seed)?,
+                ),
+            );
+
+            // add it to the list of peers to watch for disconnection.
+            self.closed_connections.push(PeerDisconnectFut {
+                closed_fut: handle.closed(),
+                peer_id: Some(peer_id),
+            })
+        }
+
+        self.cumulative_difficulties
+            .entry(new_cumulative_difficulty)
+            .or_default()
+            .insert(peer_id);
+
+        // If the claimed cumulative difficulty is higher than the current one in the watcher
+        // or if the peer in the watch has disconnected, update it.
+        if self.new_height_watcher.borrow().cumulative_difficulty < new_cumulative_difficulty
+            || self
+                .last_peer_in_watcher_handle
+                .as_ref()
+                .is_some_and(|handle| handle.is_closed())
+        {
+            tracing::debug!(
+                "Updating sync watcher channel with new highest seen cumulative difficulty: {new_cumulative_difficulty}"
+            );
+            let _ = self.new_height_watcher.send(NewSyncInfo {
+                top_hash: core_sync_data.top_id,
+                chain_height: core_sync_data.current_height,
+                cumulative_difficulty: new_cumulative_difficulty,
+            });
+            self.last_peer_in_watcher_handle.replace(handle);
+        }
+
+        Ok(())
+    }
+}
+
+impl<N: NetworkZone> Service<PeerSyncRequest<N>> for PeerSyncSvc<N> {
+    type Response = PeerSyncResponse<N>;
+    type Error = tower::BoxError;
+    type Future = Ready<Result<Self::Response, Self::Error>>;
+
+    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        self.poll_disconnected(cx);
+
+        Poll::Ready(Ok(()))
+    }
+
+    fn call(&mut self, req: PeerSyncRequest<N>) -> Self::Future {
+        let res = match req {
+            PeerSyncRequest::PeersToSyncFrom {
+                current_cumulative_difficulty,
+                block_needed,
+            } => Ok(PeerSyncResponse::PeersToSyncFrom(self.peers_to_sync_from(
+                current_cumulative_difficulty,
+                block_needed,
+            ))),
+            PeerSyncRequest::IncomingCoreSyncData(peer_id, handle, sync_data) => self
+                .update_peer_sync_info(peer_id, handle, sync_data)
+                .map(|_| PeerSyncResponse::Ok),
+        };
+
+        ready(res)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::sync::Arc;
+
+    use tokio::sync::Semaphore;
+    use tower::{Service, ServiceExt};
+
+    use monero_p2p::{client::InternalPeerID, handles::HandleBuilder, services::PeerSyncRequest};
+    use monero_wire::CoreSyncData;
+
+    use cuprate_test_utils::test_netzone::TestNetZone;
+    use monero_p2p::services::PeerSyncResponse;
+
+    use super::PeerSyncSvc;
+
+    #[tokio::test]
+    async fn top_sync_channel_updates() {
+        let semaphore = Arc::new(Semaphore::new(1));
+
+        let (_g, handle) = HandleBuilder::new()
+            .with_permit(semaphore.try_acquire_owned().unwrap())
+            .build();
+
+        let (mut svc, mut watch) = PeerSyncSvc::<TestNetZone<true, true, true>>::new();
+
+        assert!(!watch.has_changed().unwrap());
+
+        svc.ready()
+            .await
+            .unwrap()
+            .call(PeerSyncRequest::IncomingCoreSyncData(
+                InternalPeerID::Unknown(0),
+                handle.clone(),
+                CoreSyncData {
+                    cumulative_difficulty: 1_000,
+                    cumulative_difficulty_top64: 0,
+                    current_height: 0,
+                    pruning_seed: 0,
+                    top_id: [0; 32],
+                    top_version: 0,
+                },
+            ))
+            .await
+            .unwrap();
+
+        assert!(watch.has_changed().unwrap());
+
+        assert_eq!(watch.borrow().top_hash, [0; 32]);
+        assert_eq!(watch.borrow().cumulative_difficulty, 1000);
+        assert_eq!(watch.borrow_and_update().chain_height, 0);
+
+        svc.ready()
+            .await
+            .unwrap()
+            .call(PeerSyncRequest::IncomingCoreSyncData(
+                InternalPeerID::Unknown(1),
+                handle.clone(),
+                CoreSyncData {
+                    cumulative_difficulty: 1_000,
+                    cumulative_difficulty_top64: 0,
+                    current_height: 0,
+                    pruning_seed: 0,
+                    top_id: [0; 32],
+                    top_version: 0,
+                },
+            ))
+            .await
+            .unwrap();
+
+        assert!(!watch.has_changed().unwrap());
+
+        svc.ready()
+            .await
+            .unwrap()
+            .call(PeerSyncRequest::IncomingCoreSyncData(
+                InternalPeerID::Unknown(2),
+                handle.clone(),
+                CoreSyncData {
+                    cumulative_difficulty: 1_001,
+                    cumulative_difficulty_top64: 0,
+                    current_height: 0,
+                    pruning_seed: 0,
+                    top_id: [1; 32],
+                    top_version: 0,
+                },
+            ))
+            .await
+            .unwrap();
+
+        assert!(watch.has_changed().unwrap());
+
+        assert_eq!(watch.borrow().top_hash, [1; 32]);
+        assert_eq!(watch.borrow().cumulative_difficulty, 1001);
+        assert_eq!(watch.borrow_and_update().chain_height, 0);
+    }
+
+    #[tokio::test]
+    async fn peer_sync_info_updates() {
+        let semaphore = Arc::new(Semaphore::new(1));
+
+        let (_g, handle) = HandleBuilder::new()
+            .with_permit(semaphore.try_acquire_owned().unwrap())
+            .build();
+
+        let (mut svc, _watch) = PeerSyncSvc::<TestNetZone<true, true, true>>::new();
+
+        svc.ready()
+            .await
+            .unwrap()
+            .call(PeerSyncRequest::IncomingCoreSyncData(
+                InternalPeerID::Unknown(0),
+                handle.clone(),
+                CoreSyncData {
+                    cumulative_difficulty: 1_000,
+                    cumulative_difficulty_top64: 0,
+                    current_height: 0,
+                    pruning_seed: 0,
+                    top_id: [0; 32],
+                    top_version: 0,
+                },
+            ))
+            .await
+            .unwrap();
+
+        assert_eq!(svc.peers.len(), 1);
+        assert_eq!(svc.cumulative_difficulties.len(), 1);
+
+        svc.ready()
+            .await
+            .unwrap()
+            .call(PeerSyncRequest::IncomingCoreSyncData(
+                InternalPeerID::Unknown(0),
+                handle.clone(),
+                CoreSyncData {
+                    cumulative_difficulty: 1_001,
+                    cumulative_difficulty_top64: 0,
+                    current_height: 0,
+                    pruning_seed: 0,
+                    top_id: [0; 32],
+                    top_version: 0,
+                },
+            ))
+            .await
+            .unwrap();
+
+        assert_eq!(svc.peers.len(), 1);
+        assert_eq!(svc.cumulative_difficulties.len(), 1);
+
+        svc.ready()
+            .await
+            .unwrap()
+            .call(PeerSyncRequest::IncomingCoreSyncData(
+                InternalPeerID::Unknown(1),
+                handle.clone(),
+                CoreSyncData {
+                    cumulative_difficulty: 10,
+                    cumulative_difficulty_top64: 0,
+                    current_height: 0,
+                    pruning_seed: 0,
+                    top_id: [0; 32],
+                    top_version: 0,
+                },
+            ))
+            .await
+            .unwrap();
+
+        assert_eq!(svc.peers.len(), 2);
+        assert_eq!(svc.cumulative_difficulties.len(), 2);
+
+        let PeerSyncResponse::PeersToSyncFrom(peers) = svc
+            .ready()
+            .await
+            .unwrap()
+            .call(PeerSyncRequest::PeersToSyncFrom {
+                block_needed: None,
+                current_cumulative_difficulty: 0,
+            })
+            .await
+            .unwrap()
+        else {
+            panic!("Wrong response for request.")
+        };
+
+        assert!(
+            peers.contains(&InternalPeerID::Unknown(0))
+                && peers.contains(&InternalPeerID::Unknown(1))
+        )
+    }
+}
diff --git a/p2p/dandelion/Cargo.toml b/p2p/dandelion/Cargo.toml
new file mode 100644
index 00000000..a8a04691
--- /dev/null
+++ b/p2p/dandelion/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "dandelion_tower"
+version = "0.1.0"
+edition = "2021"
+license = "MIT"
+authors = ["Boog900"]
+
+[features]
+default = ["txpool"]
+txpool = ["dep:rand_distr", "dep:tokio-util", "dep:tokio"]
+
+[dependencies]
+tower = { workspace = true, features = ["discover", "util"] }
+tracing = { workspace = true, features = ["std"] }
+
+futures = { workspace = true, features = ["std"] }
+tokio = { workspace = true, features = ["rt", "sync", "macros"], optional = true}
+tokio-util = { workspace = true, features = ["time"], optional = true }
+
+rand = { workspace = true, features = ["std", "std_rng"] }
+rand_distr = { workspace = true, features = ["std"], optional = true }
+
+thiserror = { workspace = true }
+
+[dev-dependencies]
+tokio = { workspace = true, features = ["rt-multi-thread", "macros", "sync"] }
+proptest = { workspace = true, features = ["default"] }
\ No newline at end of file
diff --git a/p2p/dandelion/src/config.rs b/p2p/dandelion/src/config.rs
new file mode 100644
index 00000000..71a4e5b2
--- /dev/null
+++ b/p2p/dandelion/src/config.rs
@@ -0,0 +1,149 @@
+use std::{
+    ops::{Mul, Neg},
+    time::Duration,
+};
+
+/// When calculating the embargo timeout using the formula: `(-k*(k-1)*hop)/(2*log(1-ep))`
+///
+/// (1 - ep) is the probability that a transaction travels for `k` hops before a node's embargo timeout fires; this constant is (1 - ep).
+const EMBARGO_FULL_TRAVEL_PROBABILITY: f64 = 0.90;
+
+/// The graph type to use for dandelion routing; the dandelion paper recommends [Graph::FourRegular].
+///
+/// The decision between line graphs and 4-regular graphs depends on the priorities of the system: if
+/// linkability of transactions is a first order concern then line graphs may be better, however 4-regular graphs
+/// can give constant-order privacy benefits against adversaries with knowledge of the graph.
+///
+/// See appendix C of the dandelion++ paper.
+#[derive(Default, Debug, Copy, Clone)]
+pub enum Graph {
+    /// Line graph.
+    ///
+    /// When this is selected one peer will be chosen from the outbound peers each epoch to route transactions
+    /// to.
+    ///
+    /// In general this is not recommended over [`Graph::FourRegular`] but may be better for certain systems.
+    Line,
+    /// Quasi-4-Regular.
+    ///
+    /// When this is selected two peers will be chosen from the outbound peers each epoch, each stem transaction
+    /// received will then be sent to one of these two peers. Transactions from the same node will always go to the
+    /// same peer.
+    #[default]
+    FourRegular,
+}
+
+/// The config used to initialize dandelion.
+///
+/// One notable missing item from the config is `Tbase` AKA the timeout parameter to prevent black hole
+/// attacks. This is removed from the config for simplicity; `Tbase` is calculated using the formula provided
+/// in the D++ paper:
+///
+///  `(-k*(k-1)*hop)/(2*log(1-ep))`
+///
+/// Where `k` is calculated from the fluff probability, `hop` is `time_between_hop` and `ep` is fixed at `0.1`.
+///
+#[derive(Debug, Clone, Copy)]
+pub struct DandelionConfig {
+    /// The time it takes for a stem transaction to pass through a node, including network latency.
+    ///
+    /// It's better to be safe and put a slightly higher value than lower.
+    pub time_between_hop: Duration,
+    /// The duration of an epoch.
+    pub epoch_duration: Duration,
+    /// `q` in the dandelion paper: this is the probability that a node will be in the fluff state for
+    /// a certain epoch.
+    ///
+    /// The dandelion paper recommends making this value small, but the smaller this value, the higher
+    /// the broadcast latency.
+    ///
+    /// It is recommended for this value to be <= `0.2`; this value *MUST* be in range `0.0..=1.0`.
+    pub fluff_probability: f64,
+    /// The graph type.
+    pub graph: Graph,
+}
+
+impl DandelionConfig {
+    /// Returns the number of outbound peers to use to stem transactions.
+    ///
+    /// This value depends on the [`Graph`] chosen.
+    pub fn number_of_stems(&self) -> usize {
+        match self.graph {
+            Graph::Line => 1,
+            Graph::FourRegular => 2,
+        }
+    }
+
+    /// Returns the average embargo timeout, `Tbase` in the dandelion++ paper.
+    ///
+    /// This is the average embargo timeout _only including this node_ with `k` nodes also putting an embargo timeout
+    /// using the exponential distribution, the average until one of them fluffs is `Tbase / k`.
+    pub fn average_embargo_timeout(&self) -> Duration {
+        // we set k equal to the expected stem length with this fluff probability.
+        let k = self.expected_stem_length();
+        let time_between_hop = self.time_between_hop.as_secs_f64();
+
+        Duration::from_secs_f64(
+            // (-k*(k-1)*hop)/(2*ln(1-ep))
+            ((k.neg() * (k - 1.0) * time_between_hop)
+                / EMBARGO_FULL_TRAVEL_PROBABILITY.ln().mul(2.0))
+            .ceil(),
+        )
+    }
+
+    /// Returns the expected length of a stem.
+    pub fn expected_stem_length(&self) -> f64 {
+        self.fluff_probability.recip()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::{
+        f64::consts::E,
+        ops::{Mul, Neg},
+        time::Duration,
+    };
+
+    use proptest::{prop_assert, proptest};
+
+    use super::*;
+
+    #[test]
+    fn monerod_average_embargo_timeout() {
+        let cfg = DandelionConfig {
+            time_between_hop: Duration::from_millis(175),
+            epoch_duration: Default::default(),
+            fluff_probability: 0.125,
+            graph: Default::default(),
+        };
+
+        assert_eq!(cfg.average_embargo_timeout(), Duration::from_secs(47));
+    }
+
+    proptest! {
+        #[test]
+        fn embargo_full_travel_probablity_correct(time_between_hop in 1_u64..1_000_000, fluff_probability in 0.000001..1.0) {
+            let cfg = DandelionConfig {
+                time_between_hop: Duration::from_millis(time_between_hop),
+                epoch_duration: Default::default(),
+                fluff_probability,
+                graph: Default::default(),
+            };
+
+            // assert that the `average_embargo_timeout` is high enough that the probability of `k` nodes
+            // not diffusing before expected diffusion is greater than or equal to `EMBARGO_FULL_TRAVEL_PROBABILITY`
+            //
+            // using the formula from appendix B.5
+            let k = cfg.expected_stem_length();
+            let time_between_hop = cfg.time_between_hop.as_secs_f64();
+
+            let average_embargo_timeout = cfg.average_embargo_timeout().as_secs_f64();
+
+            let probability =
+                E.powf((k.neg() * (k - 1.0) * time_between_hop) / average_embargo_timeout.mul(2.0));
+
+            prop_assert!(probability >= EMBARGO_FULL_TRAVEL_PROBABILITY, "probability = {probability}, average_embargo_timeout = {average_embargo_timeout}");
+        }
+    }
+}
diff --git a/p2p/dandelion/src/lib.rs b/p2p/dandelion/src/lib.rs
new file mode 100644
index 00000000..f162724f
--- /dev/null
+++ b/p2p/dandelion/src/lib.rs
@@ -0,0 +1,70 @@
+//! # Dandelion Tower
+//!
+//! This crate implements [dandelion++](https://arxiv.org/pdf/1805.11060.pdf), using [`tower`].
+//!  
+//! This crate provides 2 [`tower::Service`]s, a [`DandelionRouter`] and a [`DandelionPool`](pool::DandelionPool).
+//! The router is pretty minimal and only handles the absolute necessary data to route transactions, whereas the
+//! pool keeps track of all data necessary for dandelion++ but requires you to provide a backing tx-pool.
+//!
+//! This split was done not because the [`DandelionPool`](pool::DandelionPool) is unnecessary but because it is hard
+//! to cover a wide range of projects when abstracting over the tx-pool. Not using the [`DandelionPool`](pool::DandelionPool)
+//! requires you to implement part of the paper yourself.
+//!
+//! # Features
+//!
+//! This crate only has one feature `txpool` which enables [`DandelionPool`](pool::DandelionPool).
+//!
+//! # Needed Services
+//!
+//! To use this crate you need to provide a few types.
+//!
+//! ## Diffuse Service
+//!
+//! This service should implement diffusion, which is sending the transaction to every peer, with each peer
+//! having a timer using the exponential distribution and batch sending all txs that were queued in that time.
+//!
+//! The diffuse service should have a request of [`DiffuseRequest`](traits::DiffuseRequest) and its error
+//! should be [`tower::BoxError`].
+//!
+//! ## Outbound Peer Discoverer
+//!
+//! The outbound peer [`Discover`](tower::discover::Discover) should provide a stream of randomly selected outbound
+//! peers, these peers will then be used to route stem txs to.
+//!
+//! The peers will not be returned anywhere, so it is recommended to wrap them in some sort of drop guard that returns
+//! them back to a peer set.
+//!
+//! ## Peer Service
+//!
+//! This service represents a connection to an individual peer, this should be returned from the Outbound Peer
+//! Discoverer. This should immediately send the transaction to the peer when requested, i.e. it should _not_ set
+//! a timer.
+//!
+//! The peer service should have a request of [`StemRequest`](traits::StemRequest) and its error
+//! should be [`tower::BoxError`].
+//!
+//! ## Backing Pool
+//!
+//! ([`DandelionPool`](pool::DandelionPool) only)
+//!
+//! This service is a backing tx-pool, in memory or on disk.
+//! The backing pool should have a request of [`TxStoreRequest`](traits::TxStoreRequest) and a response of
+//! [`TxStoreResponse`](traits::TxStoreResponse), with an error of [`tower::BoxError`].
+//!
+//! Users should keep a handle to the backing pool to request data from it, when requesting data you _must_
+//! make sure you only look in the public pool if you are going to be giving data to peers, as stem transactions
+//! must stay private.
+//!
+//! When removing data, for example because of a new block, you can remove from both pools provided it doesn't leak
+//! any data about stem transactions. You will probably want to set up a task that monitors the tx pool for stuck transactions,
+//! transactions that slipped in just as one was removed etc, this crate does not handle that.
+mod config;
+#[cfg(feature = "txpool")]
+pub mod pool;
+mod router;
+#[cfg(test)]
+mod tests;
+pub mod traits;
+
+pub use config::*;
+pub use router::*;
diff --git a/p2p/dandelion/src/pool.rs b/p2p/dandelion/src/pool.rs
new file mode 100644
index 00000000..eddcc670
--- /dev/null
+++ b/p2p/dandelion/src/pool.rs
@@ -0,0 +1,510 @@
+//! # Dandelion++ Pool
+//!
+//! This module contains [`DandelionPool`] which is a thin wrapper around a backing transaction store,
+//! which fully implements the dandelion++ protocol.
+//!
+//! ### How To Get Txs From [`DandelionPool`].
+//!
+//! [`DandelionPool`] does not provide a full tx-pool API. You cannot retrieve transactions from it or
+//! check what transactions are in it, to do this you must keep a handle to the backing transaction store
+//! yourself.
+//!
+//! The reason for this is, the [`DandelionPool`] will only itself be passing these requests onto the backing
+//! pool, so it makes sense to remove the "middle man".
+//!
+//! ### Keep Stem Transactions Hidden
+//!
+//! When using your handle to the backing store it must be remembered to keep transactions in the stem pool hidden.
+//! So handle any requests to the tx-pool like the stem side of the pool does not exist.
+//!
+use std::{
+    collections::{HashMap, HashSet},
+    future::Future,
+    hash::Hash,
+    marker::PhantomData,
+    pin::Pin,
+    task::{Context, Poll},
+    time::Duration,
+};
+
+use futures::{FutureExt, StreamExt};
+use rand::prelude::*;
+use rand_distr::Exp;
+use tokio::{
+    sync::{mpsc, oneshot},
+    task::JoinSet,
+};
+use tokio_util::{sync::PollSender, time::DelayQueue};
+use tower::{Service, ServiceExt};
+use tracing::Instrument;
+
+use crate::{
+    traits::{TxStoreRequest, TxStoreResponse},
+    DandelionConfig, DandelionRouteReq, DandelionRouterError, State, TxState,
+};
+
+/// Start the [`DandelionPool`].
+///
+/// This function spawns the [`DandelionPool`] and returns [`DandelionPoolService`] which can be used to send
+/// requests to the pool.
+///
+/// ### Args
+///
+/// - `buffer_size` is the size of the channel's buffer between the [`DandelionPoolService`] and [`DandelionPool`].
+/// - `dandelion_router` is the router service, kept generic instead of [`DandelionRouter`](crate::DandelionRouter) to allow
+/// user to customise routing functionality.
+/// - `backing_pool` is the backing transaction storage service
+/// - `config` is [`DandelionConfig`].
+pub fn start_dandelion_pool<P, R, Tx, TxID, PID>(
+    buffer_size: usize,
+    dandelion_router: R,
+    backing_pool: P,
+    config: DandelionConfig,
+) -> DandelionPoolService<Tx, TxID, PID>
+where
+    Tx: Clone + Send + 'static,
+    TxID: Hash + Eq + Clone + Send + 'static,
+    PID: Hash + Eq + Clone + Send + 'static,
+    P: Service<
+            TxStoreRequest<Tx, TxID>,
+            Response = TxStoreResponse<Tx, TxID>,
+            Error = tower::BoxError,
+        > + Send
+        + 'static,
+    P::Future: Send + 'static,
+    R: Service<DandelionRouteReq<Tx, PID>, Response = State, Error = DandelionRouterError>
+        + Send
+        + 'static,
+    R::Future: Send + 'static,
+{
+    let (tx, rx) = mpsc::channel(buffer_size);
+
+    let pool = DandelionPool {
+        dandelion_router,
+        backing_pool,
+        routing_set: JoinSet::new(),
+        stem_origins: HashMap::new(),
+        embargo_timers: DelayQueue::new(),
+        embargo_dist: Exp::new(1.0 / config.average_embargo_timeout().as_secs_f64()).unwrap(),
+        config,
+        _tx: PhantomData,
+    };
+
+    let span = tracing::debug_span!("dandelion_pool");
+
+    tokio::spawn(pool.run(rx).instrument(span));
+
+    DandelionPoolService {
+        tx: PollSender::new(tx),
+    }
+}
+
+#[derive(Copy, Clone, Debug, thiserror::Error)]
+#[error("The dandelion pool was shutdown")]
+pub struct DandelionPoolShutDown;
+
+/// An incoming transaction for the [`DandelionPool`] to handle.
+///
+/// Users may notice there is no way to check if the dandelion-pool wants a tx according to an inventory message like seen
+/// in Bitcoin, only having a request for a full tx. Users should look in the *public* backing pool to handle inv messages,
+/// and request txs even if they are in the stem pool.
+pub struct IncomingTx<Tx, TxID, PID> {
+    /// The transaction.
+    ///
+    /// It is recommended to put this in an [`Arc`](std::sync::Arc) as it needs to be cloned to send to the backing
+    /// tx pool and [`DandelionRouter`](crate::DandelionRouter)
+    pub tx: Tx,
+    /// The transaction ID.
+    pub tx_id: TxID,
+    /// The routing state of this transaction.
+    pub tx_state: TxState<PID>,
+}
+
+/// The dandelion tx pool service.
+#[derive(Clone)]
+pub struct DandelionPoolService<Tx, TxID, PID> {
+    /// The channel to [`DandelionPool`].
+    tx: PollSender<(IncomingTx<Tx, TxID, PID>, oneshot::Sender<()>)>,
+}
+
+impl<Tx, TxID, PID> Service<IncomingTx<Tx, TxID, PID>> for DandelionPoolService<Tx, TxID, PID>
+where
+    Tx: Clone + Send,
+    TxID: Hash + Eq + Clone + Send + 'static,
+    PID: Hash + Eq + Clone + Send + 'static,
+{
+    type Response = ();
+    type Error = DandelionPoolShutDown;
+    type Future =
+        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
+
+    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        self.tx.poll_reserve(cx).map_err(|_| DandelionPoolShutDown)
+    }
+
+    fn call(&mut self, req: IncomingTx<Tx, TxID, PID>) -> Self::Future {
+        // although the channel isn't sending anything we want to wait for the request to be handled before continuing.
+        let (tx, rx) = oneshot::channel();
+
+        let res = self
+            .tx
+            .send_item((req, tx))
+            .map_err(|_| DandelionPoolShutDown);
+
+        async move {
+            res?;
+            rx.await.expect("Oneshot dropped before response!");
+
+            Ok(())
+        }
+        .boxed()
+    }
+}
+
+/// The dandelion++ tx pool.
+///
+/// See the [module docs](self) for more.
+pub struct DandelionPool<P, R, Tx, TxID, PID> {
+    /// The dandelion++ router.
+    dandelion_router: R,
+    /// The backing tx storage.
+    backing_pool: P,
+    /// The set of tasks that are running the future returned from `dandelion_router`.
+    routing_set: JoinSet<(TxID, Result<State, TxState<PID>>)>,
+
+    /// The origins of stem transactions: the peers that sent us each stem tx.
+    stem_origins: HashMap<TxID, HashSet<PID>>,
+
+    /// Current stem pool embargo timers.
+    embargo_timers: DelayQueue<TxID>,
+    /// The distribution to sample to get embargo timers.
+    embargo_dist: Exp<f64>,
+
+    /// The d++ config.
+    config: DandelionConfig,
+
+    _tx: PhantomData<Tx>,
+}
+
+impl<P, R, Tx, TxID, PID> DandelionPool<P, R, Tx, TxID, PID>
+where
+    Tx: Clone + Send,
+    TxID: Hash + Eq + Clone + Send + 'static,
+    PID: Hash + Eq + Clone + Send + 'static,
+    P: Service<
+        TxStoreRequest<Tx, TxID>,
+        Response = TxStoreResponse<Tx, TxID>,
+        Error = tower::BoxError,
+    >,
+    P::Future: Send + 'static,
+    R: Service<DandelionRouteReq<Tx, PID>, Response = State, Error = DandelionRouterError>,
+    R::Future: Send + 'static,
+{
+    /// Stores the tx in the backing pool's stem pool, setting the embargo timer and stem origin, and stemming the tx.
+    async fn store_tx_and_stem(
+        &mut self,
+        tx: Tx,
+        tx_id: TxID,
+        from: Option<PID>,
+    ) -> Result<(), tower::BoxError> {
+        self.backing_pool
+            .ready()
+            .await?
+            .call(TxStoreRequest::Store(
+                tx.clone(),
+                tx_id.clone(),
+                State::Stem,
+            ))
+            .await?;
+
+        let embargo_timer = self.embargo_dist.sample(&mut thread_rng());
+        tracing::debug!(
+            "Setting embargo timer for stem tx: {} seconds.",
+            embargo_timer
+        );
+        self.embargo_timers
+            .insert(tx_id.clone(), Duration::from_secs_f64(embargo_timer));
+
+        self.stem_tx(tx, tx_id, from).await
+    }
+
+    /// Stems the tx, setting the stem origin, if it wasn't already set.
+    ///
+    /// This function does not add the tx to the backing pool.
+    async fn stem_tx(
+        &mut self,
+        tx: Tx,
+        tx_id: TxID,
+        from: Option<PID>,
+    ) -> Result<(), tower::BoxError> {
+        if let Some(peer) = &from {
+            self.stem_origins
+                .entry(tx_id.clone())
+                .or_default()
+                .insert(peer.clone());
+        }
+
+        let state = from
+            .map(|from| TxState::Stem { from })
+            .unwrap_or(TxState::Local);
+
+        let fut = self
+            .dandelion_router
+            .ready()
+            .await?
+            .call(DandelionRouteReq {
+                tx,
+                state: state.clone(),
+            });
+
+        self.routing_set
+            .spawn(fut.map(|res| (tx_id, res.map_err(|_| state))));
+        Ok(())
+    }
+
+    /// Stores the tx in the backing pool and fluffs the tx, removing the stem data for this tx.
+    async fn store_and_fluff_tx(&mut self, tx: Tx, tx_id: TxID) -> Result<(), tower::BoxError> {
+        // fluffs the tx first to prevent timing attacks where we could fluff at different average times
+        // depending on if the tx was in the stem pool already or not.
+        // Massively overkill but this is a minimal change.
+        self.fluff_tx(tx.clone(), tx_id.clone()).await?;
+
+        // Remove the tx from the maps used during the stem phase.
+        self.stem_origins.remove(&tx_id);
+
+        self.backing_pool
+            .ready()
+            .await?
+            .call(TxStoreRequest::Store(tx, tx_id, State::Fluff))
+            .await?;
+
+        // The key for this is *Not* the tx_id, it is given on insert, so just keep the timer in the
+        // map. These timers should be relatively short, so it shouldn't be a problem.
+        //self.embargo_timers.try_remove(&tx_id);
+
+        Ok(())
+    }
+
+    /// Fluffs a tx, does not add the tx to the tx pool.
+    async fn fluff_tx(&mut self, tx: Tx, tx_id: TxID) -> Result<(), tower::BoxError> {
+        let fut = self
+            .dandelion_router
+            .ready()
+            .await?
+            .call(DandelionRouteReq {
+                tx,
+                state: TxState::Fluff,
+            });
+
+        self.routing_set
+            .spawn(fut.map(|res| (tx_id, res.map_err(|_| TxState::Fluff))));
+        Ok(())
+    }
+
+    /// Function to handle an incoming [`DandelionPoolRequest::IncomingTx`].
+    async fn handle_incoming_tx(
+        &mut self,
+        tx: Tx,
+        tx_state: TxState<PID>,
+        tx_id: TxID,
+    ) -> Result<(), tower::BoxError> {
+        let TxStoreResponse::Contains(have_tx) = self
+            .backing_pool
+            .ready()
+            .await?
+            .call(TxStoreRequest::Contains(tx_id.clone()))
+            .await?
+        else {
+            panic!("Backing tx pool responded with wrong response for request.");
+        };
+        // If we have already fluffed this tx then we don't need to do anything.
+        if have_tx == Some(State::Fluff) {
+            tracing::debug!("Already fluffed incoming tx, ignoring.");
+            return Ok(());
+        }
+
+        match tx_state {
+            TxState::Stem { from } => {
+                if self
+                    .stem_origins
+                    .get(&tx_id)
+                    .is_some_and(|peers| peers.contains(&from))
+                {
+                    tracing::debug!("Received stem tx twice from same peer, fluffing it");
+                    // The same peer sent us a tx twice, fluff it.
+                    self.promote_and_fluff_tx(tx_id).await
+                } else {
+                    // This could be a new tx or it could have already been stemmed, but we still stem it again
+                    // unless the same peer sends us a tx twice.
+                    tracing::debug!("Steming incoming tx");
+                    self.store_tx_and_stem(tx, tx_id, Some(from)).await
+                }
+            }
+            TxState::Fluff => {
+                tracing::debug!("Fluffing incoming tx");
+                self.store_and_fluff_tx(tx, tx_id).await
+            }
+            TxState::Local => {
+                // If we have already stemmed this tx then there is nothing to do.
+                if have_tx.is_some() {
+                    tracing::debug!("Received a local tx that we already have, skipping");
+                    return Ok(());
+                }
+                tracing::debug!("Steming local transaction");
+                self.store_tx_and_stem(tx, tx_id, None).await
+            }
+        }
+    }
+
+    /// Promotes a tx to the clear pool.
+    async fn promote_tx(&mut self, tx_id: TxID) -> Result<(), tower::BoxError> {
+        // Remove the tx from the maps used during the stem phase.
+        self.stem_origins.remove(&tx_id);
+
+        // The key for this is *Not* the tx_id, it is given on insert, so just keep the timer in the
+        // map. These timers should be relatively short, so it shouldn't be a problem.
+        //self.embargo_timers.try_remove(&tx_id);
+
+        self.backing_pool
+            .ready()
+            .await?
+            .call(TxStoreRequest::Promote(tx_id))
+            .await?;
+
+        Ok(())
+    }
+
+    /// Promotes a tx to the public fluff pool and fluffs the tx.
+    async fn promote_and_fluff_tx(&mut self, tx_id: TxID) -> Result<(), tower::BoxError> {
+        tracing::debug!("Promoting transaction to public pool and fluffing it.");
+
+        let TxStoreResponse::Transaction(tx) = self
+            .backing_pool
+            .ready()
+            .await?
+            .call(TxStoreRequest::Get(tx_id.clone()))
+            .await?
+        else {
+            panic!("Backing tx pool responded with wrong response for request.");
+        };
+
+        let Some((tx, state)) = tx else {
+            tracing::debug!("Could not find tx, skipping.");
+            return Ok(());
+        };
+
+        if state == State::Fluff {
+            tracing::debug!("Transaction already fluffed, skipping.");
+            return Ok(());
+        }
+
+        self.promote_tx(tx_id.clone()).await?;
+        self.fluff_tx(tx, tx_id).await
+    }
+
+    /// Returns a tx stored in the fluff _OR_ stem pool.
+    async fn get_tx_from_pool(&mut self, tx_id: TxID) -> Result<Option<Tx>, tower::BoxError> {
+        let TxStoreResponse::Transaction(tx) = self
+            .backing_pool
+            .ready()
+            .await?
+            .call(TxStoreRequest::Get(tx_id))
+            .await?
+        else {
+            panic!("Backing tx pool responded with wrong response for request.");
+        };
+
+        Ok(tx.map(|tx| tx.0))
+    }
+
+    /// Starts the [`DandelionPool`].
+    async fn run(
+        mut self,
+        mut rx: mpsc::Receiver<(IncomingTx<Tx, TxID, PID>, oneshot::Sender<()>)>,
+    ) {
+        tracing::debug!("Starting dandelion++ tx-pool, config: {:?}", self.config);
+
+        // On start up we just fluff all txs left in the stem pool.
+        let Ok(TxStoreResponse::IDs(ids)) = (&mut self.backing_pool)
+            .oneshot(TxStoreRequest::IDsInStemPool)
+            .await
+        else {
+            tracing::error!("Failed to get transactions in stem pool.");
+            return;
+        };
+
+        tracing::debug!(
+            "Fluffing {} txs that are currently in the stem pool",
+            ids.len()
+        );
+
+        for id in ids {
+            if let Err(e) = self.promote_and_fluff_tx(id).await {
+                tracing::error!("Failed to fluff tx in the stem pool at start up, {e}.");
+                return;
+            }
+        }
+
+        loop {
+            tracing::trace!("Waiting for next event.");
+            tokio::select! {
+                // biased to handle current txs before routing new ones.
+                biased;
+                Some(fired) = self.embargo_timers.next() => {
+                    tracing::debug!("Embargo timer fired, did not see stem tx in time.");
+
+                    let tx_id = fired.into_inner();
+                    if let Err(e) = self.promote_and_fluff_tx(tx_id).await {
+                        tracing::error!("Error handling fired embargo timer: {e}");
+                        return;
+                    }
+                }
+                Some(Ok((tx_id, res))) = self.routing_set.join_next() => {
+                    tracing::trace!("Received d++ routing result.");
+
+                    let res = match res {
+                        Ok(State::Fluff) => {
+                            tracing::debug!("Transaction was fluffed upgrading it to the public pool.");
+                            self.promote_tx(tx_id).await
+                        }
+                        Err(tx_state) => {
+                            tracing::debug!("Error routing transaction, trying again.");
+
+                            match self.get_tx_from_pool(tx_id.clone()).await {
+                                Ok(Some(tx)) => match tx_state {
+                                    TxState::Fluff => self.fluff_tx(tx, tx_id).await,
+                                    TxState::Stem { from } => self.stem_tx(tx, tx_id, Some(from)).await,
+                                    TxState::Local => self.stem_tx(tx, tx_id, None).await,
+                                }
+                                Err(e) => Err(e),
+                                _ => continue,
+                            }
+                        }
+                        Ok(State::Stem) => continue,
+                    };
+
+                    if let Err(e) = res {
+                        tracing::error!("Error handling transaction routing return: {e}");
+                        return;
+                    }
+                }
+                req = rx.recv() => {
+                    tracing::debug!("Received new tx to route.");
+
+                    let Some((IncomingTx { tx, tx_state, tx_id }, res_tx)) = req else {
+                        return;
+                    };
+
+                    if let Err(e) = self.handle_incoming_tx(tx, tx_state, tx_id).await {
+                        let _ = res_tx.send(());
+
+                        tracing::error!("Error handling transaction in dandelion pool: {e}");
+                        return;
+                    }
+                    let _ = res_tx.send(());
+
+                }
+            }
+        }
+    }
+}
diff --git a/p2p/dandelion/src/router.rs b/p2p/dandelion/src/router.rs
new file mode 100644
index 00000000..61e962c3
--- /dev/null
+++ b/p2p/dandelion/src/router.rs
@@ -0,0 +1,348 @@
+//! # Dandelion++ Router
+//!
+//! This module contains [`DandelionRouter`] which is a [`Service`]. It handles keeping the
+//! current dandelion++ [`State`] and deciding where to send transactions based on their [`TxState`].
+//!
+//! ### What The Router Does Not Do
+//!
+//! It does not handle anything to do with keeping transactions long term, i.e. embargo timers and handling
+//! loops in the stem. It is up to implementers to do this if they decide not to use [`DandelionPool`](crate::pool::DandelionPool)
+//!
+use std::{
+    collections::HashMap,
+    future::Future,
+    hash::Hash,
+    marker::PhantomData,
+    pin::Pin,
+    task::{ready, Context, Poll},
+    time::Instant,
+};
+
+use futures::TryFutureExt;
+use rand::{distributions::Bernoulli, prelude::*, thread_rng};
+use tower::{
+    discover::{Change, Discover},
+    Service,
+};
+
+use crate::{
+    traits::{DiffuseRequest, StemRequest},
+    DandelionConfig,
+};
+
+/// An error returned from the [`DandelionRouter`]
+#[derive(thiserror::Error, Debug)]
+pub enum DandelionRouterError {
+    /// This error is probably recoverable so the request should be retried.
+    #[error("Peer chosen to route stem txs to had an err: {0}.")]
+    PeerError(tower::BoxError),
+    /// The broadcast service returned an error.
+    #[error("Broadcast service returned an err: {0}.")]
+    BroadcastError(tower::BoxError),
+    /// The outbound peer discoverer returned an error, this is critical.
+    #[error("The outbound peer discoverer returned an err: {0}.")]
+    OutboundPeerDiscoverError(tower::BoxError),
+    /// The outbound peer discoverer returned [`None`].
+    #[error("The outbound peer discoverer exited.")]
+    OutboundPeerDiscoverExited,
+}
+
+/// The dandelion++ state.
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub enum State {
+    /// Fluff state, in this state we are diffusing stem transactions to all peers.
+    Fluff,
+    /// Stem state, in this state we are stemming stem transactions to a single outbound peer.
+    Stem,
+}
+
+/// The routing state of a transaction.
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub enum TxState<ID> {
+    /// Fluff state.
+    Fluff,
+    /// Stem state.
+    Stem {
+        /// The ID of the peer who sent us this transaction.
+        from: ID,
+    },
+    /// Local - the transaction originated from our node.
+    Local,
+}
+
+/// A request to route a transaction.
+pub struct DandelionRouteReq<Tx, ID> {
+    /// The transaction.
+    pub tx: Tx,
+    /// The transaction state.
+    pub state: TxState<ID>,
+}
+
+/// The dandelion router service.
+pub struct DandelionRouter<P, B, ID, S, Tx> {
+    // pub(crate) is for tests
+    /// A [`Discover`] where we can get outbound peers from.
+    outbound_peer_discover: Pin<Box<P>>,
+    /// A [`Service`] which handles broadcasting (diffusing) transactions.
+    broadcast_svc: B,
+
+    /// The current state.
+    current_state: State,
+    /// The time at which this epoch started.
+    epoch_start: Instant,
+
+    /// The stem our local transactions will be sent to.
+    local_route: Option<ID>,
+    /// A [`HashMap`] linking peers' IDs to IDs in `stem_peers`.
+    stem_routes: HashMap<ID, ID>,
+    /// Peers we are using for stemming.
+    ///
+    /// This will contain peers, even in [`State::Fluff`] to allow us to stem [`TxState::Local`]
+    /// transactions.
+    pub(crate) stem_peers: HashMap<ID, S>,
+
+    /// The distribution to sample to get the [`State`], true is [`State::Fluff`].
+    state_dist: Bernoulli,
+
+    /// The config.
+    config: DandelionConfig,
+
+    /// The routers tracing span.
+    span: tracing::Span,
+
+    _tx: PhantomData<Tx>,
+}
+
+impl<Tx, ID, P, B, S> DandelionRouter<P, B, ID, S, Tx>
+where
+    ID: Hash + Eq + Clone,
+    P: Discover<Key = ID, Service = S, Error = tower::BoxError>,
+    B: Service<DiffuseRequest<Tx>, Error = tower::BoxError>,
+    S: Service<StemRequest<Tx>, Error = tower::BoxError>,
+{
+    /// Creates a new [`DandelionRouter`], with the provided services and config.
+    ///
+    /// # Panics
+    /// This function panics if [`DandelionConfig::fluff_probability`] is not `0.0..=1.0`.
+    pub fn new(broadcast_svc: B, outbound_peer_discover: P, config: DandelionConfig) -> Self {
+        // get the current state
+        let state_dist = Bernoulli::new(config.fluff_probability)
+            .expect("Fluff probability was not between 0 and 1");
+
+        let current_state = if state_dist.sample(&mut thread_rng()) {
+            State::Fluff
+        } else {
+            State::Stem
+        };
+
+        DandelionRouter {
+            outbound_peer_discover: Box::pin(outbound_peer_discover),
+            broadcast_svc,
+            current_state,
+            epoch_start: Instant::now(),
+            local_route: None,
+            stem_routes: HashMap::new(),
+            stem_peers: HashMap::new(),
+            state_dist,
+            config,
+            span: tracing::debug_span!("dandelion_router", state = ?current_state),
+            _tx: PhantomData,
+        }
+    }
+
+    /// This function gets the number of outbound peers from the [`Discover`] required for the selected [`Graph`](crate::Graph).
+    fn poll_prepare_graph(
+        &mut self,
+        cx: &mut Context<'_>,
+    ) -> Poll<Result<(), DandelionRouterError>> {
+        let peers_needed = match self.current_state {
+            State::Stem => self.config.number_of_stems(),
+            // When in the fluff state we only need one peer, the one for our txs.
+            State::Fluff => 1,
+        };
+
+        while self.stem_peers.len() < peers_needed {
+            match ready!(self
+                .outbound_peer_discover
+                .as_mut()
+                .poll_discover(cx)
+                .map_err(DandelionRouterError::OutboundPeerDiscoverError))
+            .ok_or(DandelionRouterError::OutboundPeerDiscoverExited)??
+            {
+                Change::Insert(key, svc) => {
+                    self.stem_peers.insert(key, svc);
+                }
+                Change::Remove(key) => {
+                    self.stem_peers.remove(&key);
+                }
+            }
+        }
+
+        Poll::Ready(Ok(()))
+    }
+
+    fn fluff_tx(&mut self, tx: Tx) -> B::Future {
+        self.broadcast_svc.call(DiffuseRequest(tx))
+    }
+
+    fn stem_tx(&mut self, tx: Tx, from: ID) -> S::Future {
+        loop {
+            let stem_route = self.stem_routes.entry(from.clone()).or_insert_with(|| {
+                self.stem_peers
+                    .iter()
+                    .choose(&mut thread_rng())
+                    .expect("No peers in `stem_peers` was poll_ready called?")
+                    .0
+                    .clone()
+            });
+
+            let Some(peer) = self.stem_peers.get_mut(stem_route) else {
+                self.stem_routes.remove(&from);
+                continue;
+            };
+
+            return peer.call(StemRequest(tx));
+        }
+    }
+
+    fn stem_local_tx(&mut self, tx: Tx) -> S::Future {
+        loop {
+            let stem_route = self.local_route.get_or_insert_with(|| {
+                self.stem_peers
+                    .iter()
+                    .choose(&mut thread_rng())
+                    .expect("No peers in `stem_peers` was poll_ready called?")
+                    .0
+                    .clone()
+            });
+
+            let Some(peer) = self.stem_peers.get_mut(stem_route) else {
+                self.local_route.take();
+                continue;
+            };
+
+            return peer.call(StemRequest(tx));
+        }
+    }
+}
+
+/*
+## Generics ##
+
+Tx: The tx type
+ID: Peer Id type - unique identifier for nodes.
+P: Peer Set discover - where we can get outbound peers from
+B: Broadcast service - where we send txs to get diffused.
+S: The Peer service - handles routing messages to a single node.
+ */
+impl<Tx, ID, P, B, S> Service<DandelionRouteReq<Tx, ID>> for DandelionRouter<P, B, ID, S, Tx>
+where
+    ID: Hash + Eq + Clone,
+    P: Discover<Key = ID, Service = S, Error = tower::BoxError>,
+    B: Service<DiffuseRequest<Tx>, Error = tower::BoxError>,
+    B::Future: Send + 'static,
+    S: Service<StemRequest<Tx>, Error = tower::BoxError>,
+    S::Future: Send + 'static,
+{
+    type Response = State;
+    type Error = DandelionRouterError;
+    type Future =
+        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
+
+    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        if self.epoch_start.elapsed() > self.config.epoch_duration {
+            // clear all the stem routing data.
+            self.stem_peers.clear();
+            self.stem_routes.clear();
+            self.local_route.take();
+
+            self.current_state = if self.state_dist.sample(&mut thread_rng()) {
+                State::Fluff
+            } else {
+                State::Stem
+            };
+
+            self.span
+                .record("state", format!("{:?}", self.current_state));
+            tracing::debug!(parent: &self.span, "Starting new d++ epoch",);
+
+            self.epoch_start = Instant::now();
+        }
+
+        let mut peers_pending = false;
+
+        let span = &self.span;
+
+        self.stem_peers
+            .retain(|_, peer_svc| match peer_svc.poll_ready(cx) {
+                Poll::Ready(res) => res
+                    .inspect_err(|e| {
+                        tracing::debug!(
+                            parent: span,
+                            "Peer returned an error on `poll_ready`: {e}, removing from router.",
+                        )
+                    })
+                    .is_ok(),
+                Poll::Pending => {
+                    // Pending peers should be kept - they have not errored yet.
+                    peers_pending = true;
+                    true
+                }
+            });
+
+        if peers_pending {
+            return Poll::Pending;
+        }
+
+        // now we have removed the failed peers check if we still have enough for the graph chosen.
+        ready!(self.poll_prepare_graph(cx)?);
+
+        ready!(self
+            .broadcast_svc
+            .poll_ready(cx)
+            .map_err(DandelionRouterError::BroadcastError)?);
+
+        Poll::Ready(Ok(()))
+    }
+
+    fn call(&mut self, req: DandelionRouteReq<Tx, ID>) -> Self::Future {
+        tracing::trace!(parent: &self.span,  "Handling route request.");
+
+        match req.state {
+            TxState::Fluff => Box::pin(
+                self.fluff_tx(req.tx)
+                    .map_ok(|_| State::Fluff)
+                    .map_err(DandelionRouterError::BroadcastError),
+            ),
+            TxState::Stem { from } => match self.current_state {
+                State::Fluff => {
+                    tracing::debug!(parent: &self.span, "Fluffing stem tx.");
+
+                    Box::pin(
+                        self.fluff_tx(req.tx)
+                            .map_ok(|_| State::Fluff)
+                            .map_err(DandelionRouterError::BroadcastError),
+                    )
+                }
+                State::Stem => {
+                    tracing::trace!(parent: &self.span, "Steming transaction");
+
+                    Box::pin(
+                        self.stem_tx(req.tx, from)
+                            .map_ok(|_| State::Stem)
+                            .map_err(DandelionRouterError::PeerError),
+                    )
+                }
+            },
+            TxState::Local => {
+                tracing::debug!(parent: &self.span, "Steming local tx.");
+
+                Box::pin(
+                    self.stem_local_tx(req.tx)
+                        .map_ok(|_| State::Stem)
+                        .map_err(DandelionRouterError::PeerError),
+                )
+            }
+        }
+    }
+}
diff --git a/p2p/dandelion/src/tests/mod.rs b/p2p/dandelion/src/tests/mod.rs
new file mode 100644
index 00000000..1f3ba3e8
--- /dev/null
+++ b/p2p/dandelion/src/tests/mod.rs
@@ -0,0 +1,130 @@
+mod pool;
+mod router;
+
+use std::{collections::HashMap, future::Future, hash::Hash, sync::Arc};
+
+use futures::TryStreamExt;
+use tokio::sync::mpsc::{self, UnboundedReceiver};
+use tower::{
+    discover::{Discover, ServiceList},
+    util::service_fn,
+    Service, ServiceExt,
+};
+
+use crate::{
+    traits::{TxStoreRequest, TxStoreResponse},
+    State,
+};
+
+pub fn mock_discover_svc<Req: Send + 'static>() -> (
+    impl Discover<
+        Key = usize,
+        Service = impl Service<
+            Req,
+            Future = impl Future<Output = Result<(), tower::BoxError>> + Send + 'static,
+            Error = tower::BoxError,
+        > + Send
+                      + 'static,
+        Error = tower::BoxError,
+    >,
+    UnboundedReceiver<(u64, Req)>,
+) {
+    let (tx, rx) = mpsc::unbounded_channel();
+
+    let discover = ServiceList::new((0..).map(move |i| {
+        let tx_2 = tx.clone();
+
+        service_fn(move |req| {
+            tx_2.send((i, req)).unwrap();
+
+            async move { Ok::<(), tower::BoxError>(()) }
+        })
+    }))
+    .map_err(Into::into);
+
+    (discover, rx)
+}
+
+pub fn mock_broadcast_svc<Req: Send + 'static>() -> (
+    impl Service<
+            Req,
+            Future = impl Future<Output = Result<(), tower::BoxError>> + Send + 'static,
+            Error = tower::BoxError,
+        > + Send
+        + 'static,
+    UnboundedReceiver<Req>,
+) {
+    let (tx, rx) = mpsc::unbounded_channel();
+
+    (
+        service_fn(move |req| {
+            tx.send(req).unwrap();
+
+            async move { Ok::<(), tower::BoxError>(()) }
+        }),
+        rx,
+    )
+}
+
+#[allow(clippy::type_complexity)] // just test code.
+pub fn mock_in_memory_backing_pool<
+    Tx: Clone + Send + 'static,
+    TxID: Clone + Hash + Eq + Send + 'static,
+>() -> (
+    impl Service<
+            TxStoreRequest<Tx, TxID>,
+            Response = TxStoreResponse<Tx, TxID>,
+            Future = impl Future<Output = Result<TxStoreResponse<Tx, TxID>, tower::BoxError>>
+                         + Send
+                         + 'static,
+            Error = tower::BoxError,
+        > + Send
+        + 'static,
+    Arc<std::sync::Mutex<HashMap<TxID, (Tx, State)>>>,
+) {
+    let txs = Arc::new(std::sync::Mutex::new(HashMap::new()));
+    let txs_2 = txs.clone();
+
+    (
+        service_fn(move |req: TxStoreRequest<Tx, TxID>| {
+            let txs = txs.clone();
+            async move {
+                match req {
+                    TxStoreRequest::Store(tx, tx_id, state) => {
+                        txs.lock().unwrap().insert(tx_id, (tx, state));
+                        Ok(TxStoreResponse::Ok)
+                    }
+                    TxStoreRequest::Get(tx_id) => {
+                        let tx_state = txs.lock().unwrap().get(&tx_id).cloned();
+                        Ok(TxStoreResponse::Transaction(tx_state))
+                    }
+                    TxStoreRequest::Contains(tx_id) => Ok(TxStoreResponse::Contains(
+                        txs.lock().unwrap().get(&tx_id).map(|res| res.1),
+                    )),
+                    TxStoreRequest::IDsInStemPool => {
+                        // horribly inefficient, but it's test code :)
+                        let ids = txs
+                            .lock()
+                            .unwrap()
+                            .iter()
+                            .filter(|(_, (_, state))| matches!(state, State::Stem))
+                            .map(|tx| tx.0.clone())
+                            .collect::<Vec<_>>();
+
+                        Ok(TxStoreResponse::IDs(ids))
+                    }
+                    TxStoreRequest::Promote(tx_id) => {
+                        let _ = txs
+                            .lock()
+                            .unwrap()
+                            .get_mut(&tx_id)
+                            .map(|tx| tx.1 = State::Fluff);
+
+                        Ok(TxStoreResponse::Ok)
+                    }
+                }
+            }
+        }),
+        txs_2,
+    )
+}
diff --git a/p2p/dandelion/src/tests/pool.rs b/p2p/dandelion/src/tests/pool.rs
new file mode 100644
index 00000000..4a7c87dd
--- /dev/null
+++ b/p2p/dandelion/src/tests/pool.rs
@@ -0,0 +1,42 @@
+use std::time::Duration;
+
+use crate::{
+    pool::{start_dandelion_pool, IncomingTx},
+    DandelionConfig, DandelionRouter, Graph, TxState,
+};
+
+use super::*;
+
+#[tokio::test]
+async fn basic_functionality() {
+    let config = DandelionConfig {
+        time_between_hop: Duration::from_millis(175),
+        epoch_duration: Duration::from_secs(0), // make every poll ready change state
+        fluff_probability: 0.2,
+        graph: Graph::FourRegular,
+    };
+
+    let (broadcast_svc, mut broadcast_rx) = mock_broadcast_svc();
+    let (outbound_peer_svc, _outbound_rx) = mock_discover_svc();
+
+    let router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
+
+    let (pool_svc, pool) = mock_in_memory_backing_pool();
+
+    let mut pool_svc = start_dandelion_pool(15, router, pool_svc, config);
+
+    pool_svc
+        .ready()
+        .await
+        .unwrap()
+        .call(IncomingTx {
+            tx: 0_usize,
+            tx_id: 1_usize,
+            tx_state: TxState::Fluff,
+        })
+        .await
+        .unwrap();
+
+    assert!(pool.lock().unwrap().contains_key(&1));
+    assert!(broadcast_rx.try_recv().is_ok())
+}
diff --git a/p2p/dandelion/src/tests/router.rs b/p2p/dandelion/src/tests/router.rs
new file mode 100644
index 00000000..0170edb7
--- /dev/null
+++ b/p2p/dandelion/src/tests/router.rs
@@ -0,0 +1,237 @@
+use std::time::Duration;
+
+use tower::{Service, ServiceExt};
+
+use crate::{DandelionConfig, DandelionRouteReq, DandelionRouter, Graph, TxState};
+
+use super::*;
+
+/// make sure the number of stem peers is correct.
+#[tokio::test]
+async fn number_stems_correct() {
+    let mut config = DandelionConfig {
+        time_between_hop: Duration::from_millis(175),
+        epoch_duration: Duration::from_secs(60_000),
+        fluff_probability: 0.0, // we want to be in stem state
+        graph: Graph::FourRegular,
+    };
+
+    let (broadcast_svc, _broadcast_rx) = mock_broadcast_svc();
+    let (outbound_peer_svc, _outbound_rx) = mock_discover_svc();
+
+    let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
+
+    const FROM_PEER: usize = 20;
+
+    // send a request to make the generic bound inference work, without specifying types.
+    router
+        .ready()
+        .await
+        .unwrap()
+        .call(DandelionRouteReq {
+            tx: 0_usize,
+            state: TxState::Stem { from: FROM_PEER },
+        })
+        .await
+        .unwrap();
+
+    assert_eq!(router.stem_peers.len(), 2); // Graph::FourRegular
+
+    config.graph = Graph::Line;
+
+    let (broadcast_svc, _broadcast_rx) = mock_broadcast_svc();
+    let (outbound_peer_svc, _outbound_rx) = mock_discover_svc();
+
+    let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
+
+    // send a request to make the generic bound inference work, without specifying types.
+    router
+        .ready()
+        .await
+        .unwrap()
+        .call(DandelionRouteReq {
+            tx: 0_usize,
+            state: TxState::Stem { from: FROM_PEER },
+        })
+        .await
+        .unwrap();
+
+    assert_eq!(router.stem_peers.len(), 1); // Graph::Line
+}
+
+/// make sure a tx from the same peer goes to the same peer.
+#[tokio::test]
+async fn routes_consistent() {
+    let config = DandelionConfig {
+        time_between_hop: Duration::from_millis(175),
+        epoch_duration: Duration::from_secs(60_000),
+        fluff_probability: 0.0, // we want this test to always stem
+        graph: Graph::FourRegular,
+    };
+
+    let (broadcast_svc, mut broadcast_rx) = mock_broadcast_svc();
+    let (outbound_peer_svc, mut outbound_rx) = mock_discover_svc();
+
+    let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
+
+    const FROM_PEER: usize = 20;
+
+    // The router will panic if it attempts to flush.
+    broadcast_rx.close();
+
+    for _ in 0..30 {
+        router
+            .ready()
+            .await
+            .unwrap()
+            .call(DandelionRouteReq {
+                tx: 0_usize,
+                state: TxState::Stem { from: FROM_PEER },
+            })
+            .await
+            .unwrap();
+    }
+
+    let mut stem_peer = None;
+    let mut total_txs = 0;
+
+    while let Ok((peer_id, _)) = outbound_rx.try_recv() {
+        let stem_peer = stem_peer.get_or_insert(peer_id);
+        // make sure all peer ids are the same (so the same svc got all txs).
+        assert_eq!(*stem_peer, peer_id);
+
+        total_txs += 1;
+    }
+
+    assert_eq!(total_txs, 30);
+}
+
+/// make sure local txs always stem - even in fluff state.
+#[tokio::test]
+async fn local_always_stem() {
+    let config = DandelionConfig {
+        time_between_hop: Duration::from_millis(175),
+        epoch_duration: Duration::from_secs(60_000),
+        fluff_probability: 1.0, // we want this test to always fluff
+        graph: Graph::FourRegular,
+    };
+
+    let (broadcast_svc, mut broadcast_rx) = mock_broadcast_svc();
+    let (outbound_peer_svc, mut outbound_rx) = mock_discover_svc();
+
+    let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
+
+    // The router will panic if it attempts to flush.
+    broadcast_rx.close();
+
+    for _ in 0..30 {
+        router
+            .ready()
+            .await
+            .unwrap()
+            .call(DandelionRouteReq {
+                tx: 0_usize,
+                state: TxState::Local,
+            })
+            .await
+            .unwrap();
+    }
+
+    let mut stem_peer = None;
+    let mut total_txs = 0;
+
+    while let Ok((peer_id, _)) = outbound_rx.try_recv() {
+        let stem_peer = stem_peer.get_or_insert(peer_id);
+        // make sure all peer ids are the same (so the same svc got all txs).
+        assert_eq!(*stem_peer, peer_id);
+
+        total_txs += 1;
+    }
+
+    assert_eq!(total_txs, 30);
+}
+
+/// make sure stem txs fluff when the router is in the fluff state.
+#[tokio::test]
+async fn stem_txs_fluff_in_state_fluff() {
+    let config = DandelionConfig {
+        time_between_hop: Duration::from_millis(175),
+        epoch_duration: Duration::from_secs(60_000),
+        fluff_probability: 1.0, // we want this test to always fluff
+        graph: Graph::FourRegular,
+    };
+
+    let (broadcast_svc, mut broadcast_rx) = mock_broadcast_svc();
+    let (outbound_peer_svc, mut outbound_rx) = mock_discover_svc();
+
+    let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
+
+    const FROM_PEER: usize = 20;
+
+    // The router will panic if it attempts to stem.
+    outbound_rx.close();
+
+    for _ in 0..30 {
+        router
+            .ready()
+            .await
+            .unwrap()
+            .call(DandelionRouteReq {
+                tx: 0_usize,
+                state: TxState::Stem { from: FROM_PEER },
+            })
+            .await
+            .unwrap();
+    }
+
+    let mut total_txs = 0;
+
+    while broadcast_rx.try_recv().is_ok() {
+        total_txs += 1;
+    }
+
+    assert_eq!(total_txs, 30);
+}
+
+/// make sure we get all txs sent to the router out in a stem or a fluff.
+#[tokio::test]
+async fn random_routing() {
+    let config = DandelionConfig {
+        time_between_hop: Duration::from_millis(175),
+        epoch_duration: Duration::from_secs(0), // make every poll ready change state
+        fluff_probability: 0.2,
+        graph: Graph::FourRegular,
+    };
+
+    let (broadcast_svc, mut broadcast_rx) = mock_broadcast_svc();
+    let (outbound_peer_svc, mut outbound_rx) = mock_discover_svc();
+
+    let mut router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
+
+    for _ in 0..3000 {
+        router
+            .ready()
+            .await
+            .unwrap()
+            .call(DandelionRouteReq {
+                tx: 0_usize,
+                state: TxState::Stem {
+                    from: rand::random(),
+                },
+            })
+            .await
+            .unwrap();
+    }
+
+    let mut total_txs = 0;
+
+    while broadcast_rx.try_recv().is_ok() {
+        total_txs += 1;
+    }
+
+    while outbound_rx.try_recv().is_ok() {
+        total_txs += 1;
+    }
+
+    assert_eq!(total_txs, 3000);
+}
diff --git a/p2p/dandelion/src/traits.rs b/p2p/dandelion/src/traits.rs
new file mode 100644
index 00000000..c84ecf04
--- /dev/null
+++ b/p2p/dandelion/src/traits.rs
@@ -0,0 +1,49 @@
+/// A request to diffuse a transaction to all connected peers.
+///
+/// This crate does not handle diffusion; it is left to implementers.
+pub struct DiffuseRequest<Tx>(pub Tx);
+
+/// A request sent to a single peer to stem this transaction.
+pub struct StemRequest<Tx>(pub Tx);
+
+#[cfg(feature = "txpool")]
+/// A request sent to the backing transaction pool storage.
+pub enum TxStoreRequest<Tx, TxID> {
+    /// A request to store a transaction with the ID to store it under and the pool to store it in.
+    ///
+    /// If the tx is already in the pool then do nothing, unless the tx is in the stem pool then move it
+    /// to the fluff pool, _if this request state is fluff_.
+    Store(Tx, TxID, crate::State),
+    /// A request to retrieve a `Tx` with the given ID from the pool, should not remove that tx from the pool.
+    ///
+    /// Must return [`TxStoreResponse::Transaction`]
+    Get(TxID),
+    /// Promote a transaction from the stem pool to the public pool.
+    ///
+    /// If the tx is already in the fluff pool do nothing.
+    ///
+    /// This should not error if the tx isn't in the pool at all.
+    Promote(TxID),
+    /// A request to check if a transaction is in the pool.
+    ///
+    /// Must return [`TxStoreResponse::Contains`]
+    Contains(TxID),
+    /// Returns the IDs of all the transactions in the stem pool.
+    ///
+    /// Must return [`TxStoreResponse::IDs`]
+    IDsInStemPool,
+}
+
+#[cfg(feature = "txpool")]
+/// A response sent back from the backing transaction pool.
+pub enum TxStoreResponse<Tx, TxID> {
+    /// A generic ok response.
+    Ok,
+    /// A response containing a [`Option`] for if the transaction is in the pool (Some) or not (None) and in which pool
+    /// the tx is in.
+    Contains(Option<crate::State>),
+    /// A response containing a requested transaction.
+    Transaction(Option<(Tx, crate::State)>),
+    /// A list of transaction IDs.
+    IDs(Vec<TxID>),
+}
diff --git a/p2p/monero-p2p/Cargo.toml b/p2p/monero-p2p/Cargo.toml
index 50202f82..e416fbbb 100644
--- a/p2p/monero-p2p/Cargo.toml
+++ b/p2p/monero-p2p/Cargo.toml
@@ -17,14 +17,14 @@ monero-pruning = { path = "../../pruning" }
 tokio = { workspace = true, features = ["net", "sync", "macros", "time"]}
 tokio-util = { workspace = true, features = ["codec"] }
 tokio-stream = { workspace = true, features = ["sync"]}
-futures = { workspace = true, features = ["std", "async-await"] }
+futures = { workspace = true, features = ["std"] }
 async-trait = { workspace = true }
-tower = { workspace = true, features = ["util"] }
+tower = { workspace = true, features = ["util", "tracing"] }
 
 thiserror = { workspace = true }
 tracing = { workspace = true, features = ["std", "attributes"] }
 
-borsh = { workspace = true, default-features = false, features = ["derive", "std"], optional = true }
+borsh = { workspace = true, features = ["derive", "std"], optional = true }
 
 [dev-dependencies]
 cuprate-test-utils = {path = "../../test-utils"}
diff --git a/p2p/monero-p2p/src/client.rs b/p2p/monero-p2p/src/client.rs
index 8e3ca488..8aab306d 100644
--- a/p2p/monero-p2p/src/client.rs
+++ b/p2p/monero-p2p/src/client.rs
@@ -1,32 +1,40 @@
-use std::fmt::Formatter;
 use std::{
-    fmt::{Debug, Display},
-    task::{Context, Poll},
+    fmt::{Debug, Display, Formatter},
+    sync::Arc,
+    task::{ready, Context, Poll},
 };
 
 use futures::channel::oneshot;
-use tokio::{sync::mpsc, task::JoinHandle};
-use tokio_util::sync::PollSender;
+use tokio::{
+    sync::{mpsc, OwnedSemaphorePermit, Semaphore},
+    task::JoinHandle,
+};
+use tokio_util::sync::PollSemaphore;
 use tower::Service;
 
 use cuprate_helper::asynch::InfallibleOneshotReceiver;
 
 use crate::{
-    handles::ConnectionHandle, NetworkZone, PeerError, PeerRequest, PeerResponse, SharedError,
+    handles::ConnectionHandle, ConnectionDirection, NetworkZone, PeerError, PeerRequest,
+    PeerResponse, SharedError,
 };
 
 mod connection;
 mod connector;
 pub mod handshaker;
+mod timeout_monitor;
 
 pub use connector::{ConnectRequest, Connector};
 pub use handshaker::{DoHandshakeRequest, HandShaker, HandshakeError};
+use monero_pruning::PruningSeed;
 
 /// An internal identifier for a given peer, will be their address if known
 /// or a random u64 if not.
 #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
 pub enum InternalPeerID<A> {
+    /// A known address.
     KnownAddr(A),
+    /// An unknown address (probably an inbound anonymity network connection).
     Unknown(u64),
 }
 
@@ -34,38 +42,72 @@ impl<A: Display> Display for InternalPeerID<A> {
     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
         match self {
             InternalPeerID::KnownAddr(addr) => addr.fmt(f),
-            InternalPeerID::Unknown(id) => f.write_str(&format!("Unknown addr, ID: {}", id)),
+            InternalPeerID::Unknown(id) => f.write_str(&format!("Unknown, ID: {id}")),
         }
     }
 }
 
+/// Information on a connected peer.
+#[derive(Debug, Clone)]
+pub struct PeerInformation<A> {
+    /// The internal peer ID of this peer.
+    pub id: InternalPeerID<A>,
+    /// The [`ConnectionHandle`] for this peer, allows banning this peer and checking if it is still
+    /// alive.
+    pub handle: ConnectionHandle,
+    /// The direction of this connection (inbound|outbound).
+    pub direction: ConnectionDirection,
+    /// The peers pruning seed.
+    pub pruning_seed: PruningSeed,
+}
+
+/// This represents a connection to a peer.
+///
+/// It allows sending requests to the peer, but only does minimal checks that the data returned
+/// is the data asked for, i.e. for a certain request the only thing checked will be that the response
+/// is the correct response for that request, not that the response contains the correct data.
 pub struct Client<Z: NetworkZone> {
-    id: InternalPeerID<Z::Addr>,
-    handle: ConnectionHandle,
+    /// Information on the connected peer.
+    pub info: PeerInformation<Z::Addr>,
 
-    connection_tx: PollSender<connection::ConnectionTaskRequest>,
+    /// The channel to the [`Connection`](connection::Connection) task.
+    connection_tx: mpsc::Sender<connection::ConnectionTaskRequest>,
+    /// The [`JoinHandle`] of the spawned connection task.
     connection_handle: JoinHandle<()>,
+    /// The [`JoinHandle`] of the spawned timeout monitor task.
+    timeout_handle: JoinHandle<Result<(), tower::BoxError>>,
 
+    /// The semaphore that limits the requests sent to the peer.
+    semaphore: PollSemaphore,
+    /// A permit for the semaphore, will be [`Some`] after `poll_ready` returns ready.
+    permit: Option<OwnedSemaphorePermit>,
+
+    /// The error slot shared between the [`Client`] and [`Connection`](connection::Connection).
     error: SharedError<PeerError>,
 }
 
 impl<Z: NetworkZone> Client<Z> {
-    pub fn new(
-        id: InternalPeerID<Z::Addr>,
-        handle: ConnectionHandle,
+    /// Creates a new [`Client`].
+    pub(crate) fn new(
+        info: PeerInformation<Z::Addr>,
         connection_tx: mpsc::Sender<connection::ConnectionTaskRequest>,
         connection_handle: JoinHandle<()>,
+        timeout_handle: JoinHandle<Result<(), tower::BoxError>>,
+        semaphore: Arc<Semaphore>,
         error: SharedError<PeerError>,
     ) -> Self {
         Self {
-            id,
-            handle,
-            connection_tx: PollSender::new(connection_tx),
+            info,
+            connection_tx,
+            timeout_handle,
+            semaphore: PollSemaphore::new(semaphore),
+            permit: None,
             connection_handle,
             error,
         }
     }
 
+    /// Internal function to set an error on the [`SharedError`].
     fn set_err(&self, err: PeerError) -> tower::BoxError {
         let err_str = err.to_string();
         match self.error.try_insert_err(err) {
@@ -86,25 +128,38 @@ impl<Z: NetworkZone> Service<PeerRequest> for Client<Z> {
             return Poll::Ready(Err(err.to_string().into()));
         }
 
-        if self.connection_handle.is_finished() {
+        if self.connection_handle.is_finished() || self.timeout_handle.is_finished() {
             let err = self.set_err(PeerError::ClientChannelClosed);
             return Poll::Ready(Err(err));
         }
 
-        self.connection_tx
-            .poll_reserve(cx)
-            .map_err(|_| PeerError::ClientChannelClosed.into())
+        if self.permit.is_some() {
+            return Poll::Ready(Ok(()));
+        }
+
+        let permit = ready!(self.semaphore.poll_acquire(cx))
+            .expect("Client semaphore should not be closed!");
+
+        self.permit = Some(permit);
+
+        Poll::Ready(Ok(()))
     }
 
     fn call(&mut self, request: PeerRequest) -> Self::Future {
+        let permit = self
+            .permit
+            .take()
+            .expect("poll_ready did not return ready before call to call");
+
         let (tx, rx) = oneshot::channel();
         let req = connection::ConnectionTaskRequest {
             response_channel: tx,
             request,
+            permit: Some(permit),
         };
 
         self.connection_tx
-            .send_item(req)
+            .try_send(req)
             .map_err(|_| ())
             .expect("poll_ready should have been called");
 
diff --git a/p2p/monero-p2p/src/client/connection.rs b/p2p/monero-p2p/src/client/connection.rs
index b458c3da..266dcf7f 100644
--- a/p2p/monero-p2p/src/client/connection.rs
+++ b/p2p/monero-p2p/src/client/connection.rs
@@ -1,37 +1,59 @@
-use std::sync::Arc;
+//! The Connection Task
+//!
+//! This module handles routing requests from a [`Client`](crate::client::Client) or a broadcast channel to
+//! a peer. This module also handles routing requests from the connected peer to a request handler.
+//!
+use std::pin::Pin;
 
 use futures::{
     channel::oneshot,
     stream::{Fuse, FusedStream},
-    SinkExt, StreamExt,
+    SinkExt, Stream, StreamExt,
 };
-use tokio::sync::{broadcast, mpsc};
-use tokio_stream::wrappers::{BroadcastStream, ReceiverStream};
+use tokio::{
+    sync::{mpsc, OwnedSemaphorePermit},
+    time::{sleep, timeout, Sleep},
+};
+use tokio_stream::wrappers::ReceiverStream;
 use tower::ServiceExt;
 
 use monero_wire::{LevinCommand, Message, ProtocolMessage};
 
 use crate::{
-    handles::ConnectionGuard, MessageID, NetworkZone, PeerBroadcast, PeerError, PeerRequest,
-    PeerRequestHandler, PeerResponse, SharedError,
+    constants::{REQUEST_TIMEOUT, SENDING_TIMEOUT},
+    handles::ConnectionGuard,
+    BroadcastMessage, MessageID, NetworkZone, PeerError, PeerRequest, PeerRequestHandler,
+    PeerResponse, SharedError,
 };
 
+/// A request to the connection task from a [`Client`](crate::client::Client).
 pub struct ConnectionTaskRequest {
+    /// The request.
     pub request: PeerRequest,
+    /// The response channel.
     pub response_channel: oneshot::Sender<Result<PeerResponse, tower::BoxError>>,
+    /// A permit for this request.
+    pub permit: Option<OwnedSemaphorePermit>,
 }
 
+/// The connection state.
 pub enum State {
+    /// Waiting for a request from Cuprate or the connected peer.
     WaitingForRequest,
+    /// Waiting for a response from the peer.
     WaitingForResponse {
+        /// The request's ID.
         request_id: MessageID,
+        /// The channel to send the response down.
         tx: oneshot::Sender<Result<PeerResponse, tower::BoxError>>,
+        /// A permit for this request.
+        _req_permit: Option<OwnedSemaphorePermit>,
     },
 }
 
 /// Returns if the [`LevinCommand`] is the correct response message for our request.
 ///
-/// e.g that we didn't get a block for a txs request.
+/// e.g. that we didn't get a block for a txs request.
 fn levin_command_response(message_id: &MessageID, command: LevinCommand) -> bool {
     matches!(
         (message_id, command),
@@ -49,46 +71,82 @@ fn levin_command_response(message_id: &MessageID, command: LevinCommand) -> bool
     )
 }
 
-pub struct Connection<Z: NetworkZone, ReqHndlr> {
+/// This represents a connection to a peer.
+pub struct Connection<Z: NetworkZone, ReqHndlr, BrdcstStrm> {
+    /// The peer sink - where we send messages to the peer.
     peer_sink: Z::Sink,
 
+    /// The connections current state.
     state: State,
-    client_rx: Fuse<ReceiverStream<ConnectionTaskRequest>>,
-    broadcast_rx: Fuse<BroadcastStream<Arc<PeerBroadcast>>>,
+    /// Will be [`Some`] if we are expecting a response from the peer.
+    request_timeout: Option<Pin<Box<Sleep>>>,
 
+    /// The client channel where requests from Cuprate to this peer will come from for us to route.
+    client_rx: Fuse<ReceiverStream<ConnectionTaskRequest>>,
+    /// A stream of messages to broadcast from Cuprate.
+    broadcast_stream: Pin<Box<BrdcstStrm>>,
+
+    /// The inner handler for any requests that come from the requested peer.
     peer_request_handler: ReqHndlr,
 
+    /// The connection guard which will send signals to other parts of Cuprate when this connection is dropped.
     connection_guard: ConnectionGuard,
+    /// An error slot which is shared with the client.
     error: SharedError<PeerError>,
 }
 
-impl<Z: NetworkZone, ReqHndlr> Connection<Z, ReqHndlr>
+impl<Z: NetworkZone, ReqHndlr, BrdcstStrm> Connection<Z, ReqHndlr, BrdcstStrm>
 where
     ReqHndlr: PeerRequestHandler,
+    BrdcstStrm: Stream<Item = BroadcastMessage> + Send + 'static,
 {
+    /// Create a new connection struct.
     pub fn new(
         peer_sink: Z::Sink,
         client_rx: mpsc::Receiver<ConnectionTaskRequest>,
-        broadcast_rx: broadcast::Receiver<Arc<PeerBroadcast>>,
+        broadcast_stream: BrdcstStrm,
         peer_request_handler: ReqHndlr,
         connection_guard: ConnectionGuard,
         error: SharedError<PeerError>,
-    ) -> Connection<Z, ReqHndlr> {
+    ) -> Connection<Z, ReqHndlr, BrdcstStrm> {
         Connection {
             peer_sink,
             state: State::WaitingForRequest,
+            request_timeout: None,
             client_rx: ReceiverStream::new(client_rx).fuse(),
-            broadcast_rx: BroadcastStream::new(broadcast_rx).fuse(),
+            broadcast_stream: Box::pin(broadcast_stream),
             peer_request_handler,
             connection_guard,
             error,
         }
     }
 
+    /// Sends a message to the peer, this function implements a timeout, so we don't get stuck sending a message to the
+    /// peer.
     async fn send_message_to_peer(&mut self, mes: Message) -> Result<(), PeerError> {
-        Ok(self.peer_sink.send(mes.into()).await?)
+        tracing::debug!("Sending message: [{}] to peer", mes.command());
+
+        timeout(SENDING_TIMEOUT, self.peer_sink.send(mes.into()))
+            .await
+            .map_err(|_| PeerError::TimedOut)
+            .and_then(|res| res.map_err(PeerError::BucketError))
     }
 
+    /// Handles a broadcast request from Cuprate.
+    async fn handle_client_broadcast(&mut self, mes: BroadcastMessage) -> Result<(), PeerError> {
+        match mes {
+            BroadcastMessage::NewFluffyBlock(block) => {
+                self.send_message_to_peer(Message::Protocol(ProtocolMessage::NewFluffyBlock(block)))
+                    .await
+            }
+            BroadcastMessage::NewTransaction(txs) => {
+                self.send_message_to_peer(Message::Protocol(ProtocolMessage::NewTransactions(txs)))
+                    .await
+            }
+        }
+    }
+
+    /// Handles a request from Cuprate, unlike a broadcast this request will be directed specifically at this peer.
     async fn handle_client_request(&mut self, req: ConnectionTaskRequest) -> Result<(), PeerError> {
         tracing::debug!("handling client request, id: {:?}", req.request.id());
 
@@ -96,21 +154,34 @@ where
             self.state = State::WaitingForResponse {
                 request_id: req.request.id(),
                 tx: req.response_channel,
+                _req_permit: req.permit,
             };
+
             self.send_message_to_peer(req.request.into()).await?;
-        } else {
-            let res = self.send_message_to_peer(req.request.into()).await;
-            if let Err(e) = res {
-                let err_str = e.to_string();
-                let _ = req.response_channel.send(Err(err_str.clone().into()));
-                Err(e)?
-            } else {
-                req.response_channel.send(Ok(PeerResponse::NA));
-            }
+            // Set the timeout after sending the message, TODO: Is this a good idea.
+            self.request_timeout = Some(Box::pin(sleep(REQUEST_TIMEOUT)));
+            return Ok(());
         }
+
+        // INVARIANT: This function cannot exit early without sending a response back down the
+        // response channel.
+        let res = self.send_message_to_peer(req.request.into()).await;
+
+        // send the response now, the request does not need a response from the peer.
+        if let Err(e) = res {
+            // can't clone the error so turn it to a string first, hacky but oh well.
+            let err_str = e.to_string();
+            let _ = req.response_channel.send(Err(err_str.clone().into()));
+            return Err(e);
+        } else {
+            // We still need to respond even if the response is this.
+            let _ = req.response_channel.send(Ok(PeerResponse::NA));
+        }
+
         Ok(())
     }
 
+    /// Handles a request from the connected peer to this node.
     async fn handle_peer_request(&mut self, req: PeerRequest) -> Result<(), PeerError> {
         tracing::debug!("Received peer request: {:?}", req.id());
 
@@ -120,12 +191,19 @@ where
             return Ok(());
         }
 
-        self.send_message_to_peer(res.try_into().unwrap()).await
+        self.send_message_to_peer(
+            res.try_into()
+                .expect("We just checked if the response was `NA`"),
+        )
+        .await
     }
 
+    /// Handles a message from a peer when we are in [`State::WaitingForResponse`].
     async fn handle_potential_response(&mut self, mes: Message) -> Result<(), PeerError> {
         tracing::debug!("Received peer message, command: {:?}", mes.command());
 
+        // If the message is definitely a request then there is no way it can be a response to
+        // our request.
         if mes.is_request() {
             return self.handle_peer_request(mes.try_into().unwrap()).await;
         }
@@ -134,6 +212,7 @@ where
             panic!("Not in correct state, can't receive response!")
         };
 
+        // Check if the message is a response to our request.
         if levin_command_response(request_id, mes.command()) {
             // TODO: Do more checks before returning response.
 
@@ -143,7 +222,12 @@ where
                 panic!("Not in correct state, can't receive response!")
             };
 
-            let _ = tx.send(Ok(mes.try_into().unwrap()));
+            let _ = tx.send(Ok(mes
+                .try_into()
+                .map_err(|_| PeerError::PeerSentInvalidMessage)?));
+
+            self.request_timeout = None;
+
             Ok(())
         } else {
             self.handle_peer_request(
@@ -154,15 +238,21 @@ where
         }
     }
 
+    /// The main-loop for when we are in [`State::WaitingForRequest`].
     async fn state_waiting_for_request<Str>(&mut self, stream: &mut Str) -> Result<(), PeerError>
     where
         Str: FusedStream<Item = Result<Message, monero_wire::BucketError>> + Unpin,
     {
         tracing::debug!("waiting for peer/client request.");
+
         tokio::select! {
             biased;
-            broadcast_req = self.broadcast_rx.next() => {
-                todo!()
+            broadcast_req = self.broadcast_stream.next() => {
+                if let Some(broadcast_req) = broadcast_req {
+                    self.handle_client_broadcast(broadcast_req).await
+                } else {
+                    Err(PeerError::ClientChannelClosed)
+                }
             }
             client_req = self.client_rx.next() => {
                 if let Some(client_req) = client_req {
@@ -181,16 +271,26 @@ where
         }
     }
 
+    /// The main-loop for when we are in [`State::WaitingForResponse`].
     async fn state_waiting_for_response<Str>(&mut self, stream: &mut Str) -> Result<(), PeerError>
     where
         Str: FusedStream<Item = Result<Message, monero_wire::BucketError>> + Unpin,
     {
-        tracing::debug!("waiting for peer response..");
+        tracing::debug!("waiting for peer response.");
+
         tokio::select! {
             biased;
-            broadcast_req = self.broadcast_rx.next() => {
-                todo!()
+            _ = self.request_timeout.as_mut().expect("Request timeout was not set!") => {
+                Err(PeerError::ClientChannelClosed)
             }
+            broadcast_req = self.broadcast_stream.next() => {
+                if let Some(broadcast_req) = broadcast_req {
+                    self.handle_client_broadcast(broadcast_req).await
+                } else {
+                    Err(PeerError::ClientChannelClosed)
+                }
+            }
+            // We don't wait for client requests as we are already handling one.
             peer_message = stream.next() => {
                 if let Some(peer_message) = peer_message {
                     self.handle_potential_response(peer_message?).await
@@ -201,6 +301,9 @@ where
         }
     }
 
+    /// Runs the Connection handler logic, this should be put in a separate task.
+    ///
+    /// `eager_protocol_messages` are protocol messages that we received during a handshake.
     pub async fn run<Str>(mut self, mut stream: Str, eager_protocol_messages: Vec<ProtocolMessage>)
     where
         Str: FusedStream<Item = Result<Message, monero_wire::BucketError>> + Unpin,
@@ -241,8 +344,11 @@ where
         }
     }
 
+    /// Shuts down the connection, flushing pending requests and setting the error slot, if it hasn't been
+    /// set already.
     fn shutdown(mut self, err: PeerError) {
         tracing::debug!("Connection task shutting down: {}", err);
+
         let mut client_rx = self.client_rx.into_inner().into_inner();
         client_rx.close();
 
@@ -251,6 +357,12 @@ where
             tracing::debug!("Shared error already contains an error: {}", err);
         }
 
+        if let State::WaitingForResponse { tx, .. } =
+            std::mem::replace(&mut self.state, State::WaitingForRequest)
+        {
+            let _ = tx.send(Err(err_str.clone().into()));
+        }
+
         while let Ok(req) = client_rx.try_recv() {
             let _ = req.response_channel.send(Err(err_str.clone().into()));
         }
diff --git a/p2p/monero-p2p/src/client/connector.rs b/p2p/monero-p2p/src/client/connector.rs
index 3f9f6047..278d7407 100644
--- a/p2p/monero-p2p/src/client/connector.rs
+++ b/p2p/monero-p2p/src/client/connector.rs
@@ -1,39 +1,58 @@
+//! Connector
+//!
+//! This module handles connecting to peers and giving the sink/stream to the handshaker which will then
+//! perform a handshake and create a [`Client`].
+//!
+//! This is where outbound connections are created.
+//!
 use std::{
     future::Future,
     pin::Pin,
     task::{Context, Poll},
 };
 
-use futures::FutureExt;
+use futures::{FutureExt, Stream};
 use tokio::sync::OwnedSemaphorePermit;
 use tower::{Service, ServiceExt};
 
 use crate::{
     client::{Client, DoHandshakeRequest, HandShaker, HandshakeError, InternalPeerID},
-    AddressBook, ConnectionDirection, CoreSyncSvc, NetworkZone, PeerRequestHandler,
+    AddressBook, BroadcastMessage, ConnectionDirection, CoreSyncSvc, NetworkZone,
+    PeerRequestHandler, PeerSyncSvc,
 };
 
+/// A request to connect to a peer.
 pub struct ConnectRequest<Z: NetworkZone> {
+    /// The peer's address.
     pub addr: Z::Addr,
+    /// A permit which will be held by the connection allowing you to set limits on the number of
+    /// connections.
     pub permit: OwnedSemaphorePermit,
 }
 
-pub struct Connector<Z: NetworkZone, AdrBook, CSync, ReqHdlr> {
-    handshaker: HandShaker<Z, AdrBook, CSync, ReqHdlr>,
+/// The connector service, this service connects to a peer and returns the [`Client`].
+pub struct Connector<Z: NetworkZone, AdrBook, CSync, PSync, ReqHdlr, BrdcstStrmMkr> {
+    handshaker: HandShaker<Z, AdrBook, CSync, PSync, ReqHdlr, BrdcstStrmMkr>,
 }
 
-impl<Z: NetworkZone, AdrBook, CSync, ReqHdlr> Connector<Z, AdrBook, CSync, ReqHdlr> {
-    pub fn new(handshaker: HandShaker<Z, AdrBook, CSync, ReqHdlr>) -> Self {
+impl<Z: NetworkZone, AdrBook, CSync, PSync, ReqHdlr, BrdcstStrmMkr>
+    Connector<Z, AdrBook, CSync, PSync, ReqHdlr, BrdcstStrmMkr>
+{
+    /// Create a new connector from a handshaker.
+    pub fn new(handshaker: HandShaker<Z, AdrBook, CSync, PSync, ReqHdlr, BrdcstStrmMkr>) -> Self {
         Self { handshaker }
     }
 }
 
-impl<Z: NetworkZone, AdrBook, CSync, ReqHdlr> Service<ConnectRequest<Z>>
-    for Connector<Z, AdrBook, CSync, ReqHdlr>
+impl<Z: NetworkZone, AdrBook, CSync, PSync, ReqHdlr, BrdcstStrmMkr, BrdcstStrm>
+    Service<ConnectRequest<Z>> for Connector<Z, AdrBook, CSync, PSync, ReqHdlr, BrdcstStrmMkr>
 where
     AdrBook: AddressBook<Z> + Clone,
     CSync: CoreSyncSvc + Clone,
+    PSync: PeerSyncSvc<Z> + Clone,
     ReqHdlr: PeerRequestHandler + Clone,
+    BrdcstStrm: Stream<Item = BroadcastMessage> + Send + 'static,
+    BrdcstStrmMkr: Fn(InternalPeerID<Z::Addr>) -> BrdcstStrm + Clone + Send + 'static,
 {
     type Response = Client<Z>;
     type Error = HandshakeError;
diff --git a/p2p/monero-p2p/src/client/handshaker.rs b/p2p/monero-p2p/src/client/handshaker.rs
index bad4882b..03f3f563 100644
--- a/p2p/monero-p2p/src/client/handshaker.rs
+++ b/p2p/monero-p2p/src/client/handshaker.rs
@@ -10,16 +10,15 @@ use std::{
     pin::Pin,
     sync::Arc,
     task::{Context, Poll},
-    time::Duration,
 };
 
-use futures::{FutureExt, SinkExt, StreamExt};
+use futures::{FutureExt, SinkExt, Stream, StreamExt};
 use tokio::{
-    sync::{broadcast, mpsc, OwnedSemaphorePermit},
+    sync::{mpsc, OwnedSemaphorePermit, Semaphore},
     time::{error::Elapsed, timeout},
 };
 use tower::{Service, ServiceExt};
-use tracing::{info_span, instrument, Instrument};
+use tracing::{info_span, Instrument};
 
 use monero_pruning::{PruningError, PruningSeed};
 use monero_wire::{
@@ -28,40 +27,25 @@ use monero_wire::{
         PING_OK_RESPONSE_STATUS_TEXT,
     },
     common::PeerSupportFlags,
-    BasicNodeData, BucketError, CoreSyncData, LevinCommand, Message, RequestMessage,
-    ResponseMessage,
+    BasicNodeData, BucketError, LevinCommand, Message, RequestMessage, ResponseMessage,
 };
 
 use crate::{
-    client::{connection::Connection, Client, InternalPeerID},
+    client::{
+        connection::Connection, timeout_monitor::connection_timeout_monitor_task, Client,
+        InternalPeerID, PeerInformation,
+    },
+    constants::{
+        HANDSHAKE_TIMEOUT, MAX_EAGER_PROTOCOL_MESSAGES, MAX_PEERS_IN_PEER_LIST_MESSAGE,
+        PING_TIMEOUT,
+    },
     handles::HandleBuilder,
-    AddressBook, AddressBookRequest, AddressBookResponse, ConnectionDirection, CoreSyncDataRequest,
-    CoreSyncDataResponse, CoreSyncSvc, MessageID, NetZoneAddress, NetworkZone, PeerBroadcast,
-    PeerRequestHandler, SharedError, MAX_PEERS_IN_PEER_LIST_MESSAGE,
+    services::PeerSyncRequest,
+    AddressBook, AddressBookRequest, AddressBookResponse, BroadcastMessage, ConnectionDirection,
+    CoreSyncDataRequest, CoreSyncDataResponse, CoreSyncSvc, NetZoneAddress, NetworkZone,
+    PeerRequestHandler, PeerSyncSvc, SharedError,
 };
 
-/// This is a Cuprate specific constant.
-///
-/// When completing a handshake monerod might send protocol messages before the handshake is actually
-/// complete, this is a problem for Cuprate as we must complete the handshake before responding to any
-/// protocol requests. So when we receive a protocol message during a handshake we keep them around to handle
-/// after the handshake.
-///
-/// Because we use the [bytes crate](https://crates.io/crates/bytes) in monero-wire for zero-copy parsing
-/// it is not safe to keep too many of these messages around for long.
-const MAX_EAGER_PROTOCOL_MESSAGES: usize = 1;
-/// The time given to complete a handshake before the handshake fails.
-const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(120);
-
-/// A timeout put on pings during handshakes.
-///
-/// When we receive an inbound connection we open an outbound connection to the node and send a ping message
-/// to see if we can reach the node, so we can add it to our address book.
-///
-/// This timeout must be significantly shorter than [`HANDSHAKE_TIMEOUT`] so we don't drop inbound connections that
-/// don't have ports open.
-const PING_TIMEOUT: Duration = Duration::from_secs(10);
-
 #[derive(Debug, thiserror::Error)]
 pub enum HandshakeError {
     #[error("The handshake timed out")]
@@ -100,52 +84,60 @@ pub struct DoHandshakeRequest<Z: NetworkZone> {
 
 /// The peer handshaking service.
 #[derive(Debug, Clone)]
-pub struct HandShaker<Z: NetworkZone, AdrBook, CSync, ReqHdlr> {
+pub struct HandShaker<Z: NetworkZone, AdrBook, CSync, PSync, ReqHdlr, BrdcstStrmMkr> {
     /// The address book service.
     address_book: AdrBook,
     /// The core sync data service.
     core_sync_svc: CSync,
+    /// The peer sync service.
+    peer_sync_svc: PSync,
     /// The peer request handler service.
     peer_request_svc: ReqHdlr,
 
     /// Our [`BasicNodeData`]
     our_basic_node_data: BasicNodeData,
 
-    /// The channel to broadcast messages to all peers created with this handshaker.
-    broadcast_tx: broadcast::Sender<Arc<PeerBroadcast>>,
+    /// A function that returns a stream that will give items to be broadcast by a connection.
+    broadcast_stream_maker: BrdcstStrmMkr,
 
     /// The network zone.
     _zone: PhantomData<Z>,
 }
 
-impl<Z: NetworkZone, AdrBook, CSync, ReqHdlr> HandShaker<Z, AdrBook, CSync, ReqHdlr> {
+impl<Z: NetworkZone, AdrBook, CSync, PSync, ReqHdlr, BrdcstStrmMkr>
+    HandShaker<Z, AdrBook, CSync, PSync, ReqHdlr, BrdcstStrmMkr>
+{
     /// Creates a new handshaker.
     pub fn new(
         address_book: AdrBook,
+        peer_sync_svc: PSync,
         core_sync_svc: CSync,
         peer_request_svc: ReqHdlr,
-
-        broadcast_tx: broadcast::Sender<Arc<PeerBroadcast>>,
+        broadcast_stream_maker: BrdcstStrmMkr,
 
         our_basic_node_data: BasicNodeData,
     ) -> Self {
         Self {
             address_book,
+            peer_sync_svc,
             core_sync_svc,
             peer_request_svc,
-            broadcast_tx,
+            broadcast_stream_maker,
             our_basic_node_data,
             _zone: PhantomData,
         }
     }
 }
 
-impl<Z: NetworkZone, AdrBook, CSync, ReqHdlr> Service<DoHandshakeRequest<Z>>
-    for HandShaker<Z, AdrBook, CSync, ReqHdlr>
+impl<Z: NetworkZone, AdrBook, CSync, PSync, ReqHdlr, BrdcstStrmMkr, BrdcstStrm>
+    Service<DoHandshakeRequest<Z>> for HandShaker<Z, AdrBook, CSync, PSync, ReqHdlr, BrdcstStrmMkr>
 where
     AdrBook: AddressBook<Z> + Clone,
     CSync: CoreSyncSvc + Clone,
+    PSync: PeerSyncSvc<Z> + Clone,
     ReqHdlr: PeerRequestHandler + Clone,
+    BrdcstStrm: Stream<Item = BroadcastMessage> + Send + 'static,
+    BrdcstStrmMkr: Fn(InternalPeerID<Z::Addr>) -> BrdcstStrm + Clone + Send + 'static,
 {
     type Response = Client<Z>;
     type Error = HandshakeError;
@@ -157,11 +149,12 @@ where
     }
 
     fn call(&mut self, req: DoHandshakeRequest<Z>) -> Self::Future {
-        let broadcast_rx = self.broadcast_tx.subscribe();
+        let broadcast_stream_maker = self.broadcast_stream_maker.clone();
 
         let address_book = self.address_book.clone();
         let peer_request_svc = self.peer_request_svc.clone();
         let core_sync_svc = self.core_sync_svc.clone();
+        let peer_sync_svc = self.peer_sync_svc.clone();
         let our_basic_node_data = self.our_basic_node_data.clone();
 
         let span = info_span!(parent: &tracing::Span::current(), "handshaker", addr=%req.addr);
@@ -171,9 +164,10 @@ where
                 HANDSHAKE_TIMEOUT,
                 handshake(
                     req,
-                    broadcast_rx,
+                    broadcast_stream_maker,
                     address_book,
                     core_sync_svc,
+                    peer_sync_svc,
                     peer_request_svc,
                     our_basic_node_data,
                 ),
@@ -226,20 +220,24 @@ pub async fn ping<N: NetworkZone>(addr: N::Addr) -> Result<u64, HandshakeError>
 }
 
 /// This function completes a handshake with the requested peer.
-async fn handshake<Z: NetworkZone, AdrBook, CSync, ReqHdlr>(
+async fn handshake<Z: NetworkZone, AdrBook, CSync, PSync, ReqHdlr, BrdcstStrmMkr, BrdcstStrm>(
     req: DoHandshakeRequest<Z>,
 
-    broadcast_rx: broadcast::Receiver<Arc<PeerBroadcast>>,
+    broadcast_stream_maker: BrdcstStrmMkr,
 
     mut address_book: AdrBook,
     mut core_sync_svc: CSync,
+    mut peer_sync_svc: PSync,
     peer_request_svc: ReqHdlr,
     our_basic_node_data: BasicNodeData,
 ) -> Result<Client<Z>, HandshakeError>
 where
     AdrBook: AddressBook<Z>,
     CSync: CoreSyncSvc,
+    PSync: PeerSyncSvc<Z>,
     ReqHdlr: PeerRequestHandler,
+    BrdcstStrm: Stream<Item = BroadcastMessage> + Send + 'static,
+    BrdcstStrmMkr: Fn(InternalPeerID<Z::Addr>) -> BrdcstStrm + Send + 'static,
 {
     let DoHandshakeRequest {
         addr,
@@ -253,7 +251,7 @@ where
     // see: [`MAX_EAGER_PROTOCOL_MESSAGES`]
     let mut eager_protocol_messages = Vec::new();
 
-    let (peer_core_sync, mut peer_node_data) = match direction {
+    let (peer_core_sync, peer_node_data) = match direction {
         ConnectionDirection::InBound => {
             // Inbound handshake the peer sends the request.
             tracing::debug!("waiting for handshake request.");
@@ -424,40 +422,40 @@ where
         }
     };
 
-    // Tell the core sync service about the new peer.
-    core_sync_svc
-        .ready()
-        .await?
-        .call(CoreSyncDataRequest::HandleIncoming(peer_core_sync.clone()))
-        .await?;
-
     tracing::debug!("Handshake complete.");
 
     // Set up the connection data.
     let error_slot = SharedError::new();
     let (connection_guard, handle) = HandleBuilder::new().with_permit(permit).build();
-    let (connection_tx, client_rx) = mpsc::channel(3);
+    let (connection_tx, client_rx) = mpsc::channel(1);
 
-    let connection = Connection::<Z, _>::new(
+    let connection = Connection::<Z, _, _>::new(
         peer_sink,
         client_rx,
-        broadcast_rx,
+        broadcast_stream_maker(addr),
         peer_request_svc,
         connection_guard,
         error_slot.clone(),
     );
 
-    let connection_handle =
-        tokio::spawn(connection.run(peer_stream.fuse(), eager_protocol_messages));
-
-    let client = Client::<Z>::new(
-        addr,
-        handle.clone(),
-        connection_tx,
-        connection_handle,
-        error_slot,
+    let connection_span = tracing::error_span!(parent: &tracing::Span::none(), "connection", %addr);
+    let connection_handle = tokio::spawn(
+        connection
+            .run(peer_stream.fuse(), eager_protocol_messages)
+            .instrument(connection_span),
     );
 
+    // Tell the core sync service about the new peer.
+    peer_sync_svc
+        .ready()
+        .await?
+        .call(PeerSyncRequest::IncomingCoreSyncData(
+            addr,
+            handle.clone(),
+            peer_core_sync,
+        ))
+        .await?;
+
     // Tell the address book about the new connection.
     address_book
         .ready()
@@ -465,7 +463,7 @@ where
         .call(AddressBookRequest::NewConnection {
             internal_peer_id: addr,
             public_address,
-            handle,
+            handle: handle.clone(),
             id: peer_node_data.peer_id,
             pruning_seed,
             rpc_port: peer_node_data.rpc_port,
@@ -473,6 +471,34 @@ where
         })
         .await?;
 
+    let info = PeerInformation {
+        id: addr,
+        handle,
+        direction,
+        pruning_seed,
+    };
+
+    let semaphore = Arc::new(Semaphore::new(1));
+
+    let timeout_handle = tokio::spawn(connection_timeout_monitor_task(
+        info.id,
+        info.handle.clone(),
+        connection_tx.clone(),
+        semaphore.clone(),
+        address_book,
+        core_sync_svc,
+        peer_sync_svc,
+    ));
+
+    let client = Client::<Z>::new(
+        info,
+        connection_tx,
+        connection_handle,
+        timeout_handle,
+        semaphore,
+        error_slot,
+    );
+
     Ok(client)
 }
 
@@ -485,14 +511,11 @@ async fn send_hs_request<Z: NetworkZone, CSync>(
 where
     CSync: CoreSyncSvc,
 {
-    let CoreSyncDataResponse::Ours(our_core_sync_data) = core_sync_svc
+    let CoreSyncDataResponse(our_core_sync_data) = core_sync_svc
         .ready()
         .await?
-        .call(CoreSyncDataRequest::Ours)
-        .await?
-    else {
-        panic!("core sync service returned wrong response!");
-    };
+        .call(CoreSyncDataRequest)
+        .await?;
 
     let req = HandshakeRequest {
         node_data: our_basic_node_data,
@@ -519,14 +542,11 @@ where
     AdrBook: AddressBook<Z>,
     CSync: CoreSyncSvc,
 {
-    let CoreSyncDataResponse::Ours(our_core_sync_data) = core_sync_svc
+    let CoreSyncDataResponse(our_core_sync_data) = core_sync_svc
         .ready()
         .await?
-        .call(CoreSyncDataRequest::Ours)
-        .await?
-    else {
-        panic!("core sync service returned wrong response!");
-    };
+        .call(CoreSyncDataRequest)
+        .await?;
 
     let AddressBookResponse::Peers(our_peer_list) = address_book
         .ready()
@@ -612,7 +632,7 @@ async fn wait_for_message<Z: NetworkZone>(
                         continue;
                     }
                     RequestMessage::Ping => {
-                        if !allow_support_flag_req {
+                        if !allow_ping {
                             return Err(HandshakeError::PeerSentInvalidMessage(
                                 "Peer sent 2 ping requests",
                             ));
diff --git a/p2p/monero-p2p/src/client/timeout_monitor.rs b/p2p/monero-p2p/src/client/timeout_monitor.rs
new file mode 100644
index 00000000..dcdf85d7
--- /dev/null
+++ b/p2p/monero-p2p/src/client/timeout_monitor.rs
@@ -0,0 +1,135 @@
+//! Timeout Monitor
+//!
+//! This module holds the task that sends periodic [TimedSync](PeerRequest::TimedSync) requests to a peer to make
+//! sure the connection is still active.
+use std::sync::Arc;
+
+use futures::channel::oneshot;
+use monero_wire::admin::TimedSyncRequest;
+use tokio::{
+    sync::{mpsc, Semaphore},
+    time::{interval, MissedTickBehavior},
+};
+use tower::ServiceExt;
+use tracing::instrument;
+
+use crate::{
+    client::{connection::ConnectionTaskRequest, InternalPeerID},
+    constants::{MAX_PEERS_IN_PEER_LIST_MESSAGE, TIMEOUT_INTERVAL},
+    handles::ConnectionHandle,
+    services::{AddressBookRequest, CoreSyncDataRequest, CoreSyncDataResponse, PeerSyncRequest},
+    AddressBook, CoreSyncSvc, NetworkZone, PeerRequest, PeerResponse, PeerSyncSvc,
+};
+
+/// The timeout monitor task, this task will send periodic timed sync requests to the peer to make sure it is still active.
+#[instrument(
+    name = "timeout_monitor",
+    level = "debug",
+    fields(addr = %id),
+    skip_all,
+)]
+pub async fn connection_timeout_monitor_task<N: NetworkZone, AdrBook, CSync, PSync>(
+    id: InternalPeerID<N::Addr>,
+    handle: ConnectionHandle,
+
+    connection_tx: mpsc::Sender<ConnectionTaskRequest>,
+    semaphore: Arc<Semaphore>,
+
+    mut address_book_svc: AdrBook,
+    mut core_sync_svc: CSync,
+    mut peer_core_sync_svc: PSync,
+) -> Result<(), tower::BoxError>
+where
+    AdrBook: AddressBook<N>,
+    CSync: CoreSyncSvc,
+    PSync: PeerSyncSvc<N>,
+{
+    // Instead of tracking the time from last message from the peer and sending a timed sync if this value is too high,
+    // we just send a timed sync every [TIMEOUT_INTERVAL] seconds.
+    let mut interval = interval(TIMEOUT_INTERVAL);
+
+    interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
+
+    // The first tick ticks instantly.
+    interval.tick().await;
+
+    loop {
+        interval.tick().await;
+
+        tracing::trace!("timeout monitor tick.");
+
+        if connection_tx.is_closed() {
+            tracing::debug!("Closing timeout monitor, connection disconnected.");
+            return Ok(());
+        }
+
+        let Ok(permit) = semaphore.clone().try_acquire_owned() else {
+            // If we can't get a permit the connection is currently waiting for a response, so no need to
+            // do a timed sync.
+            continue;
+        };
+
+        let ping_span = tracing::debug_span!("timed_sync");
+
+        // get our core sync data
+        tracing::trace!(parent: &ping_span, "Attempting to get our core sync data");
+        let CoreSyncDataResponse(core_sync_data) = core_sync_svc
+            .ready()
+            .await?
+            .call(CoreSyncDataRequest)
+            .await?;
+
+        let (tx, rx) = oneshot::channel();
+
+        // TODO: Instead of always sending timed syncs, send pings if we have a full peer list.
+
+        tracing::debug!(parent: &ping_span, "Sending timed sync to peer");
+        connection_tx
+            .send(ConnectionTaskRequest {
+                request: PeerRequest::TimedSync(TimedSyncRequest {
+                    payload_data: core_sync_data,
+                }),
+                response_channel: tx,
+                permit: Some(permit),
+            })
+            .await?;
+
+        let PeerResponse::TimedSync(timed_sync) = rx.await?? else {
+            panic!("Connection task returned wrong response!");
+        };
+
+        tracing::debug!(
+            parent: &ping_span,
+            "Received timed sync response, incoming peer list len: {}",
+            timed_sync.local_peerlist_new.len()
+        );
+
+        if timed_sync.local_peerlist_new.len() > MAX_PEERS_IN_PEER_LIST_MESSAGE {
+            return Err("Peer sent too many peers in peer list".into());
+        }
+
+        // Tell our address book about the new peers.
+        address_book_svc
+            .ready()
+            .await?
+            .call(AddressBookRequest::IncomingPeerList(
+                timed_sync
+                    .local_peerlist_new
+                    .into_iter()
+                    .map(TryInto::try_into)
+                    .collect::<Result<_, _>>()?,
+            ))
+            .await?;
+
+        // Tell the peer sync service about the peers core sync data
+        peer_core_sync_svc
+            .ready()
+            .await?
+            .call(PeerSyncRequest::IncomingCoreSyncData(
+                id,
+                handle.clone(),
+                timed_sync.payload_data,
+            ))
+            .await?;
+    }
+}
diff --git a/p2p/monero-p2p/src/constants.rs b/p2p/monero-p2p/src/constants.rs
new file mode 100644
index 00000000..c7b18f77
--- /dev/null
+++ b/p2p/monero-p2p/src/constants.rs
@@ -0,0 +1,43 @@
+//! Constants used around monero-p2p
+
+use std::time::Duration;
+
+/// The request timeout - the time we give a peer to respond to a request.
+pub(crate) const REQUEST_TIMEOUT: Duration = Duration::from_secs(60);
+
+/// The timeout used when sending messages to a peer.
+///
+/// TODO: Make this configurable?
+/// TODO: Is this a good default?
+pub(crate) const SENDING_TIMEOUT: Duration = Duration::from_secs(20);
+
+/// The interval between timed syncs.
+///
+/// TODO: Make this configurable?
+/// TODO: Is this a good default?
+pub(crate) const TIMEOUT_INTERVAL: Duration = Duration::from_secs(61);
+
+/// This is a Cuprate specific constant.
+///
+/// When completing a handshake monerod might send protocol messages before the handshake is actually
+/// complete, this is a problem for Cuprate as we must complete the handshake before responding to any
+/// protocol requests. So when we receive a protocol message during a handshake we keep them around to handle
+/// after the handshake.
+///
+/// Because we use the [bytes crate](https://crates.io/crates/bytes) in monero-wire for zero-copy parsing
+/// it is not safe to keep too many of these messages around for long.
+pub(crate) const MAX_EAGER_PROTOCOL_MESSAGES: usize = 1;
+
+/// A timeout put on pings during handshakes.
+///
+/// When we receive an inbound connection we open an outbound connection to the node and send a ping message
+/// to see if we can reach the node, so we can add it to our address book.
+///
+/// This timeout must be significantly shorter than [`HANDSHAKE_TIMEOUT`] so we don't drop inbound connections that
+/// don't have ports open.
+pub(crate) const PING_TIMEOUT: Duration = Duration::from_secs(10);
+
+/// A timeout for a handshake - the handshake must complete before this.
+pub(crate) const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(60);
+
+pub(crate) const MAX_PEERS_IN_PEER_LIST_MESSAGE: usize = 250;
diff --git a/p2p/monero-p2p/src/error.rs b/p2p/monero-p2p/src/error.rs
index 2b8ace84..e74a2bb6 100644
--- a/p2p/monero-p2p/src/error.rs
+++ b/p2p/monero-p2p/src/error.rs
@@ -30,6 +30,8 @@ impl<T> SharedError<T> {
 
 #[derive(Debug, thiserror::Error)]
 pub enum PeerError {
+    #[error("The connection timed out.")]
+    TimedOut,
     #[error("The connection was closed.")]
     ConnectionClosed,
     #[error("The connection tasks client channel was closed")]
diff --git a/p2p/monero-p2p/src/handles.rs b/p2p/monero-p2p/src/handles.rs
index ed8a30da..f3831708 100644
--- a/p2p/monero-p2p/src/handles.rs
+++ b/p2p/monero-p2p/src/handles.rs
@@ -7,9 +7,8 @@ use std::{
     time::Duration,
 };
 
-use futures::SinkExt;
-use tokio::sync::{OwnedSemaphorePermit, Semaphore};
-use tokio_util::sync::CancellationToken;
+use tokio::sync::OwnedSemaphorePermit;
+use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned};
 
 /// A [`ConnectionHandle`] builder.
 #[derive(Default, Debug)]
@@ -40,7 +39,7 @@ impl HandleBuilder {
         (
             ConnectionGuard {
                 token: token.clone(),
-                permit: self.permit.expect("connection permit was not set!"),
+                _permit: self.permit.expect("connection permit was not set!"),
             },
             ConnectionHandle {
                 token: token.clone(),
@@ -57,7 +56,7 @@ pub struct BanPeer(pub Duration);
 /// A struct given to the connection task.
 pub struct ConnectionGuard {
     token: CancellationToken,
-    permit: OwnedSemaphorePermit,
+    _permit: OwnedSemaphorePermit,
 }
 
 impl ConnectionGuard {
@@ -88,9 +87,13 @@ pub struct ConnectionHandle {
 }
 
 impl ConnectionHandle {
+    pub fn closed(&self) -> WaitForCancellationFutureOwned {
+        self.token.clone().cancelled_owned()
+    }
     /// Bans the peer for the given `duration`.
     pub fn ban_peer(&self, duration: Duration) {
         let _ = self.ban.set(BanPeer(duration));
+        self.token.cancel();
     }
     /// Checks if this connection is closed.
     pub fn is_closed(&self) -> bool {
diff --git a/p2p/monero-p2p/src/lib.rs b/p2p/monero-p2p/src/lib.rs
index 0105e7e1..9c171320 100644
--- a/p2p/monero-p2p/src/lib.rs
+++ b/p2p/monero-p2p/src/lib.rs
@@ -1,5 +1,17 @@
-#![allow(unused)]
-
+//! # Monero P2P
+//!
+//! This crate is a general purpose P2P networking library for working with Monero. This is a low level
+//! crate, which means it may seem verbose for a lot of use cases, if you want a crate that handles
+//! more of the P2P logic have a look at `cuprate-p2p`.
+//!
+//! # Network Zones
+//!
+//! This crate abstracts over network zones, Tor/I2P/clearnet with the [NetworkZone] trait. Currently only clearnet is implemented: [ClearNet](network_zones::ClearNet).
+//!
+//! # Usage
+//!
+//! TODO
+//!
 use std::{fmt::Debug, future::Future, hash::Hash, pin::Pin};
 
 use futures::{Sink, Stream};
@@ -10,6 +22,7 @@ use monero_wire::{
 };
 
 pub mod client;
+mod constants;
 pub mod error;
 pub mod handles;
 pub mod network_zones;
@@ -20,8 +33,6 @@ pub use error::*;
 pub use protocol::*;
 use services::*;
 
-const MAX_PEERS_IN_PEER_LIST_MESSAGE: usize = 250;
-
 #[derive(Debug, Copy, Clone, Eq, PartialEq)]
 pub enum ConnectionDirection {
     InBound,
@@ -35,9 +46,9 @@ pub trait NetZoneAddress:
     + std::fmt::Display
     + Hash
     + Eq
-    + Clone
     + Copy
     + Send
+    + Sync
     + Unpin
     + 'static
 {
@@ -48,6 +59,11 @@ pub trait NetZoneAddress:
     /// TODO: IP zone banning?
     type BanID: Debug + Hash + Eq + Clone + Copy + Send + 'static;
 
+    /// Changes the port of this address to `port`.
+    fn set_port(&mut self, port: u16);
+
+    fn make_canonical(&mut self);
+
     fn ban_id(&self) -> Self::BanID;
 
     fn should_add_to_peer_list(&self) -> bool;
@@ -64,6 +80,7 @@ pub trait NetZoneAddress:
     + Eq
     + Copy
     + Send
+    + Sync
     + Unpin
     + 'static
 {
@@ -77,6 +94,8 @@ pub trait NetZoneAddress:
     /// Changes the port of this address to `port`.
     fn set_port(&mut self, port: u16);
 
+    fn make_canonical(&mut self);
+
     fn ban_id(&self) -> Self::BanID;
 
     fn should_add_to_peer_list(&self) -> bool;
@@ -100,6 +119,8 @@ pub trait NetworkZone: Clone + Copy + Send + 'static {
     /// This has privacy implications on an anonymity network if true so should be set
     /// to false.
     const CHECK_NODE_ID: bool;
+    /// Fixed seed nodes for this network.
+    const SEEDS: &'static [Self::Addr];
 
     /// The address type of this network.
     type Addr: NetZoneAddress;
@@ -124,7 +145,35 @@ pub trait NetworkZone: Clone + Copy + Send + 'static {
     ) -> Result<Self::Listener, std::io::Error>;
 }
 
-pub(crate) trait AddressBook<Z: NetworkZone>:
+// ####################################################################################
+// Below here is just helper traits, so we don't have to type out tower::Service bounds
+// everywhere but still get to use tower.
+
+pub trait PeerSyncSvc<Z: NetworkZone>:
+    tower::Service<
+        PeerSyncRequest<Z>,
+        Response = PeerSyncResponse<Z>,
+        Error = tower::BoxError,
+        Future = Self::Future2,
+    > + Send
+    + 'static
+{
+    // This allows us to put more restrictive bounds on the future without defining the future here
+    // explicitly.
+    type Future2: Future<Output = Result<Self::Response, Self::Error>> + Send + 'static;
+}
+
+impl<T, Z: NetworkZone> PeerSyncSvc<Z> for T
+where
+    T: tower::Service<PeerSyncRequest<Z>, Response = PeerSyncResponse<Z>, Error = tower::BoxError>
+        + Send
+        + 'static,
+    T::Future: Future<Output = Result<Self::Response, Self::Error>> + Send + 'static,
+{
+    type Future2 = T::Future;
+}
+
+pub trait AddressBook<Z: NetworkZone>:
     tower::Service<
         AddressBookRequest<Z>,
         Response = AddressBookResponse<Z>,
@@ -151,7 +200,7 @@ where
     type Future2 = T::Future;
 }
 
-pub(crate) trait CoreSyncSvc:
+pub trait CoreSyncSvc:
     tower::Service<
         CoreSyncDataRequest,
         Response = CoreSyncDataResponse,
@@ -183,7 +232,7 @@ impl<T> CoreSyncSvc for T where
 {
 }
 
-pub(crate) trait PeerRequestHandler:
+pub trait PeerRequestHandler:
     tower::Service<
         PeerRequest,
         Response = PeerResponse,
diff --git a/p2p/monero-p2p/src/network_zones/clear.rs b/p2p/monero-p2p/src/network_zones/clear.rs
index 7c3c599a..5141a069 100644
--- a/p2p/monero-p2p/src/network_zones/clear.rs
+++ b/p2p/monero-p2p/src/network_zones/clear.rs
@@ -1,8 +1,8 @@
-use std::net::{IpAddr, SocketAddr};
-use std::pin::Pin;
-use std::task::{Context, Poll};
-
-use monero_wire::MoneroWireCodec;
+use std::{
+    net::{IpAddr, Ipv4Addr, SocketAddr},
+    pin::Pin,
+    task::{Context, Poll},
+};
 
 use futures::Stream;
 use tokio::net::{
@@ -11,6 +11,8 @@ use tokio::net::{
 };
 use tokio_util::codec::{FramedRead, FramedWrite};
 
+use monero_wire::MoneroWireCodec;
+
 use crate::{NetZoneAddress, NetworkZone};
 
 impl NetZoneAddress for SocketAddr {
@@ -24,8 +26,14 @@ impl NetZoneAddress for SocketAddr {
         self.ip()
     }
 
+    fn make_canonical(&mut self) {
+        let ip = self.ip().to_canonical();
+        self.set_ip(ip);
+    }
+
     fn should_add_to_peer_list(&self) -> bool {
-        todo!()
+        // TODO
+        true
     }
 }
 
@@ -36,9 +44,19 @@ pub struct ClearNetServerCfg {
 #[derive(Clone, Copy)]
 pub enum ClearNet {}
 
+const fn ip_v4(a: u8, b: u8, c: u8, d: u8, port: u16) -> SocketAddr {
+    SocketAddr::new(IpAddr::V4(Ipv4Addr::new(a, b, c, d)), port)
+}
+
 #[async_trait::async_trait]
 impl NetworkZone for ClearNet {
     const NAME: &'static str = "ClearNet";
+
+    const SEEDS: &'static [Self::Addr] = &[
+        ip_v4(37, 187, 74, 171, 18080),
+        ip_v4(192, 99, 8, 110, 18080),
+    ];
+
     const ALLOW_SYNC: bool = true;
     const DANDELION_PP: bool = true;
     const CHECK_NODE_ID: bool = true;
@@ -85,7 +103,10 @@ impl Stream for InBoundStream {
     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
         self.listener
             .poll_accept(cx)
-            .map_ok(|(stream, addr)| {
+            .map_ok(|(stream, mut addr)| {
+                let ip = addr.ip().to_canonical();
+                addr.set_ip(ip);
+
                 let (read, write) = stream.into_split();
                 (
                     Some(addr),
diff --git a/p2p/monero-p2p/src/protocol.rs b/p2p/monero-p2p/src/protocol.rs
index a4fb6e9e..10157ae6 100644
--- a/p2p/monero-p2p/src/protocol.rs
+++ b/p2p/monero-p2p/src/protocol.rs
@@ -1,28 +1,27 @@
-/// This module defines InternalRequests and InternalResponses. Cuprate's P2P works by translating network messages into an internal
-/// request/ response, this is easy for levin "requests" and "responses" (admin messages) but takes a bit more work with "notifications"
-/// (protocol messages).
-///
-/// Some notifications are easy to translate, like `GetObjectsRequest` is obviously a request but others like `NewFluffyBlock` are a
-/// bit tricker. To translate a `NewFluffyBlock` into a request/ response we will have to look to see if we asked for `FluffyMissingTransactionsRequest`
-/// if we have we interpret `NewFluffyBlock` as a response if not its a request that doesn't require a response.
-///
-/// Here is every P2P request/ response. *note admin messages are already request/ response so "Handshake" is actually made of a HandshakeRequest & HandshakeResponse
-///
-/// Admin:
-///     Handshake,
-///     TimedSync,
-///     Ping,
-///     SupportFlags
-/// Protocol:
-///     Request: GetObjectsRequest,                 Response: GetObjectsResponse,
-///     Request: ChainRequest,                      Response: ChainResponse,
-///     Request: FluffyMissingTransactionsRequest,  Response: NewFluffyBlock,  <- these 2 could be requests or responses
-///     Request: GetTxPoolCompliment,               Response: NewTransactions, <-
-///     Request: NewBlock,                          Response: None,
-///     Request: NewFluffyBlock,                    Response: None,
-///     Request: NewTransactions,                   Response: None
-///
-///
+//! This module defines InternalRequests and InternalResponses. Cuprate's P2P works by translating network messages into an internal
+//! request/ response, this is easy for levin "requests" and "responses" (admin messages) but takes a bit more work with "notifications"
+//! (protocol messages).
+//!
+//! Some notifications are easy to translate, like `GetObjectsRequest` is obviously a request but others like `NewFluffyBlock` are a
+//! bit trickier. To translate a `NewFluffyBlock` into a request/ response we will have to look to see if we asked for `FluffyMissingTransactionsRequest`,
+//! if we have, we interpret `NewFluffyBlock` as a response; if not, it's a request that doesn't require a response.
+//!
+//! Here is every P2P request/ response. *note admin messages are already request/ response so "Handshake" is actually made of a HandshakeRequest & HandshakeResponse
+//!
+//! Admin:
+//!     Handshake,
+//!     TimedSync,
+//!     Ping,
+//!     SupportFlags
+//! Protocol:
+//!     Request: GetObjectsRequest,                 Response: GetObjectsResponse,
+//!     Request: ChainRequest,                      Response: ChainResponse,
+//!     Request: FluffyMissingTransactionsRequest,  Response: NewFluffyBlock,  <- these 2 could be requests or responses
+//!     Request: GetTxPoolCompliment,               Response: NewTransactions, <-
+//!     Request: NewBlock,                          Response: None,
+//!     Request: NewFluffyBlock,                    Response: None,
+//!     Request: NewTransactions,                   Response: None
+//!
 use monero_wire::{
     admin::{
         HandshakeRequest, HandshakeResponse, PingResponse, SupportFlagsResponse, TimedSyncRequest,
@@ -55,13 +54,12 @@ pub enum MessageID {
     NewTransactions,
 }
 
-/// This is a sub-set of [`PeerRequest`] for requests that should be sent to all nodes.
-pub enum PeerBroadcast {
-    Transactions(NewTransactions),
-    NewBlock(NewBlock),
+pub enum BroadcastMessage {
     NewFluffyBlock(NewFluffyBlock),
+    NewTransaction(NewTransactions),
 }
 
+#[derive(Debug, Clone)]
 pub enum PeerRequest {
     Handshake(HandshakeRequest),
     TimedSync(TimedSyncRequest),
@@ -105,6 +103,7 @@ impl PeerRequest {
     }
 }
 
+#[derive(Debug, Clone)]
 pub enum PeerResponse {
     Handshake(HandshakeResponse),
     TimedSync(TimedSyncResponse),
diff --git a/p2p/monero-p2p/src/services.rs b/p2p/monero-p2p/src/services.rs
index 6c6df6ce..e86e2776 100644
--- a/p2p/monero-p2p/src/services.rs
+++ b/p2p/monero-p2p/src/services.rs
@@ -1,21 +1,35 @@
 use monero_pruning::{PruningError, PruningSeed};
-use monero_wire::{NetZone, NetworkAddress, PeerListEntryBase};
+use monero_wire::{CoreSyncData, PeerListEntryBase};
 
 use crate::{
     client::InternalPeerID, handles::ConnectionHandle, NetZoneAddress, NetworkAddressIncorrectZone,
     NetworkZone,
 };
 
-pub enum CoreSyncDataRequest {
-    Ours,
-    HandleIncoming(monero_wire::CoreSyncData),
+pub enum PeerSyncRequest<N: NetworkZone> {
+    /// Request some peers to sync from.
+    ///
+    /// This takes in the current cumulative difficulty of our chain and will return peers that
+    /// claim to have a higher cumulative difficulty.
+    PeersToSyncFrom {
+        current_cumulative_difficulty: u128,
+        block_needed: Option<u64>,
+    },
+    /// Add/update a peers core sync data to the sync state service.
+    IncomingCoreSyncData(InternalPeerID<N::Addr>, ConnectionHandle, CoreSyncData),
 }
 
-pub enum CoreSyncDataResponse {
-    Ours(monero_wire::CoreSyncData),
+pub enum PeerSyncResponse<N: NetworkZone> {
+    /// The return value of [`PeerSyncRequest::PeersToSyncFrom`].
+    PeersToSyncFrom(Vec<InternalPeerID<N::Addr>>),
+    /// A generic ok response.
     Ok,
 }
 
+pub struct CoreSyncDataRequest;
+
+pub struct CoreSyncDataResponse(pub CoreSyncData);
+
 #[derive(Debug, Copy, Clone, Eq, PartialEq)]
 #[cfg_attr(
     feature = "borsh",
diff --git a/p2p/monero-p2p/tests/fragmented_handshake.rs b/p2p/monero-p2p/tests/fragmented_handshake.rs
index fdc25193..60d490f8 100644
--- a/p2p/monero-p2p/tests/fragmented_handshake.rs
+++ b/p2p/monero-p2p/tests/fragmented_handshake.rs
@@ -13,7 +13,7 @@ use tokio::{
         tcp::{OwnedReadHalf, OwnedWriteHalf},
         TcpListener, TcpStream,
     },
-    sync::{broadcast, Semaphore},
+    sync::Semaphore,
     time::timeout,
 };
 use tokio_util::{
@@ -47,6 +47,7 @@ pub enum FragNet {}
 #[async_trait::async_trait]
 impl NetworkZone for FragNet {
     const NAME: &'static str = "FragNet";
+    const SEEDS: &'static [Self::Addr] = &[];
     const ALLOW_SYNC: bool = true;
     const DANDELION_PP: bool = true;
     const CHECK_NODE_ID: bool = true;
@@ -133,7 +134,6 @@ impl Encoder<LevinMessage<Message>> for FragmentCodec {
 
 #[tokio::test]
 async fn fragmented_handshake_cuprate_to_monerod() {
-    let (broadcast_tx, _) = broadcast::channel(1); // this isn't actually used in this test.
     let semaphore = Arc::new(Semaphore::new(10));
     let permit = semaphore.acquire_owned().await.unwrap();
 
@@ -141,18 +141,19 @@ async fn fragmented_handshake_cuprate_to_monerod() {
 
     let our_basic_node_data = BasicNodeData {
         my_port: 0,
-        network_id: Network::Mainnet.network_id().into(),
+        network_id: Network::Mainnet.network_id(),
         peer_id: 87980,
         support_flags: PeerSupportFlags::from(1_u32),
         rpc_port: 0,
         rpc_credits_per_hash: 0,
     };
 
-    let handshaker = HandShaker::<FragNet, _, _, _>::new(
+    let handshaker = HandShaker::<FragNet, _, _, _, _, _>::new(
         DummyAddressBook,
+        DummyPeerSyncSvc,
         DummyCoreSyncSvc,
         DummyPeerRequestHandlerSvc,
-        broadcast_tx,
+        |_| futures::stream::pending(),
         our_basic_node_data,
     );
 
@@ -172,24 +173,24 @@ async fn fragmented_handshake_cuprate_to_monerod() {
 
 #[tokio::test]
 async fn fragmented_handshake_monerod_to_cuprate() {
-    let (broadcast_tx, _) = broadcast::channel(1); // this isn't actually used in this test.
     let semaphore = Arc::new(Semaphore::new(10));
     let permit = semaphore.acquire_owned().await.unwrap();
 
     let our_basic_node_data = BasicNodeData {
         my_port: 18081,
-        network_id: Network::Mainnet.network_id().into(),
+        network_id: Network::Mainnet.network_id(),
         peer_id: 87980,
         support_flags: PeerSupportFlags::from(1_u32),
         rpc_port: 0,
         rpc_credits_per_hash: 0,
     };
 
-    let mut handshaker = HandShaker::<FragNet, _, _, _>::new(
+    let mut handshaker = HandShaker::<FragNet, _, _, _, _, _>::new(
         DummyAddressBook,
+        DummyPeerSyncSvc,
         DummyCoreSyncSvc,
         DummyPeerRequestHandlerSvc,
-        broadcast_tx,
+        |_| futures::stream::pending(),
         our_basic_node_data,
     );
 
diff --git a/p2p/monero-p2p/tests/handshake.rs b/p2p/monero-p2p/tests/handshake.rs
index 2634263d..1d8b649c 100644
--- a/p2p/monero-p2p/tests/handshake.rs
+++ b/p2p/monero-p2p/tests/handshake.rs
@@ -3,7 +3,7 @@ use std::{sync::Arc, time::Duration};
 use futures::StreamExt;
 use tokio::{
     io::{duplex, split},
-    sync::{broadcast, Semaphore},
+    sync::Semaphore,
     time::timeout,
 };
 use tokio_util::codec::{FramedRead, FramedWrite};
@@ -31,14 +31,13 @@ async fn handshake_cuprate_to_cuprate() {
     // Tests a Cuprate <-> Cuprate handshake by making 2 handshake services and making them talk to
     // each other.
 
-    let (broadcast_tx, _) = broadcast::channel(1); // this isn't actually used in this test.
     let semaphore = Arc::new(Semaphore::new(10));
     let permit_1 = semaphore.clone().acquire_owned().await.unwrap();
     let permit_2 = semaphore.acquire_owned().await.unwrap();
 
     let our_basic_node_data_1 = BasicNodeData {
         my_port: 0,
-        network_id: Network::Mainnet.network_id().into(),
+        network_id: Network::Mainnet.network_id(),
         peer_id: 87980,
         // TODO: This fails if the support flags are empty (0)
         support_flags: PeerSupportFlags::from(1_u32),
@@ -49,19 +48,21 @@ async fn handshake_cuprate_to_cuprate() {
     let mut our_basic_node_data_2 = our_basic_node_data_1.clone();
     our_basic_node_data_2.peer_id = 2344;
 
-    let mut handshaker_1 = HandShaker::<TestNetZone<true, true, true>, _, _, _>::new(
+    let mut handshaker_1 = HandShaker::<TestNetZone<true, true, true>, _, _, _, _, _>::new(
         DummyAddressBook,
+        DummyPeerSyncSvc,
         DummyCoreSyncSvc,
         DummyPeerRequestHandlerSvc,
-        broadcast_tx.clone(),
+        |_| futures::stream::pending(),
         our_basic_node_data_1,
     );
 
-    let mut handshaker_2 = HandShaker::<TestNetZone<true, true, true>, _, _, _>::new(
+    let mut handshaker_2 = HandShaker::<TestNetZone<true, true, true>, _, _, _, _, _>::new(
         DummyAddressBook,
+        DummyPeerSyncSvc,
         DummyCoreSyncSvc,
         DummyPeerRequestHandlerSvc,
-        broadcast_tx.clone(),
+        |_| futures::stream::pending(),
         our_basic_node_data_2,
     );
 
@@ -106,14 +107,13 @@ async fn handshake_cuprate_to_cuprate() {
             .unwrap()
     });
 
-    let (res1, res2) = futures::join!(p1, p2);
+    let (res1, res2) = tokio::join!(p1, p2);
     res1.unwrap();
     res2.unwrap();
 }
 
 #[tokio::test]
 async fn handshake_cuprate_to_monerod() {
-    let (broadcast_tx, _) = broadcast::channel(1); // this isn't actually used in this test.
     let semaphore = Arc::new(Semaphore::new(10));
     let permit = semaphore.acquire_owned().await.unwrap();
 
@@ -121,18 +121,19 @@ async fn handshake_cuprate_to_monerod() {
 
     let our_basic_node_data = BasicNodeData {
         my_port: 0,
-        network_id: Network::Mainnet.network_id().into(),
+        network_id: Network::Mainnet.network_id(),
         peer_id: 87980,
         support_flags: PeerSupportFlags::from(1_u32),
         rpc_port: 0,
         rpc_credits_per_hash: 0,
     };
 
-    let handshaker = HandShaker::<ClearNet, _, _, _>::new(
+    let handshaker = HandShaker::<ClearNet, _, _, _, _, _>::new(
         DummyAddressBook,
+        DummyPeerSyncSvc,
         DummyCoreSyncSvc,
         DummyPeerRequestHandlerSvc,
-        broadcast_tx,
+        |_| futures::stream::pending(),
         our_basic_node_data,
     );
 
@@ -152,24 +153,24 @@ async fn handshake_cuprate_to_monerod() {
 
 #[tokio::test]
 async fn handshake_monerod_to_cuprate() {
-    let (broadcast_tx, _) = broadcast::channel(1); // this isn't actually used in this test.
     let semaphore = Arc::new(Semaphore::new(10));
     let permit = semaphore.acquire_owned().await.unwrap();
 
     let our_basic_node_data = BasicNodeData {
         my_port: 18081,
-        network_id: Network::Mainnet.network_id().into(),
+        network_id: Network::Mainnet.network_id(),
         peer_id: 87980,
         support_flags: PeerSupportFlags::from(1_u32),
         rpc_port: 0,
         rpc_credits_per_hash: 0,
     };
 
-    let mut handshaker = HandShaker::<ClearNet, _, _, _>::new(
+    let mut handshaker = HandShaker::<ClearNet, _, _, _, _, _>::new(
         DummyAddressBook,
+        DummyPeerSyncSvc,
         DummyCoreSyncSvc,
         DummyPeerRequestHandlerSvc,
-        broadcast_tx,
+        |_| futures::stream::pending(),
         our_basic_node_data,
     );
 
diff --git a/p2p/monero-p2p/tests/sending_receiving.rs b/p2p/monero-p2p/tests/sending_receiving.rs
new file mode 100644
index 00000000..fc5c369b
--- /dev/null
+++ b/p2p/monero-p2p/tests/sending_receiving.rs
@@ -0,0 +1,78 @@
+use std::sync::Arc;
+
+use tokio::sync::Semaphore;
+use tower::{Service, ServiceExt};
+
+use cuprate_helper::network::Network;
+use monero_wire::{common::PeerSupportFlags, protocol::GetObjectsRequest, BasicNodeData};
+
+use monero_p2p::{
+    client::{ConnectRequest, Connector, HandShaker},
+    network_zones::ClearNet,
+    protocol::{PeerRequest, PeerResponse},
+};
+
+use cuprate_test_utils::monerod::monerod;
+
+mod utils;
+use utils::*;
+
+#[tokio::test]
+async fn get_single_block_from_monerod() {
+    let semaphore = Arc::new(Semaphore::new(10));
+    let permit = semaphore.acquire_owned().await.unwrap();
+
+    let monerod = monerod(["--out-peers=0"]).await;
+
+    let our_basic_node_data = BasicNodeData {
+        my_port: 0,
+        network_id: Network::Mainnet.network_id(),
+        peer_id: 87980,
+        support_flags: PeerSupportFlags::FLUFFY_BLOCKS,
+        rpc_port: 0,
+        rpc_credits_per_hash: 0,
+    };
+
+    let handshaker = HandShaker::<ClearNet, _, _, _, _, _>::new(
+        DummyAddressBook,
+        DummyPeerSyncSvc,
+        DummyCoreSyncSvc,
+        DummyPeerRequestHandlerSvc,
+        |_| futures::stream::pending(),
+        our_basic_node_data,
+    );
+
+    let mut connector = Connector::new(handshaker);
+
+    let mut connected_peer = connector
+        .ready()
+        .await
+        .unwrap()
+        .call(ConnectRequest {
+            addr: monerod.p2p_addr(),
+            permit,
+        })
+        .await
+        .unwrap();
+
+    let PeerResponse::GetObjects(obj) = connected_peer
+        .ready()
+        .await
+        .unwrap()
+        .call(PeerRequest::GetObjects(GetObjectsRequest {
+            blocks: hex::decode("418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3")
+                .unwrap()
+                .try_into()
+                .unwrap(),
+            pruned: false,
+        }))
+        .await
+        .unwrap()
+    else {
+        panic!("Client returned wrong response");
+    };
+
+    assert_eq!(obj.blocks.len(), 1);
+    assert_eq!(obj.missed_ids.len(), 0);
+    assert_eq!(obj.current_blockchain_height, 1);
+}
diff --git a/p2p/monero-p2p/tests/utils.rs b/p2p/monero-p2p/tests/utils.rs
index e6b457f7..9836cbfa 100644
--- a/p2p/monero-p2p/tests/utils.rs
+++ b/p2p/monero-p2p/tests/utils.rs
@@ -10,6 +10,7 @@ use tower::Service;
 use monero_p2p::{
     services::{
         AddressBookRequest, AddressBookResponse, CoreSyncDataRequest, CoreSyncDataResponse,
+        PeerSyncRequest, PeerSyncResponse,
     },
     NetworkZone, PeerRequest, PeerResponse,
 };
@@ -51,31 +52,45 @@ impl Service<CoreSyncDataRequest> for DummyCoreSyncSvc {
         Poll::Ready(Ok(()))
     }
 
-    fn call(&mut self, req: CoreSyncDataRequest) -> Self::Future {
+    fn call(&mut self, _: CoreSyncDataRequest) -> Self::Future {
         async move {
-            match req {
-                CoreSyncDataRequest::Ours => {
-                    Ok(CoreSyncDataResponse::Ours(monero_wire::CoreSyncData {
-                        cumulative_difficulty: 1,
-                        cumulative_difficulty_top64: 0,
-                        current_height: 1,
-                        pruning_seed: 0,
-                        top_id: hex::decode(
-                            "418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3",
-                        )
-                        .unwrap()
-                        .try_into()
-                        .unwrap(),
-                        top_version: 1,
-                    }))
-                }
-                CoreSyncDataRequest::HandleIncoming(_) => Ok(CoreSyncDataResponse::Ok),
-            }
+            Ok(CoreSyncDataResponse(monero_wire::CoreSyncData {
+                cumulative_difficulty: 1,
+                cumulative_difficulty_top64: 0,
+                current_height: 1,
+                pruning_seed: 0,
+                top_id: hex::decode(
+                    "418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3",
+                )
+                .unwrap()
+                .try_into()
+                .unwrap(),
+                top_version: 1,
+            }))
         }
         .boxed()
     }
 }
 
+#[derive(Clone)]
+pub struct DummyPeerSyncSvc;
+
+impl<N: NetworkZone> Service<PeerSyncRequest<N>> for DummyPeerSyncSvc {
+    type Error = tower::BoxError;
+    type Future =
+        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
+
+    type Response = PeerSyncResponse<N>;
+
+    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        Poll::Ready(Ok(()))
+    }
+
+    fn call(&mut self, _: PeerSyncRequest<N>) -> Self::Future {
+        async { Ok(PeerSyncResponse::Ok) }.boxed()
+    }
+}
+
 #[derive(Clone)]
 pub struct DummyPeerRequestHandlerSvc;
 
diff --git a/pruning/src/lib.rs b/pruning/src/lib.rs
index 9cff0824..af6017a1 100644
--- a/pruning/src/lib.rs
+++ b/pruning/src/lib.rs
@@ -126,6 +126,14 @@ impl PruningSeed {
         }
     }
 
+    /// Returns `true` if a peer with this pruning seed should have a non-pruned version of a block.
+    pub fn has_full_block(&self, height: u64, blockchain_height: u64) -> bool {
+        match self {
+            PruningSeed::NotPruned => true,
+            PruningSeed::Pruned(seed) => seed.has_full_block(height, blockchain_height),
+        }
+    }
+
     /// Gets the next pruned block for a given `block_height` and `blockchain_height`
     ///
     /// Each seed will store, in a cyclic manner, a portion of blocks while discarding
@@ -303,6 +311,14 @@ impl DecompressedPruningSeed {
             | ((self.stripe - 1) << PRUNING_SEED_STRIPE_SHIFT)
     }
 
+    /// Returns `true` if a peer with this pruning seed should have a non-pruned version of a block.
+    pub fn has_full_block(&self, height: u64, blockchain_height: u64) -> bool {
+        match get_block_pruning_stripe(height, blockchain_height, self.log_stripes) {
+            Some(block_stripe) => self.stripe == block_stripe,
+            None => true,
+        }
+    }
+
     /// Gets the next unpruned block for a given `block_height` and `blockchain_height`
     ///
     /// Each seed will store, in a cyclic manner, a portion of blocks while discarding
diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml
index 4143a941..82f7e523 100644
--- a/test-utils/Cargo.toml
+++ b/test-utils/Cargo.toml
@@ -3,29 +3,28 @@ name = "cuprate-test-utils"
 version = "0.1.0"
 edition = "2021"
 license = "MIT"
-authors = ["Boog900"]
+authors = ["Boog900", "hinto-janai"]
 
 [dependencies]
-monero-wire = {path = "../net/monero-wire"}
-monero-p2p = {path = "../p2p/monero-p2p", features = ["borsh"] }
+cuprate-types = { path = "../types" }
+cuprate-helper = { path = "../helper", features = ["map"] }
+monero-wire = { path = "../net/monero-wire" }
+monero-p2p = { path = "../p2p/monero-p2p", features = ["borsh"] }
 
-monero-serai = { workspace = true }
+hex = { workspace = true }
+hex-literal = { workspace = true }
+monero-serai = { workspace = true, features = ["std", "http-rpc"] }
 futures = { workspace = true, features = ["std"] }
 async-trait = { workspace = true }
 tokio = { workspace = true, features = ["full"] }
 tokio-util = { workspace = true }
-reqwest = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
 bytes = { workspace = true, features = ["std"] }
 tempfile = { workspace = true }
 
 borsh = { workspace = true, features = ["derive"]}
 
-[target.'cfg(unix)'.dependencies]
-tar = "0.4.40"
-bzip2 = "0.4.4"
-
-[target.'cfg(windows)'.dependencies]
-zip = "0.6"
-
 [dev-dependencies]
 hex = { workspace = true }
+pretty_assertions = { workspace = true }
\ No newline at end of file
diff --git a/test-utils/README.MD b/test-utils/README.md
similarity index 85%
rename from test-utils/README.MD
rename to test-utils/README.md
index e7fd3bcc..c210686a 100644
--- a/test-utils/README.MD
+++ b/test-utils/README.md
@@ -6,3 +6,4 @@ Cuprate crate, only in tests.
 It currently contains:
 - Code to spawn monerod instances and a testing network zone
 - Real raw and typed Monero data, e.g. `Block, Transaction`
+- An RPC client to generate types from `cuprate_types`
diff --git a/test-utils/src/data/block/5da0a3d004c352a90cc86b00fab676695d76a4d1de16036c41ba4dd188c4d76f.bin b/test-utils/src/data/block/5da0a3d004c352a90cc86b00fab676695d76a4d1de16036c41ba4dd188c4d76f.bin
new file mode 100644
index 00000000..bfe51fb5
Binary files /dev/null and b/test-utils/src/data/block/5da0a3d004c352a90cc86b00fab676695d76a4d1de16036c41ba4dd188c4d76f.bin differ
diff --git a/test-utils/src/data/block/5ecb7e663bbe947c734c8059e7d7d52dc7d6644bb82d81a6ad4057d127ee8eda.bin b/test-utils/src/data/block/5ecb7e663bbe947c734c8059e7d7d52dc7d6644bb82d81a6ad4057d127ee8eda.bin
new file mode 100644
index 00000000..a6f2ba34
Binary files /dev/null and b/test-utils/src/data/block/5ecb7e663bbe947c734c8059e7d7d52dc7d6644bb82d81a6ad4057d127ee8eda.bin differ
diff --git a/test-utils/src/data/constants.rs b/test-utils/src/data/constants.rs
index 77795a66..c1da6d01 100644
--- a/test-utils/src/data/constants.rs
+++ b/test-utils/src/data/constants.rs
@@ -3,147 +3,218 @@
 //---------------------------------------------------------------------------------------------------- Import
 
 //---------------------------------------------------------------------------------------------------- Block
-/// Block with height `202612` and hash `bbd604d2ba11ba27935e006ed39c9bfdd99b76bf4a50654bc1e1e61217962698`.
+/// Generate a `const _: &[u8]` pointing to a block blob.
 ///
-/// ```rust
-/// use monero_serai::{block::Block, transaction::Input};
+/// The generated doc-test deserializes the blob with `Block` to ensure it is at least deserializable.
 ///
-/// let block = Block::read(&mut
-///     cuprate_test_utils::data::BLOCK_BBD604
-/// ).unwrap();
+/// This requires some static block input for testing.
 ///
-/// assert_eq!(block.header.major_version, 1);
-/// assert_eq!(block.header.minor_version, 0);
-/// assert_eq!(block.header.timestamp, 1409804570);
-/// assert_eq!(block.header.nonce, 1073744198);
-/// assert!(matches!(block.miner_tx.prefix.inputs[0], Input::Gen(202612)));
-/// assert_eq!(block.txs.len(), 513);
+/// The actual block blob data on disk is found in `data/block`.
 ///
-/// assert_eq!(
-///     hex::encode(block.hash()),
-///     "bbd604d2ba11ba27935e006ed39c9bfdd99b76bf4a50654bc1e1e61217962698",
-/// );
-/// ```
-pub const BLOCK_BBD604: &[u8] =
-    include_bytes!("block/bbd604d2ba11ba27935e006ed39c9bfdd99b76bf4a50654bc1e1e61217962698.bin");
+/// See below for actual usage.
+macro_rules! const_block_blob {
+    (
+        name: $name:ident, // Name of the `const` created
+        height: $height:literal, // Block height
+        hash: $hash:literal, // Block hash
+        data_path: $data_path:literal, // Path to the block blob
+        major_version: $major_version:literal, // Block's major version
+        minor_version: $minor_version:literal, // Block's minor version
+        timestamp: $timestamp:literal, // Block's timestamp
+        nonce: $nonce:literal, // Block's nonce
+        tx_len: $tx_len:literal, // How many transactions there are in the block
+    ) => {
+        #[doc = concat!("Block with hash `", $hash, "`.")]
+        ///
+        #[doc = concat!("Height: `", $height, "`.")]
+        ///
+        /// ```rust
+        #[doc = "# use cuprate_test_utils::data::*;"]
+        #[doc = "use monero_serai::{block::Block, transaction::Input};"]
+        #[doc = ""]
+        #[doc = concat!("let block = Block::read(&mut ", stringify!($name), ").unwrap();")]
+        #[doc = ""]
+        #[doc = concat!("assert_eq!(block.header.major_version, ", $major_version, ");")]
+        #[doc = concat!("assert_eq!(block.header.minor_version, ", $minor_version, ");")]
+        #[doc = concat!("assert_eq!(block.header.timestamp, ", $timestamp, ");")]
+        #[doc = concat!("assert_eq!(block.header.nonce, ", $nonce, ");")]
+        #[doc = concat!("assert!(matches!(block.miner_tx.prefix.inputs[0], Input::Gen(", $height, ")));")]
+        #[doc = concat!("assert_eq!(block.txs.len(), ", $tx_len, ");")]
+        #[doc = concat!("assert_eq!(hex::encode(block.hash()), \"", $hash, "\")")]
+        /// ```
+        pub const $name: &[u8] = include_bytes!($data_path);
+    };
+}
 
-/// Block with height `2751506` and hash `f910435a5477ca27be1986c080d5476aeab52d0c07cf3d9c72513213350d25d4`.
-///
-/// ```rust
-/// use monero_serai::{block::Block, transaction::Input};
-///
-/// let block = Block::read(&mut
-///     cuprate_test_utils::data::BLOCK_F91043
-/// ).unwrap();
-///
-/// assert_eq!(block.header.major_version, 9);
-/// assert_eq!(block.header.minor_version, 9);
-/// assert_eq!(block.header.timestamp, 1545423190);
-/// assert_eq!(block.header.nonce, 4123173351);
-/// assert!(matches!(block.miner_tx.prefix.inputs[0], Input::Gen(1731606)));
-/// assert_eq!(block.txs.len(), 3);
-///
-/// assert_eq!(
-///     hex::encode(block.hash()),
-///     "f910435a5477ca27be1986c080d5476aeab52d0c07cf3d9c72513213350d25d4",
-/// );
-/// ```
-pub const BLOCK_F91043: &[u8] =
-    include_bytes!("block/f910435a5477ca27be1986c080d5476aeab52d0c07cf3d9c72513213350d25d4.bin");
+const_block_blob! {
+    name: BLOCK_BBD604,
+    height: 202_612,
+    hash: "bbd604d2ba11ba27935e006ed39c9bfdd99b76bf4a50654bc1e1e61217962698",
+    data_path: "block/bbd604d2ba11ba27935e006ed39c9bfdd99b76bf4a50654bc1e1e61217962698.bin",
+    major_version: 1,
+    minor_version: 0,
+    timestamp: 1409804570,
+    nonce: 1073744198,
+    tx_len: 513,
+}
 
-/// Block with height `2751506` and hash `43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428`.
-///
-/// ```rust
-/// use monero_serai::{block::Block, transaction::Input};
-///
-/// let block = Block::read(&mut
-///     cuprate_test_utils::data::BLOCK_43BD1F
-/// ).unwrap();
-///
-/// assert_eq!(block.header.major_version, 16);
-/// assert_eq!(block.header.minor_version, 16);
-/// assert_eq!(block.header.timestamp, 1667941829);
-/// assert_eq!(block.header.nonce, 4110909056);
-/// assert!(matches!(block.miner_tx.prefix.inputs[0], Input::Gen(2751506)));
-/// assert_eq!(block.txs.len(), 0);
-///
-/// assert_eq!(
-///     hex::encode(block.hash()),
-///     "43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428",
-/// );
-/// ```
-pub const BLOCK_43BD1F: &[u8] =
-    include_bytes!("block/43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428.bin");
+const_block_blob! {
+    name: BLOCK_5ECB7E,
+    height: 202_609,
+    hash: "5ecb7e663bbe947c734c8059e7d7d52dc7d6644bb82d81a6ad4057d127ee8eda",
+    data_path: "block/5ecb7e663bbe947c734c8059e7d7d52dc7d6644bb82d81a6ad4057d127ee8eda.bin",
+    major_version: 1,
+    minor_version: 0,
+    timestamp: 1409804315,
+    nonce: 48426,
+    tx_len: 2,
+}
+
+const_block_blob! {
+    name: BLOCK_F91043,
+    height: 1_731_606,
+    hash: "f910435a5477ca27be1986c080d5476aeab52d0c07cf3d9c72513213350d25d4",
+    data_path: "block/f910435a5477ca27be1986c080d5476aeab52d0c07cf3d9c72513213350d25d4.bin",
+    major_version: 9,
+    minor_version: 9,
+    timestamp: 1545423190,
+    nonce: 4123173351,
+    tx_len: 3,
+}
+
+const_block_blob! {
+    name: BLOCK_43BD1F,
+    height: 2_751_506,
+    hash: "43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428",
+    data_path: "block/43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428.bin",
+    major_version: 16,
+    minor_version: 16,
+    timestamp: 1667941829,
+    nonce: 4110909056,
+    tx_len: 0,
+}
 
 //---------------------------------------------------------------------------------------------------- Transaction
-/// Transaction with hash `3bc7ff015b227e7313cc2e8668bfbb3f3acbee274a9c201d6211cf681b5f6bb1`.
+/// Generate a `const _: &[u8]` pointing to a transaction blob.
 ///
-/// ```rust
-/// use monero_serai::transaction::{Transaction, Timelock};
-///
-/// let tx = Transaction::read(&mut
-///     cuprate_test_utils::data::TX_3BC7FF
-/// ).unwrap();
-///
-/// assert_eq!(tx.prefix.version, 1);
-/// assert_eq!(tx.prefix.timelock, Timelock::Block(100_081));
-/// assert_eq!(tx.prefix.inputs.len(), 1);
-/// assert_eq!(tx.prefix.outputs.len(), 5);
-/// assert_eq!(tx.signatures.len(), 0);
-///
-/// assert_eq!(
-///     hex::encode(tx.hash()),
-///     "3bc7ff015b227e7313cc2e8668bfbb3f3acbee274a9c201d6211cf681b5f6bb1",
-/// );
-/// ```
-pub const TX_3BC7FF: &[u8] =
-    include_bytes!("tx/3bc7ff015b227e7313cc2e8668bfbb3f3acbee274a9c201d6211cf681b5f6bb1.bin");
+/// Same as [`const_block_blob`] but for transactions.
+macro_rules! const_tx_blob {
+    (
+        name: $name:ident, // Name of the `const` created
+        hash: $hash:literal, // Transaction hash
+        data_path: $data_path:literal, // Path to the transaction blob
+        version: $version:literal, // Transaction version
+        timelock: $timelock:expr, // Transaction's timelock (use the real type `Timelock`)
+        input_len: $input_len:literal, // Amount of inputs
+        output_len: $output_len:literal, // Amount of outputs
+        signatures_len: $signatures_len:literal, // Amount of signatures
+    ) => {
+        #[doc = concat!("Transaction with hash `", $hash, "`.")]
+        ///
+        /// ```rust
+        #[doc = "# use cuprate_test_utils::data::*;"]
+        #[doc = "use monero_serai::transaction::{Transaction, Timelock};"]
+        #[doc = ""]
+        #[doc = concat!("let tx = Transaction::read(&mut ", stringify!($name), ").unwrap();")]
+        #[doc = ""]
+        #[doc = concat!("assert_eq!(tx.prefix.version, ", $version, ");")]
+        #[doc = concat!("assert_eq!(tx.prefix.timelock, ", stringify!($timelock), ");")]
+        #[doc = concat!("assert_eq!(tx.prefix.inputs.len(), ", $input_len, ");")]
+        #[doc = concat!("assert_eq!(tx.prefix.outputs.len(), ", $output_len, ");")]
+        #[doc = concat!("assert_eq!(tx.signatures.len(), ", $signatures_len, ");")]
+        #[doc = concat!("assert_eq!(hex::encode(tx.hash()), \"", $hash, "\")")]
+        /// ```
+        pub const $name: &[u8] = include_bytes!($data_path);
+    };
+}
 
-/// Transaction with hash `9e3f73e66d7c7293af59c59c1ff5d6aae047289f49e5884c66caaf4aea49fb34`.
-///
-/// ```rust
-/// use monero_serai::transaction::{Transaction, Timelock};
-///
-/// let tx = Transaction::read(&mut
-///     cuprate_test_utils::data::TX_9E3F73
-/// ).unwrap();
-///
-/// assert_eq!(tx.prefix.version, 1);
-/// assert_eq!(tx.prefix.timelock, Timelock::None);
-/// assert_eq!(tx.prefix.inputs.len(), 2);
-/// assert_eq!(tx.prefix.outputs.len(), 5);
-/// assert_eq!(tx.signatures.len(), 2);
-///
-/// assert_eq!(
-///     hex::encode(tx.hash()),
-///     "9e3f73e66d7c7293af59c59c1ff5d6aae047289f49e5884c66caaf4aea49fb34",
-/// );
-/// ```
-pub const TX_9E3F73: &[u8] =
-    include_bytes!("tx/9e3f73e66d7c7293af59c59c1ff5d6aae047289f49e5884c66caaf4aea49fb34.bin");
+const_tx_blob! {
+    name: TX_3BC7FF,
+    hash: "3bc7ff015b227e7313cc2e8668bfbb3f3acbee274a9c201d6211cf681b5f6bb1",
+    data_path: "tx/3bc7ff015b227e7313cc2e8668bfbb3f3acbee274a9c201d6211cf681b5f6bb1.bin",
+    version: 1,
+    timelock: Timelock::Block(100_081),
+    input_len: 1,
+    output_len: 5,
+    signatures_len: 0,
+}
 
-/// Transaction with hash `84d48dc11ec91950f8b70a85af9db91fe0c8abef71ef5db08304f7344b99ea66`.
-///
-/// ```rust
-/// use monero_serai::transaction::{Transaction, Timelock};
-///
-/// let tx = Transaction::read(&mut
-///     cuprate_test_utils::data::TX_84D48D
-/// ).unwrap();
-///
-/// assert_eq!(tx.prefix.version, 2);
-/// assert_eq!(tx.prefix.timelock, Timelock::None);
-/// assert_eq!(tx.prefix.inputs.len(), 2);
-/// assert_eq!(tx.prefix.outputs.len(), 2);
-/// assert_eq!(tx.signatures.len(), 0);
-///
-/// assert_eq!(
-///     hex::encode(tx.hash()),
-///     "84d48dc11ec91950f8b70a85af9db91fe0c8abef71ef5db08304f7344b99ea66",
-/// );
-/// ```
-pub const TX_84D48D: &[u8] =
-    include_bytes!("tx/84d48dc11ec91950f8b70a85af9db91fe0c8abef71ef5db08304f7344b99ea66.bin");
+const_tx_blob! {
+    name: TX_2180A8,
+    hash: "2180a87f724702d37af087e22476297e818a73579ef7b7da947da963245202a3",
+    data_path: "tx/2180a87f724702d37af087e22476297e818a73579ef7b7da947da963245202a3.bin",
+    version: 1,
+    timelock: Timelock::None,
+    input_len: 19,
+    output_len: 61,
+    signatures_len: 19,
+}
+
+const_tx_blob! {
+    name: TX_D7FEBD,
+    hash: "d7febd16293799d9c6a8e0fe9199b8a0a3e0da5a8a165098937b60f0bbd582df",
+    data_path: "tx/d7febd16293799d9c6a8e0fe9199b8a0a3e0da5a8a165098937b60f0bbd582df.bin",
+    version: 1,
+    timelock: Timelock::None,
+    input_len: 46,
+    output_len: 46,
+    signatures_len: 46,
+}
+
+const_tx_blob! {
+    name: TX_E2D393,
+    hash: "e2d39395dd1625b2d707b98af789e7eab9d24c2bd2978ec38ef910961a8cdcee",
+    data_path: "tx/e2d39395dd1625b2d707b98af789e7eab9d24c2bd2978ec38ef910961a8cdcee.bin",
+    version: 2,
+    timelock: Timelock::None,
+    input_len: 1,
+    output_len: 2,
+    signatures_len: 0,
+}
+
+const_tx_blob! {
+    name: TX_E57440,
+    hash: "e57440ec66d2f3b2a5fa2081af40128868973e7c021bb3877290db3066317474",
+    data_path: "tx/e57440ec66d2f3b2a5fa2081af40128868973e7c021bb3877290db3066317474.bin",
+    version: 2,
+    timelock: Timelock::None,
+    input_len: 1,
+    output_len: 2,
+    signatures_len: 0,
+}
+
+const_tx_blob! {
+    name: TX_B6B439,
+    hash: "b6b4394d4ec5f08ad63267c07962550064caa8d225dd9ad6d739ebf60291c169",
+    data_path: "tx/b6b4394d4ec5f08ad63267c07962550064caa8d225dd9ad6d739ebf60291c169.bin",
+    version: 2,
+    timelock: Timelock::None,
+    input_len: 2,
+    output_len: 2,
+    signatures_len: 0,
+}
+
+const_tx_blob! {
+    name: TX_9E3F73,
+    hash: "9e3f73e66d7c7293af59c59c1ff5d6aae047289f49e5884c66caaf4aea49fb34",
+    data_path: "tx/9e3f73e66d7c7293af59c59c1ff5d6aae047289f49e5884c66caaf4aea49fb34.bin",
+    version: 1,
+    timelock: Timelock::None,
+    input_len: 2,
+    output_len: 5,
+    signatures_len: 2,
+}
+
+const_tx_blob! {
+    name: TX_84D48D,
+    hash: "84d48dc11ec91950f8b70a85af9db91fe0c8abef71ef5db08304f7344b99ea66",
+    data_path: "tx/84d48dc11ec91950f8b70a85af9db91fe0c8abef71ef5db08304f7344b99ea66.bin",
+    version: 2,
+    timelock: Timelock::None,
+    input_len: 2,
+    output_len: 2,
+    signatures_len: 0,
+}
 
 //---------------------------------------------------------------------------------------------------- Tests
 #[cfg(test)]
diff --git a/test-utils/src/data/free.rs b/test-utils/src/data/free.rs
index e65f2675..c2c2c82b 100644
--- a/test-utils/src/data/free.rs
+++ b/test-utils/src/data/free.rs
@@ -6,105 +6,337 @@
 )]
 
 //---------------------------------------------------------------------------------------------------- Import
-use std::sync::OnceLock;
+use std::sync::{Arc, OnceLock};
 
+use hex_literal::hex;
 use monero_serai::{block::Block, transaction::Transaction};
 
+use cuprate_helper::map::combine_low_high_bits_to_u128;
+use cuprate_types::{TransactionVerificationData, VerifiedBlockInformation};
+
 use crate::data::constants::{
-    BLOCK_43BD1F, BLOCK_BBD604, BLOCK_F91043, TX_3BC7FF, TX_84D48D, TX_9E3F73,
+    BLOCK_43BD1F, BLOCK_5ECB7E, BLOCK_F91043, TX_2180A8, TX_3BC7FF, TX_84D48D, TX_9E3F73,
+    TX_B6B439, TX_D7FEBD, TX_E2D393, TX_E57440,
 };
 
+//---------------------------------------------------------------------------------------------------- Conversion
+/// Converts `monero_serai`'s `Block` into a
+/// `cuprate_types::VerifiedBlockInformation` (superset).
+///
+/// To prevent pulling other code in order to actually calculate things
+/// (e.g. `pow_hash`), some information must be provided statically,
+/// this struct represents the data that must be provided.
+///
+/// Consider using `cuprate_test_utils::rpc` to get this data easily.
+struct VerifiedBlockMap {
+    block_blob: &'static [u8],
+    pow_hash: [u8; 32],
+    height: u64,
+    generated_coins: u64,
+    weight: usize,
+    long_term_weight: usize,
+    cumulative_difficulty_low: u64,
+    cumulative_difficulty_high: u64,
+    // Vec of `tx_blob`'s, i.e. the data in `/test-utils/src/data/tx/`.
+    // This should be the actual `tx_blob`s of the transactions within this block.
+    txs: &'static [&'static [u8]],
+}
+
+impl VerifiedBlockMap {
+    /// Turn the various static data bits in `self` into a `VerifiedBlockInformation`.
+    ///
+    /// Transactions are verified to at least match the block's transaction hashes,
+    /// although the correctness of data (whether this block actually existed or not)
+    /// is not checked.
+    fn into_verified(self) -> VerifiedBlockInformation {
+        let Self {
+            block_blob,
+            pow_hash,
+            height,
+            generated_coins,
+            weight,
+            long_term_weight,
+            cumulative_difficulty_low,
+            cumulative_difficulty_high,
+            txs,
+        } = self;
+
+        let block_blob = block_blob.to_vec();
+        let block = Block::read(&mut block_blob.as_slice()).unwrap();
+
+        let txs: Vec<Arc<TransactionVerificationData>> = txs
+            .iter()
+            .map(to_tx_verification_data)
+            .map(Arc::new)
+            .collect();
+
+        assert_eq!(
+            txs.len(),
+            block.txs.len(),
+            "(deserialized txs).len() != (txs hashes in block).len()"
+        );
+
+        for (tx, tx_hash_in_block) in txs.iter().zip(&block.txs) {
+            assert_eq!(
+                &tx.tx_hash, tx_hash_in_block,
+                "deserialized tx hash is not the same as the one in the parent block"
+            );
+        }
+
+        VerifiedBlockInformation {
+            block_hash: block.hash(),
+            block_blob,
+            block,
+            txs,
+            pow_hash,
+            height,
+            generated_coins,
+            weight,
+            long_term_weight,
+            cumulative_difficulty: combine_low_high_bits_to_u128(
+                cumulative_difficulty_low,
+                cumulative_difficulty_high,
+            ),
+        }
+    }
+}
+
+// Same as [`VerifiedBlockMap`] but for [`TransactionVerificationData`].
+fn to_tx_verification_data(tx_blob: impl AsRef<[u8]>) -> TransactionVerificationData {
+    let tx_blob = tx_blob.as_ref().to_vec();
+    let tx = Transaction::read(&mut tx_blob.as_slice()).unwrap();
+    TransactionVerificationData {
+        tx_weight: tx.weight(),
+        fee: tx.rct_signatures.base.fee,
+        tx_hash: tx.hash(),
+        tx_blob,
+        tx,
+    }
+}
+
 //---------------------------------------------------------------------------------------------------- Blocks
-/// Return [`BLOCK_BBD604`] as a [`Block`].
+/// Generate a block accessor function with this signature:
+///     `fn() -> &'static VerifiedBlockInformation`
 ///
-/// ```rust
-/// assert_eq!(
-///     &cuprate_test_utils::data::block_v1_tx513().serialize(),
-///     cuprate_test_utils::data::BLOCK_BBD604
-/// );
-/// ```
-pub fn block_v1_tx513() -> Block {
-    /// `OnceLock` holding the data.
-    static BLOCK: OnceLock<Block> = OnceLock::new();
-    BLOCK
-        .get_or_init(|| Block::read(&mut BLOCK_BBD604).unwrap())
-        .clone()
+/// This will use `VerifiedBlockMap` type above to do various
+/// checks on the input data and makes sure it seems correct.
+///
+/// This requires some static block/tx input (from data) and some fields.
+/// This data can be accessed more easily via:
+/// - A block explorer (https://xmrchain.net)
+/// - Monero RPC (see cuprate_test_utils::rpc for this)
+///
+/// See below for actual usage.
+macro_rules! verified_block_information_fn {
+    (
+        fn_name: $fn_name:ident, // Name of the function created
+        block_blob: $block_blob:ident, // Block blob ([u8], found in `constants.rs`)
+        tx_blobs: [$($tx_blob:ident),*], // Array of contained transaction blobs
+        pow_hash: $pow_hash:literal, // PoW hash as a string literal
+        height: $height:literal, // Block height
+        generated_coins: $generated_coins:literal, // Generated coins in block (minus fees)
+        weight: $weight:literal, // Block weight
+        long_term_weight: $long_term_weight:literal, // Block long term weight
+        cumulative_difficulty_low: $cumulative_difficulty_low:literal, // Least significant 64-bits of block cumulative difficulty
+        cumulative_difficulty_high: $cumulative_difficulty_high:literal, // Most significant 64-bits of block cumulative difficulty
+        tx_len: $tx_len:literal, // Amount of transactions in this block
+    ) => {
+        #[doc = concat!(
+            "Return [`",
+            stringify!($block_blob),
+            "`] as a [`VerifiedBlockInformation`].",
+        )]
+        ///
+        /// Contained transactions:
+        $(
+            #[doc = concat!("- [`", stringify!($tx_blob), "`]")]
+        )*
+        ///
+        /// ```rust
+        #[doc = "# use cuprate_test_utils::data::*;"]
+        #[doc = "# use hex_literal::hex;"]
+        #[doc = "use cuprate_helper::map::combine_low_high_bits_to_u128;"]
+        #[doc = ""]
+        #[doc = concat!("let block = ", stringify!($fn_name), "();")]
+        #[doc = concat!("assert_eq!(&block.block.serialize(), ", stringify!($block_blob), ");")]
+        #[doc = concat!("assert_eq!(block.pow_hash, hex!(\"", $pow_hash, "\"));")]
+        #[doc = concat!("assert_eq!(block.height, ", $height, ");")]
+        #[doc = concat!("assert_eq!(block.generated_coins, ", $generated_coins, ");")]
+        #[doc = concat!("assert_eq!(block.weight, ", $weight, ");")]
+        #[doc = concat!("assert_eq!(block.long_term_weight, ", $long_term_weight, ");")]
+        #[doc = concat!("assert_eq!(block.txs.len(), ", $tx_len, ");")]
+        #[doc = ""]
+        #[doc = concat!(
+            "assert_eq!(block.cumulative_difficulty, ",
+            "combine_low_high_bits_to_u128(",
+            stringify!($cumulative_difficulty_low),
+            ", ",
+            stringify!($cumulative_difficulty_high),
+            "));"
+        )]
+        /// ```
+        pub fn $fn_name() -> &'static VerifiedBlockInformation {
+            static BLOCK: OnceLock<VerifiedBlockInformation> = OnceLock::new();
+            BLOCK.get_or_init(|| {
+                VerifiedBlockMap {
+                    block_blob: $block_blob,
+                    pow_hash: hex!($pow_hash),
+                    height: $height,
+                    generated_coins: $generated_coins,
+                    weight: $weight,
+                    long_term_weight: $long_term_weight,
+                    cumulative_difficulty_low: $cumulative_difficulty_low,
+                    cumulative_difficulty_high: $cumulative_difficulty_high,
+                    txs: &[$($tx_blob),*],
+                }
+                .into_verified()
+            })
+        }
+    };
 }
 
-/// Return [`BLOCK_F91043`] as a [`Block`].
-///
-/// ```rust
-/// assert_eq!(
-///     &cuprate_test_utils::data::block_v9_tx3().serialize(),
-///     cuprate_test_utils::data::BLOCK_F91043
-/// );
-/// ```
-pub fn block_v9_tx3() -> Block {
-    /// `OnceLock` holding the data.
-    static BLOCK: OnceLock<Block> = OnceLock::new();
-    BLOCK
-        .get_or_init(|| Block::read(&mut BLOCK_F91043).unwrap())
-        .clone()
+verified_block_information_fn! {
+    fn_name: block_v1_tx2,
+    block_blob: BLOCK_5ECB7E,
+    tx_blobs: [TX_2180A8, TX_D7FEBD],
+    pow_hash: "c960d540000459480560b7816de968c7470083e5874e10040bdd4cc501000000",
+    height: 202_609,
+    generated_coins: 14_535_350_982_449,
+    weight: 21_905,
+    long_term_weight: 21_905,
+    cumulative_difficulty_low: 126_650_740_038_710,
+    cumulative_difficulty_high: 0,
+    tx_len: 2,
 }
 
-/// Return [`BLOCK_43BD1F`] as a [`Block`].
-///
-/// ```rust
-/// assert_eq!(
-///     &cuprate_test_utils::data::block_v16_tx0().serialize(),
-///     cuprate_test_utils::data::BLOCK_43BD1F
-/// );
-/// ```
-pub fn block_v16_tx0() -> Block {
-    /// `OnceLock` holding the data.
-    static BLOCK: OnceLock<Block> = OnceLock::new();
-    BLOCK
-        .get_or_init(|| Block::read(&mut BLOCK_43BD1F).unwrap())
-        .clone()
+verified_block_information_fn! {
+    fn_name: block_v9_tx3,
+    block_blob: BLOCK_F91043,
+    tx_blobs: [TX_E2D393, TX_E57440, TX_B6B439],
+    pow_hash: "7c78b5b67a112a66ea69ea51477492057dba9cfeaa2942ee7372c61800000000",
+    height: 1_731_606,
+    generated_coins: 3_403_774_022_163,
+    weight: 6_597,
+    long_term_weight: 6_597,
+    cumulative_difficulty_low: 23_558_910_234_058_343,
+    cumulative_difficulty_high: 0,
+    tx_len: 3,
+}
+
+verified_block_information_fn! {
+    fn_name: block_v16_tx0,
+    block_blob: BLOCK_43BD1F,
+    tx_blobs: [],
+    pow_hash: "10b473b5d097d6bfa0656616951840724dfe38c6fb9c4adf8158800300000000",
+    height: 2_751_506,
+    generated_coins: 600_000_000_000,
+    weight: 106,
+    long_term_weight: 176_470,
+    cumulative_difficulty_low: 236_046_001_376_524_168,
+    cumulative_difficulty_high: 0,
+    tx_len: 0,
 }
 
 //---------------------------------------------------------------------------------------------------- Transactions
-/// Return [`TX_3BC7FF`] as a [`Transaction`].
+/// Generate a transaction accessor function with this signature:
+///     `fn() -> &'static TransactionVerificationData`
 ///
-/// ```rust
-/// assert_eq!(
-///     &cuprate_test_utils::data::tx_v1_sig0().serialize(),
-///     cuprate_test_utils::data::TX_3BC7FF
-/// );
-/// ```
-pub fn tx_v1_sig0() -> Transaction {
-    /// `OnceLock` holding the data.
-    static TX: OnceLock<Transaction> = OnceLock::new();
-    TX.get_or_init(|| Transaction::read(&mut TX_3BC7FF).unwrap())
-        .clone()
+/// Same as [`verified_block_information_fn`] but for transactions.
+macro_rules! transaction_verification_data_fn {
+    (
+        fn_name: $fn_name:ident, // Name of the function created
+        tx_blobs: $tx_blob:ident, // Transaction blob ([u8], found in `constants.rs`)
+        weight: $weight:literal, // Transaction weight
+        hash: $hash:literal, // Transaction hash as a string literal
+    ) => {
+        #[doc = concat!("Return [`", stringify!($tx_blob), "`] as a [`TransactionVerificationData`].")]
+        ///
+        /// ```rust
+        #[doc = "# use cuprate_test_utils::data::*;"]
+        #[doc = "# use hex_literal::hex;"]
+        #[doc = concat!("let tx = ", stringify!($fn_name), "();")]
+        #[doc = concat!("assert_eq!(&tx.tx.serialize(), ", stringify!($tx_blob), ");")]
+        #[doc = concat!("assert_eq!(tx.tx_blob, ", stringify!($tx_blob), ");")]
+        #[doc = concat!("assert_eq!(tx.tx_weight, ", $weight, ");")]
+        #[doc = concat!("assert_eq!(tx.tx_hash, hex!(\"", $hash, "\"));")]
+        #[doc = "assert_eq!(tx.fee, tx.tx.rct_signatures.base.fee);"]
+        /// ```
+        pub fn $fn_name() -> &'static TransactionVerificationData {
+            static TX: OnceLock<TransactionVerificationData> = OnceLock::new();
+            TX.get_or_init(|| to_tx_verification_data($tx_blob))
+        }
+    };
 }
 
-/// Return [`TX_9E3F73`] as a [`Transaction`].
-///
-/// ```rust
-/// assert_eq!(
-///     &cuprate_test_utils::data::tx_v1_sig2().serialize(),
-///     cuprate_test_utils::data::TX_9E3F73
-/// );
-/// ```
-pub fn tx_v1_sig2() -> Transaction {
-    /// `OnceLock` holding the data.
-    static TX: OnceLock<Transaction> = OnceLock::new();
-    TX.get_or_init(|| Transaction::read(&mut TX_9E3F73).unwrap())
-        .clone()
+transaction_verification_data_fn! {
+    fn_name: tx_v1_sig0,
+    tx_blobs: TX_3BC7FF,
+    weight: 248,
+    hash: "3bc7ff015b227e7313cc2e8668bfbb3f3acbee274a9c201d6211cf681b5f6bb1",
 }
 
-/// Return [`TX_84D48D`] as a [`Transaction`].
-///
-/// ```rust
-/// assert_eq!(
-///     &cuprate_test_utils::data::tx_v2_rct3().serialize(),
-///     cuprate_test_utils::data::TX_84D48D
-/// );
-/// ```
-pub fn tx_v2_rct3() -> Transaction {
-    /// `OnceLock` holding the data.
-    static TX: OnceLock<Transaction> = OnceLock::new();
-    TX.get_or_init(|| Transaction::read(&mut TX_84D48D).unwrap())
-        .clone()
+transaction_verification_data_fn! {
+    fn_name: tx_v1_sig2,
+    tx_blobs: TX_9E3F73,
+    weight: 448,
+    hash: "9e3f73e66d7c7293af59c59c1ff5d6aae047289f49e5884c66caaf4aea49fb34",
+}
+
+transaction_verification_data_fn! {
+    fn_name: tx_v2_rct3,
+    tx_blobs: TX_84D48D,
+    weight: 2743,
+    hash: "84d48dc11ec91950f8b70a85af9db91fe0c8abef71ef5db08304f7344b99ea66",
+}
+
+//---------------------------------------------------------------------------------------------------- TESTS
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use pretty_assertions::assert_eq;
+
+    use crate::rpc::HttpRpcClient;
+
+    /// Assert the defined blocks are the same compared to ones received from a local RPC call.
+    #[ignore] // FIXME: doesn't work in CI, we need a real unrestricted node
+    #[tokio::test]
+    async fn block_same_as_rpc() {
+        let rpc = HttpRpcClient::new(None).await;
+        for block in [block_v1_tx2(), block_v9_tx3(), block_v16_tx0()] {
+            println!("block_height: {}", block.height);
+            let block_rpc = rpc.get_verified_block_information(block.height).await;
+            assert_eq!(block, &block_rpc);
+        }
+    }
+
+    /// Same as `block_same_as_rpc` but for transactions.
+    /// This also tests all the transactions within the defined blocks.
+    #[ignore] // FIXME: doesn't work in CI, we need a real unrestricted node
+    #[tokio::test]
+    async fn tx_same_as_rpc() {
+        let rpc = HttpRpcClient::new(None).await;
+
+        let mut txs = [block_v1_tx2(), block_v9_tx3(), block_v16_tx0()]
+            .into_iter()
+            .flat_map(|block| block.txs.iter().map(|arc| (**arc).clone()))
+            .collect::<Vec<TransactionVerificationData>>();
+
+        txs.extend([
+            tx_v1_sig0().clone(),
+            tx_v1_sig2().clone(),
+            tx_v2_rct3().clone(),
+        ]);
+
+        for tx in txs {
+            println!("tx_hash: {:?}", tx.tx_hash);
+            let tx_rpc = rpc
+                .get_transaction_verification_data(&[tx.tx_hash])
+                .await
+                .collect::<Vec<TransactionVerificationData>>()
+                .pop()
+                .unwrap();
+            assert_eq!(tx, tx_rpc);
+        }
+    }
 }
diff --git a/test-utils/src/data/mod.rs b/test-utils/src/data/mod.rs
index a721cfb6..03c45240 100644
--- a/test-utils/src/data/mod.rs
+++ b/test-utils/src/data/mod.rs
@@ -1,9 +1,35 @@
-//! Testing data and utilities.
+//! Real Monero data.
 //!
-//! Raw data is found in `data/`.
+//! This module provides access to _real_ Monero data,
+//! either in raw bytes or typed.
+//!
+//! ## Constants
+//! The `const`ants provide byte slices representing block
+//! and transaction blobs that can be directly deserialized:
+//!
+//! ```rust
+//! # use cuprate_test_utils::data::*;
+//! use monero_serai::{block::Block, transaction::Transaction};
+//!
+//! let block: Block = Block::read(&mut BLOCK_43BD1F).unwrap();
+//! let tx: Transaction = Transaction::read(&mut TX_E57440).unwrap();
+//! ```
+//!
+//! ## Functions
+//! The free functions provide access to typed data found in `cuprate_types`:
+//! ```rust
+//! # use cuprate_test_utils::data::*;
+//! use cuprate_types::{VerifiedBlockInformation, TransactionVerificationData};
+//!
+//! let block: VerifiedBlockInformation = block_v16_tx0().clone();
+//! let tx: TransactionVerificationData = tx_v1_sig0().clone();
+//! ```
 
 mod constants;
-pub use constants::{BLOCK_43BD1F, BLOCK_BBD604, BLOCK_F91043, TX_3BC7FF, TX_84D48D, TX_9E3F73};
+pub use constants::{
+    BLOCK_43BD1F, BLOCK_5ECB7E, BLOCK_BBD604, BLOCK_F91043, TX_2180A8, TX_3BC7FF, TX_84D48D,
+    TX_9E3F73, TX_B6B439, TX_D7FEBD, TX_E2D393, TX_E57440,
+};
 
 mod free;
-pub use free::{block_v16_tx0, block_v1_tx513, block_v9_tx3, tx_v1_sig0, tx_v1_sig2, tx_v2_rct3};
+pub use free::{block_v16_tx0, block_v1_tx2, block_v9_tx3, tx_v1_sig0, tx_v1_sig2, tx_v2_rct3};
diff --git a/test-utils/src/data/tx/2180a87f724702d37af087e22476297e818a73579ef7b7da947da963245202a3.bin b/test-utils/src/data/tx/2180a87f724702d37af087e22476297e818a73579ef7b7da947da963245202a3.bin
new file mode 100644
index 00000000..e7c506de
Binary files /dev/null and b/test-utils/src/data/tx/2180a87f724702d37af087e22476297e818a73579ef7b7da947da963245202a3.bin differ
diff --git a/test-utils/src/data/tx/b6b4394d4ec5f08ad63267c07962550064caa8d225dd9ad6d739ebf60291c169.bin b/test-utils/src/data/tx/b6b4394d4ec5f08ad63267c07962550064caa8d225dd9ad6d739ebf60291c169.bin
new file mode 100644
index 00000000..b4767363
Binary files /dev/null and b/test-utils/src/data/tx/b6b4394d4ec5f08ad63267c07962550064caa8d225dd9ad6d739ebf60291c169.bin differ
diff --git a/test-utils/src/data/tx/d7febd16293799d9c6a8e0fe9199b8a0a3e0da5a8a165098937b60f0bbd582df.bin b/test-utils/src/data/tx/d7febd16293799d9c6a8e0fe9199b8a0a3e0da5a8a165098937b60f0bbd582df.bin
new file mode 100644
index 00000000..b659a6ff
Binary files /dev/null and b/test-utils/src/data/tx/d7febd16293799d9c6a8e0fe9199b8a0a3e0da5a8a165098937b60f0bbd582df.bin differ
diff --git a/test-utils/src/data/tx/e2d39395dd1625b2d707b98af789e7eab9d24c2bd2978ec38ef910961a8cdcee.bin b/test-utils/src/data/tx/e2d39395dd1625b2d707b98af789e7eab9d24c2bd2978ec38ef910961a8cdcee.bin
new file mode 100644
index 00000000..f8667cf0
Binary files /dev/null and b/test-utils/src/data/tx/e2d39395dd1625b2d707b98af789e7eab9d24c2bd2978ec38ef910961a8cdcee.bin differ
diff --git a/test-utils/src/data/tx/e57440ec66d2f3b2a5fa2081af40128868973e7c021bb3877290db3066317474.bin b/test-utils/src/data/tx/e57440ec66d2f3b2a5fa2081af40128868973e7c021bb3877290db3066317474.bin
new file mode 100644
index 00000000..57816380
Binary files /dev/null and b/test-utils/src/data/tx/e57440ec66d2f3b2a5fa2081af40128868973e7c021bb3877290db3066317474.bin differ
diff --git a/test-utils/src/lib.rs b/test-utils/src/lib.rs
index f88b072d..068f28ff 100644
--- a/test-utils/src/lib.rs
+++ b/test-utils/src/lib.rs
@@ -1,7 +1,6 @@
-//! Cuprate testing utilities.
-//!
-//! See the `README.md` for more info.
+#![doc = include_str!("../README.md")]
 
 pub mod data;
 pub mod monerod;
+pub mod rpc;
 pub mod test_netzone;
diff --git a/test-utils/src/monerod.rs b/test-utils/src/monerod.rs
index 51545b4d..9ffa08d3 100644
--- a/test-utils/src/monerod.rs
+++ b/test-utils/src/monerod.rs
@@ -4,9 +4,12 @@
 //! this to test compatibility with monerod.
 //!
 use std::{
+    env::current_dir,
     ffi::OsStr,
+    fs::read_dir,
     io::Read,
     net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener},
+    path::PathBuf,
     process::{Child, Command, Stdio},
     str::from_utf8,
     thread::panicking,
@@ -15,14 +18,9 @@ use std::{
 
 use tokio::{task::yield_now, time::timeout};
 
-mod download;
-
 /// IPv4 local host.
 const LOCALHOST: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST);
 
-/// The `monerod` version to use.
-const MONEROD_VERSION: &str = "v0.18.3.1";
-
 /// The log line `monerod` emits indicated it has successfully started up.
 const MONEROD_STARTUP_TEXT: &str =
     "The daemon will start synchronizing with the network. This may take a long time to complete.";
@@ -34,7 +32,7 @@ const MONEROD_SHUTDOWN_TEXT: &str = "Stopping cryptonote protocol";
 ///
 /// This function will set `regtest` and the P2P/ RPC ports so these can't be included in the flags.
 pub async fn monerod<T: AsRef<OsStr>>(flags: impl IntoIterator<Item = T>) -> SpawnedMoneroD {
-    let path_to_monerod = download::check_download_monerod().await.unwrap();
+    let path_to_monerod = find_root().join("monerod");
 
     let rpc_port = get_available_port(&[]);
     let p2p_port = get_available_port(&[rpc_port]);
@@ -54,7 +52,9 @@ pub async fn monerod<T: AsRef<OsStr>>(flags: impl IntoIterator<Item = T>) -> Spa
         .arg(format!("--data-dir={}", data_dir.path().display()))
         .arg("--non-interactive")
         .spawn()
-        .unwrap();
+        .expect(
+            "Failed to start monerod, you need to have the monerod binary in the root of the repo",
+        );
 
     let mut logs = String::new();
 
@@ -92,6 +92,20 @@ pub async fn monerod<T: AsRef<OsStr>>(flags: impl IntoIterator<Item = T>) -> Spa
     }
 }
 
+/// Finds the root of the repo by finding the `target` directory, this will walk up from the current
+/// directory until it finds a `target` directory, then returns the directory that the target is contained
+/// in.
+fn find_root() -> PathBuf {
+    let mut current_dir = current_dir().unwrap();
+    loop {
+        if read_dir(current_dir.join("target")).is_ok() {
+            return current_dir;
+        } else if !current_dir.pop() {
+            panic!("Could not find ./target");
+        }
+    }
+}
+
 /// Fetch an available TCP port on the machine for `monerod` to bind to.
 fn get_available_port(already_taken: &[u16]) -> u16 {
     loop {
diff --git a/test-utils/src/monerod/download.rs b/test-utils/src/monerod/download.rs
deleted file mode 100644
index 699323f7..00000000
--- a/test-utils/src/monerod/download.rs
+++ /dev/null
@@ -1,104 +0,0 @@
-//! Downloading Monerod Module
-//!
-//! This module handles finding the right monerod file to download, downloading it and extracting it.
-//!
-use std::{
-    env::{
-        consts::{ARCH, OS},
-        current_dir,
-    },
-    fs::read_dir,
-    path::{Path, PathBuf},
-};
-
-#[cfg(unix)]
-use bytes::Buf;
-use reqwest::{get, Error as ReqError};
-use tokio::sync::Mutex;
-
-use super::MONEROD_VERSION;
-
-/// A mutex to make sure only one thread at a time downloads monerod.
-static DOWNLOAD_MONEROD_MUTEX: Mutex<()> = Mutex::const_new(());
-
-/// Returns the file name to download and the expected extracted folder name.
-fn file_name(version: &str) -> (String, String) {
-    let download_file = match (OS, ARCH) {
-        ("windows", "x64" | "x86_64") => format!("monero-win-x64-{version}.zip"),
-        ("windows", "x86") => format!("monero-win-x86-{version}.zip"),
-        ("linux", "x64" | "x86_64") => format!("monero-linux-x64-{version}.tar.bz2"),
-        ("linux", "x86") => format!("monero-linux-x86-{version}.tar.bz2"),
-        ("macos", "x64" | "x86_64") => format!("monero-mac-x64-{version}.tar.bz2"),
-        _ => panic!("Can't get monerod for {OS}, {ARCH}."),
-    };
-
-    let extracted_dir = match (OS, ARCH) {
-        ("windows", "x64" | "x86_64") => {
-            format!("monero-x86_64-w64-mingw32-{version}")
-        }
-        ("windows", "x86") => format!("monero-i686-w64-mingw32-{version}"),
-        ("linux", "x64" | "x86_64") => format!("monero-x86_64-linux-gnu-{version}"),
-        ("linux", "x86") => format!("monero-i686-linux-gnu-{version}"),
-        ("macos", "x64" | "x86_64") => {
-            format!("monero-x86_64-apple-darwin11-{version}")
-        }
-        _ => panic!("Can't get monerod for {OS}, {ARCH}."),
-    };
-
-    (download_file, extracted_dir)
-}
-
-/// Downloads the monerod file provided, extracts it and puts the extracted folder into `path_to_store`.
-async fn download_monerod(file_name: &str, path_to_store: &Path) -> Result<(), ReqError> {
-    let res = get(format!("https://downloads.getmonero.org/cli/{file_name}")).await?;
-    let monerod_archive = res.bytes().await.unwrap();
-
-    #[cfg(unix)]
-    {
-        let bzip_decomp = bzip2::read::BzDecoder::new(monerod_archive.reader());
-        let mut tar_archive = tar::Archive::new(bzip_decomp);
-        tar_archive.unpack(path_to_store).unwrap();
-    }
-    #[cfg(windows)]
-    {
-        let mut zip = zip::ZipArchive::new(std::io::Cursor::new(monerod_archive.as_ref())).unwrap();
-        zip.extract(path_to_store).unwrap();
-    }
-
-    Ok(())
-}
-
-/// Finds the `target` directory, this will work up from the current directory until
-/// it finds a `target` directory.
-fn find_target() -> PathBuf {
-    let mut current_dir = current_dir().unwrap();
-    loop {
-        let potential_target = current_dir.join("target");
-        if read_dir(current_dir.join("target")).is_ok() {
-            return potential_target;
-        } else if !current_dir.pop() {
-            panic!("Could not find ./target");
-        }
-    }
-}
-
-/// Checks if we have monerod or downloads it if we don't and then returns the path to it.
-pub(crate) async fn check_download_monerod() -> Result<PathBuf, ReqError> {
-    // make sure no other threads are downloading monerod at the same time.
-    let _guard = DOWNLOAD_MONEROD_MUTEX.lock().await;
-
-    let path_to_store = find_target();
-
-    let (file_name, dir_name) = file_name(MONEROD_VERSION);
-
-    let path_to_monerod = path_to_store.join(dir_name);
-
-    // Check if we already have monerod
-    if read_dir(&path_to_monerod).is_ok() {
-        return Ok(path_to_monerod.join("monerod"));
-    }
-
-    download_monerod(&file_name, &path_to_store).await?;
-
-    Ok(path_to_monerod.join("monerod"))
-}
diff --git a/test-utils/src/rpc/client.rs b/test-utils/src/rpc/client.rs
new file mode 100644
index 00000000..34d194cd
--- /dev/null
+++ b/test-utils/src/rpc/client.rs
@@ -0,0 +1,307 @@
+//! HTTP RPC client.
+
+//---------------------------------------------------------------------------------------------------- Use
+use std::sync::Arc;
+
+use serde::Deserialize;
+use serde_json::json;
+use tokio::task::spawn_blocking;
+
+use monero_serai::{
+    block::Block,
+    rpc::{HttpRpc, Rpc},
+};
+
+use cuprate_types::{TransactionVerificationData, VerifiedBlockInformation};
+
+use crate::rpc::constants::LOCALHOST_RPC_URL;
+
+//---------------------------------------------------------------------------------------------------- HttpRpcClient
+/// An HTTP RPC client for Monero.
+pub struct HttpRpcClient {
+    address: String,
+    rpc: Rpc<HttpRpc>,
+}
+
+impl HttpRpcClient {
+    /// Create an [`HttpRpcClient`].
+    ///
+    /// `address` should be an HTTP URL pointing to a `monerod`.
+    ///
+    /// If `None` is provided the default is used: [`LOCALHOST_RPC_URL`].
+    ///
+    /// Note that for [`Self::get_verified_block_information`] to work, the `monerod`
+    /// must be in unrestricted mode such that some fields (e.g. `pow_hash`) appear
+    /// in the JSON response.
+    ///
+    /// # Panics
+    /// This panics if the `address` is invalid or a connection could not be made.
+    pub async fn new(address: Option<String>) -> Self {
+        let address = address.unwrap_or_else(|| LOCALHOST_RPC_URL.to_string());
+
+        Self {
+            rpc: HttpRpc::new(address.clone()).await.unwrap(),
+            address,
+        }
+    }
+
+    /// The address used for this [`HttpRpcClient`].
+    #[allow(dead_code)]
+    const fn address(&self) -> &String {
+        &self.address
+    }
+
+    /// Access to the inner RPC client for other usage.
+    #[allow(dead_code)]
+    const fn rpc(&self) -> &Rpc<HttpRpc> {
+        &self.rpc
+    }
+
+    /// Request data and map the response to a [`VerifiedBlockInformation`].
+    ///
+    /// # Panics
+    /// This function will panic at any error point, e.g.,
+    /// if the node cannot be connected to, if deserialization fails, etc.
+    pub async fn get_verified_block_information(&self, height: u64) -> VerifiedBlockInformation {
+        #[derive(Debug, Deserialize)]
+        struct Result {
+            blob: String,
+            block_header: BlockHeader,
+        }
+
+        #[derive(Debug, Deserialize)]
+        struct BlockHeader {
+            block_weight: usize,
+            long_term_weight: usize,
+            cumulative_difficulty: u128,
+            hash: String,
+            height: u64,
+            pow_hash: String,
+            reward: u64, // generated_coins + total_tx_fees
+        }
+
+        let result = self
+            .rpc
+            .json_rpc_call::<Result>(
+                "get_block",
+                Some(json!(
+                    {
+                        "height": height,
+                        "fill_pow_hash": true
+                    }
+                )),
+            )
+            .await
+            .unwrap();
+
+        // Make sure this is a trusted node, as `pow_hash` only works there.
+        assert!(
+        	!result.block_header.pow_hash.is_empty(),
+        	"untrusted node detected, `pow_hash` will not show on these nodes - use a trusted node!"
+        );
+
+        let reward = result.block_header.reward;
+
+        let (block_hash, block_blob, block) = spawn_blocking(|| {
+            let block_blob = hex::decode(result.blob).unwrap();
+            let block = Block::read(&mut block_blob.as_slice()).unwrap();
+            (block.hash(), block_blob, block)
+        })
+        .await
+        .unwrap();
+
+        let txs: Vec<Arc<TransactionVerificationData>> = self
+            .get_transaction_verification_data(&block.txs)
+            .await
+            .map(Arc::new)
+            .collect();
+
+        let block_header = result.block_header;
+        let block_hash_2 = <[u8; 32]>::try_from(hex::decode(&block_header.hash).unwrap()).unwrap();
+        let pow_hash = <[u8; 32]>::try_from(hex::decode(&block_header.pow_hash).unwrap()).unwrap();
+
+        // Assert the block hash matches.
+        assert_eq!(block_hash, block_hash_2);
+
+        let total_tx_fees = txs.iter().map(|tx| tx.fee).sum::<u64>();
+        let generated_coins = block
+            .miner_tx
+            .prefix
+            .outputs
+            .iter()
+            .map(|output| output.amount.expect("miner_tx amount was None"))
+            .sum::<u64>()
+            - total_tx_fees;
+        assert_eq!(
+            reward,
+            generated_coins + total_tx_fees,
+            "generated_coins ({generated_coins}) + total_tx_fees ({total_tx_fees}) != reward ({reward})"
+        );
+
+        VerifiedBlockInformation {
+            block,
+            block_blob,
+            txs,
+            block_hash,
+            pow_hash,
+            generated_coins,
+            height: block_header.height,
+            weight: block_header.block_weight,
+            long_term_weight: block_header.long_term_weight,
+            cumulative_difficulty: block_header.cumulative_difficulty,
+        }
+    }
+
+    /// Request data and map the response to a [`TransactionVerificationData`].
+    ///
+    /// # Panics
+    /// This function will panic at any error point, e.g.,
+    /// if the node cannot be connected to, if deserialization fails, etc.
+    pub async fn get_transaction_verification_data<'a>(
+        &self,
+        tx_hashes: &'a [[u8; 32]],
+    ) -> impl Iterator<Item = TransactionVerificationData> + 'a {
+        self.rpc
+            .get_transactions(tx_hashes)
+            .await
+            .unwrap()
+            .into_iter()
+            .enumerate()
+            .map(|(i, tx)| {
+                let tx_hash = tx.hash();
+                assert_eq!(tx_hash, tx_hashes[i]);
+                TransactionVerificationData {
+                    tx_blob: tx.serialize(),
+                    tx_weight: tx.weight(),
+                    tx_hash,
+                    fee: tx.rct_signatures.base.fee,
+                    tx,
+                }
+            })
+    }
+}
+
+//---------------------------------------------------------------------------------------------------- TESTS
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use hex_literal::hex;
+
+    /// Assert the default address is localhost.
+    #[tokio::test]
+    async fn localhost() {
+        assert_eq!(HttpRpcClient::new(None).await.address(), LOCALHOST_RPC_URL);
+    }
+
+    /// Assert blocks are correctly received/calculated.
+    #[ignore] // FIXME: doesn't work in CI, we need a real unrestricted node
+    #[tokio::test]
+    async fn get() {
+        #[allow(clippy::too_many_arguments)]
+        async fn assert_eq(
+            rpc: &HttpRpcClient,
+            height: u64,
+            block_hash: [u8; 32],
+            pow_hash: [u8; 32],
+            generated_coins: u64,
+            weight: usize,
+            long_term_weight: usize,
+            cumulative_difficulty: u128,
+            tx_count: usize,
+        ) {
+            let block = rpc.get_verified_block_information(height).await;
+
+            println!("block height: {height}");
+            assert_eq!(block.txs.len(), tx_count);
+            println!("{block:#?}");
+
+            assert_eq!(block.block_hash, block_hash);
+            assert_eq!(block.pow_hash, pow_hash);
+            assert_eq!(block.height, height);
+            assert_eq!(block.generated_coins, generated_coins);
+            assert_eq!(block.weight, weight);
+            assert_eq!(block.long_term_weight, long_term_weight);
+            assert_eq!(block.cumulative_difficulty, cumulative_difficulty);
+        }
+
+        let rpc = HttpRpcClient::new(None).await;
+
+        assert_eq(
+            &rpc,
+            0,                                                                        // height
+            hex!("418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3"), // block_hash
+            hex!("8a7b1a780e99eec31a9425b7d89c283421b2042a337d5700dfd4a7d6eb7bd774"), // pow_hash
+            17592186044415, // generated_coins
+            80,             // weight
+            80,             // long_term_weight
+            1,              // cumulative_difficulty
+            0,              // tx_count (miner_tx excluded)
+        )
+        .await;
+
+        assert_eq(
+            &rpc,
+            1,
+            hex!("771fbcd656ec1464d3a02ead5e18644030007a0fc664c0a964d30922821a8148"),
+            hex!("5aeebb3de73859d92f3f82fdb97286d81264ecb72a42e4b9f1e6d62eb682d7c0"),
+            17592169267200,
+            383,
+            383,
+            2,
+            0,
+        )
+        .await;
+
+        assert_eq(
+            &rpc,
+            202612,
+            hex!("bbd604d2ba11ba27935e006ed39c9bfdd99b76bf4a50654bc1e1e61217962698"),
+            hex!("84f64766475d51837ac9efbef1926486e58563c95a19fef4aec3254f03000000"),
+            13138270467918,
+            55503,
+            55503,
+            126654460829362,
+            513,
+        )
+        .await;
+
+        assert_eq(
+            &rpc,
+            1731606,
+            hex!("f910435a5477ca27be1986c080d5476aeab52d0c07cf3d9c72513213350d25d4"),
+            hex!("7c78b5b67a112a66ea69ea51477492057dba9cfeaa2942ee7372c61800000000"),
+            3403774022163,
+            6597,
+            6597,
+            23558910234058343,
+            3,
+        )
+        .await;
+
+        assert_eq(
+            &rpc,
+            2751506,
+            hex!("43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428"),
+            hex!("10b473b5d097d6bfa0656616951840724dfe38c6fb9c4adf8158800300000000"),
+            600000000000,
+            106,
+            176470,
+            236046001376524168,
+            0,
+        )
+        .await;
+
+        assert_eq(
+            &rpc,
+            3132285,
+            hex!("a999c6ba4d2993541ba9d81561bb8293baa83b122f8aa9ab65b3c463224397d8"),
+            hex!("4eaa3b3d4dc888644bc14dc4895ca0b008586e30b186fbaa009d330100000000"),
+            600000000000,
+            133498,
+            176470,
+            348189741564698577,
+            57,
+        )
+        .await;
+    }
+}
diff --git a/test-utils/src/rpc/constants.rs b/test-utils/src/rpc/constants.rs
new file mode 100644
index 00000000..ce44a88b
--- /dev/null
+++ b/test-utils/src/rpc/constants.rs
@@ -0,0 +1,7 @@
+//! RPC-related Constants.
+
+//---------------------------------------------------------------------------------------------------- Use
+
+//---------------------------------------------------------------------------------------------------- Constants
+/// The default URL used for Monero RPC connections.
+pub const LOCALHOST_RPC_URL: &str = "http://127.0.0.1:18081";
diff --git a/test-utils/src/rpc/mod.rs b/test-utils/src/rpc/mod.rs
new file mode 100644
index 00000000..14c963ac
--- /dev/null
+++ b/test-utils/src/rpc/mod.rs
@@ -0,0 +1,25 @@
+//! Monero RPC client.
+//!
+//! This module is a client for Monero RPC that maps the types
+//! into the native types used by Cuprate found in `cuprate_types`.
+//!
+//! # Usage
+//! ```rust,ignore
+//! #[tokio::main]
+//! async fn main() {
+//!     // Create RPC client.
+//!     let rpc = HttpRpcClient::new(None).await;
+//!
+//!     // Collect 20 blocks.
+//!     let mut vec: Vec<VerifiedBlockInformation> = vec![];
+//!     for height in (3130269 - 20)..3130269 {
+//!         vec.push(rpc.get_verified_block_information(height).await);
+//!     }
+//! }
+//! ```
+
+mod client;
+pub use client::HttpRpcClient;
+
+mod constants;
+pub use constants::LOCALHOST_RPC_URL;
diff --git a/test-utils/src/test_netzone.rs b/test-utils/src/test_netzone.rs
index 523f9754..0a534164 100644
--- a/test-utils/src/test_netzone.rs
+++ b/test-utils/src/test_netzone.rs
@@ -31,6 +31,8 @@ impl NetZoneAddress for TestNetZoneAddr {
 
     fn set_port(&mut self, _: u16) {}
 
+    fn make_canonical(&mut self) {}
+
     fn ban_id(&self) -> Self::BanID {
         *self
     }
@@ -74,6 +76,7 @@ impl<const ALLOW_SYNC: bool, const DANDELION_PP: bool, const CHECK_NODE_ID: bool
     for TestNetZone<ALLOW_SYNC, DANDELION_PP, CHECK_NODE_ID>
 {
     const NAME: &'static str = "Testing";
+    const SEEDS: &'static [Self::Addr] = &[];
     const ALLOW_SYNC: bool = ALLOW_SYNC;
     const DANDELION_PP: bool = DANDELION_PP;
     const CHECK_NODE_ID: bool = CHECK_NODE_ID;
diff --git a/types/README.md b/types/README.md
index bbe03c17..6a2015af 100644
--- a/types/README.md
+++ b/types/README.md
@@ -1,21 +1,20 @@
 # `cuprate-types`
 Various data types shared by Cuprate.
 
-<!-- Did you know markdown automatically increments number lists, even if they are all 1...? -->
-1. [File Structure](#file-structure)
-    - [`src/`](#src)
+- [1. File Structure](#1-file-structure)
+    - [1.1 `src/`](#11-src)
 
 ---
 
-# File Structure
+## 1. File Structure
 A quick reference of the structure of the folders & files in `cuprate-types`.
 
 Note that `lib.rs/mod.rs` files are purely for re-exporting/visibility/lints, and contain no code. Each sub-directory has a corresponding `mod.rs`.
 
-## `src/`
+### 1.1 `src/`
 The top-level `src/` files.
 
 | File                | Purpose |
 |---------------------|---------|
 | `service.rs`        | Types used in database requests; `enum {Request,Response}`
-| `types.rs`          | Various general types used by Cuprate
+| `types.rs`          | Various general types used by Cuprate
\ No newline at end of file
diff --git a/types/src/lib.rs b/types/src/lib.rs
index ea96515c..8c077901 100644
--- a/types/src/lib.rs
+++ b/types/src/lib.rs
@@ -1,6 +1,10 @@
 //! Cuprate shared data types.
 //!
-//! TODO
+//! This crate is a kitchen-sink for data types that are shared across `Cuprate`.
+//!
+//! # Features flags
+//! The `service` module, containing `cuprate_database` request/response
+//! types, must be enabled with the `service` feature (on by default).
 
 //---------------------------------------------------------------------------------------------------- Lints
 // Forbid lints.
@@ -59,7 +63,6 @@
     unused_comparisons,
     nonstandard_style
 )]
-#![allow(unreachable_code, unused_variables, dead_code, unused_imports)] // TODO: remove
 #![allow(
 	// FIXME: this lint affects crates outside of
 	// `database/` for some reason, allow for now.
@@ -70,9 +73,6 @@
 	// although it is sometimes nice.
 	clippy::must_use_candidate,
 
-	// TODO: should be removed after all `todo!()`'s are gone.
-	clippy::diverging_sub_expression,
-
 	clippy::module_name_repetitions,
 	clippy::module_inception,
 	clippy::redundant_pub_crate,
diff --git a/types/src/service.rs b/types/src/service.rs
index 97344f64..078c8464 100644
--- a/types/src/service.rs
+++ b/types/src/service.rs
@@ -1,4 +1,10 @@
 //! Database [`ReadRequest`]s, [`WriteRequest`]s, and [`Response`]s.
+//!
+//! See [`cuprate_database`](https://github.com/Cuprate/cuprate/blob/00c3692eac6b2669e74cfd8c9b41c7e704c779ad/database/src/service/mod.rs#L1-L59)'s
+//! `service` module for more usage/documentation.
+//!
+//! Tests that assert particular requests lead to particular
+//! responses are also tested in `cuprate_database`.
 
 //---------------------------------------------------------------------------------------------------- Import
 use std::{
@@ -6,8 +12,6 @@ use std::{
     ops::Range,
 };
 
-use monero_serai::{block::Block, transaction::Transaction};
-
 #[cfg(feature = "borsh")]
 use borsh::{BorshDeserialize, BorshSerialize};
 #[cfg(feature = "serde")]
@@ -17,67 +21,151 @@ use crate::types::{ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation}
 
 //---------------------------------------------------------------------------------------------------- ReadRequest
 /// A read request to the database.
+///
+/// This pairs with [`Response`], where each variant here
+/// matches in name with a `Response` variant. For example,
+/// the proper response for a [`ReadRequest::BlockHash`]
+/// would be a [`Response::BlockHash`].
+///
+/// See `Response` for the expected responses per `Request`.
 #[derive(Debug, Clone, PartialEq, Eq)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
 pub enum ReadRequest {
-    /// TODO
+    /// Request a block's extended header.
+    ///
+    /// The input is the block's height.
     BlockExtendedHeader(u64),
-    /// TODO
+
+    /// Request a block's hash.
+    ///
+    /// The input is the block's height.
     BlockHash(u64),
-    /// TODO
+
+    /// Request a range of block extended headers.
+    ///
+    /// The input is a range of block heights.
     BlockExtendedHeaderInRange(Range<u64>),
-    /// TODO
+
+    /// Request the current chain height.
+    ///
+    /// Note that this is not the top-block height.
     ChainHeight,
-    /// TODO
+
+    /// Request the total amount of generated coins (atomic units) so far.
     GeneratedCoins,
-    /// TODO
+
+    /// Request data for multiple outputs.
+    ///
+    /// The input is a `HashMap` where:
+    /// - Key = output amount
+    /// - Value = set of amount indices
+    ///
+    /// For pre-RCT outputs, the amount is non-zero,
+    /// and the amount indices represent the wanted
+    /// indices of duplicate amount outputs, i.e.:
+    ///
+    /// ```ignore
+    /// // list of outputs with amount 10
+    /// [0, 1, 2, 3, 4, 5]
+    /// //  ^     ^
+    /// // we only want these two, so we would provide
+    /// // `amount: 10, amount_index: {1, 3}`
+    /// ```
+    ///
+    /// For RCT outputs, the amounts would be `0` and
+    /// the amount indices would represent the global
+    /// RCT output indices.
     Outputs(HashMap<u64, HashSet<u64>>),
-    /// TODO
+
+    /// Request the amount of outputs with a certain amount.
+    ///
+    /// The input is a list of output amounts.
     NumberOutputsWithAmount(Vec<u64>),
-    /// TODO
+
+    /// Check that all key images within a set are not spent.
+    ///
+    /// Input is a set of key images.
     CheckKIsNotSpent(HashSet<[u8; 32]>),
-    /// TODO
-    BlockBatchInRange(Range<u64>),
 }
 
 //---------------------------------------------------------------------------------------------------- WriteRequest
 /// A write request to the database.
+///
+/// There is currently only 1 write request to the database,
+/// as such, the only valid [`Response`] to this request is
+/// [`Response::WriteBlockOk`].
 #[derive(Debug, Clone, PartialEq, Eq)]
 // #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
 pub enum WriteRequest {
-    /// TODO
+    /// Request that a block be written to the database.
+    ///
+    /// Input is an already verified block.
     WriteBlock(VerifiedBlockInformation),
 }
 
 //---------------------------------------------------------------------------------------------------- Response
 /// A response from the database.
+///
+/// These are the data types returned when sending a `Request`.
+///
+/// This pairs with [`ReadRequest`] and [`WriteRequest`],
+/// see those two for more info.
 #[derive(Debug, Clone, PartialEq, Eq)]
 // #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
 pub enum Response {
     //------------------------------------------------------ Reads
-    /// TODO
+    /// Response to [`ReadRequest::BlockExtendedHeader`].
+    ///
+    /// Inner value is the extended header of the requested block.
     BlockExtendedHeader(ExtendedBlockHeader),
-    /// TODO
+
+    /// Response to [`ReadRequest::BlockHash`].
+    ///
+    /// Inner value is the hash of the requested block.
     BlockHash([u8; 32]),
-    /// TODO
+
+    /// Response to [`ReadRequest::BlockExtendedHeaderInRange`].
+    ///
+    /// Inner value is the list of extended header(s) of the requested block(s).
     BlockExtendedHeaderInRange(Vec<ExtendedBlockHeader>),
-    /// TODO
+
+    /// Response to [`ReadRequest::ChainHeight`].
+    ///
+    /// Inner value is the chain height, and the top block's hash.
     ChainHeight(u64, [u8; 32]),
-    /// TODO
+
+    /// Response to [`ReadRequest::GeneratedCoins`].
+    ///
+    /// Inner value is the total amount of generated coins so far, in atomic units.
     GeneratedCoins(u64),
-    /// TODO
+
+    /// Response to [`ReadRequest::Outputs`].
+    ///
+    /// Inner value is all the outputs requested,
+    /// associated with their amount and amount index.
     Outputs(HashMap<u64, HashMap<u64, OutputOnChain>>),
-    /// TODO
+
+    /// Response to [`ReadRequest::NumberOutputsWithAmount`].
+    ///
+    /// Inner value is a `HashMap` of all the outputs requested where:
+    /// - Key = output amount
+    /// - Value = count of outputs with the same amount
     NumberOutputsWithAmount(HashMap<u64, usize>),
-    /// TODO
-    /// returns true if key images are spent
+
+    /// Response to [`ReadRequest::CheckKIsNotSpent`].
+    ///
+    /// The inner value is `true` if _any_ of the key images
+    /// were spent (existed in the database already).
+    ///
+    /// The inner value is `false` if _none_ of the key images were spent.
     CheckKIsNotSpent(bool),
-    /// TODO
-    BlockBatchInRange(Vec<(Block, Vec<Transaction>)>),
 
     //------------------------------------------------------ Writes
-    /// TODO
+    /// Response to [`WriteRequest::WriteBlock`].
+    ///
+    /// This response indicates that the requested block has
+    /// successfully been written to the database without error.
     WriteBlockOk,
 }
 
diff --git a/types/src/types.rs b/types/src/types.rs
index 6532aec7..aeef4535 100644
--- a/types/src/types.rs
+++ b/types/src/types.rs
@@ -1,4 +1,4 @@
-//! TODO
+//! Various shared data types in Cuprate.
 
 //---------------------------------------------------------------------------------------------------- Import
 use std::sync::Arc;
@@ -15,84 +15,113 @@ use borsh::{BorshDeserialize, BorshSerialize};
 use serde::{Deserialize, Serialize};
 
 //---------------------------------------------------------------------------------------------------- ExtendedBlockHeader
-/// TODO
+/// Extended header data of a block.
+///
+/// This contains various metadata of a block, but not the block blob itself.
+///
+/// For more definitions, see also: <https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_last_block_header>.
 #[derive(Copy, Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
 pub struct ExtendedBlockHeader {
-    /// TODO
-    /// This is a `cuprate_consensus::HardFork`.
+    /// The block's major version.
+    ///
+    /// This can also be represented with `cuprate_consensus::HardFork`.
+    ///
+    /// This is the same value as [`monero_serai::block::BlockHeader::major_version`].
     pub version: u8,
-    /// TODO
-    /// This is a `cuprate_consensus::HardFork`.
+    /// The block's hard-fork vote.
+    ///
+    /// This can also be represented with `cuprate_consensus::HardFork`.
+    ///
+    /// This is the same value as [`monero_serai::block::BlockHeader::minor_version`].
     pub vote: u8,
-    /// TODO
+    /// The UNIX time at which the block was mined.
     pub timestamp: u64,
-    /// TODO
+    /// The cumulative difficulty of all blocks up until and including this block.
     pub cumulative_difficulty: u128,
-    /// TODO
+    /// The adjusted block size, in bytes.
     pub block_weight: usize,
-    /// TODO
+    /// The long term block weight, based on the median weight of the preceding `100_000` blocks.
     pub long_term_weight: usize,
 }
 
 //---------------------------------------------------------------------------------------------------- TransactionVerificationData
-/// TODO
+/// Data needed to verify a transaction.
+///
+/// This represents data that allows verification of a transaction,
+/// although it doesn't mean it _has_ been verified.
 #[derive(Clone, Debug, PartialEq, Eq)]
 // #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] // FIXME: monero_serai
 // #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
 pub struct TransactionVerificationData {
-    /// TODO
+    /// The transaction itself.
     pub tx: Transaction,
-    /// TODO
+    /// The serialized byte form of [`Self::tx`].
+    ///
+    /// [`Transaction::serialize`].
     pub tx_blob: Vec<u8>,
-    /// TODO
+    /// The transaction's weight.
+    ///
+    /// [`Transaction::weight`].
     pub tx_weight: usize,
-    /// TODO
+    /// The transaction's total fees.
     pub fee: u64,
-    /// TODO
+    /// The transaction's hash.
+    ///
+    /// [`Transaction::hash`].
     pub tx_hash: [u8; 32],
 }
 
 //---------------------------------------------------------------------------------------------------- VerifiedBlockInformation
-/// TODO
+/// Verified information of a block.
+///
+/// This represents a block that has already been verified to be correct.
+///
+/// For more definitions, see also: <https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_block>.
 #[derive(Clone, Debug, PartialEq, Eq)]
 // #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] // FIXME: monero_serai
 // #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
 pub struct VerifiedBlockInformation {
-    /// TODO
+    /// The block itself.
     pub block: Block,
-    /// TODO
+    /// The serialized byte form of [`Self::block`].
+    ///
+    /// [`Block::serialize`].
+    pub block_blob: Vec<u8>,
+    /// All the transactions in the block, excluding the [`Block::miner_tx`].
     pub txs: Vec<Arc<TransactionVerificationData>>,
-    /// TODO
+    /// The block's hash.
+    ///
+    /// [`Block::hash`].
     pub block_hash: [u8; 32],
-    /// TODO
+    /// The block's proof-of-work hash.
     pub pow_hash: [u8; 32],
-    /// TODO
+    /// The block's height.
     pub height: u64,
-    /// TODO
+    /// The amount of generated coins (atomic units) in this block.
     pub generated_coins: u64,
-    /// TODO
+    /// The adjusted block size, in bytes.
     pub weight: usize,
-    /// TODO
+    /// The long term block weight, which is the weight factored in with previous block weights.
     pub long_term_weight: usize,
-    /// TODO
+    /// The cumulative difficulty of all blocks up until and including this block.
     pub cumulative_difficulty: u128,
 }
 
 //---------------------------------------------------------------------------------------------------- OutputOnChain
-/// An already approved previous transaction output.
+/// An already existing transaction output.
 #[derive(Clone, Debug, PartialEq, Eq)]
 // #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] // FIXME: monero_serai
 // #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
 pub struct OutputOnChain {
-    /// TODO
+    /// The block height this output belongs to.
     pub height: u64,
-    /// TODO
+    /// The timelock of this output, if any.
     pub time_lock: Timelock,
-    /// TODO
+    /// The public key of this output, if any.
     pub key: Option<EdwardsPoint>,
-    /// TODO
+    /// The output's commitment.
     pub commitment: EdwardsPoint,
 }
 
diff --git a/typos.toml b/typos.toml
index 299b8eb8..abab1903 100644
--- a/typos.toml
+++ b/typos.toml
@@ -6,6 +6,10 @@ extend-ignore-identifiers-re = [
 	# in file: `/cryptonight/c/oaes_lib.c:1213`
 	# not sure if false-positive or not.
 	"InvMixColums",
+	# cuprate_database's `TxRo` and `tx_ro`
+	"RO",
+	"Ro",
+	"ro",
 ]
 
 [files]