diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 36e23eab..ebf40b12 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -96,22 +96,17 @@ jobs: update: true install: mingw-w64-x86_64-toolchain mingw-w64-x86_64-boost msys2-runtime-devel git mingw-w64-x86_64-cmake mingw-w64-x86_64-ninja - - name: Update Rust (UNIX) - if: matrix.os != 'windows-latest' - run: rustup update - - - name: Switch target (Windows) - if: matrix.os == 'windows-latest' - run: rustup toolchain install stable-x86_64-pc-windows-gnu -c clippy && rustup set default-host x86_64-pc-windows-gnu && rustup default stable-x86_64-pc-windows-gnu - - name: Documentation run: cargo doc --workspace --all-features - name: Clippy (fail on warnings) run: cargo clippy --workspace --all-features --all-targets -- -D warnings + # HACK: how to test both DB backends that are feature-gated? - name: Test - run: cargo test --all-features --workspace + run: | + cargo test --all-features --workspace + cargo test --package cuprate-database --no-default-features --features redb --features service # TODO: upload binaries with `actions/upload-artifact@v3` - name: Build diff --git a/database/Cargo.toml b/database/Cargo.toml index 61b7a6ae..cef709e7 100644 --- a/database/Cargo.toml +++ b/database/Cargo.toml @@ -9,8 +9,8 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/database" keywords = ["cuprate", "database"] [features] -default = ["heed", "service"] -# default = ["redb", "service"] # For testing `redb`. +default = ["heed", "redb", "service"] +# default = ["redb", "service"] heed = ["dep:heed"] redb = ["dep:redb"] service = ["dep:crossbeam", "dep:tokio", "dep:tower"] @@ -22,23 +22,14 @@ cfg-if = { workspace = true } # We only need the `thread` feature if `service` is enabled. # Figure out how to enable features of an already pulled in dependency conditionally. cuprate-helper = { path = "../helper", features = ["fs", "thread"] } -paste = { workspace = true } -# Needed for database resizes. -# They must be a multiple of the OS page size. -page_size = { version = "0.6.0" } -thiserror = { workspace = true } +paste = { workspace = true } +page_size = { version = "0.6.0" } # Needed for database resizes, they must be a multiple of the OS page size. +thiserror = { workspace = true } # `service` feature. crossbeam = { workspace = true, features = ["std"], optional = true } tokio = { workspace = true, features = ["full"], optional = true } tower = { workspace = true, features = ["full"], optional = true } -# SOMEDAY: could be used in `service` as -# the database mutual exclusive `RwLock`. -# -# `parking_lot` has a fairness policy unlike `std`, -# although for now (and until testing is done), -# `std` is fine. -# parking_lot = { workspace = true, optional = true } # Optional features. heed = { version = "0.20.0-alpha.9", optional = true } diff --git a/database/README.md b/database/README.md index 493b0226..9b91bc7f 100644 --- a/database/README.md +++ b/database/README.md @@ -66,21 +66,23 @@ Note that `lib.rs/mod.rs` files are purely for re-exporting/visibility/lints, an ## `src/` The top-level `src/` files. 
-| File | Purpose |
-|------------------|---------|
-| `config.rs` | Database `Env` configuration
-| `constants.rs` | General constants used throughout `cuprate-database`
-| `database.rs` | Abstracted database; `trait DatabaseR{o,w}`
-| `env.rs` | Abstracted database environment; `trait Env`
-| `error.rs` | Database error types
-| `free.rs` | General free functions (related to the database)
-| `key.rs` | Abstracted database keys; `trait Key`
-| `resize.rs` | Database resizing algorithms
-| `storable.rs` | Data (de)serialization; `trait Storable`
-| `table.rs` | Database table abstraction; `trait Table`
-| `tables.rs` | All the table definitions used by `cuprate-database`
-| `transaction.rs` | Database transaction abstraction; `trait TxR{o,w}`
-| `types.rs` | Database table schema types
+| File | Purpose |
+|---------------------|---------|
+| `config.rs` | Database `Env` configuration
+| `constants.rs` | General constants used throughout `cuprate-database`
+| `database.rs` | Abstracted database; `trait DatabaseR{o,w}`
+| `env.rs` | Abstracted database environment; `trait Env`
+| `error.rs` | Database error types
+| `free.rs` | General free functions (related to the database)
+| `key.rs` | Abstracted database keys; `trait Key`
+| `resize.rs` | Database resizing algorithms
+| `storable.rs` | Data (de)serialization; `trait Storable`
+| `table.rs` | Database table abstraction; `trait Table`
+| `tables.rs` | All the table definitions used by `cuprate-database`
+| `to_owned_debug.rs` | Borrowed/owned data abstraction; `trait ToOwnedDebug`
+| `transaction.rs` | Database transaction abstraction; `trait TxR{o,w}`
+| `types.rs` | Database table schema types
+| `value_guard.rs` | Database value "guard" abstraction; `trait ValueGuard`

## `src/ops/`
This folder contains the `cuprate_database::ops` module.
@@ -126,9 +128,10 @@ All backends follow the same file structure:
| `database.rs` | Implementation of `trait DatabaseR{o,w}`
| `env.rs` | Implementation of `trait Env`
| `error.rs` | Implementation of backend's errors to `cuprate_database`'s error types
+| `storable.rs` | Compatibility layer between `cuprate_database::Storable` and backend-specific (de)serialization
+| `tests.rs` | Tests for the specific backend
| `transaction.rs` | Implementation of `trait TxR{o,w}`
| `types.rs` | Type aliases for long backend-specific types
-| `storable.rs` | Compatibility layer between `cuprate_database::Storable` and backend-specific (de)serialization

# Backends
`cuprate-database`'s `trait`s abstract over various actual databases.
@@ -172,7 +175,7 @@ The default maximum value size is [1012 bytes](https://docs.rs/sanakirja/1.4.1/s
As such, it is not implemented.

## `MDBX`
-[`MDBX`](https://erthink.github.io/libmdbx) was a candidate as a backend, however MDBX deprecated the custom key/value comparison functions, this makes it a bit trickier to implement dup tables. It is also quite similar to the main backend LMDB (of which it was originally a fork of).
+[`MDBX`](https://erthink.github.io/libmdbx) was a candidate as a backend; however, MDBX deprecated the custom key/value comparison functions, which makes it a bit trickier to implement duplicate tables. It is also quite similar to the main backend LMDB (of which it was originally a fork).
@@ -198,4 +201,4 @@ TODO: document disk flushing behavior.
- Backend-specific behavior

# (De)serialization
-TODO: document `Pod` and how databases use (de)serialize objects when storing/fetching, essentially using `<[u8], [u8]>`.
\ No newline at end of file +TODO: document `Storable` and how databases (de)serialize types when storing/fetching. diff --git a/database/src/backend/heed/database.rs b/database/src/backend/heed/database.rs index c4919498..8070c836 100644 --- a/database/src/backend/heed/database.rs +++ b/database/src/backend/heed/database.rs @@ -1,13 +1,19 @@ //! Implementation of `trait Database` for `heed`. //---------------------------------------------------------------------------------------------------- Import -use std::marker::PhantomData; +use std::{ + borrow::{Borrow, Cow}, + fmt::Debug, + ops::RangeBounds, + sync::RwLockReadGuard, +}; use crate::{ - backend::heed::types::HeedDb, + backend::heed::{storable::StorableHeed, types::HeedDb}, database::{DatabaseRo, DatabaseRw}, error::RuntimeError, table::Table, + value_guard::ValueGuard, }; //---------------------------------------------------------------------------------------------------- Heed Database Wrappers @@ -26,72 +32,107 @@ use crate::{ /// An opened read-only database associated with a transaction. /// /// Matches `redb::ReadOnlyTable`. -pub(super) struct HeedTableRo<'env, T: Table> { +pub(super) struct HeedTableRo<'tx, T: Table> { /// An already opened database table. - db: HeedDb, + pub(super) db: HeedDb, /// The associated read-only transaction that opened this table. - tx_ro: &'env heed::RoTxn<'env>, + pub(super) tx_ro: &'tx heed::RoTxn<'tx>, } /// An opened read/write database associated with a transaction. /// /// Matches `redb::Table` (read & write). -pub(super) struct HeedTableRw<'env, T: Table> { +pub(super) struct HeedTableRw<'env, 'tx, T: Table> { /// TODO - db: HeedDb, + pub(super) db: HeedDb, /// The associated read/write transaction that opened this table. - tx_rw: &'env mut heed::RwTxn<'env>, + pub(super) tx_rw: &'tx mut heed::RwTxn<'env>, +} + +//---------------------------------------------------------------------------------------------------- Shared functions +// FIXME: we cannot just deref `HeedTableRw -> HeedTableRo` and +// call the functions since the database is held by value, so +// just use these generic functions that both can call instead. + +/// Shared generic `get()` between `HeedTableR{o,w}`. +#[inline] +fn get<'a, T: Table>( + db: &'_ HeedDb, + tx_ro: &'a heed::RoTxn<'_>, + key: &T::Key, +) -> Result + 'a, RuntimeError> { + db.get(tx_ro, key)? + .map(Cow::Borrowed) + .ok_or(RuntimeError::KeyNotFound) +} + +/// Shared generic `get_range()` between `HeedTableR{o,w}`. 
+#[inline] +fn get_range<'a, T: Table, Range>( + db: &'a HeedDb, + tx_ro: &'a heed::RoTxn<'_>, + range: &'a Range, +) -> Result + 'a, RuntimeError>>, RuntimeError> +where + Range: RangeBounds + 'a, +{ + Ok(db.range(tx_ro, range)?.map(|res| Ok(Cow::Borrowed(res?.1)))) } //---------------------------------------------------------------------------------------------------- DatabaseRo Impl -impl DatabaseRo for HeedTableRo<'_, T> { - fn get(&self, key: &T::Key) -> Result<&T::Value, RuntimeError> { - todo!() +impl<'tx, T: Table> DatabaseRo<'tx, T> for HeedTableRo<'tx, T> { + #[inline] + fn get<'a>(&'a self, key: &'a T::Key) -> Result + 'a, RuntimeError> { + get::(&self.db, self.tx_ro, key) } - fn get_range<'a>( + #[inline] + fn get_range<'a, Range>( &'a self, - key: &'a T::Key, - amount: usize, - ) -> Result, RuntimeError> + range: &'a Range, + ) -> Result< + impl Iterator + 'a, RuntimeError>>, + RuntimeError, + > where - ::Value: 'a, + Range: RangeBounds + 'a, { - let iter: std::vec::Drain<'_, &T::Value> = todo!(); - Ok(iter) + get_range::(&self.db, self.tx_ro, range) } } //---------------------------------------------------------------------------------------------------- DatabaseRw Impl -impl DatabaseRo for HeedTableRw<'_, T> { - fn get(&self, key: &T::Key) -> Result<&T::Value, RuntimeError> { - todo!() +impl<'tx, T: Table> DatabaseRo<'tx, T> for HeedTableRw<'_, 'tx, T> { + #[inline] + fn get<'a>(&'a self, key: &'a T::Key) -> Result + 'a, RuntimeError> { + get::(&self.db, self.tx_rw, key) } - fn get_range<'a>( + #[inline] + fn get_range<'a, Range>( &'a self, - key: &'a T::Key, - amount: usize, - ) -> Result, RuntimeError> + range: &'a Range, + ) -> Result< + impl Iterator + 'a, RuntimeError>>, + RuntimeError, + > where - ::Value: 'a, + Range: RangeBounds + 'a, { - let iter: std::vec::Drain<'_, &T::Value> = todo!(); - Ok(iter) + get_range::(&self.db, self.tx_rw, range) } } -impl DatabaseRw for HeedTableRw<'_, T> { +impl<'env, 'tx, T: Table> DatabaseRw<'env, 'tx, T> for HeedTableRw<'env, 'tx, T> { + #[inline] fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError> { - todo!() - } - - fn clear(&mut self) -> Result<(), RuntimeError> { - todo!() + Ok(self.db.put(self.tx_rw, key, value)?) } + #[inline] fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError> { - todo!() + self.db.delete(self.tx_rw, key)?; + Ok(()) } } diff --git a/database/src/backend/heed/env.rs b/database/src/backend/heed/env.rs index 32639391..402cc678 100644 --- a/database/src/backend/heed/env.rs +++ b/database/src/backend/heed/env.rs @@ -1,19 +1,34 @@ //! Implementation of `trait Env` for `heed`. 
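The `heed` environment below (and the `redb` one later in this diff) now exposes the database through the `Env` → `EnvInner` → `TxR{o,w}` → `DatabaseR{o,w}` chain instead of handing out transactions from `Env` directly. A minimal sketch of how a caller walks that chain, modeled on the new `backend/tests.rs`; the external import paths, the `Box<dyn Error>` shortcut, and the `TestType` literal are assumptions for illustration, not part of this patch:

```rust
use cuprate_database::{
    config::Config,
    database::DatabaseRw,
    env::{Env, EnvInner},
    tables::TestTable,
    transaction::TxRw,
    types::TestType,
    ConcreteEnv,
};

fn put_one_value() -> Result<(), Box<dyn std::error::Error>> {
    // Open the environment (heed or redb, depending on enabled features).
    let env = ConcreteEnv::open(Config::low_power(None))?;

    // Borrow the inner environment, then begin a read/write transaction.
    let env_inner = env.env_inner();
    let mut tx_rw = env_inner.tx_rw()?;

    // Open a table and write through the backend-agnostic trait.
    let mut table = env_inner.open_db_rw::<TestTable>(&mut tx_rw)?;
    table.put(&0_i64, &TestType { u: 1, b: 255, _pad: [0; 7] })?;

    // The opened table borrows `tx_rw`, so drop it before committing.
    drop(table);
    TxRw::commit(tx_rw)?;
    Ok(())
}
```

Read-only access follows the same shape via `tx_ro()` / `open_db_ro()`, where `get()` now returns a `ValueGuard` that is turned into a `Cow<'_, T::Value>` with `unguard()`.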
//---------------------------------------------------------------------------------------------------- Import -use std::sync::RwLock; +use std::{ + fmt::Debug, + ops::Deref, + sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}, +}; + +use heed::{DatabaseOpenOptions, EnvFlags, EnvOpenOptions}; use crate::{ - backend::heed::database::{HeedTableRo, HeedTableRw}, - config::Config, + backend::heed::{ + database::{HeedTableRo, HeedTableRw}, + storable::StorableHeed, + types::HeedDb, + }, + config::{Config, SyncMode}, database::{DatabaseRo, DatabaseRw}, - env::Env, + env::{Env, EnvInner}, error::{InitError, RuntimeError}, resize::ResizeAlgorithm, table::Table, }; -//---------------------------------------------------------------------------------------------------- Env +//---------------------------------------------------------------------------------------------------- Consts +/// TODO +const PANIC_MSG_MISSING_TABLE: &str = + "cuprate_database::Env should uphold the invariant that all tables are already created"; + +//---------------------------------------------------------------------------------------------------- ConcreteEnv /// A strongly typed, concrete database environment, backed by `heed`. pub struct ConcreteEnv { /// The actual database environment. @@ -44,11 +59,13 @@ pub struct ConcreteEnv { /// The configuration we were opened with /// (and in current use). - config: Config, + pub(super) config: Config, } impl Drop for ConcreteEnv { fn drop(&mut self) { + // INVARIANT: drop(ConcreteEnv) must sync. + // // TODO: // "if the environment has the MDB_NOSYNC flag set the flushes will be omitted, // and with MDB_MAPASYNC they will be asynchronous." @@ -57,7 +74,7 @@ impl Drop for ConcreteEnv { // We need to do `mdb_env_set_flags(&env, MDB_NOSYNC|MDB_ASYNCMAP, 0)` // to clear the no sync and async flags such that the below `self.sync()` // _actually_ synchronously syncs. - if let Err(e) = self.sync() { + if let Err(e) = crate::Env::sync(self) { // TODO: log error? } @@ -77,30 +94,118 @@ impl Drop for ConcreteEnv { impl Env for ConcreteEnv { const MANUAL_RESIZE: bool = true; const SYNCS_PER_TX: bool = false; - type TxRo<'env> = heed::RoTxn<'env>; - type TxRw<'env> = heed::RwTxn<'env>; + type EnvInner<'env> = RwLockReadGuard<'env, heed::Env>; + type TxRo<'tx> = heed::RoTxn<'tx>; + type TxRw<'tx> = heed::RwTxn<'tx>; #[cold] #[inline(never)] // called once. + #[allow(clippy::items_after_statements)] fn open(config: Config) -> Result { - // INVARIANT: - // We must open LMDB using `heed::EnvOpenOptions::max_readers` - // and input whatever is in `config.reader_threads` or else - // LMDB will start throwing errors if there are >126 readers. - // - // - // We should also leave reader slots for other processes, e.g. `xmrblocks`. - // - // - todo!() - } + // Map our `Config` sync mode to the LMDB environment flags. + // + // + let flags = match config.sync_mode { + SyncMode::Safe => EnvFlags::empty(), + SyncMode::Async => EnvFlags::MAP_ASYNC, + SyncMode::Fast => EnvFlags::NO_SYNC | EnvFlags::WRITE_MAP | EnvFlags::MAP_ASYNC, + // TODO: dynamic syncs are not implemented. + SyncMode::FastThenSafe | SyncMode::Threshold(_) => unimplemented!(), + }; - #[cold] - #[inline(never)] // called once in [`Env::open`]? - fn create_tables(&self, tx_rw: &mut Self::TxRw<'_>) -> Result<(), RuntimeError> { - todo!() + let mut env_open_options = EnvOpenOptions::new(); + + // Set the memory map size to + // (current disk size) + (a bit of leeway) + // to account for empty databases where we + // need to write same tables. 
+ #[allow(clippy::cast_possible_truncation)] // only 64-bit targets + let disk_size_bytes = match std::fs::File::open(&config.db_file) { + Ok(file) => file.metadata()?.len() as usize, + // The database file doesn't exist, 0 bytes. + Err(io_err) if io_err.kind() == std::io::ErrorKind::NotFound => 0, + Err(io_err) => return Err(io_err.into()), + }; + // Add leeway space. + let memory_map_size = crate::resize::fixed_bytes(disk_size_bytes, 1_000_000 /* 1MB */); + env_open_options.map_size(memory_map_size.get()); + + // Set the max amount of database tables. + // We know at compile time how many tables there are. + // TODO: ...how many? + env_open_options.max_dbs(32); + + // LMDB documentation: + // ``` + // Number of slots in the reader table. + // This value was chosen somewhat arbitrarily. 126 readers plus a + // couple mutexes fit exactly into 8KB on my development machine. + // ``` + // + // + // So, we're going to be following these rules: + // - Use at least 126 reader threads + // - Add 16 extra reader threads if <126 + // + // TODO: This behavior is from `monerod`: + // + // I believe this could be adjusted percentage-wise so very high + // thread PCs can benefit from something like (cuprated + anything that uses the DB in the future). + // For now: + // - No other program using our DB exists + // - Almost no-one has a 126+ thread CPU + #[allow(clippy::cast_possible_truncation)] // no-one has `u32::MAX`+ threads + let reader_threads = config.reader_threads.as_threads().get() as u32; + env_open_options.max_readers(if reader_threads < 110 { + 126 + } else { + reader_threads + 16 + }); + + // Open the environment in the user's PATH. + let env = env_open_options.open(config.db_directory())?; + + // TODO: Open/create tables with certain flags + // + // `heed` creates the database if it didn't exist. + // + use crate::tables::{TestTable, TestTable2}; + let mut tx_rw = env.write_txn()?; + + // FIXME: + // These wonderful fully qualified trait types are brought + // to you by `tower::discover::Discover>::Key` collisions. + + // TODO: Create all tables when schema is done. + + /// Function that creates the tables based off the passed `T: Table`. + fn create_table( + env: &heed::Env, + tx_rw: &mut heed::RwTxn<'_>, + ) -> Result<(), InitError> { + DatabaseOpenOptions::new(env) + .name(::NAME) + .types::::Key>, StorableHeed<::Value>>() + .create(tx_rw)?; + Ok(()) + } + + create_table::(&env, &mut tx_rw)?; + create_table::(&env, &mut tx_rw)?; + + // TODO: Set dupsort and comparison functions for certain tables + // + + // INVARIANT: this should never return `ResizeNeeded` due to adding + // some tables since we added some leeway to the memory map above. + tx_rw.commit()?; + + Ok(Self { + env: RwLock::new(env), + config, + }) } fn config(&self) -> &Config { @@ -108,14 +213,14 @@ impl Env for ConcreteEnv { } fn sync(&self) -> Result<(), RuntimeError> { - todo!() + Ok(self.env.read().unwrap().force_sync()?) } fn resize_map(&self, resize_algorithm: Option) { let resize_algorithm = resize_algorithm.unwrap_or_else(|| self.config().resize_algorithm); let current_size_bytes = self.current_map_size(); - let new_size_bytes = resize_algorithm.resize(current_size_bytes); + let new_size_bytes = resize_algorithm.resize(current_size_bytes).get(); // SAFETY: // Resizing requires that we have @@ -126,44 +231,62 @@ impl Env for ConcreteEnv { // unsafe { // INVARIANT: `resize()` returns a valid `usize` to resize to. 
- self.env - .write() - .unwrap() - .resize(new_size_bytes.get()) - .unwrap(); + self.env.write().unwrap().resize(new_size_bytes).unwrap(); } } + #[inline] fn current_map_size(&self) -> usize { self.env.read().unwrap().info().map_size } #[inline] - fn tx_ro(&self) -> Result, RuntimeError> { - todo!() + fn env_inner(&self) -> Self::EnvInner<'_> { + self.env.read().unwrap() + } +} + +//---------------------------------------------------------------------------------------------------- EnvInner Impl +impl<'env> EnvInner<'env, heed::RoTxn<'env>, heed::RwTxn<'env>> for RwLockReadGuard<'env, heed::Env> +where + Self: 'env, +{ + #[inline] + fn tx_ro(&'env self) -> Result, RuntimeError> { + Ok(self.read_txn()?) } #[inline] - fn tx_rw(&self) -> Result, RuntimeError> { - todo!() + fn tx_rw(&'env self) -> Result, RuntimeError> { + Ok(self.write_txn()?) } #[inline] - fn open_db_ro( + fn open_db_ro<'tx, T: Table>( &self, - tx_ro: &Self::TxRo<'_>, - ) -> Result, RuntimeError> { - let tx: HeedTableRo = todo!(); - Ok(tx) + tx_ro: &'tx heed::RoTxn<'env>, + ) -> Result, RuntimeError> { + // Open up a read-only database using our table's const metadata. + Ok(HeedTableRo { + db: self + .open_database(tx_ro, Some(T::NAME))? + .expect(PANIC_MSG_MISSING_TABLE), + tx_ro, + }) } #[inline] - fn open_db_rw( + fn open_db_rw<'tx, T: Table>( &self, - tx_rw: &mut Self::TxRw<'_>, - ) -> Result, RuntimeError> { - let tx: HeedTableRw = todo!(); - Ok(tx) + tx_rw: &'tx mut heed::RwTxn<'env>, + ) -> Result, RuntimeError> { + // Open up a read/write database using our table's const metadata. + Ok(HeedTableRw { + db: self + .open_database(tx_rw, Some(T::NAME))? + .expect(PANIC_MSG_MISSING_TABLE), + tx_rw, + }) } } diff --git a/database/src/backend/heed/storable.rs b/database/src/backend/heed/storable.rs index 8cd4b93b..9014a28e 100644 --- a/database/src/backend/heed/storable.rs +++ b/database/src/backend/heed/storable.rs @@ -1,7 +1,7 @@ //! `cuprate_database::Storable` <-> `heed` serde trait compatibility layer. //---------------------------------------------------------------------------------------------------- Use -use std::{borrow::Cow, marker::PhantomData}; +use std::{borrow::Cow, fmt::Debug, marker::PhantomData}; use heed::{types::Bytes, BoxedError, BytesDecode, BytesEncode, Database}; @@ -12,10 +12,15 @@ use crate::storable::Storable; /// traits on any type that implements `cuprate_database::Storable`. /// /// Never actually gets constructed, just used for trait bound translations. 
-pub(super) struct StorableHeed(PhantomData); +pub(super) struct StorableHeed(PhantomData) +where + T: Storable + ?Sized; //---------------------------------------------------------------------------------------------------- BytesDecode -impl<'a, T: Storable + ?Sized + 'a> BytesDecode<'a> for StorableHeed { +impl<'a, T> BytesDecode<'a> for StorableHeed +where + T: Storable + ?Sized + 'a, +{ type DItem = &'a T; #[inline] @@ -26,7 +31,10 @@ impl<'a, T: Storable + ?Sized + 'a> BytesDecode<'a> for StorableHeed { } //---------------------------------------------------------------------------------------------------- BytesEncode -impl<'a, T: Storable + ?Sized + 'a> BytesEncode<'a> for StorableHeed { +impl<'a, T> BytesEncode<'a> for StorableHeed +where + T: Storable + ?Sized + 'a, +{ type EItem = T; #[inline] @@ -39,6 +47,8 @@ impl<'a, T: Storable + ?Sized + 'a> BytesEncode<'a> for StorableHeed { //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test { + use std::fmt::Debug; + use super::*; // Each `#[test]` function has a `test()` to: @@ -49,7 +59,10 @@ mod test { #[test] /// Assert `BytesEncode::bytes_encode` is accurate. fn bytes_encode() { - fn test(t: &T, expected: &[u8]) { + fn test(t: &T, expected: &[u8]) + where + T: Storable + ?Sized, + { println!("t: {t:?}, expected: {expected:?}"); assert_eq!( as BytesEncode>::bytes_encode(t).unwrap(), @@ -76,7 +89,11 @@ mod test { #[test] /// Assert `BytesDecode::bytes_decode` is accurate. fn bytes_decode() { - fn test(bytes: &[u8], expected: &T) { + fn test(bytes: &[u8], expected: &T) + where + T: Storable + ?Sized + PartialEq + ToOwned + Debug, + T::Owned: Debug, + { println!("bytes: {bytes:?}, expected: {expected:?}"); assert_eq!( as BytesDecode>::bytes_decode(bytes).unwrap(), diff --git a/database/src/backend/heed/transaction.rs b/database/src/backend/heed/transaction.rs index 655ba8b1..123328ee 100644 --- a/database/src/backend/heed/transaction.rs +++ b/database/src/backend/heed/transaction.rs @@ -1,5 +1,7 @@ //! Implementation of `trait TxRo/TxRw` for `heed`. +use std::{ops::Deref, sync::RwLockReadGuard}; + //---------------------------------------------------------------------------------------------------- Import use crate::{ error::RuntimeError, @@ -9,31 +11,26 @@ use crate::{ //---------------------------------------------------------------------------------------------------- TxRo impl TxRo<'_> for heed::RoTxn<'_> { fn commit(self) -> Result<(), RuntimeError> { - todo!() + Ok(self.commit()?) } } //---------------------------------------------------------------------------------------------------- TxRw impl TxRo<'_> for heed::RwTxn<'_> { - /// TODO - /// # Errors - /// TODO fn commit(self) -> Result<(), RuntimeError> { - todo!() + Ok(self.commit()?) } } impl TxRw<'_> for heed::RwTxn<'_> { - /// TODO - /// # Errors - /// TODO fn commit(self) -> Result<(), RuntimeError> { - todo!() + Ok(self.commit()?) } - /// TODO - fn abort(self) { - todo!() + /// This function is infallible. + fn abort(self) -> Result<(), RuntimeError> { + self.abort(); + Ok(()) } } diff --git a/database/src/backend/mod.rs b/database/src/backend/mod.rs index 3b458d8c..2d6800e1 100644 --- a/database/src/backend/mod.rs +++ b/database/src/backend/mod.rs @@ -20,3 +20,6 @@ cfg_if::cfg_if! 
{ pub use heed::ConcreteEnv; } } + +#[cfg(test)] +mod tests; diff --git a/database/src/backend/redb/database.rs b/database/src/backend/redb/database.rs index f5821cae..cb50d266 100644 --- a/database/src/backend/redb/database.rs +++ b/database/src/backend/redb/database.rs @@ -1,62 +1,180 @@ //! Implementation of `trait DatabaseR{o,w}` for `redb`. //---------------------------------------------------------------------------------------------------- Import -use crate::{ - backend::redb::types::{RedbTableRo, RedbTableRw}, - database::{DatabaseRo, DatabaseRw}, - error::RuntimeError, - table::Table, +use std::{ + borrow::{Borrow, Cow}, + fmt::Debug, + marker::PhantomData, + ops::{Bound, Deref, RangeBounds}, }; -//---------------------------------------------------------------------------------------------------- DatabaseRo -impl DatabaseRo for RedbTableRo<'_, T::Key, T::Value> { - fn get(&self, key: &T::Key) -> Result<&T::Value, RuntimeError> { - todo!() +use crate::{ + backend::redb::{ + storable::StorableRedb, + types::{RedbTableRo, RedbTableRw}, + }, + database::{DatabaseRo, DatabaseRw}, + error::RuntimeError, + storable::Storable, + table::Table, + value_guard::ValueGuard, + ToOwnedDebug, +}; + +//---------------------------------------------------------------------------------------------------- Shared functions +// FIXME: we cannot just deref `RedbTableRw -> RedbTableRo` and +// call the functions since the database is held by value, so +// just use these generic functions that both can call instead. + +/// Shared generic `get()` between `RedbTableR{o,w}`. +#[inline] +fn get<'a, T: Table + 'static>( + db: &'a impl redb::ReadableTable, StorableRedb>, + key: &'a T::Key, +) -> Result + 'a, RuntimeError> { + db.get(Cow::Borrowed(key))?.ok_or(RuntimeError::KeyNotFound) +} + +/// Shared generic `get_range()` between `RedbTableR{o,w}`. +#[inline] +fn get_range<'a, T: Table, Range>( + db: &'a impl redb::ReadableTable, StorableRedb>, + range: &'a Range, +) -> Result< + impl Iterator>, RuntimeError>> + 'a, + RuntimeError, +> +where + Range: RangeBounds + 'a, +{ + /// HACK: `redb` sees the database's key type as `Cow<'_, T::Key>`, + /// not `T::Key` directly like `heed` does. As such, it wants the + /// range to be over `Cow<'_, T::Key>`, not `T::Key` directly. + /// + /// If `DatabaseRo` were to want `Cow<'_, T::Key>` as input in `get()`, + /// `get_range()`, it would complicate the API: + /// ```rust,ignore + /// // This would be needed... + /// let range = Cow::Owned(0)..Cow::Owned(1); + /// // ...instead of the more obvious + /// let range = 0..1; + /// ``` + /// + /// As such, `DatabaseRo` only wants `RangeBounds` and + /// we create a compatibility struct here, essentially converting + /// this functions input: + /// ```rust,ignore + /// RangeBound + /// ``` + /// into `redb`'s desired: + /// ```rust,ignore + /// RangeBound> + /// ``` + struct CowRange<'a, K> + where + K: ToOwnedDebug, + { + /// The start bound of `Range`. + start_bound: Bound>, + /// The end bound of `Range`. + end_bound: Bound>, } - fn get_range<'a>( - &'a self, - key: &'a T::Key, - amount: usize, - ) -> Result, RuntimeError> + /// This impl forwards our `T::Key` to be wrapped in a Cow. 
+ impl<'a, K> RangeBounds> for CowRange<'a, K> where - ::Value: 'a, + K: ToOwnedDebug, { - let iter: std::vec::Drain<'_, &T::Value> = todo!(); - Ok(iter) + fn start_bound(&self) -> Bound<&Cow<'a, K>> { + self.start_bound.as_ref() + } + + fn end_bound(&self) -> Bound<&Cow<'a, K>> { + self.end_bound.as_ref() + } + } + + let start_bound = match range.start_bound() { + Bound::Included(t) => Bound::Included(Cow::Borrowed(t)), + Bound::Excluded(t) => Bound::Excluded(Cow::Borrowed(t)), + Bound::Unbounded => Bound::Unbounded, + }; + let end_bound = match range.end_bound() { + Bound::Included(t) => Bound::Included(Cow::Borrowed(t)), + Bound::Excluded(t) => Bound::Excluded(Cow::Borrowed(t)), + Bound::Unbounded => Bound::Unbounded, + }; + let range = CowRange { + start_bound, + end_bound, + }; + + Ok(db.range(range)?.map(|result| { + let (_key, value_guard) = result?; + Ok(value_guard) + })) +} + +//---------------------------------------------------------------------------------------------------- DatabaseRo +impl<'tx, T: Table + 'static> DatabaseRo<'tx, T> for RedbTableRo<'tx, T::Key, T::Value> { + #[inline] + fn get<'a>(&'a self, key: &'a T::Key) -> Result + 'a, RuntimeError> { + get::(self, key) + } + + #[inline] + fn get_range<'a, Range>( + &'a self, + range: &'a Range, + ) -> Result< + impl Iterator, RuntimeError>> + 'a, + RuntimeError, + > + where + Range: RangeBounds + 'a, + { + get_range::(self, range) } } //---------------------------------------------------------------------------------------------------- DatabaseRw -impl DatabaseRo for RedbTableRw<'_, '_, T::Key, T::Value> { - fn get(&self, key: &T::Key) -> Result<&T::Value, RuntimeError> { - todo!() +impl<'tx, T: Table + 'static> DatabaseRo<'tx, T> for RedbTableRw<'_, 'tx, T::Key, T::Value> { + #[inline] + fn get<'a>(&'a self, key: &'a T::Key) -> Result + 'a, RuntimeError> { + get::(self, key) } - fn get_range<'a>( + #[inline] + fn get_range<'a, Range>( &'a self, - key: &'a T::Key, - amount: usize, - ) -> Result, RuntimeError> + range: &'a Range, + ) -> Result< + impl Iterator, RuntimeError>> + 'a, + RuntimeError, + > where - ::Value: 'a, + Range: RangeBounds + 'a, { - let iter: std::vec::Drain<'_, &T::Value> = todo!(); - Ok(iter) + get_range::(self, range) } } -impl DatabaseRw for RedbTableRw<'_, '_, T::Key, T::Value> { +impl<'env, 'tx, T: Table + 'static> DatabaseRw<'env, 'tx, T> + for RedbTableRw<'env, 'tx, T::Key, T::Value> +{ + // `redb` returns the value after `insert()/remove()` + // we end with Ok(()) instead. + + #[inline] fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError> { - todo!() - } - - fn clear(&mut self) -> Result<(), RuntimeError> { - todo!() + self.insert(Cow::Borrowed(key), Cow::Borrowed(value))?; + Ok(()) } + #[inline] fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError> { - todo!() + self.remove(Cow::Borrowed(key))?; + Ok(()) } } diff --git a/database/src/backend/redb/env.rs b/database/src/backend/redb/env.rs index b5a455b0..11484981 100644 --- a/database/src/backend/redb/env.rs +++ b/database/src/backend/redb/env.rs @@ -1,15 +1,19 @@ //! Implementation of `trait Env` for `redb`. 
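`redb` has no environment-level sync flags, so this backend carries a `redb::Durability` inside `EnvInner` and applies it per write transaction in `tx_rw()`. The concrete `SyncMode` → `Durability` arms are elided by the hunk below; a plausible sketch based on the `SyncMode` documentation in `config.rs` (an inference, not code copied from this patch):

```rust
use cuprate_database::config::SyncMode;

/// Inferred from the `SyncMode` docs in `config.rs`; not the patch's actual code.
fn durability_for(sync_mode: SyncMode) -> redb::Durability {
    match sync_mode {
        // Every commit waits until the data is fully written to disk.
        SyncMode::Safe => redb::Durability::Immediate,
        // Commits become durable eventually; the OS flushes in the background.
        SyncMode::Async => redb::Durability::Eventual,
        // Never actively sync; only flush on shutdown (`Env::sync` / `Drop`).
        SyncMode::Fast => redb::Durability::None,
        // Dynamic sync modes are not implemented yet.
        SyncMode::FastThenSafe | SyncMode::Threshold(_) => unimplemented!(),
    }
}
```

The stronger `redb::Durability::Paranoid` value is only used by `Env::sync()` below to force a full flush.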
//---------------------------------------------------------------------------------------------------- Import -use std::{path::Path, sync::Arc}; +use std::{fmt::Debug, ops::Deref, path::Path, sync::Arc}; use crate::{ - backend::redb::types::{RedbTableRo, RedbTableRw}, + backend::redb::{ + storable::StorableRedb, + types::{RedbTableRo, RedbTableRw}, + }, config::{Config, SyncMode}, database::{DatabaseRo, DatabaseRw}, - env::Env, + env::{Env, EnvInner}, error::{InitError, RuntimeError}, table::Table, + TxRw, }; //---------------------------------------------------------------------------------------------------- ConcreteEnv @@ -30,6 +34,7 @@ pub struct ConcreteEnv { impl Drop for ConcreteEnv { fn drop(&mut self) { + // INVARIANT: drop(ConcreteEnv) must sync. if let Err(e) = self.sync() { // TODO: log error? } @@ -42,12 +47,13 @@ impl Drop for ConcreteEnv { impl Env for ConcreteEnv { const MANUAL_RESIZE: bool = false; const SYNCS_PER_TX: bool = false; - - type TxRo<'env> = redb::ReadTransaction<'env>; - type TxRw<'env> = redb::WriteTransaction<'env>; + type EnvInner<'env> = (&'env redb::Database, redb::Durability); + type TxRo<'tx> = redb::ReadTransaction<'tx>; + type TxRw<'tx> = redb::WriteTransaction<'tx>; #[cold] #[inline(never)] // called once. + #[allow(clippy::items_after_statements)] fn open(config: Config) -> Result { // TODO: dynamic syncs are not implemented. let durability = match config.sync_mode { @@ -61,13 +67,57 @@ impl Env for ConcreteEnv { SyncMode::FastThenSafe | SyncMode::Threshold(_) => unimplemented!(), }; - todo!() - } + let env_builder = redb::Builder::new(); - #[cold] - #[inline(never)] // called once in [`Env::open`]?` - fn create_tables(&self, tx_rw: &mut Self::TxRw<'_>) -> Result<(), RuntimeError> { - todo!() + // TODO: we can set cache sizes with: + // env_builder.set_cache(bytes); + + // Open the database file, create if needed. + let db_file = std::fs::OpenOptions::new() + .read(true) + .write(true) + .create(true) + .open(config.db_file())?; + let mut env = env_builder.create_file(db_file)?; + + // Create all database tables. + // `redb` creates tables if they don't exist. + // + use crate::tables::{TestTable, TestTable2}; + let tx_rw = env.begin_write()?; + + // FIXME: + // These wonderful fully qualified trait types are brought + // to you by `tower::discover::Discover>::Key` collisions. + + // TODO: Create all tables when schema is done. + + /// Function that creates the tables based off the passed `T: Table`. + fn create_table(tx_rw: &redb::WriteTransaction<'_>) -> Result<(), InitError> { + let table: redb::TableDefinition< + 'static, + StorableRedb<::Key>, + StorableRedb<::Value>, + > = redb::TableDefinition::new(::NAME); + + // `redb` creates tables on open if not already created. + tx_rw.open_table(table)?; + Ok(()) + } + + create_table::(&tx_rw)?; + create_table::(&tx_rw)?; + tx_rw.commit()?; + + // Check for file integrity. + // TODO: should we do this? is it slow? + env.check_integrity()?; + + Ok(Self { + env, + config, + durability, + }) } fn config(&self) -> &Config { @@ -75,41 +125,65 @@ impl Env for ConcreteEnv { } fn sync(&self) -> Result<(), RuntimeError> { - todo!() + // `redb`'s syncs are tied with write transactions, + // so just create one, don't do anything and commit. 
+ let mut tx_rw = self.env.begin_write()?; + tx_rw.set_durability(redb::Durability::Paranoid); + TxRw::commit(tx_rw) + } + + fn env_inner(&self) -> Self::EnvInner<'_> { + (&self.env, self.durability) + } +} + +//---------------------------------------------------------------------------------------------------- EnvInner Impl +impl<'env> EnvInner<'env, redb::ReadTransaction<'env>, redb::WriteTransaction<'env>> + for (&'env redb::Database, redb::Durability) +where + Self: 'env, +{ + #[inline] + fn tx_ro(&'env self) -> Result, RuntimeError> { + Ok(self.0.begin_read()?) } #[inline] - fn tx_ro(&self) -> Result, RuntimeError> { - todo!() - } - - #[inline] - fn tx_rw(&self) -> Result, RuntimeError> { + fn tx_rw(&'env self) -> Result, RuntimeError> { // `redb` has sync modes on the TX level, unlike heed, // which sets it at the Environment level. // // So, set the durability here before returning the TX. - let mut tx_rw = self.env.begin_write()?; - tx_rw.set_durability(self.durability); + let mut tx_rw = self.0.begin_write()?; + tx_rw.set_durability(self.1); Ok(tx_rw) } #[inline] - fn open_db_ro( + fn open_db_ro<'tx, T: Table>( &self, - tx_ro: &Self::TxRo<'_>, - ) -> Result, RuntimeError> { - let tx: RedbTableRo<'_, T::Key, T::Value> = todo!(); - Ok(tx) + tx_ro: &'tx redb::ReadTransaction<'env>, + ) -> Result, RuntimeError> { + // Open up a read-only database using our `T: Table`'s const metadata. + let table: redb::TableDefinition<'static, StorableRedb, StorableRedb> = + redb::TableDefinition::new(T::NAME); + + // INVARIANT: Our `?` error conversion will panic if the table does not exist. + Ok(tx_ro.open_table(table)?) } #[inline] - fn open_db_rw( + fn open_db_rw<'tx, T: Table>( &self, - tx_rw: &mut Self::TxRw<'_>, - ) -> Result, RuntimeError> { - let tx: RedbTableRw<'_, '_, T::Key, T::Value> = todo!(); - Ok(tx) + tx_rw: &'tx mut redb::WriteTransaction<'env>, + ) -> Result, RuntimeError> { + // Open up a read/write database using our `T: Table`'s const metadata. + let table: redb::TableDefinition<'static, StorableRedb, StorableRedb> = + redb::TableDefinition::new(T::NAME); + + // `redb` creates tables if they don't exist, so this should never panic. + // + Ok(tx_rw.open_table(table)?) } } diff --git a/database/src/backend/redb/error.rs b/database/src/backend/redb/error.rs index 025c5411..28cd6b7e 100644 --- a/database/src/backend/redb/error.rs +++ b/database/src/backend/redb/error.rs @@ -4,10 +4,13 @@ //! `redb`'s errors are `#[non_exhaustive]`... //---------------------------------------------------------------------------------------------------- Import -use crate::constants::DATABASE_CORRUPT_MSG; +use crate::{ + constants::DATABASE_CORRUPT_MSG, + error::{InitError, RuntimeError}, +}; -//---------------------------------------------------------------------------------------------------- DatabaseError -impl From for crate::InitError { +//---------------------------------------------------------------------------------------------------- InitError +impl From for InitError { /// Created by `redb` in: /// - [`redb::Database::open`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.open). 
fn from(error: redb::DatabaseError) -> Self { @@ -34,9 +37,67 @@ impl From for crate::InitError { } } -//---------------------------------------------------------------------------------------------------- TransactionError +impl From for InitError { + /// Created by `redb` in: + /// - [`redb::Database::open`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.check_integrity) + fn from(error: redb::StorageError) -> Self { + use redb::StorageError as E; + + match error { + E::Io(e) => Self::Io(e), + E::Corrupted(s) => Self::Corrupt, + // HACK: Handle new errors as `redb` adds them. + _ => Self::Unknown(Box::new(error)), + } + } +} + +impl From for InitError { + /// Created by `redb` in: + /// - [`redb::Database::begin_write`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.begin_write) + fn from(error: redb::TransactionError) -> Self { + use redb::StorageError as E; + + match error { + redb::TransactionError::Storage(error) => error.into(), + // HACK: Handle new errors as `redb` adds them. + _ => Self::Unknown(Box::new(error)), + } + } +} + +impl From for InitError { + /// Created by `redb` in: + /// - [`redb::WriteTransaction::open_table`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.open_table) + fn from(error: redb::TableError) -> Self { + use redb::StorageError as E2; + use redb::TableError as E; + + match error { + E::Storage(error) => error.into(), + // HACK: Handle new errors as `redb` adds them. + _ => Self::Unknown(Box::new(error)), + } + } +} + +impl From for InitError { + /// Created by `redb` in: + /// - [`redb::WriteTransaction::commit`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.commit) + fn from(error: redb::CommitError) -> Self { + use redb::StorageError as E; + + match error { + redb::CommitError::Storage(error) => error.into(), + // HACK: Handle new errors as `redb` adds them. + _ => Self::Unknown(Box::new(error)), + } + } +} + +//---------------------------------------------------------------------------------------------------- RuntimeError #[allow(clippy::fallible_impl_from)] // We need to panic sometimes. -impl From for crate::RuntimeError { +impl From for RuntimeError { /// Created by `redb` in: /// - [`redb::Database::begin_write`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.begin_write) /// - [`redb::Database::begin_read`](https://docs.rs/redb/1.5.0/redb/struct.Database.html#method.begin_read) @@ -52,9 +113,24 @@ impl From for crate::RuntimeError { } } -//---------------------------------------------------------------------------------------------------- TableError #[allow(clippy::fallible_impl_from)] // We need to panic sometimes. -impl From for crate::RuntimeError { +impl From for RuntimeError { + /// Created by `redb` in: + /// - [`redb::WriteTransaction::commit`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.commit) + fn from(error: redb::CommitError) -> Self { + use redb::StorageError as E; + + match error { + redb::CommitError::Storage(error) => error.into(), + + // HACK: Handle new errors as `redb` adds them. + _ => unreachable!(), + } + } +} + +#[allow(clippy::fallible_impl_from)] // We need to panic sometimes. 
+impl From for RuntimeError { /// Created by `redb` in: /// - [`redb::WriteTransaction::open_table`](https://docs.rs/redb/1.5.0/redb/struct.WriteTransaction.html#method.open_table) /// - [`redb::ReadTransaction::open_table`](https://docs.rs/redb/1.5.0/redb/struct.ReadTransaction.html#method.open_table) @@ -79,9 +155,8 @@ impl From for crate::RuntimeError { } } -//---------------------------------------------------------------------------------------------------- StorageError #[allow(clippy::fallible_impl_from)] // We need to panic sometimes. -impl From for crate::RuntimeError { +impl From for RuntimeError { /// Created by `redb` in: /// - [`redb::Table`](https://docs.rs/redb/1.5.0/redb/struct.Table.html) functions /// - [`redb::ReadOnlyTable`](https://docs.rs/redb/1.5.0/redb/struct.ReadOnlyTable.html) functions diff --git a/database/src/backend/redb/storable.rs b/database/src/backend/redb/storable.rs index a021ec60..89b4bcac 100644 --- a/database/src/backend/redb/storable.rs +++ b/database/src/backend/redb/storable.rs @@ -1,23 +1,42 @@ //! `cuprate_database::Storable` <-> `redb` serde trait compatibility layer. //---------------------------------------------------------------------------------------------------- Use -use std::{any::Any, borrow::Cow, cmp::Ordering, marker::PhantomData}; +use std::{any::Any, borrow::Cow, cmp::Ordering, fmt::Debug, marker::PhantomData}; use redb::{RedbKey, RedbValue, TypeName}; use crate::{key::Key, storable::Storable}; //---------------------------------------------------------------------------------------------------- StorableRedb -/// The glue struct that implements `redb`'s (de)serialization +/// The glue structs that implements `redb`'s (de)serialization /// traits on any type that implements `cuprate_database::Key`. /// -/// Never actually gets constructed, just used for trait bound translations. +/// Never actually get constructed, just used for trait bound translations. #[derive(Debug)] -pub(super) struct StorableRedb(PhantomData); +pub(super) struct StorableRedb(PhantomData) +where + T: Storable + ?Sized; + +impl crate::value_guard::ValueGuard for redb::AccessGuard<'_, StorableRedb> { + #[inline] + fn unguard(&self) -> Cow<'_, T> { + self.value() + } +} + +impl crate::value_guard::ValueGuard for &redb::AccessGuard<'_, StorableRedb> { + #[inline] + fn unguard(&self) -> Cow<'_, T> { + self.value() + } +} //---------------------------------------------------------------------------------------------------- RedbKey // If `Key` is also implemented, this can act as a `RedbKey`. -impl RedbKey for StorableRedb { +impl RedbKey for StorableRedb +where + T: Key, +{ #[inline] fn compare(left: &[u8], right: &[u8]) -> Ordering { ::compare(left, right) @@ -25,8 +44,11 @@ impl RedbKey for StorableRedb { } //---------------------------------------------------------------------------------------------------- RedbValue -impl RedbValue for StorableRedb { - type SelfType<'a> = &'a T where Self: 'a; +impl RedbValue for StorableRedb +where + T: Storable + ?Sized, +{ + type SelfType<'a> = Cow<'a, T> where Self: 'a; type AsBytes<'a> = &'a [u8] where Self: 'a; #[inline] @@ -35,11 +57,18 @@ impl RedbValue for StorableRedb { } #[inline] - fn from_bytes<'a>(data: &'a [u8]) -> &'a T + fn from_bytes<'a>(data: &'a [u8]) -> Self::SelfType<'a> where Self: 'a, { - ::from_bytes(data) + // Use the bytes directly if possible... + if T::ALIGN == 1 { + Cow::Borrowed(::from_bytes(data)) + // ...else, make sure the bytes are aligned + // when casting by allocating a new buffer. 
+ } else { + ::from_bytes_unaligned(data) + } } #[inline] @@ -47,7 +76,7 @@ impl RedbValue for StorableRedb { where Self: 'a + 'b, { - ::as_bytes(value) + ::as_bytes(value.as_ref()) } #[inline] @@ -70,12 +99,15 @@ mod test { #[test] /// Assert `RedbKey::compare` works for `StorableRedb`. fn compare() { - fn test(left: T, right: T, expected: Ordering) { + fn test(left: T, right: T, expected: Ordering) + where + T: Key, + { println!("left: {left:?}, right: {right:?}, expected: {expected:?}"); assert_eq!( as RedbKey>::compare( - as RedbValue>::as_bytes(&&left), - as RedbValue>::as_bytes(&&right) + as RedbValue>::as_bytes(&Cow::Borrowed(&left)), + as RedbValue>::as_bytes(&Cow::Borrowed(&right)) ), expected ); @@ -90,7 +122,10 @@ mod test { #[test] /// Assert `RedbKey::fixed_width` is accurate. fn fixed_width() { - fn test(expected: Option) { + fn test(expected: Option) + where + T: Storable + ?Sized, + { assert_eq!( as RedbValue>::fixed_width(), expected); } @@ -113,9 +148,15 @@ mod test { #[test] /// Assert `RedbKey::as_bytes` is accurate. fn as_bytes() { - fn test(t: &T, expected: &[u8]) { + fn test(t: &T, expected: &[u8]) + where + T: Storable + ?Sized, + { println!("t: {t:?}, expected: {expected:?}"); - assert_eq!( as RedbValue>::as_bytes(&t), expected); + assert_eq!( + as RedbValue>::as_bytes(&Cow::Borrowed(t)), + expected + ); } test::<()>(&(), &[]); @@ -137,11 +178,14 @@ mod test { #[test] /// Assert `RedbKey::from_bytes` is accurate. fn from_bytes() { - fn test(bytes: &[u8], expected: &T) { + fn test(bytes: &[u8], expected: &T) + where + T: Storable + PartialEq + ?Sized, + { println!("bytes: {bytes:?}, expected: {expected:?}"); assert_eq!( as RedbValue>::from_bytes(bytes), - expected + Cow::Borrowed(expected) ); } diff --git a/database/src/backend/redb/transaction.rs b/database/src/backend/redb/transaction.rs index b3f007e8..758a11a8 100644 --- a/database/src/backend/redb/transaction.rs +++ b/database/src/backend/redb/transaction.rs @@ -10,19 +10,22 @@ use crate::{ //---------------------------------------------------------------------------------------------------- TxRo impl TxRo<'_> for redb::ReadTransaction<'_> { + /// This function is infallible. fn commit(self) -> Result<(), RuntimeError> { - todo!() + // `redb`'s read transactions cleanup in their `drop()`, there is no `commit()`. + // https://docs.rs/redb/latest/src/redb/transactions.rs.html#1258-1265 + Ok(()) } } //---------------------------------------------------------------------------------------------------- TxRw impl TxRw<'_> for redb::WriteTransaction<'_> { fn commit(self) -> Result<(), RuntimeError> { - todo!() + Ok(self.commit()?) } - fn abort(self) { - todo!() + fn abort(self) -> Result<(), RuntimeError> { + Ok(self.abort()?) } } diff --git a/database/src/backend/tests.rs b/database/src/backend/tests.rs new file mode 100644 index 00000000..1875c566 --- /dev/null +++ b/database/src/backend/tests.rs @@ -0,0 +1,192 @@ +//! Tests for `cuprate_database`'s backends. +//! +//! These tests are fully trait-based, meaning there +//! is no reference to `backend/`-specific types. +//! +//! As such, which backend is tested is +//! dependant on the feature flags used. +//! +//! | Feature flag | Tested backend | +//! |---------------|----------------| +//! | Only `redb` | `redb` +//! | Anything else | `heed` +//! +//! `redb`, and it only must be enabled for it to be tested. 
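Which backend these tests exercise comes down to the `cfg_if!` in `backend/mod.rs`; only its tail (`pub use heed::ConcreteEnv;` and the closing braces) is visible in this diff. A sketch of the selection it presumably performs, matching the table above (the exact `cfg` expression is an assumption):

```rust
// Assumed shape of the backend selection in `backend/mod.rs`.
cfg_if::cfg_if! {
    // If *only* `redb` is enabled, use it as the backend...
    if #[cfg(all(feature = "redb", not(feature = "heed")))] {
        pub use redb::ConcreteEnv;
    } else {
        // ...otherwise (including "both enabled", as with `--all-features`), use `heed`.
        pub use heed::ConcreteEnv;
    }
}
```

This is also why the CI change at the top of this diff runs `cargo test --all-features --workspace` (which ends up testing `heed`) followed by `cargo test --package cuprate-database --no-default-features --features redb --features service` (which tests `redb`).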
+ +//---------------------------------------------------------------------------------------------------- Import +use std::borrow::{Borrow, Cow}; + +use crate::{ + config::{Config, SyncMode}, + database::{DatabaseRo, DatabaseRw}, + env::{Env, EnvInner}, + error::{InitError, RuntimeError}, + resize::ResizeAlgorithm, + table::Table, + tables::{TestTable, TestTable2}, + transaction::{TxRo, TxRw}, + types::TestType, + value_guard::ValueGuard, + ConcreteEnv, +}; + +//---------------------------------------------------------------------------------------------------- Tests +/// Create an `Env` in a temporarily directory. +/// The directory is automatically removed after the `TempDir` is dropped. +/// +/// TODO: changing this to `-> impl Env` causes lifetime errors... +fn tmp_concrete_env() -> (ConcreteEnv, tempfile::TempDir) { + let tempdir = tempfile::tempdir().unwrap(); + let config = Config::low_power(Some(tempdir.path().into())); + let env = ConcreteEnv::open(config).unwrap(); + + (env, tempdir) +} + +/// Simply call [`Env::open`]. If this fails, something is really wrong. +#[test] +fn open() { + tmp_concrete_env(); +} + +/// Create database transactions, but don't write any data. +#[test] +fn tx() { + let (env, _tempdir) = tmp_concrete_env(); + let env_inner = env.env_inner(); + + TxRo::commit(env_inner.tx_ro().unwrap()).unwrap(); + TxRw::commit(env_inner.tx_rw().unwrap()).unwrap(); + TxRw::abort(env_inner.tx_rw().unwrap()).unwrap(); +} + +/// Open (and verify) that all database tables +/// exist already after calling [`Env::open`]. +#[test] +#[allow(clippy::items_after_statements, clippy::significant_drop_tightening)] +fn open_db() { + let (env, _tempdir) = tmp_concrete_env(); + let env_inner = env.env_inner(); + let tx_ro = env_inner.tx_ro().unwrap(); + let mut tx_rw = env_inner.tx_rw().unwrap(); + + // Open all tables in read-only mode. + // This should be updated when tables are modified. + env_inner.open_db_ro::(&tx_ro).unwrap(); + env_inner.open_db_ro::(&tx_ro).unwrap(); + TxRo::commit(tx_ro).unwrap(); + + // Open all tables in read/write mode. + env_inner.open_db_rw::(&mut tx_rw).unwrap(); + env_inner.open_db_rw::(&mut tx_rw).unwrap(); + TxRw::commit(tx_rw).unwrap(); +} + +/// Test `Env` resizes. +#[test] +fn resize() { + // This test is only valid for `Env`'s that need to resize manually. + if !ConcreteEnv::MANUAL_RESIZE { + return; + } + + let (env, _tempdir) = tmp_concrete_env(); + + // Resize by the OS page size. + let page_size = crate::resize::page_size(); + let old_size = env.current_map_size(); + env.resize_map(Some(ResizeAlgorithm::FixedBytes(page_size))); + + // Assert it resized exactly by the OS page size. + let new_size = env.current_map_size(); + assert_eq!(new_size, old_size + page_size.get()); +} + +/// Test that `Env`'s that don't manually resize. +#[test] +#[should_panic = "unreachable"] +fn non_manual_resize_1() { + if ConcreteEnv::MANUAL_RESIZE { + unreachable!(); + } else { + let (env, _tempdir) = tmp_concrete_env(); + env.resize_map(None); + } +} +#[test] +#[should_panic = "unreachable"] +fn non_manual_resize_2() { + if ConcreteEnv::MANUAL_RESIZE { + unreachable!(); + } else { + let (env, _tempdir) = tmp_concrete_env(); + env.current_map_size(); + } +} + +/// Test all `DatabaseR{o,w}` operations. 
+#[test] +#[allow( + clippy::items_after_statements, + clippy::significant_drop_tightening, + clippy::used_underscore_binding +)] +fn db_read_write() { + let (env, _tempdir) = tmp_concrete_env(); + let env_inner = env.env_inner(); + let mut tx_rw = env_inner.tx_rw().unwrap(); + let mut table = env_inner.open_db_rw::(&mut tx_rw).unwrap(); + + const KEY: i64 = 0_i64; + const VALUE: TestType = TestType { + u: 1, + b: 255, + _pad: [0; 7], + }; + + // Insert `0..100` keys. + for i in 0..100 { + table.put(&(KEY + i), &VALUE).unwrap(); + } + + // Assert the 1st key is there. + { + let guard = table.get(&KEY).unwrap(); + let cow: Cow<'_, TestType> = guard.unguard(); + let value: &TestType = cow.as_ref(); + + // Make sure all field accesses are aligned. + assert_eq!(value, &VALUE); + assert_eq!(value.u, VALUE.u); + assert_eq!(value.b, VALUE.b); + assert_eq!(value._pad, VALUE._pad); + } + + // Assert the whole range is there. + { + let range = table.get_range(&..).unwrap(); + let mut i = 0; + for result in range { + let guard = result.unwrap(); + let cow: Cow<'_, TestType> = guard.unguard(); + let value: &TestType = cow.as_ref(); + + assert_eq!(value, &VALUE); + assert_eq!(value.u, VALUE.u); + assert_eq!(value.b, VALUE.b); + assert_eq!(value._pad, VALUE._pad); + + i += 1; + } + assert_eq!(i, 100); + } + + // Assert `get_range()` works. + let range = KEY..(KEY + 100); + assert_eq!(100, table.get_range(&range).unwrap().count()); + + // Assert deleting works. + table.delete(&KEY).unwrap(); + let value = table.get(&KEY); + assert!(matches!(value, Err(RuntimeError::KeyNotFound))); +} diff --git a/database/src/config.rs b/database/src/config.rs deleted file mode 100644 index d65b8943..00000000 --- a/database/src/config.rs +++ /dev/null @@ -1,463 +0,0 @@ -//! Database [`Env`](crate::Env) configuration. -//! -//! This module contains the main [`Config`]uration struct -//! for the database [`Env`](crate::Env)ironment, and data -//! structures related to any configuration setting. -//! -//! These configurations are processed at runtime, meaning -//! the `Env` can/will dynamically adjust its behavior -//! based on these values. - -//---------------------------------------------------------------------------------------------------- Import -use std::{ - borrow::Cow, - num::NonZeroUsize, - path::{Path, PathBuf}, -}; - -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -use cuprate_helper::fs::cuprate_database_dir; - -use crate::{constants::DATABASE_DATA_FILENAME, resize::ResizeAlgorithm}; - -//---------------------------------------------------------------------------------------------------- Config -/// Database [`Env`](crate::Env) configuration. -/// -/// This is the struct passed to [`Env::open`](crate::Env::open) that -/// allows the database to be configured in various ways. -/// -/// TODO: there's probably more options to add. -#[derive(Debug, Clone, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Config { - //------------------------ Database PATHs - // These are private since we don't want - // users messing with them after construction. - /// The directory used to store all database files. - /// - /// By default, if no value is provided in the [`Config`] - /// constructor functions, this will be [`cuprate_database_dir`]. - pub(crate) db_directory: Cow<'static, Path>, - /// The actual database data file. - /// - /// This is private, and created from the above `db_directory`. 
- pub(crate) db_file: Cow<'static, Path>, - - /// Disk synchronization mode. - pub sync_mode: SyncMode, - - /// Database reader thread count. - pub reader_threads: ReaderThreads, - - /// Database memory map resizing algorithm. - /// - /// This is used as the default fallback, but - /// custom algorithms can be used as well with - /// [`Env::resize_map`](crate::Env::resize_map). - pub resize_algorithm: ResizeAlgorithm, -} - -impl Config { - /// Private function to acquire [`Config::db_file`] - /// from the user provided (or default) [`Config::db_directory`]. - /// - /// As the database data file PATH is just the directory + the filename, - /// we only need the directory from the user/Config, and can add it here. - fn return_db_dir_and_file( - db_directory: Option, - ) -> (Cow<'static, Path>, Cow<'static, Path>) { - // INVARIANT: all PATH safety checks are done - // in `helper::fs`. No need to do them here. - let db_directory = - db_directory.map_or_else(|| Cow::Borrowed(cuprate_database_dir()), Cow::Owned); - - // Add the database filename to the directory. - let mut db_file = db_directory.to_path_buf(); - db_file.push(DATABASE_DATA_FILENAME); - - (db_directory, Cow::Owned(db_file)) - } - - /// Create a new [`Config`] with sane default settings. - /// - /// # `db_directory` - /// If this is `Some`, it will be used as the - /// directory that contains all database files. - /// - /// If `None`, it will use the default directory [`cuprate_database_dir`]. - pub fn new(db_directory: Option) -> Self { - let (db_directory, db_file) = Self::return_db_dir_and_file(db_directory); - Self { - db_directory, - db_file, - sync_mode: SyncMode::FastThenSafe, - reader_threads: ReaderThreads::OnePerThread, - resize_algorithm: ResizeAlgorithm::new(), - } - } - - /// Create a [`Config`] with the highest performing, - /// but also most resource-intensive & maybe risky settings. - /// - /// Good default for testing, and resource-available machines. - /// - /// # `db_directory` - /// If this is `Some`, it will be used as the - /// directory that contains all database files. - /// - /// If `None`, it will use the default directory [`cuprate_database_dir`]. - pub fn fast(db_directory: Option) -> Self { - let (db_directory, db_file) = Self::return_db_dir_and_file(db_directory); - Self { - db_directory, - db_file, - sync_mode: SyncMode::Fast, - reader_threads: ReaderThreads::OnePerThread, - resize_algorithm: ResizeAlgorithm::new(), - } - } - - /// Create a [`Config`] with the lowest performing, - /// but also least resource-intensive settings. - /// - /// Good default for resource-limited machines, e.g. a cheap VPS. - /// - /// # `db_directory` - /// If this is `Some`, it will be used as the - /// directory that contains all database files. - /// - /// If `None`, it will use the default directory [`cuprate_database_dir`]. - pub fn low_power(db_directory: Option) -> Self { - let (db_directory, db_file) = Self::return_db_dir_and_file(db_directory); - Self { - db_directory, - db_file, - sync_mode: SyncMode::FastThenSafe, - reader_threads: ReaderThreads::One, - resize_algorithm: ResizeAlgorithm::new(), - } - } - - /// Return the absolute [`Path`] to the database directory. - /// - /// This will be the `db_directory` given - /// (or default) during [`Config`] construction. - pub const fn db_directory(&self) -> &Cow<'_, Path> { - &self.db_directory - } - - /// Return the absolute [`Path`] to the database data file. - /// - /// This will be based off the `db_directory` given - /// (or default) during [`Config`] construction. 
- pub const fn db_file(&self) -> &Cow<'_, Path> { - &self.db_file - } -} - -impl Default for Config { - /// Same as `Self::new(None)`. - /// - /// ```rust - /// # use cuprate_database::config::*; - /// assert_eq!(Config::default(), Config::new(None)); - /// ``` - fn default() -> Self { - Self::new(None) - } -} - -//---------------------------------------------------------------------------------------------------- SyncMode -/// Disk synchronization mode. -/// -/// This controls how/when the database syncs its data to disk. -/// -/// Regardless of the variant chosen, dropping [`Env`](crate::Env) -/// will always cause it to fully sync to disk. -/// -/// # Sync vs Async -/// All invariants except [`SyncMode::Async`] & [`SyncMode::Fast`] -/// are `synchronous`, as in the database will wait until the OS has -/// finished syncing all the data to disk before continuing. -/// -/// `SyncMode::Async` & `SyncMode::Fast` are `asynchronous`, meaning -/// the database will _NOT_ wait until the data is fully synced to disk -/// before continuing. Note that this doesn't mean the database itself -/// won't be synchronized between readers/writers, but rather that the -/// data _on disk_ may not be immediately synchronized after a write. -/// -/// Something like: -/// ```rust,ignore -/// db.put("key", value); -/// db.get("key"); -/// ``` -/// will be fine, most likely pulling from memory instead of disk. -/// -/// # TODO -/// Dynamic sync's are not yet supported. -/// -/// Only: -/// -/// - [`SyncMode::Safe`] -/// - [`SyncMode::Async`] -/// - [`SyncMode::Fast`] -/// -/// are supported, all other variants will panic on [`crate::Env::open`]. -#[derive(Copy, Clone, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum SyncMode { - /// Use [`SyncMode::Fast`] until fully synced, - /// then use [`SyncMode::Safe`]. - /// - /// # TODO: how to implement this? - /// ref: - /// monerod-solution: - /// cuprate-issue: - /// - /// We could: - /// ```rust,ignore - /// if current_db_block <= top_block.saturating_sub(N) { - /// // don't sync() - /// } else { - /// // sync() - /// } - /// ``` - /// where N is some threshold we pick that is _close_ enough - /// to being synced where we want to start being safer. - /// - /// Essentially, when we are in a certain % range of being finished, - /// switch to safe mode, until then, go fast. - #[default] - FastThenSafe, - - /// Fully sync to disk per transaction. - /// - /// Every database transaction commit will - /// fully sync all data to disk, _synchronously_, - /// so the database (writer) halts until synced. - /// - /// This is expected to be very slow. - /// - /// This matches: - /// - LMDB without any special sync flags - /// - [`redb::Durability::Immediate`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Immediate) - Safe, - - /// Asynchrously sync to disk per transaction. - /// - /// This is the same as [`SyncMode::Safe`], - /// but the syncs will be asynchronous, i.e. - /// each transaction commit will sync to disk, - /// but only eventually, not necessarily immediately. - /// - /// This matches: - /// - [`MDB_MAPASYNC`](http://www.lmdb.tech/doc/group__mdb__env.html#gab034ed0d8e5938090aef5ee0997f7e94) - /// - [`redb::Durability::Eventual`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Eventual) - Async, - - /// Fully sync to disk after we cross this transaction threshold. 
- /// - /// After committing [`usize`] amount of database - /// transactions, it will be sync to disk. - /// - /// `0` behaves the same as [`SyncMode::Safe`], and a ridiculously large - /// number like `usize::MAX` is practically the same as [`SyncMode::Fast`]. - Threshold(usize), - - /// Only flush at database shutdown. - /// - /// This is the fastest, yet unsafest option. - /// - /// It will cause the database to never _actively_ sync, - /// letting the OS decide when to flush data to disk. - /// - /// This matches: - /// - [`MDB_NOSYNC`](http://www.lmdb.tech/doc/group__mdb__env.html#ga5791dd1adb09123f82dd1f331209e12e) + [`MDB_MAPASYNC`](http://www.lmdb.tech/doc/group__mdb__env.html#gab034ed0d8e5938090aef5ee0997f7e94) - /// - [`redb::Durability::None`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.None) - /// - /// `monerod` reference: - /// - /// # Corruption - /// In the case of a system crash, the database - /// may become corrupted when using this option. - // - // TODO: we could call this `unsafe` - // and use that terminology in the config file - // so users know exactly what they are getting - // themselves into. - Fast, -} - -//---------------------------------------------------------------------------------------------------- ReaderThreads -/// Amount of database reader threads to spawn. -/// -/// This controls how many reader thread [`crate::service`]'s -/// thread-pool will spawn to receive and send requests/responses. -/// -/// It will always be at least 1, up until the amount of threads on the machine. -/// -/// The main function used to extract an actual -/// usable thread count out of this is [`ReaderThreads::as_threads`]. -#[derive(Copy, Clone, Debug, Default, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum ReaderThreads { - #[default] - /// Spawn 1 reader thread per available thread on the machine. - /// - /// For example, a `16-core, 32-thread` Ryzen 5950x will - /// spawn `32` reader threads using this setting. - OnePerThread, - - /// Only spawn 1 reader thread. - One, - - /// Spawn a specified amount of reader threads. - /// - /// Note that no matter how large this value, it will be - /// ultimately capped at the amount of system threads. - /// - /// # `0` - /// `ReaderThreads::Number(0)` represents "use maximum value", - /// as such, it is equal to [`ReaderThreads::OnePerThread`]. - /// - /// ```rust - /// # use cuprate_database::config::*; - /// let reader_threads = ReaderThreads::from(0_usize); - /// assert!(matches!(reader_threads, ReaderThreads::OnePerThread)); - /// ``` - Number(usize), - - /// Spawn a specified % of reader threads. - /// - /// This must be a value in-between `0.0..1.0` - /// where `1.0` represents [`ReaderThreads::OnePerThread`]. - /// - /// # Example - /// For example, using a `16-core, 32-thread` Ryzen 5950x CPU: - /// - /// | Input | Total thread used | - /// |------------------------------------|-------------------| - /// | `ReaderThreads::Percent(0.0)` | 32 (maximum value) - /// | `ReaderThreads::Percent(0.5)` | 16 - /// | `ReaderThreads::Percent(0.75)` | 24 - /// | `ReaderThreads::Percent(1.0)` | 32 - /// | `ReaderThreads::Percent(2.0)` | 32 (saturating) - /// | `ReaderThreads::Percent(f32::NAN)` | 32 (non-normal default) - /// - /// # `0.0` - /// `ReaderThreads::Percent(0.0)` represents "use maximum value", - /// as such, it is equal to [`ReaderThreads::OnePerThread`]. 
- /// - /// # Not quite `0.0` - /// If the thread count multiplied by the percentage ends up being - /// non-zero, but not 1 thread, the minimum value 1 will be returned. - /// - /// ```rust - /// # use cuprate_database::config::*; - /// assert_eq!(ReaderThreads::Percent(0.000000001).as_threads().get(), 1); - /// ``` - Percent(f32), -} - -impl ReaderThreads { - /// This converts [`ReaderThreads`] into a safe, usable - /// number representing how many threads to spawn. - /// - /// This function will always return a number in-between `1..=total_thread_count`. - /// - /// It uses [`cuprate_helper::thread::threads()`] internally to determine the total thread count. - /// - /// # Example - /// ```rust - /// use cuprate_database::config::ReaderThreads as Rt; - /// - /// let total_threads: std::num::NonZeroUsize = - /// cuprate_helper::thread::threads(); - /// - /// assert_eq!(Rt::OnePerThread.as_threads(), total_threads); - /// - /// assert_eq!(Rt::One.as_threads().get(), 1); - /// - /// assert_eq!(Rt::Number(0).as_threads(), total_threads); - /// assert_eq!(Rt::Number(1).as_threads().get(), 1); - /// assert_eq!(Rt::Number(usize::MAX).as_threads(), total_threads); - /// - /// assert_eq!(Rt::Percent(0.01).as_threads().get(), 1); - /// assert_eq!(Rt::Percent(0.0).as_threads(), total_threads); - /// assert_eq!(Rt::Percent(1.0).as_threads(), total_threads); - /// assert_eq!(Rt::Percent(f32::NAN).as_threads(), total_threads); - /// assert_eq!(Rt::Percent(f32::INFINITY).as_threads(), total_threads); - /// assert_eq!(Rt::Percent(f32::NEG_INFINITY).as_threads(), total_threads); - /// - /// // Percentage only works on more than 1 thread. - /// if total_threads.get() > 1 { - /// assert_eq!( - /// Rt::Percent(0.5).as_threads().get(), - /// (total_threads.get() as f32 / 2.0) as usize, - /// ); - /// } - /// ``` - // - // INVARIANT: - // LMDB will error if we input zero, so don't allow that. - // - pub fn as_threads(&self) -> NonZeroUsize { - let total_threads = cuprate_helper::thread::threads(); - - match self { - Self::OnePerThread => total_threads, // use all threads - Self::One => NonZeroUsize::MIN, // one - Self::Number(n) => match NonZeroUsize::new(*n) { - Some(n) => std::cmp::min(n, total_threads), // saturate at total threads - None => total_threads, // 0 == maximum value - }, - - // We handle the casting loss. - #[allow( - clippy::cast_precision_loss, - clippy::cast_possible_truncation, - clippy::cast_sign_loss - )] - Self::Percent(f) => { - // If non-normal float, use the default (all threads). - if !f.is_normal() || !(0.0..=1.0).contains(f) { - return total_threads; - } - - // 0.0 == maximum value. - if *f == 0.0 { - return total_threads; - } - - // Calculate percentage of total threads. - let thread_percent = (total_threads.get() as f32) * f; - match NonZeroUsize::new(thread_percent as usize) { - Some(n) => std::cmp::min(n, total_threads), // saturate at total threads. - None => { - // We checked for `0.0` above, so what this - // being 0 means that the percentage was _so_ - // low it made our thread count something like - // 0.99. In this case, just use 1 thread. - NonZeroUsize::MIN - } - } - } - } - } -} - -impl> From for ReaderThreads { - /// Create a [`ReaderThreads::Number`]. - /// - /// If `value` is `0`, this will return [`ReaderThreads::OnePerThread`]. 
- fn from(value: T) -> Self { - let u: usize = value.into(); - if u == 0 { - Self::OnePerThread - } else { - Self::Number(u) - } - } -} diff --git a/database/src/config/backend.rs b/database/src/config/backend.rs new file mode 100644 index 00000000..ed826344 --- /dev/null +++ b/database/src/config/backend.rs @@ -0,0 +1,31 @@ +//! TODO + +//---------------------------------------------------------------------------------------------------- Import +use std::{ + borrow::Cow, + num::NonZeroUsize, + path::{Path, PathBuf}, +}; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +use cuprate_helper::fs::cuprate_database_dir; + +use crate::{ + config::{ReaderThreads, SyncMode}, + constants::DATABASE_DATA_FILENAME, + resize::ResizeAlgorithm, +}; + +//---------------------------------------------------------------------------------------------------- Backend +/// TODO +#[derive(Copy, Clone, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum Backend { + #[default] + /// TODO + Heed, + /// TODO + Redb, +} diff --git a/database/src/config/config.rs b/database/src/config/config.rs new file mode 100644 index 00000000..1791a540 --- /dev/null +++ b/database/src/config/config.rs @@ -0,0 +1,177 @@ +//! Database [`Env`](crate::Env) configuration. +//! +//! This module contains the main [`Config`]uration struct +//! for the database [`Env`](crate::Env)ironment, and data +//! structures related to any configuration setting. +//! +//! These configurations are processed at runtime, meaning +//! the `Env` can/will dynamically adjust its behavior +//! based on these values. + +//---------------------------------------------------------------------------------------------------- Import +use std::{ + borrow::Cow, + num::NonZeroUsize, + path::{Path, PathBuf}, +}; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +use cuprate_helper::fs::cuprate_database_dir; + +use crate::{ + config::{ReaderThreads, SyncMode}, + constants::DATABASE_DATA_FILENAME, + resize::ResizeAlgorithm, +}; + +//---------------------------------------------------------------------------------------------------- Config +/// Database [`Env`](crate::Env) configuration. +/// +/// This is the struct passed to [`Env::open`](crate::Env::open) that +/// allows the database to be configured in various ways. +/// +/// TODO: there's probably more options to add. +#[derive(Debug, Clone, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Config { + //------------------------ Database PATHs + // These are private since we don't want + // users messing with them after construction. + /// The directory used to store all database files. + /// + /// By default, if no value is provided in the [`Config`] + /// constructor functions, this will be [`cuprate_database_dir`]. + /// + /// TODO: we should also support `/etc/cuprated.conf`. + /// This could be represented with an `enum DbPath { Default, Custom, Etc, }` + pub(crate) db_directory: Cow<'static, Path>, + /// The actual database data file. + /// + /// This is private, and created from the above `db_directory`. + pub(crate) db_file: Cow<'static, Path>, + + /// Disk synchronization mode. + pub sync_mode: SyncMode, + + /// Database reader thread count. + pub reader_threads: ReaderThreads, + + /// Database memory map resizing algorithm. 
+ /// + /// This is used as the default fallback, but + /// custom algorithms can be used as well with + /// [`Env::resize_map`](crate::Env::resize_map). + pub resize_algorithm: ResizeAlgorithm, +} + +impl Config { + /// Private function to acquire [`Config::db_file`] + /// from the user provided (or default) [`Config::db_directory`]. + /// + /// As the database data file PATH is just the directory + the filename, + /// we only need the directory from the user/Config, and can add it here. + fn return_db_dir_and_file( + db_directory: Option, + ) -> (Cow<'static, Path>, Cow<'static, Path>) { + // INVARIANT: all PATH safety checks are done + // in `helper::fs`. No need to do them here. + let db_directory = + db_directory.map_or_else(|| Cow::Borrowed(cuprate_database_dir()), Cow::Owned); + + // Add the database filename to the directory. + let mut db_file = db_directory.to_path_buf(); + db_file.push(DATABASE_DATA_FILENAME); + + (db_directory, Cow::Owned(db_file)) + } + + /// Create a new [`Config`] with sane default settings. + /// + /// # `db_directory` + /// If this is `Some`, it will be used as the + /// directory that contains all database files. + /// + /// If `None`, it will use the default directory [`cuprate_database_dir`]. + pub fn new(db_directory: Option) -> Self { + let (db_directory, db_file) = Self::return_db_dir_and_file(db_directory); + Self { + db_directory, + db_file, + sync_mode: SyncMode::default(), + reader_threads: ReaderThreads::OnePerThread, + resize_algorithm: ResizeAlgorithm::default(), + } + } + + /// Create a [`Config`] with the highest performing, + /// but also most resource-intensive & maybe risky settings. + /// + /// Good default for testing, and resource-available machines. + /// + /// # `db_directory` + /// If this is `Some`, it will be used as the + /// directory that contains all database files. + /// + /// If `None`, it will use the default directory [`cuprate_database_dir`]. + pub fn fast(db_directory: Option) -> Self { + let (db_directory, db_file) = Self::return_db_dir_and_file(db_directory); + Self { + db_directory, + db_file, + sync_mode: SyncMode::Fast, + reader_threads: ReaderThreads::OnePerThread, + resize_algorithm: ResizeAlgorithm::default(), + } + } + + /// Create a [`Config`] with the lowest performing, + /// but also least resource-intensive settings. + /// + /// Good default for resource-limited machines, e.g. a cheap VPS. + /// + /// # `db_directory` + /// If this is `Some`, it will be used as the + /// directory that contains all database files. + /// + /// If `None`, it will use the default directory [`cuprate_database_dir`]. + pub fn low_power(db_directory: Option) -> Self { + let (db_directory, db_file) = Self::return_db_dir_and_file(db_directory); + Self { + db_directory, + db_file, + sync_mode: SyncMode::default(), + reader_threads: ReaderThreads::One, + resize_algorithm: ResizeAlgorithm::default(), + } + } + + /// Return the absolute [`Path`] to the database directory. + /// + /// This will be the `db_directory` given + /// (or default) during [`Config`] construction. + pub const fn db_directory(&self) -> &Cow<'_, Path> { + &self.db_directory + } + + /// Return the absolute [`Path`] to the database data file. + /// + /// This will be based off the `db_directory` given + /// (or default) during [`Config`] construction. + pub const fn db_file(&self) -> &Cow<'_, Path> { + &self.db_file + } +} + +impl Default for Config { + /// Same as `Self::new(None)`. 
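A rough usage sketch of the three `Config` constructors above. The generic on `db_directory` is elided in this diff (it appears to be `Option<PathBuf>`); passing `None` sidesteps that, and the asserted field values follow the constructor bodies and doctests shown in this hunk.

```rust
use cuprate_database::config::{Config, ReaderThreads, SyncMode};

// Sane defaults; the directory falls back to `cuprate_database_dir()`.
let config = Config::new(None);
assert_eq!(config, Config::default());
assert_eq!(config.reader_threads, ReaderThreads::OnePerThread);

// Performance-oriented settings.
let fast = Config::fast(None);
assert_eq!(fast.sync_mode, SyncMode::Fast);

// Resource-constrained settings.
let low_power = Config::low_power(None);
assert_eq!(low_power.reader_threads, ReaderThreads::One);

// The data file always lives inside the chosen directory.
assert!(config.db_file().starts_with(config.db_directory()));
```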
+ /// + /// ```rust + /// # use cuprate_database::config::*; + /// assert_eq!(Config::default(), Config::new(None)); + /// ``` + fn default() -> Self { + Self::new(None) + } +} diff --git a/database/src/config/mod.rs b/database/src/config/mod.rs new file mode 100644 index 00000000..a8da828c --- /dev/null +++ b/database/src/config/mod.rs @@ -0,0 +1,10 @@ +//! TODO + +mod config; +pub use config::Config; + +mod reader_threads; +pub use reader_threads::ReaderThreads; + +mod sync_mode; +pub use sync_mode::SyncMode; diff --git a/database/src/config/reader_threads.rs b/database/src/config/reader_threads.rs new file mode 100644 index 00000000..0dc35581 --- /dev/null +++ b/database/src/config/reader_threads.rs @@ -0,0 +1,195 @@ +//! Database [`Env`](crate::Env) configuration. +//! +//! This module contains the main [`Config`]uration struct +//! for the database [`Env`](crate::Env)ironment, and data +//! structures related to any configuration setting. +//! +//! These configurations are processed at runtime, meaning +//! the `Env` can/will dynamically adjust its behavior +//! based on these values. + +//---------------------------------------------------------------------------------------------------- Import +use std::{ + borrow::Cow, + num::NonZeroUsize, + path::{Path, PathBuf}, +}; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +use cuprate_helper::fs::cuprate_database_dir; + +use crate::{constants::DATABASE_DATA_FILENAME, resize::ResizeAlgorithm}; + +//---------------------------------------------------------------------------------------------------- ReaderThreads +/// Amount of database reader threads to spawn. +/// +/// This controls how many reader thread [`crate::service`]'s +/// thread-pool will spawn to receive and send requests/responses. +/// +/// It will always be at least 1, up until the amount of threads on the machine. +/// +/// The main function used to extract an actual +/// usable thread count out of this is [`ReaderThreads::as_threads`]. +#[derive(Copy, Clone, Debug, Default, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum ReaderThreads { + #[default] + /// Spawn 1 reader thread per available thread on the machine. + /// + /// For example, a `16-core, 32-thread` Ryzen 5950x will + /// spawn `32` reader threads using this setting. + OnePerThread, + + /// Only spawn 1 reader thread. + One, + + /// Spawn a specified amount of reader threads. + /// + /// Note that no matter how large this value, it will be + /// ultimately capped at the amount of system threads. + /// + /// # `0` + /// `ReaderThreads::Number(0)` represents "use maximum value", + /// as such, it is equal to [`ReaderThreads::OnePerThread`]. + /// + /// ```rust + /// # use cuprate_database::config::*; + /// let reader_threads = ReaderThreads::from(0_usize); + /// assert!(matches!(reader_threads, ReaderThreads::OnePerThread)); + /// ``` + Number(usize), + + /// Spawn a specified % of reader threads. + /// + /// This must be a value in-between `0.0..1.0` + /// where `1.0` represents [`ReaderThreads::OnePerThread`]. 
+ /// + /// # Example + /// For example, using a `16-core, 32-thread` Ryzen 5950x CPU: + /// + /// | Input | Total thread used | + /// |------------------------------------|-------------------| + /// | `ReaderThreads::Percent(0.0)` | 32 (maximum value) + /// | `ReaderThreads::Percent(0.5)` | 16 + /// | `ReaderThreads::Percent(0.75)` | 24 + /// | `ReaderThreads::Percent(1.0)` | 32 + /// | `ReaderThreads::Percent(2.0)` | 32 (saturating) + /// | `ReaderThreads::Percent(f32::NAN)` | 32 (non-normal default) + /// + /// # `0.0` + /// `ReaderThreads::Percent(0.0)` represents "use maximum value", + /// as such, it is equal to [`ReaderThreads::OnePerThread`]. + /// + /// # Not quite `0.0` + /// If the thread count multiplied by the percentage ends up being + /// non-zero, but not 1 thread, the minimum value 1 will be returned. + /// + /// ```rust + /// # use cuprate_database::config::*; + /// assert_eq!(ReaderThreads::Percent(0.000000001).as_threads().get(), 1); + /// ``` + Percent(f32), +} + +impl ReaderThreads { + /// This converts [`ReaderThreads`] into a safe, usable + /// number representing how many threads to spawn. + /// + /// This function will always return a number in-between `1..=total_thread_count`. + /// + /// It uses [`cuprate_helper::thread::threads()`] internally to determine the total thread count. + /// + /// # Example + /// ```rust + /// use cuprate_database::config::ReaderThreads as Rt; + /// + /// let total_threads: std::num::NonZeroUsize = + /// cuprate_helper::thread::threads(); + /// + /// assert_eq!(Rt::OnePerThread.as_threads(), total_threads); + /// + /// assert_eq!(Rt::One.as_threads().get(), 1); + /// + /// assert_eq!(Rt::Number(0).as_threads(), total_threads); + /// assert_eq!(Rt::Number(1).as_threads().get(), 1); + /// assert_eq!(Rt::Number(usize::MAX).as_threads(), total_threads); + /// + /// assert_eq!(Rt::Percent(0.01).as_threads().get(), 1); + /// assert_eq!(Rt::Percent(0.0).as_threads(), total_threads); + /// assert_eq!(Rt::Percent(1.0).as_threads(), total_threads); + /// assert_eq!(Rt::Percent(f32::NAN).as_threads(), total_threads); + /// assert_eq!(Rt::Percent(f32::INFINITY).as_threads(), total_threads); + /// assert_eq!(Rt::Percent(f32::NEG_INFINITY).as_threads(), total_threads); + /// + /// // Percentage only works on more than 1 thread. + /// if total_threads.get() > 1 { + /// assert_eq!( + /// Rt::Percent(0.5).as_threads().get(), + /// (total_threads.get() as f32 / 2.0) as usize, + /// ); + /// } + /// ``` + // + // INVARIANT: + // LMDB will error if we input zero, so don't allow that. + // + pub fn as_threads(&self) -> NonZeroUsize { + let total_threads = cuprate_helper::thread::threads(); + + match self { + Self::OnePerThread => total_threads, // use all threads + Self::One => NonZeroUsize::MIN, // one + Self::Number(n) => match NonZeroUsize::new(*n) { + Some(n) => std::cmp::min(n, total_threads), // saturate at total threads + None => total_threads, // 0 == maximum value + }, + + // We handle the casting loss. + #[allow( + clippy::cast_precision_loss, + clippy::cast_possible_truncation, + clippy::cast_sign_loss + )] + Self::Percent(f) => { + // If non-normal float, use the default (all threads). + if !f.is_normal() || !(0.0..=1.0).contains(f) { + return total_threads; + } + + // 0.0 == maximum value. + if *f == 0.0 { + return total_threads; + } + + // Calculate percentage of total threads. 
+ let thread_percent = (total_threads.get() as f32) * f; + match NonZeroUsize::new(thread_percent as usize) { + Some(n) => std::cmp::min(n, total_threads), // saturate at total threads. + None => { + // We checked for `0.0` above, so what this + // being 0 means that the percentage was _so_ + // low it made our thread count something like + // 0.99. In this case, just use 1 thread. + NonZeroUsize::MIN + } + } + } + } + } +} + +impl> From for ReaderThreads { + /// Create a [`ReaderThreads::Number`]. + /// + /// If `value` is `0`, this will return [`ReaderThreads::OnePerThread`]. + fn from(value: T) -> Self { + let u: usize = value.into(); + if u == 0 { + Self::OnePerThread + } else { + Self::Number(u) + } + } +} diff --git a/database/src/config/sync_mode.rs b/database/src/config/sync_mode.rs new file mode 100644 index 00000000..7dba062a --- /dev/null +++ b/database/src/config/sync_mode.rs @@ -0,0 +1,144 @@ +//! Database [`Env`](crate::Env) configuration. +//! +//! This module contains the main [`Config`]uration struct +//! for the database [`Env`](crate::Env)ironment, and data +//! structures related to any configuration setting. +//! +//! These configurations are processed at runtime, meaning +//! the `Env` can/will dynamically adjust its behavior +//! based on these values. + +//---------------------------------------------------------------------------------------------------- Import +use std::{ + borrow::Cow, + num::NonZeroUsize, + path::{Path, PathBuf}, +}; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +use cuprate_helper::fs::cuprate_database_dir; + +use crate::{constants::DATABASE_DATA_FILENAME, resize::ResizeAlgorithm}; + +//---------------------------------------------------------------------------------------------------- SyncMode +/// Disk synchronization mode. +/// +/// This controls how/when the database syncs its data to disk. +/// +/// Regardless of the variant chosen, dropping [`Env`](crate::Env) +/// will always cause it to fully sync to disk. +/// +/// # Sync vs Async +/// All invariants except [`SyncMode::Async`] & [`SyncMode::Fast`] +/// are `synchronous`, as in the database will wait until the OS has +/// finished syncing all the data to disk before continuing. +/// +/// `SyncMode::Async` & `SyncMode::Fast` are `asynchronous`, meaning +/// the database will _NOT_ wait until the data is fully synced to disk +/// before continuing. Note that this doesn't mean the database itself +/// won't be synchronized between readers/writers, but rather that the +/// data _on disk_ may not be immediately synchronized after a write. +/// +/// Something like: +/// ```rust,ignore +/// db.put("key", value); +/// db.get("key"); +/// ``` +/// will be fine, most likely pulling from memory instead of disk. +/// +/// # TODO +/// Dynamic sync's are not yet supported. +/// +/// Only: +/// +/// - [`SyncMode::Safe`] +/// - [`SyncMode::Async`] +/// - [`SyncMode::Fast`] +/// +/// are supported, all other variants will panic on [`crate::Env::open`]. +#[derive(Copy, Clone, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum SyncMode { + /// Use [`SyncMode::Fast`] until fully synced, + /// then use [`SyncMode::Safe`]. + /// + /// # TODO: how to implement this? 
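The `ReaderThreads::Percent` arm above boils down to a clamp into `1..=total_threads`. A self-contained re-derivation of that rule, illustrative only (the thread count is passed in explicitly here instead of being read from `cuprate_helper`):

```rust
use std::num::NonZeroUsize;

// Mirrors the clamping rules of `ReaderThreads::Percent` shown above.
fn percent_to_threads(total: NonZeroUsize, f: f32) -> NonZeroUsize {
    // Non-normal floats (NaN, ±inf, 0.0) and out-of-range values => all threads.
    if !f.is_normal() || !(0.0..=1.0).contains(&f) {
        return total;
    }
    match NonZeroUsize::new((total.get() as f32 * f) as usize) {
        Some(n) => n.min(total),   // saturate at the total thread count
        None => NonZeroUsize::MIN, // rounded down to 0 => use 1 thread
    }
}

let total = NonZeroUsize::new(32).unwrap();
assert_eq!(percent_to_threads(total, 0.5).get(), 16);
assert_eq!(percent_to_threads(total, 0.0).get(), 32);
assert_eq!(percent_to_threads(total, 2.0).get(), 32);
assert_eq!(percent_to_threads(total, 0.000000001).get(), 1);
```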
+ /// ref: + /// monerod-solution: + /// cuprate-issue: + /// + /// We could: + /// ```rust,ignore + /// if current_db_block <= top_block.saturating_sub(N) { + /// // don't sync() + /// } else { + /// // sync() + /// } + /// ``` + /// where N is some threshold we pick that is _close_ enough + /// to being synced where we want to start being safer. + /// + /// Essentially, when we are in a certain % range of being finished, + /// switch to safe mode, until then, go fast. + FastThenSafe, + + #[default] + /// Fully sync to disk per transaction. + /// + /// Every database transaction commit will + /// fully sync all data to disk, _synchronously_, + /// so the database (writer) halts until synced. + /// + /// This is expected to be very slow. + /// + /// This matches: + /// - LMDB without any special sync flags + /// - [`redb::Durability::Immediate`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Immediate) + Safe, + + /// Asynchrously sync to disk per transaction. + /// + /// This is the same as [`SyncMode::Safe`], + /// but the syncs will be asynchronous, i.e. + /// each transaction commit will sync to disk, + /// but only eventually, not necessarily immediately. + /// + /// This matches: + /// - [`MDB_MAPASYNC`](http://www.lmdb.tech/doc/group__mdb__env.html#gab034ed0d8e5938090aef5ee0997f7e94) + /// - [`redb::Durability::Eventual`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Eventual) + Async, + + /// Fully sync to disk after we cross this transaction threshold. + /// + /// After committing [`usize`] amount of database + /// transactions, it will be sync to disk. + /// + /// `0` behaves the same as [`SyncMode::Safe`], and a ridiculously large + /// number like `usize::MAX` is practically the same as [`SyncMode::Fast`]. + Threshold(usize), + + /// Only flush at database shutdown. + /// + /// This is the fastest, yet unsafest option. + /// + /// It will cause the database to never _actively_ sync, + /// letting the OS decide when to flush data to disk. + /// + /// This matches: + /// - [`MDB_NOSYNC`](http://www.lmdb.tech/doc/group__mdb__env.html#ga5791dd1adb09123f82dd1f331209e12e) + [`MDB_MAPASYNC`](http://www.lmdb.tech/doc/group__mdb__env.html#gab034ed0d8e5938090aef5ee0997f7e94) + /// - [`redb::Durability::None`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.None) + /// + /// `monerod` reference: + /// + /// # Corruption + /// In the case of a system crash, the database + /// may become corrupted when using this option. + // + // TODO: we could call this `unsafe` + // and use that terminology in the config file + // so users know exactly what they are getting + // themselves into. + Fast, +} diff --git a/database/src/database.rs b/database/src/database.rs index 819849bb..80eae7df 100644 --- a/database/src/database.rs +++ b/database/src/database.rs @@ -1,54 +1,73 @@ //! Abstracted database; `trait DatabaseRo` & `trait DatabaseRw`. //---------------------------------------------------------------------------------------------------- Import -use crate::{error::RuntimeError, table::Table}; +use std::{ + borrow::{Borrow, Cow}, + fmt::Debug, + ops::{Deref, RangeBounds}, +}; + +use crate::{ + error::RuntimeError, + table::Table, + transaction::{TxRo, TxRw}, + value_guard::ValueGuard, +}; //---------------------------------------------------------------------------------------------------- DatabaseRo /// Database (key-value store) read abstraction. /// -/// TODO: document relation between `DatabaseRo` <-> `DatabaseRw`. 
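`SyncMode::Threshold` above is not implemented yet (only `Safe`, `Async`, and `Fast` are accepted by `Env::open`). A minimal sketch of the bookkeeping a writer could use for it, purely illustrative and not part of this crate:

```rust
// Sync to disk once every `threshold` committed transactions.
struct ThresholdTracker {
    threshold: usize,
    committed: usize,
}

impl ThresholdTracker {
    /// Returns `true` if the writer should `sync()` after this commit.
    fn should_sync(&mut self) -> bool {
        // `Threshold(0)` behaves like `SyncMode::Safe`: sync every commit.
        if self.threshold == 0 {
            return true;
        }
        self.committed += 1;
        if self.committed >= self.threshold {
            self.committed = 0;
            true
        } else {
            false
        }
    }
}

let mut tracker = ThresholdTracker { threshold: 3, committed: 0 };
let syncs: Vec<bool> = (0..6).map(|_| tracker.should_sync()).collect();
assert_eq!(syncs, [false, false, true, false, false, true]);
```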
-pub trait DatabaseRo { - /// TODO - /// # Errors - /// TODO +/// This is a read-only database table, +/// write operations are defined in [`DatabaseRw`]. +pub trait DatabaseRo<'tx, T: Table> { + /// Get the value corresponding to a key. + /// + /// This returns a guard to the value, not the value itself. + /// See [`ValueGuard`] for more info. /// - /// This will return [`RuntimeError::KeyNotFound`] wrapped in [`Err`] if `key` does not exist. - fn get(&self, key: &T::Key) -> Result<&T::Value, RuntimeError>; - - /// TODO /// # Errors - /// TODO - // - // TODO: (Iterators + ?Sized + lifetimes) == bad time - // fix this later. - fn get_range<'a>( + /// This will return [`RuntimeError::KeyNotFound`] wrapped in [`Err`] if `key` does not exist. + /// + /// It will return other [`RuntimeError`]'s on things like IO errors as well. + fn get<'a>(&'a self, key: &'a T::Key) -> Result + 'a, RuntimeError>; + + /// Get an iterator of values corresponding to a range of keys. + /// + /// This returns guards to the values, not the values themselves. + /// See [`ValueGuard`] for more info. + /// + /// # Errors + /// Each key in the `range` has the potential to error, for example, + /// if a particular key in the `range` does not exist, + /// [`RuntimeError::KeyNotFound`] wrapped in [`Err`] will be returned + /// from the iterator. + fn get_range<'a, Range>( &'a self, - key: &'a T::Key, - amount: usize, - ) -> Result, RuntimeError> + range: &'a Range, + ) -> Result< + impl Iterator, RuntimeError>> + 'a, + RuntimeError, + > where - ::Value: 'a; + Range: RangeBounds + 'a; } //---------------------------------------------------------------------------------------------------- DatabaseRw /// Database (key-value store) read/write abstraction. /// -/// TODO: document relation between `DatabaseRo` <-> `DatabaseRw`. -pub trait DatabaseRw: DatabaseRo { - /// TODO +/// All [`DatabaseRo`] functions are also callable by [`DatabaseRw`]. +pub trait DatabaseRw<'env, 'tx, T: Table>: DatabaseRo<'tx, T> { + /// Insert a key-value pair into the database. + /// + /// This will overwrite any existing key-value pairs. + /// /// # Errors - /// TODO + /// This will not return [`RuntimeError::KeyExists`]. fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError>; - /// TODO - /// # Errors - /// TODO - fn clear(&mut self) -> Result<(), RuntimeError>; - - /// TODO - /// # Errors - /// TODO + /// Delete a key-value pair in the database. /// + /// # Errors /// This will return [`RuntimeError::KeyNotFound`] wrapped in [`Err`] if `key` does not exist. fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError>; } diff --git a/database/src/env.rs b/database/src/env.rs index 989aedb7..cac0a8d4 100644 --- a/database/src/env.rs +++ b/database/src/env.rs @@ -1,6 +1,8 @@ //! Abstracted database environment; `trait Env`. //---------------------------------------------------------------------------------------------------- Import +use std::{fmt::Debug, ops::Deref}; + use crate::{ config::Config, database::{DatabaseRo, DatabaseRw}, @@ -19,6 +21,10 @@ use crate::{ /// Objects that implement [`Env`] _should_ probably /// [`Env::sync`] in their drop implementations, /// although, no invariant relies on this (yet). 
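The shape of the new `DatabaseR{o,w}` read API above — guarded values plus `RangeBounds`-driven range reads — can be modeled in isolation. This is a standalone toy, not the crate's code; the exact generic parameters are elided in the diff.

```rust
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::ops::RangeBounds;

// Toy model: reads hand back `Cow`-style guards instead of plain references,
// and ranged reads accept any `RangeBounds` over the key type.
struct MiniDb(BTreeMap<u64, Vec<u8>>);

impl MiniDb {
    fn get(&self, key: &u64) -> Option<Cow<'_, [u8]>> {
        self.0.get(key).map(|v| Cow::Borrowed(v.as_slice()))
    }

    fn get_range<'a, R: RangeBounds<u64> + 'a>(
        &'a self,
        range: R,
    ) -> impl Iterator<Item = Cow<'a, [u8]>> + 'a {
        self.0.range(range).map(|(_, v)| Cow::Borrowed(v.as_slice()))
    }
}

let mut map = BTreeMap::new();
map.insert(1_u64, vec![0xAB]);
map.insert(2, vec![0xCD]);
let db = MiniDb(map);

assert_eq!(db.get(&1).unwrap().into_owned(), vec![0xAB_u8]);
assert_eq!(db.get_range(1..=2).count(), 2);
```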
+/// +/// # Lifetimes +/// TODO: Explain the very sequential lifetime pipeline: +/// - `ConcreteEnv` -> `'env` -> `'tx` -> `impl DatabaseR{o,w}` pub trait Env: Sized { //------------------------------------------------ Constants /// Does the database backend need to be manually @@ -39,51 +45,49 @@ pub trait Env: Sized { const SYNCS_PER_TX: bool; //------------------------------------------------ Types - /// TODO - type TxRo<'env>: TxRo<'env>; + /// The struct representing the actual backend's database environment. + /// + /// This is used as the `self` in [`EnvInner`] functions, so whatever + /// this type is, is what will be accessible from those functions. + /// + /// # Explanation (not needed for practical use) + /// For `heed`, this is just `heed::Env`, for `redb` this is + /// `(redb::Database, redb::Durability)` as each transaction + /// needs the sync mode set during creation. + type EnvInner<'env>: EnvInner<'env, Self::TxRo<'env>, Self::TxRw<'env>> + where + Self: 'env; - /// TODO - type TxRw<'env>: TxRw<'env>; + /// The read-only transaction type of the backend. + type TxRo<'env>: TxRo<'env> + 'env + where + Self: 'env; + + /// The read/write transaction type of the backend. + type TxRw<'env>: TxRw<'env> + 'env + where + Self: 'env; //------------------------------------------------ Required - /// TODO + /// Open the database environment, using the passed [`Config`]. + /// + /// # Invariants + /// This function **must** create all tables listed in [`crate::tables`]. + /// + /// The rest of the functions depend on the fact + /// they already exist, or else they will panic. + /// /// # Errors - /// TODO + /// This will error if the database could not be opened. + /// + /// This is the only [`Env`] function that will return + /// an [`InitError`] instead of a [`RuntimeError`]. fn open(config: Config) -> Result; - /// TODO - /// # Errors - /// TODO - fn create_tables(&self, tx_rw: &mut Self::TxRw<'_>) -> Result<(), RuntimeError>; - /// Return the [`Config`] that this database was [`Env::open`]ed with. fn config(&self) -> &Config; - /// Return the amount of actual of bytes the database is taking up on disk. - /// - /// This is the current _disk_ value in bytes, not the memory map. - /// - /// # Errors - /// This will error if either: - /// - /// - [`std::fs::File::open`] - /// - [`std::fs::File::metadata`] - /// - /// failed on the database file on disk. - fn disk_size_bytes(&self) -> std::io::Result { - // We have the direct PATH to the file, - // no need to use backend-specific functions. - // - // SAFETY: as we are only accessing the metadata of - // the file and not reading the bytes, it should be - // fine even with a memory mapped file being actively - // written to. - Ok(std::fs::File::open(&self.config().db_file)? - .metadata()? - .len()) - } - - /// TODO + /// Fully sync the database caches to disk. /// /// # Invariant /// This must **fully** and **synchronously** flush the database data to disk. @@ -122,47 +126,98 @@ pub trait Env: Sized { unreachable!() } - /// TODO - /// # Errors - /// TODO - fn tx_ro(&self) -> Result, RuntimeError>; - - /// TODO - /// # Errors - /// TODO - fn tx_rw(&self) -> Result, RuntimeError>; - - /// TODO + /// Return the [`Env::EnvInner`]. /// - /// # TODO: Invariant - /// This should never panic the database because the table doesn't exist. + /// # Locking behavior + /// When using the `heed` backend, [`Env::EnvInner`] is a + /// `RwLockReadGuard`, i.e., calling this function takes a + /// read lock on the `heed::Env`. 
/// - /// Opening/using the database [`Env`] should have an invariant - /// that it creates all the tables we need, such that this - /// never returns `None`. - /// - /// # Errors - /// TODO - fn open_db_ro( - &self, - tx_ro: &Self::TxRo<'_>, - ) -> Result, RuntimeError>; - - /// TODO - /// - /// # TODO: Invariant - /// This should never panic the database because the table doesn't exist. - /// - /// Opening/using the database [`Env`] should have an invariant - /// that it creates all the tables we need, such that this - /// never returns `None`. - /// - /// # Errors - /// TODO - fn open_db_rw( - &self, - tx_rw: &mut Self::TxRw<'_>, - ) -> Result, RuntimeError>; + /// Be aware of this, as other functions (currently only + /// [`Env::resize_map`]) will take a _write_ lock. + fn env_inner(&self) -> Self::EnvInner<'_>; //------------------------------------------------ Provided + /// Return the amount of actual of bytes the database is taking up on disk. + /// + /// This is the current _disk_ value in bytes, not the memory map. + /// + /// # Errors + /// This will error if either: + /// + /// - [`std::fs::File::open`] + /// - [`std::fs::File::metadata`] + /// + /// failed on the database file on disk. + fn disk_size_bytes(&self) -> std::io::Result { + // We have the direct PATH to the file, + // no need to use backend-specific functions. + // + // SAFETY: as we are only accessing the metadata of + // the file and not reading the bytes, it should be + // fine even with a memory mapped file being actively + // written to. + Ok(std::fs::File::open(&self.config().db_file)? + .metadata()? + .len()) + } +} + +//---------------------------------------------------------------------------------------------------- DatabaseRo +/// TODO +pub trait EnvInner<'env, Ro, Rw> +where + Self: 'env, + Ro: TxRo<'env>, + Rw: TxRw<'env>, +{ + /// Create a read-only transaction. + /// + /// # Errors + /// This will only return [`RuntimeError::Io`] if it errors. + fn tx_ro(&'env self) -> Result; + + /// Create a read/write transaction. + /// + /// # Errors + /// This will only return [`RuntimeError::Io`] if it errors. + fn tx_rw(&'env self) -> Result; + + /// Open a database in read-only mode. + /// + /// This will open the database [`Table`] + /// passed as a generic to this function. + /// + /// ```rust,ignore + /// let db = env.open_db_ro::(&tx_ro); + /// // ^ ^ + /// // database table table metadata + /// // (name, key/value type) + /// ``` + /// + /// # Errors + /// As [`Table`] is `Sealed`, and all tables are created + /// upon [`Env::open`], this function will never error because + /// a table doesn't exist. + fn open_db_ro<'tx, T: Table>( + &self, + tx_ro: &'tx Ro, + ) -> Result, RuntimeError>; + + /// Open a database in read/write mode. + /// + /// All [`DatabaseRo`] functions are also callable + /// with the returned [`DatabaseRw`] structure. + /// + /// This will open the database [`Table`] + /// passed as a generic to this function. + /// + /// # Errors + /// As [`Table`] is `Sealed`, and all tables are created + /// upon [`Env::open`], this function will never error because + /// a table doesn't exist. + fn open_db_rw<'tx, T: Table>( + &self, + tx_rw: &'tx mut Rw, + ) -> Result, RuntimeError>; } diff --git a/database/src/key.rs b/database/src/key.rs index 97aa26b9..1e87d711 100644 --- a/database/src/key.rs +++ b/database/src/key.rs @@ -1,11 +1,14 @@ //! Database key abstraction; `trait Key`. 
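Putting `Env`, `EnvInner`, and the transaction traits above together, the access pipeline looks roughly like the following. This is a sketch in the spirit of the `open_db_ro` doc example: generic parameters are elided in this diff, and `SomeTable`/`key` are placeholders, so it is marked `ignore`.

```rust,ignore
// ConcreteEnv -> 'env -> 'tx -> impl DatabaseR{o,w}
let env = ConcreteEnv::open(Config::default())?;

let env_inner = env.env_inner(); // heed: takes a read lock on the `heed::Env`
let tx_ro = env_inner.tx_ro()?;  // read-only transaction tied to `'env`

// `SomeTable` is a placeholder for one of the sealed tables in `crate::tables`.
let db = env_inner.open_db_ro::<SomeTable>(&tx_ro)?;
let value = db.get(&key)?;       // guarded value access, see `ValueGuard`

drop(value);
tx_ro.commit()?;
```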
//---------------------------------------------------------------------------------------------------- Import -use std::cmp::Ordering; +use std::{cmp::Ordering, fmt::Debug}; use bytemuck::Pod; -use crate::storable::{self, Storable}; +use crate::{ + storable::{self, Storable}, + ToOwnedDebug, +}; //---------------------------------------------------------------------------------------------------- Table /// Database [`Table`](crate::table::Table) key metadata. @@ -106,11 +109,10 @@ impl_key! { i64, } -impl Key for [T; N] { +impl Key for [T; N] { const DUPLICATE: bool = false; const CUSTOM_COMPARE: bool = false; - - type Primary = [T; N]; + type Primary = Self; } //---------------------------------------------------------------------------------------------------- Tests diff --git a/database/src/lib.rs b/database/src/lib.rs index de7a7184..cec077c3 100644 --- a/database/src/lib.rs +++ b/database/src/lib.rs @@ -187,89 +187,6 @@ // TODO: should be removed after all `todo!()`'s are gone. clippy::diverging_sub_expression, - // FIXME: - // If #[deny(clippy::restriction)] is used, it - // enables a whole bunch of very subjective lints. - // The below disables most of the ones that are - // a bit too unwieldy. - // - // Figure out if if `clippy::restriction` should be - // used (it enables a bunch of good lints but has - // many false positives). - - // clippy::single_char_lifetime_names, - // clippy::implicit_return, - // clippy::std_instead_of_alloc, - // clippy::std_instead_of_core, - // clippy::unwrap_used, - // clippy::min_ident_chars, - // clippy::absolute_paths, - // clippy::missing_inline_in_public_items, - // clippy::shadow_reuse, - // clippy::shadow_unrelated, - // clippy::missing_trait_methods, - // clippy::pub_use, - // clippy::pub_with_shorthand, - // clippy::blanket_clippy_restriction_lints, - // clippy::exhaustive_structs, - // clippy::exhaustive_enums, - // clippy::unsafe_derive_deserialize, - // clippy::multiple_inherent_impl, - // clippy::unreadable_literal, - // clippy::indexing_slicing, - // clippy::float_arithmetic, - // clippy::cast_possible_truncation, - // clippy::as_conversions, - // clippy::cast_precision_loss, - // clippy::cast_sign_loss, - // clippy::missing_asserts_for_indexing, - // clippy::default_numeric_fallback, - // clippy::module_inception, - // clippy::mod_module_files, - // clippy::multiple_unsafe_ops_per_block, - // clippy::too_many_lines, - // clippy::missing_assert_message, - // clippy::len_zero, - // clippy::separated_literal_suffix, - // clippy::single_call_fn, - // clippy::unreachable, - // clippy::many_single_char_names, - // clippy::redundant_pub_crate, - // clippy::decimal_literal_representation, - // clippy::option_if_let_else, - // clippy::lossy_float_literal, - // clippy::modulo_arithmetic, - // clippy::print_stdout, - // clippy::module_name_repetitions, - // clippy::no_effect, - // clippy::semicolon_outside_block, - // clippy::panic, - // clippy::question_mark_used, - // clippy::expect_used, - // clippy::integer_division, - // clippy::type_complexity, - // clippy::pattern_type_mismatch, - // clippy::arithmetic_side_effects, - // clippy::default_trait_access, - // clippy::similar_names, - // clippy::needless_pass_by_value, - // clippy::inline_always, - // clippy::if_then_some_else_none, - // clippy::arithmetic_side_effects, - // clippy::float_cmp, - // clippy::items_after_statements, - // clippy::use_debug, - // clippy::mem_forget, - // clippy::else_if_without_else, - // clippy::str_to_string, - // clippy::branches_sharing_code, - // 
clippy::impl_trait_in_params, - // clippy::struct_excessive_bools, - // clippy::exit, - // // This lint is actually good but - // // it sometimes hits false positive. - // clippy::self_named_module_files - clippy::module_name_repetitions, clippy::module_inception, clippy::redundant_pub_crate, @@ -282,6 +199,10 @@ // // This allows us to assume 64-bit // invariants in code, e.g. `usize as u64`. +// +// # Safety +// As of 0d67bfb1bcc431e90c82d577bf36dd1182c807e2 (2024-04-12) +// there are invariants relying on 64-bit pointer sizes. #[cfg(not(target_pointer_width = "64"))] compile_error!("Cuprate is only compatible with 64-bit CPUs"); @@ -304,7 +225,7 @@ mod database; pub use database::{DatabaseRo, DatabaseRw}; mod env; -pub use env::Env; +pub use env::{Env, EnvInner}; mod error; pub use error::{InitError, RuntimeError}; @@ -333,6 +254,12 @@ pub mod types; mod transaction; pub use transaction::{TxRo, TxRw}; +mod to_owned_debug; +pub use to_owned_debug::ToOwnedDebug; + +mod value_guard; +pub use value_guard::ValueGuard; + //---------------------------------------------------------------------------------------------------- Feature-gated #[cfg(feature = "service")] pub mod service; diff --git a/database/src/resize.rs b/database/src/resize.rs index 9eae1872..62ecf5e7 100644 --- a/database/src/resize.rs +++ b/database/src/resize.rs @@ -63,8 +63,8 @@ impl ResizeAlgorithm { pub fn resize(&self, current_size_bytes: usize) -> NonZeroUsize { match self { Self::Monero => monero(current_size_bytes), - Self::FixedBytes(u) => todo!(), - Self::Percent(f) => todo!(), + Self::FixedBytes(add_bytes) => fixed_bytes(current_size_bytes, add_bytes.get()), + Self::Percent(f) => percent(current_size_bytes, *f), } } } diff --git a/database/src/storable.rs b/database/src/storable.rs index da56def9..6f04af1e 100644 --- a/database/src/storable.rs +++ b/database/src/storable.rs @@ -3,12 +3,15 @@ //---------------------------------------------------------------------------------------------------- Import use std::{ borrow::Cow, + char::ToLowercase, fmt::Debug, io::{Read, Write}, sync::Arc, }; -use bytemuck::{AnyBitPattern, NoUninit}; +use bytemuck::Pod; + +use crate::ToOwnedDebug; //---------------------------------------------------------------------------------------------------- Storable /// A type that can be stored in the database. @@ -20,8 +23,12 @@ use bytemuck::{AnyBitPattern, NoUninit}; /// casted/represented as raw bytes. /// /// ## `bytemuck` -/// Any type that implements `bytemuck`'s [`NoUninit`] + [`AnyBitPattern`] -/// (and [Debug]) will automatically implement [`Storable`]. +/// Any type that implements: +/// - [`bytemuck::Pod`] +/// - [`Debug`] +/// - [`ToOwned`] +/// +/// will automatically implement [`Storable`]. /// /// This includes: /// - Most primitive types @@ -30,6 +37,7 @@ use bytemuck::{AnyBitPattern, NoUninit}; /// /// ```rust /// # use cuprate_database::*; +/// # use std::borrow::*; /// let number: u64 = 0; /// /// // Into bytes. @@ -37,8 +45,8 @@ use bytemuck::{AnyBitPattern, NoUninit}; /// assert_eq!(into, &[0; 8]); /// /// // From bytes. -/// let from: &u64 = Storable::from_bytes(&into); -/// assert_eq!(from, &number); +/// let from: u64 = *Storable::from_bytes(&into); +/// assert_eq!(from, number); /// ``` /// /// ## Invariants @@ -54,7 +62,36 @@ use bytemuck::{AnyBitPattern, NoUninit}; /// /// Most likely, the bytes are little-endian, however /// that cannot be relied upon when using this trait. 
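The `resize.rs` hunk above wires `FixedBytes`/`Percent` to helper functions that are not shown in this diff. Whatever the exact growth policy, memory-map sizes are typically rounded up to a multiple of the OS page size; a standalone sketch of that rounding, with a hypothetical 4 KiB page size standing in for a runtime `page_size` query:

```rust
// Illustrative only: the crate's actual `fixed_bytes`/`percent` helpers
// are not shown here. Round a requested size up to a page-size multiple.
fn round_up_to_page(bytes: usize, page_size: usize) -> usize {
    let rem = bytes % page_size;
    if rem == 0 { bytes } else { bytes + (page_size - rem) }
}

let current = 1_usize << 30; // 1 GiB
let grown = round_up_to_page(current + 5_000_000, 4096);
assert_eq!(grown % 4096, 0);
assert!(grown >= current + 5_000_000);
```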
-pub trait Storable: Debug { +pub trait Storable: ToOwnedDebug { + /// What is the alignment of `Self`? + /// + /// For `[T]` types, this is set to the alignment of `T`. + /// + /// This is used to prevent copying when unneeded, e.g. + /// `[u8] -> [u8]` does not need to account for unaligned bytes, + /// since no cast needs to occur. + /// + /// # Examples + /// ```rust + /// # use cuprate_database::Storable; + /// assert_eq!(<()>::ALIGN, 1); + /// assert_eq!(u8::ALIGN, 1); + /// assert_eq!(u16::ALIGN, 2); + /// assert_eq!(u32::ALIGN, 4); + /// assert_eq!(u64::ALIGN, 8); + /// assert_eq!(i8::ALIGN, 1); + /// assert_eq!(i16::ALIGN, 2); + /// assert_eq!(i32::ALIGN, 4); + /// assert_eq!(i64::ALIGN, 8); + /// assert_eq!(<[u8]>::ALIGN, 1); + /// assert_eq!(<[u64]>::ALIGN, 8); + /// assert_eq!(<[u8; 0]>::ALIGN, 1); + /// assert_eq!(<[u8; 1]>::ALIGN, 1); + /// assert_eq!(<[u8; 2]>::ALIGN, 1); + /// assert_eq!(<[u64; 2]>::ALIGN, 8); + /// ``` + const ALIGN: usize; + /// Is this type fixed width in byte length? /// /// I.e., when converting `Self` to bytes, is it @@ -97,12 +134,35 @@ pub trait Storable: Debug { /// Return `self` in byte form. fn as_bytes(&self) -> &[u8]; - /// Create [`Self`] from bytes. + /// Create a borrowed [`Self`] from bytes. + /// + /// # Invariant + /// `bytes` must be perfectly aligned for `Self` + /// or else this function may cause UB. + /// + /// This function _may_ panic if `bytes` isn't aligned. + /// + /// # Blanket implementation + /// The blanket implementation that covers all types used + /// by `cuprate_database` will simply cast `bytes` into `Self`, + /// with no copying. fn from_bytes(bytes: &[u8]) -> &Self; + + /// Create a [`Self`] from potentially unaligned bytes. + /// + /// # Blanket implementation + /// The blanket implementation that covers all types used + /// by `cuprate_database` will **always** allocate a new buffer + /// or create a new `Self`. + fn from_bytes_unaligned(bytes: &[u8]) -> Cow<'_, Self>; } //---------------------------------------------------------------------------------------------------- Impl -impl Storable for T { +impl Storable for T +where + Self: Pod + ToOwnedDebug, +{ + const ALIGN: usize = std::mem::align_of::(); const BYTE_LENGTH: Option = Some(std::mem::size_of::()); #[inline] @@ -111,12 +171,22 @@ impl Storable for T { } #[inline] - fn from_bytes(bytes: &[u8]) -> &Self { + fn from_bytes(bytes: &[u8]) -> &T { bytemuck::from_bytes(bytes) } + + #[inline] + fn from_bytes_unaligned(bytes: &[u8]) -> Cow<'static, Self> { + Cow::Owned(bytemuck::pod_read_unaligned(bytes)) + } } -impl Storable for [T] { +impl Storable for [T] +where + T: Pod + ToOwnedDebug, + Self: ToOwnedDebug>, +{ + const ALIGN: usize = std::mem::align_of::(); const BYTE_LENGTH: Option = None; #[inline] @@ -125,8 +195,13 @@ impl Storable for [T] { } #[inline] - fn from_bytes(bytes: &[u8]) -> &Self { - bytemuck::must_cast_slice(bytes) + fn from_bytes(bytes: &[u8]) -> &[T] { + bytemuck::cast_slice(bytes) + } + + #[inline] + fn from_bytes_unaligned(bytes: &[u8]) -> Cow<'static, Self> { + Cow::Owned(bytemuck::pod_collect_to_vec(bytes)) } } @@ -137,14 +212,16 @@ mod test { /// Serialize, deserialize, and compare that /// the intermediate/end results are correct. - fn test_storable( + fn test_storable( // The primitive number function that // converts the number into little endian bytes, // e.g `u8::to_le_bytes`. to_le_bytes: fn(T) -> [u8; LEN], // A `Vec` of the numbers to test. 
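To complement the `ALIGN` doctest above, a round trip through both deserialization paths of `Storable`, based on the signatures shown in this hunk: the aligned path borrows, while the unaligned path copies into a `Cow::Owned` under the blanket impls.

```rust
use std::borrow::Cow;
use cuprate_database::Storable;

let number: u64 = 0x1122_3344_5566_7788;
let bytes = Storable::as_bytes(&number);

// Zero-copy: requires `bytes` to be correctly aligned for `u64`.
let borrowed: &u64 = Storable::from_bytes(bytes);
assert_eq!(*borrowed, number);

// Alignment-agnostic: always copies into an owned value.
let owned: Cow<'_, u64> = Storable::from_bytes_unaligned(bytes);
assert_eq!(*owned, number);
```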
t: Vec, - ) { + ) where + T: Storable + Copy + PartialEq, + { for t in t { let expected_bytes = to_le_bytes(t); diff --git a/database/src/table.rs b/database/src/table.rs index 1bb7683d..b94aea3c 100644 --- a/database/src/table.rs +++ b/database/src/table.rs @@ -1,7 +1,9 @@ //! Database table abstraction; `trait Table`. //---------------------------------------------------------------------------------------------------- Import -use crate::{key::Key, storable::Storable}; +use std::fmt::Debug; + +use crate::{key::Key, storable::Storable, to_owned_debug::ToOwnedDebug}; //---------------------------------------------------------------------------------------------------- Table /// Database table metadata. @@ -12,29 +14,15 @@ use crate::{key::Key, storable::Storable}; /// This trait is [`Sealed`](https://rust-lang.github.io/api-guidelines/future-proofing.html#sealed-traits-protect-against-downstream-implementations-c-sealed). /// /// It is, and can only be implemented on the types inside [`tables`][crate::tables]. -pub trait Table: crate::tables::private::Sealed { +pub trait Table: crate::tables::private::Sealed + 'static { /// Name of the database table. const NAME: &'static str; - // TODO: - // - // `redb` requires `K/V` is `'static`: - // - - // - - // - // ...but kinda not really? - // "Note that the lifetime of the K and V type parameters does not impact - // the lifetimes of the data that is stored or retrieved from the table" - // - // - // This might be incompatible with `heed`. We'll see - // after function bodies are actually implemented... - /// Primary key type. type Key: Key + 'static; /// Value type. - type Value: Storable + ?Sized + 'static; + type Value: Storable + 'static; } //---------------------------------------------------------------------------------------------------- Tests diff --git a/database/src/to_owned_debug.rs b/database/src/to_owned_debug.rs new file mode 100644 index 00000000..e8c67cf2 --- /dev/null +++ b/database/src/to_owned_debug.rs @@ -0,0 +1,51 @@ +//! Borrowed/owned data abstraction; `trait ToOwnedDebug`. + +//---------------------------------------------------------------------------------------------------- Import +use std::fmt::Debug; + +use crate::{key::Key, storable::Storable}; + +//---------------------------------------------------------------------------------------------------- Table +/// `T: Debug` and `T::Owned: Debug`. +/// +/// This trait simply combines [`Debug`] and [`ToOwned`] +/// such that the `Owned` version must also be [`Debug`]. +/// +/// An example is `[u8]` which is [`Debug`], and +/// its owned version `Vec` is also [`Debug`]. +/// +/// # Explanation (not needed for practical use) +/// This trait solely exists due to the `redb` backend +/// requiring [`Debug`] bounds on keys and values. +/// +/// As we have `?Sized` types like `[u8]`, and due to `redb` requiring +/// allocation upon deserialization, we must make our values `ToOwned`. +/// +/// However, this requires that the `Owned` version is also `Debug`. +/// Combined with: +/// - [`Table::Key`](crate::Table::Key) +/// - [`Table::Value`](crate::Table::Value) +/// - [`Key::Primary`] +/// +/// this quickly permutates into many many many `where` bounds on +/// each function that touchs any data that must be deserialized. +/// +/// This trait and the blanket impl it provides get applied all these types +/// automatically, which means we don't have to write these bounds everywhere. +pub trait ToOwnedDebug: Debug + ToOwned { + /// The owned version of [`Self`]. 
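Regarding the tightened `Table` bounds above (`'static`, sized `Storable` values): a table is purely compile-time metadata tying a name to key/value types. The following is an illustration only, since `Table` is sealed and real tables live in `crate::tables`; the name and key/value types here are hypothetical.

```rust,ignore
// Hypothetical table definition (cannot actually be implemented
// outside `crate::tables` because `Table` is sealed):
pub struct ExampleHeights;

impl Table for ExampleHeights {
    /// Name of the database table.
    const NAME: &'static str = "example_heights";
    /// Hypothetical key: a 32-byte hash.
    type Key = [u8; 32];
    /// Hypothetical value: a block height.
    type Value = u64;
}
```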
+ /// + /// Should be equal to `::Owned`. + type OwnedDebug: Debug; +} + +// The blanket impl that covers all our types. +impl + Debug + ?Sized> ToOwnedDebug for T { + type OwnedDebug = O; +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} diff --git a/database/src/transaction.rs b/database/src/transaction.rs index 74678836..6dffae27 100644 --- a/database/src/transaction.rs +++ b/database/src/transaction.rs @@ -6,24 +6,43 @@ use crate::{config::SyncMode, env::Env, error::RuntimeError}; //---------------------------------------------------------------------------------------------------- TxRo /// Read-only database transaction. /// -/// TODO +/// Returned from [`EnvInner::tx_ro`](crate::EnvInner::tx_ro). +/// +/// # TODO +/// I don't think we need this, we can just drop the `tx_ro`? +/// pub trait TxRo<'env> { - /// TODO + /// Commit the read-only transaction. + /// /// # Errors - /// TODO + /// This operation is infallible (will always return `Ok(())`) with the `redb` backend. fn commit(self) -> Result<(), RuntimeError>; } //---------------------------------------------------------------------------------------------------- TxRw /// Read/write database transaction. /// -/// TODO +/// Returned from [`EnvInner::tx_rw`](crate::EnvInner::tx_rw). pub trait TxRw<'env> { - /// TODO + /// Commit the read/write transaction. + /// + /// Note that this doesn't necessarily sync the database caches to disk. + /// /// # Errors - /// TODO + /// This operation is infallible (will always return `Ok(())`) with the `redb` backend. + /// + /// Else, this will only return: + /// - [`RuntimeError::ResizeNeeded`] (if `Env::MANUAL_RESIZE == true`) + /// - [`RuntimeError::Io`] fn commit(self) -> Result<(), RuntimeError>; - /// TODO - fn abort(self); + /// Abort the transaction, erasing any writes that have occurred. + /// + /// # Errors + /// This operation is infallible (will always return `Ok(())`) with the `heed` backend. + /// + /// Else, this will only return: + /// - [`RuntimeError::ResizeNeeded`] (if `Env::MANUAL_RESIZE == true`) + /// - [`RuntimeError::Io`] + fn abort(self) -> Result<(), RuntimeError>; } diff --git a/database/src/types.rs b/database/src/types.rs index 543eb1c8..865b6e95 100644 --- a/database/src/types.rs +++ b/database/src/types.rs @@ -53,15 +53,25 @@ use serde::{Deserialize, Serialize}; /// TEST /// /// ```rust -/// # use cuprate_database::types::*; +/// # use cuprate_database::{*, types::*}; +/// // Assert bytemuck is correct. /// let a = TestType { u: 1, b: 255, _pad: [0; 7] }; // original struct /// let b = bytemuck::must_cast::(a); // cast into bytes /// let c = bytemuck::checked::cast::<[u8; 16], TestType>(b); // cast back into struct -/// /// assert_eq!(a, c); /// assert_eq!(c.u, 1); /// assert_eq!(c.b, 255); /// assert_eq!(c._pad, [0; 7]); +/// +/// // Assert Storable is correct. +/// let b2 = Storable::as_bytes(&a); +/// let c2: &TestType = Storable::from_bytes(b2); +/// assert_eq!(a, *c2); +/// assert_eq!(b, b2); +/// assert_eq!(c, *c2); +/// assert_eq!(c2.u, 1); +/// assert_eq!(c2.b, 255); +/// assert_eq!(c2._pad, [0; 7]); /// ``` /// /// # Size & Alignment @@ -94,14 +104,23 @@ pub struct TestType { /// TEST2 /// /// ```rust -/// # use cuprate_database::types::*; +/// # use cuprate_database::{*, types::*}; +/// // Assert bytemuck is correct. 
/// let a = TestType2 { u: 1, b: [1; 32] }; // original struct /// let b = bytemuck::must_cast::(a); // cast into bytes /// let c = bytemuck::must_cast::<[u8; 40], TestType2>(b); // cast back into struct -/// /// assert_eq!(a, c); /// assert_eq!(c.u, 1); /// assert_eq!(c.b, [1; 32]); +/// +/// // Assert Storable is correct. +/// let b2 = Storable::as_bytes(&a); +/// let c2: &TestType2 = Storable::from_bytes(b2); +/// assert_eq!(a, *c2); +/// assert_eq!(b, b2); +/// assert_eq!(c, *c2); +/// assert_eq!(c.u, 1); +/// assert_eq!(c.b, [1; 32]); /// ``` /// /// # Size & Alignment diff --git a/database/src/value_guard.rs b/database/src/value_guard.rs new file mode 100644 index 00000000..48e5ec4c --- /dev/null +++ b/database/src/value_guard.rs @@ -0,0 +1,47 @@ +//! Database table value "guard" abstraction; `trait ValueGuard`. + +//---------------------------------------------------------------------------------------------------- Import +use std::borrow::{Borrow, Cow}; + +use crate::{table::Table, Storable, ToOwnedDebug}; + +//---------------------------------------------------------------------------------------------------- Table +/// A guard that allows you to access a value. +/// +/// This trait acts as an object that must be kept alive, +/// and will give you access to a [`Table`]'s value. +/// +/// # Explanation (not needed for practical use) +/// This trait solely exists due to the `redb` backend +/// not _directly_ returning the value, but a +/// [guard object](https://docs.rs/redb/1.5.0/redb/struct.AccessGuard.html) +/// that has a lifetime attached to the key. +/// It does not implement `Deref` or `Borrow` and such. +/// +/// Also, due to `redb` requiring `Cow`, this object builds on that. +/// +/// - `heed` will always be `Cow::Borrowed` +/// - `redb` will always be `Cow::Borrowed` for `[u8]` +/// or any type where `Storable::ALIGN == 1` +/// - `redb` will always be `Cow::Owned` for everything else +pub trait ValueGuard { + /// Retrieve the data from the guard. + fn unguard(&self) -> Cow<'_, T>; +} + +impl ValueGuard for Cow<'_, T> { + #[inline] + fn unguard(&self) -> Cow<'_, T> { + Cow::Borrowed(self.borrow()) + } +} + +// HACK: +// This is implemented for `redb::AccessGuard<'_>` in +// `src/backend/redb/storable.rs` due to struct privacy. + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +}
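Since `Cow<'_, T>` implements `ValueGuard` (the trait's generic parameter and bounds are elided in the diff above; `u64` is used here as it satisfies the storable/debug bounds either way), guarded access can be demonstrated without any backend:

```rust
use std::borrow::Cow;
use cuprate_database::ValueGuard;

// Borrowed and owned data are handed out behind the same `unguard()` call.
let borrowed: Cow<'_, u64> = Cow::Borrowed(&123);
let owned: Cow<'_, u64> = Cow::Owned(456);

assert_eq!(*borrowed.unguard(), 123);
assert_eq!(*owned.unguard(), 456);
```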