Use parity-db in current Dockerfiles (#455)

* Use redb, and use it in Dockerfiles

The motivation for redb was to remove the repeated rocksdb compilations, and
their long compile times, from CI.

* Correct feature flagging of coordinator and message-queue in Dockerfiles

* Correct message-queue DB type alias

* Use consistent table typing in redb

* Correct rebase artifacts

* Correct removal of binaries feature from message-queue

* Correct processor feature flagging

* Replace redb with parity-db

It still has much better compile times than rocksdb, yet doesn't block when creating multiple
transactions (unlike redb). It's also actively maintained and doesn't grow our dependency tree.
Its Merkle Patricia Trie (MPT) oriented aspects are irrelevant here.

* Correct stray Redb

* Fix clippy warning

* Correct txn get
Luke Parker, 2023-11-30 04:22:37 -05:00 (committed by GitHub)
parent d1122a6535
commit b823413c9b
18 changed files with 232 additions and 149 deletions

Cargo.lock (generated)

@@ -4718,7 +4718,7 @@ version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e"
dependencies = [
"proc-macro-crate 1.3.1",
"proc-macro-crate 2.0.0",
"proc-macro2",
"quote",
"syn 2.0.39",
@@ -7372,6 +7372,7 @@ dependencies = [
name = "serai-db"
version = "0.1.0"
dependencies = [
"parity-db",
"rocksdb",
]


@@ -14,7 +14,9 @@ all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
parity-db = { version = "0.4", default-features = false, optional = true }
rocksdb = { version = "0.21", default-features = false, features = ["lz4"], optional = true }
[features]
parity-db = ["dep:parity-db"]
rocksdb = ["dep:rocksdb"]
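
With both backends now optional features, callers stay backend-agnostic by writing against the
serai-db traits; which engine actually backs the database is decided purely by the feature
selection above. A minimal sketch (the Tracker type and record method are hypothetical; only the
Db and DbTxn traits come from serai-db):

use serai_db::{DbTxn, Db};

// Hypothetical component, generic over whichever backend feature is enabled.
struct Tracker<D: Db> {
  db: D,
}

impl<D: Db> Tracker<D> {
  // Writes a value within a single transaction; this is identical whether the
  // `parity-db` or `rocksdb` feature provides the backend.
  fn record(&mut self, key: &[u8], value: &[u8]) {
    let mut txn = self.db.txn();
    txn.put(key, value);
    txn.commit();
  }
}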


@@ -9,6 +9,11 @@ mod rocks;
#[cfg(feature = "rocksdb")]
pub use rocks::{RocksDB, new_rocksdb};
#[cfg(feature = "parity-db")]
mod parity_db;
#[cfg(feature = "parity-db")]
pub use parity_db::{ParityDb, new_parity_db};
/// An object implementing get.
pub trait Get {
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>>;


@@ -0,0 +1,46 @@
use std::sync::Arc;

pub use ::parity_db::{Options, Db as ParityDb};

use crate::*;

pub struct Transaction<'a>(&'a Arc<ParityDb>, Vec<(u8, Vec<u8>, Option<Vec<u8>>)>);

impl Get for Transaction<'_> {
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    let mut res = self.0.get(&key);
    for change in &self.1 {
      if change.1 == key.as_ref() {
        res = change.2.clone();
      }
    }
    res
  }
}

impl DbTxn for Transaction<'_> {
  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
    self.1.push((0, key.as_ref().to_vec(), Some(value.as_ref().to_vec())))
  }
  fn del(&mut self, key: impl AsRef<[u8]>) {
    self.1.push((0, key.as_ref().to_vec(), None))
  }
  fn commit(self) {
    self.0.commit(self.1).unwrap()
  }
}

impl Get for Arc<ParityDb> {
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    ParityDb::get(self, 0, key.as_ref()).unwrap()
  }
}

impl Db for Arc<ParityDb> {
  type Transaction<'a> = Transaction<'a>;
  fn txn(&mut self) -> Self::Transaction<'_> {
    Transaction(self, vec![])
  }
}

pub fn new_parity_db(path: &str) -> Arc<ParityDb> {
  Arc::new(
    ParityDb::open_or_create(&Options::with_columns(std::path::Path::new(path), 1)).unwrap(),
  )
}
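
For reference, a minimal usage sketch of the new parity-db backend (assuming serai-db is built
with the `parity-db` feature; the path below is a stand-in — the binaries read theirs from the
DB_PATH environment variable):

use serai_db::{Get, DbTxn, Db};

fn demo() {
  // Hypothetical on-disk location for this sketch.
  let mut db = serai_db::new_parity_db("/tmp/serai-parity-db-demo");

  let mut txn = db.txn();
  txn.put(b"key", b"value");
  // Reads through the transaction see its own pending writes...
  assert_eq!(txn.get(b"key"), Some(b"value".to_vec()));
  txn.commit();

  // ...and, once committed, reads through the database handle do too.
  assert_eq!(db.get(b"key"), Some(b"value".to_vec()));
}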


@@ -30,7 +30,7 @@ frost-schnorrkel = { path = "../crypto/schnorrkel" }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
serai-db = { path = "../common/db", features = ["rocksdb"] }
serai-db = { path = "../common/db" }
serai-env = { path = "../common/env" }
processor-messages = { package = "serai-processor-messages", path = "../processor/messages" }
@@ -55,3 +55,7 @@ futures = { version = "0.3", default-features = false, features = ["std"] }
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
[features]
parity-db = ["serai-db/parity-db"]
rocksdb = ["serai-db/rocksdb"]


@@ -16,7 +16,6 @@ use schnorr::SchnorrSignature;
use frost::Participant;
use serai_db::{DbTxn, Db};
use serai_env as env;
use scale::Encode;
use serai_client::{
@@ -1199,7 +1198,18 @@ async fn main() {
log::info!("starting coordinator service...");
let db = serai_db::new_rocksdb(&env::var("DB_PATH").expect("path to DB wasn't specified"));
#[allow(unused_variables, unreachable_code)]
let db = {
#[cfg(all(feature = "parity-db", feature = "rocksdb"))]
panic!("built with parity-db and rocksdb");
#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
let db =
serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
#[cfg(feature = "rocksdb")]
let db =
serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
db
};
let key = {
let mut key_hex = serai_env::var("SERAI_KEY").expect("Serai key wasn't provided");
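
This runtime panic! is how the commit rejects building with both backend features at once (the
same pattern recurs in the message-queue and processor below). As an aside, an equivalent check
could be expressed at build time; a hedged sketch, not what the commit does:

// Illustrative only: reject the conflicting feature combination during compilation.
#[cfg(all(feature = "parity-db", feature = "rocksdb"))]
compile_error!("enable at most one of the `parity-db` and `rocksdb` features");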


@@ -37,11 +37,12 @@ env_logger = { version = "0.10", default-features = false, features = ["humantim
# Uses a single threaded runtime since this shouldn't ever be CPU-bound
tokio = { version = "1", default-features = false, features = ["rt", "time", "io-util", "net", "macros"] }
serai-db = { path = "../common/db", features = ["rocksdb"], optional = true }
serai-db = { path = "../common/db", optional = true }
serai-env = { path = "../common/env" }
serai-primitives = { path = "../substrate/primitives", features = ["borsh"] }
[features]
binaries = ["serai-db"]
parity-db = ["serai-db/parity-db"]
rocksdb = ["serai-db/rocksdb"]


@@ -1,154 +1,149 @@
#[cfg(feature = "binaries")]
mod messages;
#[cfg(feature = "binaries")]
mod queue;
#[cfg(feature = "binaries")]
mod binaries {
pub(crate) use std::{
sync::{Arc, RwLock},
collections::HashMap,
};
pub(crate) use std::{
sync::{Arc, RwLock},
collections::HashMap,
};
pub(crate) use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
pub(crate) use schnorr_signatures::SchnorrSignature;
pub(crate) use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
pub(crate) use schnorr_signatures::SchnorrSignature;
pub(crate) use serai_primitives::NetworkId;
pub(crate) use serai_primitives::NetworkId;
pub(crate) use tokio::{
io::{AsyncReadExt, AsyncWriteExt},
net::TcpListener,
};
pub(crate) use tokio::{
io::{AsyncReadExt, AsyncWriteExt},
net::TcpListener,
};
use serai_db::{Get, DbTxn, Db as DbTrait};
use serai_db::{Get, DbTxn, Db as DbTrait};
pub(crate) use crate::messages::*;
pub(crate) use crate::messages::*;
pub(crate) use crate::queue::Queue;
pub(crate) use crate::queue::Queue;
pub(crate) type Db = serai_db::RocksDB;
#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
pub(crate) type Db = Arc<serai_db::ParityDb>;
#[cfg(feature = "rocksdb")]
pub(crate) type Db = serai_db::RocksDB;
#[allow(clippy::type_complexity)]
mod clippy {
use super::*;
use once_cell::sync::Lazy;
pub(crate) static KEYS: Lazy<Arc<RwLock<HashMap<Service, <Ristretto as Ciphersuite>::G>>>> =
Lazy::new(|| Arc::new(RwLock::new(HashMap::new())));
pub(crate) static QUEUES: Lazy<Arc<RwLock<HashMap<(Service, Service), RwLock<Queue<Db>>>>>> =
Lazy::new(|| Arc::new(RwLock::new(HashMap::new())));
}
pub(crate) use self::clippy::*;
#[allow(clippy::type_complexity)]
mod clippy {
use super::*;
use once_cell::sync::Lazy;
pub(crate) static KEYS: Lazy<Arc<RwLock<HashMap<Service, <Ristretto as Ciphersuite>::G>>>> =
Lazy::new(|| Arc::new(RwLock::new(HashMap::new())));
pub(crate) static QUEUES: Lazy<Arc<RwLock<HashMap<(Service, Service), RwLock<Queue<Db>>>>>> =
Lazy::new(|| Arc::new(RwLock::new(HashMap::new())));
}
pub(crate) use self::clippy::*;
// queue RPC method
/*
Queues a message to be delivered from a processor to a coordinator, or vice versa.
// queue RPC method
/*
Queues a message to be delivered from a processor to a coordinator, or vice versa.
Messages are authenticated to be coming from the claimed service. Recipient services SHOULD
independently verify signatures.
Messages are authenticated to be coming from the claimed service. Recipient services SHOULD
independently verify signatures.
The metadata specifies an intent. Only one message, for a specified intent, will be delivered.
This allows services to safely send messages multiple times without them being delivered
multiple times.
The metadata specifies an intent. Only one message, for a specified intent, will be delivered.
This allows services to safely send messages multiple times without them being delivered
multiple times.
The message will be ordered by this service, with the order having no guarantees other than
successful ordering by the time this call returns.
*/
pub(crate) fn queue_message(
db: &mut Db,
meta: Metadata,
msg: Vec<u8>,
sig: SchnorrSignature<Ristretto>,
) {
{
let from = (*KEYS).read().unwrap()[&meta.from];
assert!(
sig.verify(from, message_challenge(meta.from, from, meta.to, &meta.intent, &msg, sig.R))
);
}
// Assert one, and only one of these, is the coordinator
assert!(matches!(meta.from, Service::Coordinator) ^ matches!(meta.to, Service::Coordinator));
// Verify (from, to, intent) hasn't been prior seen
fn key(domain: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
[&[u8::try_from(domain.len()).unwrap()], domain, key.as_ref()].concat()
}
fn intent_key(from: Service, to: Service, intent: &[u8]) -> Vec<u8> {
key(b"intent_seen", borsh::to_vec(&(from, to, intent)).unwrap())
}
let mut txn = db.txn();
let intent_key = intent_key(meta.from, meta.to, &meta.intent);
if Get::get(&txn, &intent_key).is_some() {
log::warn!(
"Prior queued message attempted to be queued again. From: {:?} To: {:?} Intent: {}",
meta.from,
meta.to,
hex::encode(&meta.intent)
);
return;
}
DbTxn::put(&mut txn, intent_key, []);
// Queue it
let id = (*QUEUES).read().unwrap()[&(meta.from, meta.to)].write().unwrap().queue_message(
&mut txn,
QueuedMessage {
from: meta.from,
// Temporary value which queue_message will override
id: u64::MAX,
msg,
sig: sig.serialize(),
},
The message will be ordered by this service, with the order having no guarantees other than
successful ordering by the time this call returns.
*/
pub(crate) fn queue_message(
db: &mut Db,
meta: Metadata,
msg: Vec<u8>,
sig: SchnorrSignature<Ristretto>,
) {
{
let from = (*KEYS).read().unwrap()[&meta.from];
assert!(
sig.verify(from, message_challenge(meta.from, from, meta.to, &meta.intent, &msg, sig.R))
);
log::info!("Queued message. From: {:?} To: {:?} ID: {id}", meta.from, meta.to);
DbTxn::commit(txn);
}
// next RPC method
/*
Gets the next message in queue for the named services.
// Assert one, and only one of these, is the coordinator
assert!(matches!(meta.from, Service::Coordinator) ^ matches!(meta.to, Service::Coordinator));
This is not authenticated due to the fact every nonce would have to be saved to prevent
replays, or a challenge-response protocol implemented. Neither are worth doing when there
should be no sensitive data on this server.
*/
pub(crate) fn get_next_message(from: Service, to: Service) -> Option<QueuedMessage> {
let queue_outer = (*QUEUES).read().unwrap();
let queue = queue_outer[&(from, to)].read().unwrap();
let next = queue.last_acknowledged().map(|i| i + 1).unwrap_or(0);
queue.get_message(next)
// Verify (from, to, intent) hasn't been prior seen
fn key(domain: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
[&[u8::try_from(domain.len()).unwrap()], domain, key.as_ref()].concat()
}
// ack RPC method
/*
Acknowledges a message as received and handled, meaning it'll no longer be returned as the next
message.
*/
pub(crate) fn ack_message(from: Service, to: Service, id: u64, sig: SchnorrSignature<Ristretto>) {
{
let to_key = (*KEYS).read().unwrap()[&to];
assert!(sig.verify(to_key, ack_challenge(to, to_key, from, id, sig.R)));
}
// Is it:
// The acknowledged message should be > last acknowledged OR
// The acknowledged message should be >=
// It's the first if we save messages as acknowledged before acknowledging them
// It's the second if we acknowledge messages before saving them as acknowledged
// TODO: Check only a proper message is being acked
log::info!("Acknowledging From: {:?} To: {:?} ID: {}", from, to, id);
(*QUEUES).read().unwrap()[&(from, to)].write().unwrap().ack_message(id)
fn intent_key(from: Service, to: Service, intent: &[u8]) -> Vec<u8> {
key(b"intent_seen", borsh::to_vec(&(from, to, intent)).unwrap())
}
let mut txn = db.txn();
let intent_key = intent_key(meta.from, meta.to, &meta.intent);
if Get::get(&txn, &intent_key).is_some() {
log::warn!(
"Prior queued message attempted to be queued again. From: {:?} To: {:?} Intent: {}",
meta.from,
meta.to,
hex::encode(&meta.intent)
);
return;
}
DbTxn::put(&mut txn, intent_key, []);
// Queue it
let id = (*QUEUES).read().unwrap()[&(meta.from, meta.to)].write().unwrap().queue_message(
&mut txn,
QueuedMessage {
from: meta.from,
// Temporary value which queue_message will override
id: u64::MAX,
msg,
sig: sig.serialize(),
},
);
log::info!("Queued message. From: {:?} To: {:?} ID: {id}", meta.from, meta.to);
DbTxn::commit(txn);
}
// next RPC method
/*
Gets the next message in queue for the named services.
This is not authenticated due to the fact every nonce would have to be saved to prevent
replays, or a challenge-response protocol implemented. Neither are worth doing when there
should be no sensitive data on this server.
*/
pub(crate) fn get_next_message(from: Service, to: Service) -> Option<QueuedMessage> {
let queue_outer = (*QUEUES).read().unwrap();
let queue = queue_outer[&(from, to)].read().unwrap();
let next = queue.last_acknowledged().map(|i| i + 1).unwrap_or(0);
queue.get_message(next)
}
// ack RPC method
/*
Acknowledges a message as received and handled, meaning it'll no longer be returned as the next
message.
*/
pub(crate) fn ack_message(from: Service, to: Service, id: u64, sig: SchnorrSignature<Ristretto>) {
{
let to_key = (*KEYS).read().unwrap()[&to];
assert!(sig.verify(to_key, ack_challenge(to, to_key, from, id, sig.R)));
}
// Is it:
// The acknowledged message should be > last acknowledged OR
// The acknowledged message should be >=
// It's the first if we save messages as acknowledged before acknowledging them
// It's the second if we acknowledge messages before saving them as acknowledged
// TODO: Check only a proper message is being acked
log::info!("Acknowledging From: {:?} To: {:?} ID: {}", from, to, id);
(*QUEUES).read().unwrap()[&(from, to)].write().unwrap().ack_message(id)
}
#[cfg(feature = "binaries")]
#[tokio::main(flavor = "current_thread")]
async fn main() {
use binaries::*;
// Override the panic handler with one which will panic if any tokio task panics
{
let existing = std::panic::take_hook();
@@ -169,7 +164,18 @@ async fn main() {
log::info!("Starting message-queue service...");
// Open the DB
let db = serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
#[allow(unused_variables, unreachable_code)]
let db = {
#[cfg(all(feature = "parity-db", feature = "rocksdb"))]
panic!("built with parity-db and rocksdb");
#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
let db =
serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
#[cfg(feature = "rocksdb")]
let db =
serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
db
};
let read_key = |str| {
let key = serai_env::var(str)?;
@@ -272,8 +278,3 @@ async fn main() {
});
}
}
#[cfg(not(feature = "binaries"))]
fn main() {
panic!("To run binaries, please build with `--feature binaries`.");
}


@@ -46,7 +46,7 @@ RUN --mount=type=cache,target=/root/.cargo \
--mount=type=cache,target=/usr/local/cargo/git \
--mount=type=cache,target=/serai/target \
mkdir /serai/bin && \
cargo build -p serai-coordinator --all-features && \
cargo build -p serai-coordinator --features parity-db && \
mv /serai/target/debug/serai-coordinator /serai/bin
FROM debian:bookworm-slim as image


@@ -1,2 +1,2 @@
cargo build -p serai-coordinator --all-features && \
cargo build -p serai-coordinator --features parity-db && \
mv /serai/target/debug/serai-coordinator /serai/bin


@@ -46,7 +46,7 @@ RUN --mount=type=cache,target=/root/.cargo \
--mount=type=cache,target=/usr/local/cargo/git \
--mount=type=cache,target=/serai/target \
mkdir /serai/bin && \
cargo build --all-features -p serai-message-queue && \
cargo build --features parity-db -p serai-message-queue && \
mv /serai/target/debug/serai-message-queue /serai/bin
FROM debian:bookworm-slim as image


@@ -1,2 +1,2 @@
cargo build --all-features -p serai-message-queue && \
cargo build --features parity-db -p serai-message-queue && \
mv /serai/target/debug/serai-message-queue /serai/bin


@@ -46,7 +46,7 @@ RUN --mount=type=cache,target=/root/.cargo \
--mount=type=cache,target=/usr/local/cargo/git \
--mount=type=cache,target=/serai/target \
mkdir /serai/bin && \
cargo build --features "binaries bitcoin" -p serai-processor && \
cargo build --features "binaries parity-db bitcoin" -p serai-processor && \
mv /serai/target/debug/serai-processor /serai/bin
FROM debian:bookworm-slim as image


@@ -1,2 +1,2 @@
cargo build --features "binaries bitcoin" -p serai-processor && \
cargo build --features "binaries parity-db bitcoin" -p serai-processor && \
mv /serai/target/debug/serai-processor /serai/bin


@@ -46,7 +46,7 @@ RUN --mount=type=cache,target=/root/.cargo \
--mount=type=cache,target=/usr/local/cargo/git \
--mount=type=cache,target=/serai/target \
mkdir /serai/bin && \
cargo build --features "binaries monero" -p serai-processor && \
cargo build --features "binaries parity-db monero" -p serai-processor && \
mv /serai/target/debug/serai-processor /serai/bin
FROM debian:bookworm-slim as image


@@ -1,2 +1,2 @@
cargo build --features "binaries monero" -p serai-processor && \
cargo build --features "binaries parity-db monero" -p serai-processor && \
mv /serai/target/debug/serai-processor /serai/bin


@@ -52,7 +52,7 @@ log = { version = "0.4", default-features = false, features = ["std"] }
env_logger = { version = "0.10", default-features = false, features = ["humantime"], optional = true }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
serai-db = { path = "../common/db", features = ["rocksdb"], optional = true }
serai-db = { path = "../common/db", optional = true }
serai-env = { path = "../common/env", optional = true }
# TODO: Replace with direct usage of primitives
serai-client = { path = "../substrate/client", default-features = false }
@@ -78,4 +78,6 @@ bitcoin = ["dep:secp256k1", "secp256k1", "bitcoin-serai", "serai-client/bitcoin"
ed25519 = ["dalek-ff-group", "frost/ed25519"]
monero = ["ed25519", "monero-serai", "serai-client/monero"]
binaries = ["env_logger", "serai-db", "serai-env", "messages", "message-queue"]
binaries = ["env_logger", "serai-env", "messages", "message-queue"]
parity-db = ["serai-db/parity-db"]
rocksdb = ["serai-db/rocksdb"]


@@ -679,7 +679,18 @@ async fn main() {
}
env_logger::init();
let db = serai_db::new_rocksdb(&env::var("DB_PATH").expect("path to DB wasn't specified"));
#[allow(unused_variables, unreachable_code)]
let db = {
#[cfg(all(feature = "parity-db", feature = "rocksdb"))]
panic!("built with parity-db and rocksdb");
#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
let db =
serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
#[cfg(feature = "rocksdb")]
let db =
serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
db
};
// Network configuration
let url = {