#[doc(hidden)]
pub fn serai_db_key(
  db_dst: &'static [u8],
  item_dst: &'static [u8],
  key: impl AsRef<[u8]>,
) -> Vec<u8> {
  // Length-prefix both destination tags so distinct (db, item) pairs can never
  // collide by concatenation (e.g. "ab"+"c" vs "a"+"bc" would otherwise produce
  // the same prefix bytes).
  //
  // Panics if either tag exceeds 255 bytes; tags are static identifiers
  // (stringified type names), so this is a compile-time-style invariant.
  let db_len = u8::try_from(db_dst.len()).unwrap();
  let dst_len = u8::try_from(item_dst.len()).unwrap();
  [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
}

/// Creates a series of structs which provide namespacing for keys
///
/// # Description
///
/// Creates a unit struct and a default implementation for the `key`, `get`, and `set`. The macro
/// uses a syntax similar to defining a function. Parameters are concatenated to produce a key,
/// they must be `scale` encodable. The return type is used to auto encode and decode the database
/// value bytes using `bincode`.
///
/// # Arguments
///
/// * `db_name` - A database name
/// * `field_name` - An item name
/// * `args` - Comma separated list of key arguments
/// * `field_type` - The return type
///
/// # Example
///
/// ```ignore
/// create_db!(
///   TributariesDb {
///     AttemptsDb: (key_bytes: &[u8], attempt_id: u32) -> u64,
///     ExpiredDb: (genesis: [u8; 32]) -> Vec<u8>
///   }
/// )
/// ```
#[macro_export]
macro_rules! create_db {
  ($db_name: ident {
    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty),*
  }) => {
    $(
      #[derive(Clone, Debug)]
      pub struct $field_name;
      impl $field_name {
        // SCALE-encode the args as a tuple and namespace them under
        // (db_name, field_name) via serai_db_key.
        pub fn key($($arg: $arg_type),*) -> Vec<u8> {
          $crate::serai_db_key(
            stringify!($db_name).as_bytes(),
            stringify!($field_name).as_bytes(),
            ($($arg),*).encode()
          )
        }
        #[allow(dead_code)]
        pub fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &impl serde::Serialize) {
          let key = $field_name::key($($arg),*);
          txn.put(&key, bincode::serialize(data).unwrap());
        }
        #[allow(dead_code)]
        pub fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> {
          getter.get($field_name::key($($arg),*)).map(|data| {
            // Values are only written by `set`, so deserialization failure is a
            // type mismatch bug, not a runtime condition — hence unwrap.
            bincode::deserialize(data.as_ref()).unwrap()
          })
        }
      }
    )*
  };
}
former + // A former attempt may become the finalized attempt, even if it doesn't in a timely manner + // Overwriting its commitments would be accordingly poor + CommitmentsDb: (key: &KeyGenId) -> HashMap>, + GeneratedKeysDb: (set: &ValidatorSet, substrate_key: &[u8; 32], network_key: &[u8]) -> Vec, + KeysDb: (network_key: &[u8]) -> Vec } +); - fn params_key(set: &ValidatorSet) -> Vec { - Self::key_gen_key(b"params", set.encode()) - } - fn save_params( - txn: &mut D::Transaction<'_>, - set: &ValidatorSet, - params: &ThresholdParams, - shares: u16, - ) { - txn.put(Self::params_key(set), bincode::serialize(&(params, shares)).unwrap()); - } - fn params(getter: &G, set: &ValidatorSet) -> Option<(ThresholdParams, u16)> { - getter.get(Self::params_key(set)).map(|bytes| bincode::deserialize(&bytes).unwrap()) - } - - // Not scoped to the set since that'd have latter attempts overwrite former - // A former attempt may become the finalized attempt, even if it doesn't in a timely manner - // Overwriting its commitments would be accordingly poor - fn commitments_key(id: &KeyGenId) -> Vec { - Self::key_gen_key(b"commitments", id.encode()) - } - fn save_commitments( - txn: &mut D::Transaction<'_>, - id: &KeyGenId, - commitments: &HashMap>, - ) { - txn.put(Self::commitments_key(id), bincode::serialize(commitments).unwrap()); - } - fn commitments(getter: &G, id: &KeyGenId) -> HashMap> { - bincode::deserialize::>>( - &getter.get(Self::commitments_key(id)).unwrap(), - ) - .unwrap() - } - - fn generated_keys_key(set: ValidatorSet, key_pair: (&[u8; 32], &[u8])) -> Vec { - Self::key_gen_key(b"generated_keys", (set, key_pair).encode()) - } - fn save_keys( - txn: &mut D::Transaction<'_>, - id: &KeyGenId, - substrate_keys: &[ThresholdCore], - network_keys: &[ThresholdKeys], - ) { - let mut keys = Zeroizing::new(vec![]); - for (substrate_keys, network_keys) in substrate_keys.iter().zip(network_keys) { - keys.extend(substrate_keys.serialize().as_slice()); - 
keys.extend(network_keys.serialize().as_slice()); - } - txn.put( - Self::generated_keys_key( - id.set, - ( - &substrate_keys[0].group_key().to_bytes(), - network_keys[0].group_key().to_bytes().as_ref(), - ), - ), - &keys, - ); - } - - fn keys_key(key: &::G) -> Vec { - Self::key_gen_key(b"keys", key.to_bytes()) - } +impl GeneratedKeysDb { #[allow(clippy::type_complexity)] - fn read_keys( - getter: &G, + fn read_keys( + getter: &impl Get, key: &[u8], ) -> Option<(Vec, (Vec>, Vec>))> { let keys_vec = getter.get(key)?; @@ -116,14 +57,40 @@ impl KeyGenDb { } Some((keys_vec, (substrate_keys, network_keys))) } - fn confirm_keys( - txn: &mut D::Transaction<'_>, + + fn save_keys( + txn: &mut impl DbTxn, + id: &KeyGenId, + substrate_keys: &[ThresholdCore], + network_keys: &[ThresholdKeys], + ) { + let mut keys = Zeroizing::new(vec![]); + for (substrate_keys, network_keys) in substrate_keys.iter().zip(network_keys) { + keys.extend(substrate_keys.serialize().as_slice()); + keys.extend(network_keys.serialize().as_slice()); + } + txn.put( + Self::key( + &id.set, + &substrate_keys[0].group_key().to_bytes(), + network_keys[0].group_key().to_bytes().as_ref(), + ), + keys, + ); + } +} + +impl KeysDb { + fn confirm_keys( + txn: &mut impl DbTxn, set: ValidatorSet, key_pair: KeyPair, ) -> (Vec>, Vec>) { - let (keys_vec, keys) = - Self::read_keys(txn, &Self::generated_keys_key(set, (&key_pair.0 .0, key_pair.1.as_ref()))) - .unwrap(); + let (keys_vec, keys) = GeneratedKeysDb::read_keys::( + txn, + &GeneratedKeysDb::key(&set, &key_pair.0 .0, key_pair.1.as_ref()), + ) + .unwrap(); assert_eq!(key_pair.0 .0, keys.0[0].group_key().to_bytes()); assert_eq!( { @@ -132,16 +99,18 @@ impl KeyGenDb { }, keys.1[0].group_key().to_bytes().as_ref(), ); - txn.put(Self::keys_key(&keys.1[0].group_key()), keys_vec); + txn.put(KeysDb::key(keys.1[0].group_key().to_bytes().as_ref()), keys_vec); keys } + #[allow(clippy::type_complexity)] - fn keys( - getter: &G, - key: &::G, + fn keys( + getter: &impl Get, + 
network_key: &::G, ) -> Option<(Vec>, Vec>)> { - let res = Self::read_keys(getter, &Self::keys_key(key))?.1; - assert_eq!(&res.1[0].group_key(), key); + let res = + GeneratedKeysDb::read_keys::(getter, &Self::key(network_key.to_bytes().as_ref()))?.1; + assert_eq!(&res.1[0].group_key(), network_key); Some(res) } } @@ -168,7 +137,7 @@ impl KeyGen { pub fn in_set(&self, set: &ValidatorSet) -> bool { // We determine if we're in set using if we have the parameters for a set's key generation - KeyGenDb::::params(&self.db, set).is_some() + ParamsDb::get(&self.db, set).is_some() } #[allow(clippy::type_complexity)] @@ -177,15 +146,8 @@ impl KeyGen { key: &::G, ) -> Option<(Vec>, Vec>)> { // This is safe, despite not having a txn, since it's a static value - // The only concern is it may not be set when expected, or it may be set unexpectedly - // - // They're only expected to be set on boot, if confirmed. If they were confirmed yet the - // transaction wasn't committed, their confirmation will be re-handled - // - // The only other concern is if it's set when it's not safe to use - // The keys are only written on confirmation, and the transaction writing them is atomic to - // every associated operation - KeyGenDb::::keys(&self.db, key) + // It doesn't change over time/in relation to other operations + KeysDb::keys::(&self.db, key) } pub async fn handle( @@ -313,7 +275,7 @@ impl KeyGen { self.active_share.remove(&id.set).is_none() { // If we haven't handled this set before, save the params - KeyGenDb::::save_params(txn, &id.set, ¶ms, shares); + ParamsDb::set(txn, &id.set, &(params, shares)); } let (machines, commitments) = key_gen_machines(id, params, shares); @@ -332,7 +294,7 @@ impl KeyGen { panic!("commitments when already handled commitments"); } - let (params, share_quantity) = KeyGenDb::::params(txn, &id.set).unwrap(); + let (params, share_quantity) = ParamsDb::get(txn, &id.set).unwrap(); // Unwrap the machines, rebuilding them if we didn't have them in our cache // 
We won't if the processor rebooted @@ -344,7 +306,7 @@ impl KeyGen { .remove(&id.set) .unwrap_or_else(|| key_gen_machines(id, params, share_quantity)); - KeyGenDb::::save_commitments(txn, &id, &commitments); + CommitmentsDb::set(txn, &id, &commitments); let (machines, shares) = secret_share_machines(id, params, prior, commitments); self.active_share.insert(id.set, (machines, shares.clone())); @@ -355,12 +317,12 @@ impl KeyGen { CoordinatorMessage::Shares { id, shares } => { info!("Received shares for {:?}", id); - let (params, share_quantity) = KeyGenDb::::params(txn, &id.set).unwrap(); + let (params, share_quantity) = ParamsDb::get(txn, &id.set).unwrap(); // Same commentary on inconsistency as above exists let (machines, our_shares) = self.active_share.remove(&id.set).unwrap_or_else(|| { let prior = key_gen_machines(id, params, share_quantity); - secret_share_machines(id, params, prior, KeyGenDb::::commitments(txn, &id)) + secret_share_machines(id, params, prior, CommitmentsDb::get(txn, &id).unwrap()) }); let mut rng = share_rng(id); @@ -437,7 +399,7 @@ impl KeyGen { } } - KeyGenDb::::save_keys(txn, &id, &substrate_keys, &network_keys); + GeneratedKeysDb::save_keys::(txn, &id, &substrate_keys, &network_keys); ProcessorMessage::GeneratedKeyPair { id, @@ -454,15 +416,15 @@ impl KeyGen { set: ValidatorSet, key_pair: KeyPair, ) -> KeyConfirmed { - let (substrate_keys, network_keys) = KeyGenDb::::confirm_keys(txn, set, key_pair.clone()); - info!( "Confirmed key pair {} {} for set {:?}", hex::encode(key_pair.0), - hex::encode(key_pair.1), + hex::encode(&key_pair.1), set, ); + let (substrate_keys, network_keys) = KeysDb::confirm_keys::(txn, set, key_pair); + KeyConfirmed { substrate_keys, network_keys } } }