diff --git a/coordinator/src/substrate/mod.rs b/coordinator/src/substrate/mod.rs
index 9ee96e52..5508cbe1 100644
--- a/coordinator/src/substrate/mod.rs
+++ b/coordinator/src/substrate/mod.rs
@@ -147,8 +147,6 @@ async fn handle_batch_and_burns(
       network_had_event(&mut burns, &mut batches, network);
 
       // Make sure this is the only Batch event for this network in this Block
-      // TODO: Make sure Serai rejects multiple Batchs within the same block. It should, as of an
-      // yet to be merged branch
       assert!(batch_block.insert(network, network_block).is_none());
 
       // Add the batch included by this block
diff --git a/coordinator/src/tributary/db.rs b/coordinator/src/tributary/db.rs
index acb6e842..9a89139b 100644
--- a/coordinator/src/tributary/db.rs
+++ b/coordinator/src/tributary/db.rs
@@ -17,11 +17,17 @@ pub enum Topic {
 
 impl Topic {
   fn as_key(&self, genesis: [u8; 32]) -> Vec<u8> {
-    match self {
-      Topic::Dkg => [genesis.as_slice(), b"dkg".as_ref()].concat(),
-      Topic::Batch(id) => [genesis.as_slice(), b"batch".as_ref(), id.as_ref()].concat(),
-      Topic::Sign(id) => [genesis.as_slice(), b"sign".as_ref(), id.as_ref()].concat(),
-    }
+    let mut res = genesis.to_vec();
+    let (label, id) = match self {
+      Topic::Dkg => (b"dkg".as_slice(), [].as_slice()),
+      Topic::Batch(id) => (b"batch".as_slice(), id.as_slice()),
+      Topic::Sign(id) => (b"sign".as_slice(), id.as_slice()),
+    };
+    res.push(u8::try_from(label.len()).unwrap());
+    res.extend(label);
+    res.push(u8::try_from(id.len()).unwrap());
+    res.extend(id);
+    res
   }
 }
 
@@ -35,13 +41,12 @@ pub struct DataSpecification {
 
 impl DataSpecification {
   fn as_key(&self, genesis: [u8; 32]) -> Vec<u8> {
-    // TODO: Use a proper transcript here to avoid conflicts?
-    [
-      self.topic.as_key(genesis).as_ref(),
-      self.label.as_bytes(),
-      self.attempt.to_le_bytes().as_ref(),
-    ]
-    .concat()
+    let mut res = self.topic.as_key(genesis);
+    let label_bytes = self.label.bytes();
+    res.push(u8::try_from(label_bytes.len()).unwrap());
+    res.extend(label_bytes);
+    res.extend(self.attempt.to_le_bytes());
+    res
   }
 }
 
diff --git a/coordinator/src/tributary/handle.rs b/coordinator/src/tributary/handle.rs
index 257f6932..a9cc23dc 100644
--- a/coordinator/src/tributary/handle.rs
+++ b/coordinator/src/tributary/handle.rs
@@ -40,9 +40,19 @@ use crate::{
   },
 };
 
+const DKG_COMMITMENTS: &str = "commitments";
+const DKG_SHARES: &str = "shares";
 const DKG_CONFIRMATION_NONCES: &str = "confirmation_nonces";
 const DKG_CONFIRMATION_SHARES: &str = "confirmation_shares";
 
+// These s/b prefixes between Batch and Sign should be unnecessary, as Batch/Share entries
+// themselves should already be domain separated
+const BATCH_PREPROCESS: &str = "b_preprocess";
+const BATCH_SHARE: &str = "b_share";
+
+const SIGN_PREPROCESS: &str = "s_preprocess";
+const SIGN_SHARE: &str = "s_share";
+
 // Instead of maintaing state, this simply re-creates the machine(s) in-full on every call (which
 // should only be once per tributary).
 // This simplifies data flow and prevents requiring multiple paths.
@@ -290,7 +300,7 @@ pub(crate) async fn handle_application_tx<
     Transaction::DkgCommitments(attempt, bytes, signed) => {
       match handle(
         txn,
-        &DataSpecification { topic: Topic::Dkg, label: "commitments", attempt },
+        &DataSpecification { topic: Topic::Dkg, label: DKG_COMMITMENTS, attempt },
         bytes,
         &signed,
       ) {
@@ -345,7 +355,7 @@ pub(crate) async fn handle_application_tx<
       );
       match handle(
         txn,
-        &DataSpecification { topic: Topic::Dkg, label: "shares", attempt },
+        &DataSpecification { topic: Topic::Dkg, label: DKG_SHARES, attempt },
        bytes,
         &signed,
       ) {
@@ -436,7 +446,7 @@ pub(crate) async fn handle_application_tx<
         txn,
         &DataSpecification {
           topic: Topic::Batch(data.plan),
-          label: "preprocess",
+          label: BATCH_PREPROCESS,
           attempt: data.attempt,
         },
         data.data,
@@ -463,7 +473,7 @@ pub(crate) async fn handle_application_tx<
         txn,
         &DataSpecification {
           topic: Topic::Batch(data.plan),
-          label: "share",
+          label: BATCH_SHARE,
           attempt: data.attempt,
         },
         data.data,
@@ -494,7 +504,7 @@ pub(crate) async fn handle_application_tx<
         txn,
         &DataSpecification {
           topic: Topic::Sign(data.plan),
-          label: "preprocess",
+          label: SIGN_PREPROCESS,
           attempt: data.attempt,
         },
         data.data,
@@ -527,7 +537,11 @@ pub(crate) async fn handle_application_tx<
       let key_pair = TributaryDb::<D>::key_pair(txn, spec.set());
       match handle(
         txn,
-        &DataSpecification { topic: Topic::Sign(data.plan), label: "share", attempt: data.attempt },
+        &DataSpecification {
+          topic: Topic::Sign(data.plan),
+          label: SIGN_SHARE,
+          attempt: data.attempt,
+        },
         data.data,
         &data.signed,
       ) {
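
For reviewers, the motivation behind the new `Topic::as_key` / `DataSpecification::as_key` encodings: raw concatenation lets two distinct (label, id) pairs produce the same database key, while length-prefixing each variable-length component makes the boundaries unambiguous. A minimal standalone sketch of that collision, not part of this patch; `concat_key`, `prefixed_key`, and the zeroed genesis are illustrative only:

```rust
// Illustrative comparison of the old-style and new-style key encodings.

fn concat_key(genesis: [u8; 32], label: &[u8], id: &[u8]) -> Vec<u8> {
  // Old-style: raw concatenation, so component boundaries are lost.
  [genesis.as_slice(), label, id].concat()
}

fn prefixed_key(genesis: [u8; 32], label: &[u8], id: &[u8]) -> Vec<u8> {
  // New-style: each variable-length component is preceded by its length.
  let mut res = genesis.to_vec();
  res.push(u8::try_from(label.len()).unwrap());
  res.extend(label);
  res.push(u8::try_from(id.len()).unwrap());
  res.extend(id);
  res
}

fn main() {
  let genesis = [0; 32];
  // Two distinct (label, id) pairs whose raw concatenations are byte-identical.
  assert_eq!(concat_key(genesis, b"sign", b"ature"), concat_key(genesis, b"signa", b"ture"));
  // The length-prefixed encoding keeps them distinct.
  assert_ne!(prefixed_key(genesis, b"sign", b"ature"), prefixed_key(genesis, b"signa", b"ture"));
  println!("length prefixes disambiguate component boundaries");
}
```

The single-byte length prefix assumes every label and id stays under 256 bytes, which appears to hold for the short label constants and fixed-size plan ids used here and is what keeps the `u8::try_from(...).unwrap()` calls in the patch from panicking.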