Use a proper transcript for Tributary scanner topics

This commit is contained in:
Luke Parker 2023-09-27 13:33:25 -04:00
parent 01a4b9e694
commit 4a32f22418
No known key found for this signature in database
3 changed files with 37 additions and 20 deletions

View file

@ -147,8 +147,6 @@ async fn handle_batch_and_burns<Pro: Processors>(
network_had_event(&mut burns, &mut batches, network); network_had_event(&mut burns, &mut batches, network);
// Make sure this is the only Batch event for this network in this Block // Make sure this is the only Batch event for this network in this Block
// TODO: Make sure Serai rejects multiple Batches within the same block. It should, as of a
// yet-to-be-merged branch
assert!(batch_block.insert(network, network_block).is_none()); assert!(batch_block.insert(network, network_block).is_none());
// Add the batch included by this block // Add the batch included by this block

View file

@ -17,11 +17,17 @@ pub enum Topic {
impl Topic { impl Topic {
fn as_key(&self, genesis: [u8; 32]) -> Vec<u8> { fn as_key(&self, genesis: [u8; 32]) -> Vec<u8> {
match self { let mut res = genesis.to_vec();
Topic::Dkg => [genesis.as_slice(), b"dkg".as_ref()].concat(), let (label, id) = match self {
Topic::Batch(id) => [genesis.as_slice(), b"batch".as_ref(), id.as_ref()].concat(), Topic::Dkg => (b"dkg".as_slice(), [].as_slice()),
Topic::Sign(id) => [genesis.as_slice(), b"sign".as_ref(), id.as_ref()].concat(), Topic::Batch(id) => (b"batch".as_slice(), id.as_slice()),
} Topic::Sign(id) => (b"sign".as_slice(), id.as_slice()),
};
res.push(u8::try_from(label.len()).unwrap());
res.extend(label);
res.push(u8::try_from(id.len()).unwrap());
res.extend(id);
res
} }
} }
@ -35,13 +41,12 @@ pub struct DataSpecification {
impl DataSpecification { impl DataSpecification {
fn as_key(&self, genesis: [u8; 32]) -> Vec<u8> { fn as_key(&self, genesis: [u8; 32]) -> Vec<u8> {
// TODO: Use a proper transcript here to avoid conflicts? let mut res = self.topic.as_key(genesis);
[ let label_bytes = self.label.bytes();
self.topic.as_key(genesis).as_ref(), res.push(u8::try_from(label_bytes.len()).unwrap());
self.label.as_bytes(), res.extend(label_bytes);
self.attempt.to_le_bytes().as_ref(), res.extend(self.attempt.to_le_bytes());
] res
.concat()
} }
} }

View file

@ -40,9 +40,19 @@ use crate::{
}, },
}; };
const DKG_COMMITMENTS: &str = "commitments";
const DKG_SHARES: &str = "shares";
const DKG_CONFIRMATION_NONCES: &str = "confirmation_nonces"; const DKG_CONFIRMATION_NONCES: &str = "confirmation_nonces";
const DKG_CONFIRMATION_SHARES: &str = "confirmation_shares"; const DKG_CONFIRMATION_SHARES: &str = "confirmation_shares";
// These s/b prefixes between Batch and Sign should be unnecessary, as Batch/Share entries
// themselves should already be domain separated
const BATCH_PREPROCESS: &str = "b_preprocess";
const BATCH_SHARE: &str = "b_share";
const SIGN_PREPROCESS: &str = "s_preprocess";
const SIGN_SHARE: &str = "s_share";
// Instead of maintaing state, this simply re-creates the machine(s) in-full on every call (which // Instead of maintaing state, this simply re-creates the machine(s) in-full on every call (which
// should only be once per tributary). // should only be once per tributary).
// This simplifies data flow and prevents requiring multiple paths. // This simplifies data flow and prevents requiring multiple paths.
@ -290,7 +300,7 @@ pub(crate) async fn handle_application_tx<
Transaction::DkgCommitments(attempt, bytes, signed) => { Transaction::DkgCommitments(attempt, bytes, signed) => {
match handle( match handle(
txn, txn,
&DataSpecification { topic: Topic::Dkg, label: "commitments", attempt }, &DataSpecification { topic: Topic::Dkg, label: DKG_COMMITMENTS, attempt },
bytes, bytes,
&signed, &signed,
) { ) {
@ -345,7 +355,7 @@ pub(crate) async fn handle_application_tx<
); );
match handle( match handle(
txn, txn,
&DataSpecification { topic: Topic::Dkg, label: "shares", attempt }, &DataSpecification { topic: Topic::Dkg, label: DKG_SHARES, attempt },
bytes, bytes,
&signed, &signed,
) { ) {
@ -436,7 +446,7 @@ pub(crate) async fn handle_application_tx<
txn, txn,
&DataSpecification { &DataSpecification {
topic: Topic::Batch(data.plan), topic: Topic::Batch(data.plan),
label: "preprocess", label: BATCH_PREPROCESS,
attempt: data.attempt, attempt: data.attempt,
}, },
data.data, data.data,
@ -463,7 +473,7 @@ pub(crate) async fn handle_application_tx<
txn, txn,
&DataSpecification { &DataSpecification {
topic: Topic::Batch(data.plan), topic: Topic::Batch(data.plan),
label: "share", label: BATCH_SHARE,
attempt: data.attempt, attempt: data.attempt,
}, },
data.data, data.data,
@ -494,7 +504,7 @@ pub(crate) async fn handle_application_tx<
txn, txn,
&DataSpecification { &DataSpecification {
topic: Topic::Sign(data.plan), topic: Topic::Sign(data.plan),
label: "preprocess", label: SIGN_PREPROCESS,
attempt: data.attempt, attempt: data.attempt,
}, },
data.data, data.data,
@ -527,7 +537,11 @@ pub(crate) async fn handle_application_tx<
let key_pair = TributaryDb::<D>::key_pair(txn, spec.set()); let key_pair = TributaryDb::<D>::key_pair(txn, spec.set());
match handle( match handle(
txn, txn,
&DataSpecification { topic: Topic::Sign(data.plan), label: "share", attempt: data.attempt }, &DataSpecification {
topic: Topic::Sign(data.plan),
label: SIGN_SHARE,
attempt: data.attempt,
},
data.data, data.data,
&data.signed, &data.signed,
) { ) {