Mirror of https://github.com/serai-dex/serai.git (synced 2025-01-03 17:40:34 +00:00)

Commit c03fb6c71b: Add dedicated BatchSignId
Parent: 96f94966b7
17 changed files with 112 additions and 116 deletions
@@ -94,7 +94,7 @@ impl SignableTransaction {
       // the value is fixed size (so any value could be used here)
       tx.output.push(TxOut { value: Amount::ZERO, script_pubkey: change.script_pubkey() });
     }
-    u64::try_from(tx.weight()).unwrap()
+    u64::from(tx.weight())
   }

   /// Returns the fee necessary for this transaction to achieve the fee rate specified at
@@ -106,14 +106,14 @@ impl<D: Db> MainDb<D> {
     res
   }

-  fn first_preprocess_key(network: NetworkId, id_type: RecognizedIdType, id: [u8; 32]) -> Vec<u8> {
+  fn first_preprocess_key(network: NetworkId, id_type: RecognizedIdType, id: &[u8]) -> Vec<u8> {
     Self::main_key(b"first_preprocess", (network, id_type, id).encode())
   }
   pub fn save_first_preprocess(
     txn: &mut D::Transaction<'_>,
     network: NetworkId,
     id_type: RecognizedIdType,
-    id: [u8; 32],
+    id: &[u8],
     preprocess: Vec<Vec<u8>>,
   ) {
     let preprocess = preprocess.encode();
@@ -128,7 +128,7 @@ impl<D: Db> MainDb<D> {
     getter: &G,
     network: NetworkId,
     id_type: RecognizedIdType,
-    id: [u8; 32],
+    id: &[u8],
   ) -> Option<Vec<Vec<u8>>> {
     getter
       .get(Self::first_preprocess_key(network, id_type, id))
@@ -488,7 +488,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
         &mut txn,
         network,
         RecognizedIdType::Plan,
-        id.id,
+        &id.id,
         preprocesses,
       );

@@ -547,7 +547,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
         &mut txn,
         spec.set().network,
         RecognizedIdType::Batch,
-        id.id,
+        &id.id,
         preprocesses,
       );

@@ -877,7 +877,7 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
       }
     });

-    move |set: ValidatorSet, genesis, id_type, id, nonce| {
+    move |set: ValidatorSet, genesis, id_type, id: Vec<u8>, nonce| {
       let mut raw_db = raw_db.clone();
       let key = key.clone();
       let tributaries = tributaries.clone();
@@ -899,16 +899,16 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(

       let mut tx = match id_type {
         RecognizedIdType::Batch => Transaction::BatchPreprocess(SignData {
-          plan: id,
+          data: get_preprocess(&raw_db, id_type, &id).await,
+          plan: id.try_into().unwrap(),
           attempt: 0,
-          data: get_preprocess(&raw_db, id_type, id).await,
           signed: Transaction::empty_signed(),
         }),

         RecognizedIdType::Plan => Transaction::SignPreprocess(SignData {
-          plan: id,
+          data: get_preprocess(&raw_db, id_type, &id).await,
+          plan: id.try_into().unwrap(),
           attempt: 0,
-          data: get_preprocess(&raw_db, id_type, id).await,
           signed: Transaction::empty_signed(),
         }),
       };
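
Note on the field reordering in the hunk above: the recognized id now arrives as a Vec<u8>, and converting it into the fixed-size plan array consumes that vector, so the data field (which only borrows it via &id) has to be initialized first. A minimal sketch of the conversion being relied on; the helper name is illustrative, not from the codebase:

// Sketch: turning a runtime-sized id into the fixed-size array SignData<N> stores.
// TryFrom<Vec<u8>> for [u8; N] only succeeds when the lengths match, which holds here
// because Batch ids are 5 bytes and Plan ids are 32 bytes.
fn to_plan_id<const N: usize>(id: Vec<u8>) -> [u8; N] {
  id.try_into().unwrap()
}

With N = 5 this feeds Transaction::BatchPreprocess(SignData<5>); with N = 32 it feeds Transaction::SignPreprocess(SignData<32>).
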
@@ -28,8 +28,8 @@ fn random_vec<R: RngCore>(rng: &mut R, limit: usize) -> Vec<u8> {
   res
 }

-fn random_sign_data<R: RngCore>(rng: &mut R) -> SignData {
-  let mut plan = [0; 32];
+fn random_sign_data<R: RngCore, const N: usize>(rng: &mut R) -> SignData<N> {
+  let mut plan = [0; N];
   rng.fill_bytes(&mut plan);

   SignData {
@@ -80,7 +80,10 @@ fn tx_size_limit() {

 #[test]
 fn serialize_sign_data() {
-  test_read_write(random_sign_data(&mut OsRng));
+  test_read_write(random_sign_data::<_, 3>(&mut OsRng));
+  test_read_write(random_sign_data::<_, 8>(&mut OsRng));
+  test_read_write(random_sign_data::<_, 16>(&mut OsRng));
+  test_read_write(random_sign_data::<_, 24>(&mut OsRng));
 }

 #[test]
@@ -143,7 +146,7 @@ fn serialize_transaction() {
   {
     let mut block = [0; 32];
     OsRng.fill_bytes(&mut block);
-    let mut batch = [0; 32];
+    let mut batch = [0; 5];
     OsRng.fill_bytes(&mut batch);
     test_read_write(Transaction::Batch(block, batch));
   }
@@ -16,7 +16,7 @@ use crate::tributary::TributarySpec;
 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
 pub enum Topic {
   Dkg,
-  Batch([u8; 32]),
+  Batch([u8; 5]),
   Sign([u8; 32]),
 }

@@ -18,7 +18,7 @@ use tributary::Signed;

 use processor_messages::{
   key_gen::{self, KeyGenId},
-  coordinator,
+  coordinator::{self, BatchSignId},
   sign::{self, SignId},
 };

@@ -370,7 +370,7 @@ pub(crate) async fn handle_application_tx<
       // Because this Batch has achieved synchrony, its batch ID should be authorized
       TributaryDb::<D>::recognize_topic(txn, genesis, Topic::Batch(batch));
       let nonce = NonceDecider::<D>::handle_batch(txn, genesis, batch);
-      recognized_id(spec.set(), genesis, RecognizedIdType::Batch, batch, nonce).await;
+      recognized_id(spec.set(), genesis, RecognizedIdType::Batch, batch.to_vec(), nonce).await;
     }

     Transaction::SubstrateBlock(block) => {
@@ -382,7 +382,7 @@ pub(crate) async fn handle_application_tx<
       let nonces = NonceDecider::<D>::handle_substrate_block(txn, genesis, &plan_ids);
       for (nonce, id) in nonces.into_iter().zip(plan_ids.into_iter()) {
         TributaryDb::<D>::recognize_topic(txn, genesis, Topic::Sign(id));
-        recognized_id(spec.set(), genesis, RecognizedIdType::Plan, id, nonce).await;
+        recognized_id(spec.set(), genesis, RecognizedIdType::Plan, id.to_vec(), nonce).await;
       }
     }

@@ -403,12 +403,12 @@ pub(crate) async fn handle_application_tx<
         Accumulation::Ready(DataSet::Participating(mut preprocesses)) => {
           unflatten(spec, &mut preprocesses);
           NonceDecider::<D>::selected_for_signing_batch(txn, genesis, data.plan);
-          let key = TributaryDb::<D>::key_pair(txn, spec.set()).unwrap().0 .0.to_vec();
+          let key = TributaryDb::<D>::key_pair(txn, spec.set()).unwrap().0 .0;
           processors
             .send(
               spec.set().network,
               coordinator::CoordinatorMessage::BatchPreprocesses {
-                id: SignId { key, id: data.plan, attempt: data.attempt },
+                id: BatchSignId { key, id: data.plan, attempt: data.attempt },
                 preprocesses,
               },
             )
@@ -434,12 +434,12 @@ pub(crate) async fn handle_application_tx<
       ) {
         Accumulation::Ready(DataSet::Participating(mut shares)) => {
           unflatten(spec, &mut shares);
-          let key = TributaryDb::<D>::key_pair(txn, spec.set()).unwrap().0 .0.to_vec();
+          let key = TributaryDb::<D>::key_pair(txn, spec.set()).unwrap().0 .0;
           processors
             .send(
               spec.set().network,
               coordinator::CoordinatorMessage::BatchShares {
-                id: SignId { key, id: data.plan, attempt: data.attempt },
+                id: BatchSignId { key, id: data.plan, attempt: data.attempt },
                 shares: shares
                   .into_iter()
                   .map(|(validator, share)| (validator, share.try_into().unwrap()))
@@ -167,8 +167,8 @@ impl TributarySpec {
 }

 #[derive(Clone, PartialEq, Eq, Debug)]
-pub struct SignData {
-  pub plan: [u8; 32],
+pub struct SignData<const N: usize> {
+  pub plan: [u8; N],
   pub attempt: u32,

   pub data: Vec<Vec<u8>>,
@@ -176,9 +176,9 @@ pub struct SignData {
   pub signed: Signed,
 }

-impl ReadWrite for SignData {
+impl<const N: usize> ReadWrite for SignData<N> {
   fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
-    let mut plan = [0; 32];
+    let mut plan = [0; N];
     reader.read_exact(&mut plan)?;

     let mut attempt = [0; 4];
@@ -249,16 +249,16 @@ pub enum Transaction {
   // which would be binding over the block hash and automatically achieve synchrony on all
   // relevant batches. ExternalBlock was removed for this due to complexity around the pipeline
   // with the current processor, yet it would still be an improvement.
-  Batch([u8; 32], [u8; 32]),
+  Batch([u8; 32], [u8; 5]),
   // When a Serai block is finalized, with the contained batches, we can allow the associated plan
   // IDs
   SubstrateBlock(u64),

-  BatchPreprocess(SignData),
-  BatchShare(SignData),
+  BatchPreprocess(SignData<5>),
+  BatchShare(SignData<5>),

-  SignPreprocess(SignData),
-  SignShare(SignData),
+  SignPreprocess(SignData<32>),
+  SignShare(SignData<32>),
   // This is defined as an Unsigned transaction in order to de-duplicate SignCompleted amongst
   // reporters (who should all report the same thing)
   // We do still track the signer in order to prevent a single signer from publishing arbitrarily
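
The SignData<const N: usize> change above is what lets Batch-signing transactions carry a 5-byte id while Plan-signing transactions keep their 32-byte id, without duplicating the serialization code. A small sketch of the pattern, assuming only std; it mirrors the `let mut plan = [0; N];` read above but is itself illustrative:

use std::io::{self, Read};

// Read a fixed-size plan id whose length is chosen at the type level.
fn read_plan<const N: usize, R: Read>(reader: &mut R) -> io::Result<[u8; N]> {
  let mut plan = [0; N];
  reader.read_exact(&mut plan)?;
  Ok(plan)
}

Callers pick the width per transaction kind: read_plan::<5, _>(reader) for Batch ids, read_plan::<32, _>(reader) for Plan ids.
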
@@ -367,7 +367,7 @@ impl ReadWrite for Transaction {
       3 => {
         let mut block = [0; 32];
         reader.read_exact(&mut block)?;
-        let mut batch = [0; 32];
+        let mut batch = [0; 5];
         reader.read_exact(&mut batch)?;
         Ok(Transaction::Batch(block, batch))
       }
@@ -27,31 +27,25 @@ impl<D: Db> NonceDecider<D> {
     next
   }

-  fn item_nonce_key(genesis: [u8; 32], code: u8, id: [u8; 32]) -> Vec<u8> {
+  fn item_nonce_key(genesis: [u8; 32], code: u8, id: &[u8]) -> Vec<u8> {
     D::key(
       b"coordinator_tributary_nonce",
       b"item",
-      [genesis.as_slice(), [code].as_ref(), id.as_ref()].concat(),
+      [genesis.as_slice(), [code].as_ref(), id].concat(),
     )
   }
-  fn set_nonce(
-    txn: &mut D::Transaction<'_>,
-    genesis: [u8; 32],
-    code: u8,
-    id: [u8; 32],
-    nonce: u32,
-  ) {
+  fn set_nonce(txn: &mut D::Transaction<'_>, genesis: [u8; 32], code: u8, id: &[u8], nonce: u32) {
     txn.put(Self::item_nonce_key(genesis, code, id), nonce.to_le_bytes())
   }
-  fn db_nonce<G: Get>(getter: &G, genesis: [u8; 32], code: u8, id: [u8; 32]) -> Option<u32> {
+  fn db_nonce<G: Get>(getter: &G, genesis: [u8; 32], code: u8, id: &[u8]) -> Option<u32> {
     getter
       .get(Self::item_nonce_key(genesis, code, id))
       .map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap()))
   }

-  pub fn handle_batch(txn: &mut D::Transaction<'_>, genesis: [u8; 32], batch: [u8; 32]) -> u32 {
+  pub fn handle_batch(txn: &mut D::Transaction<'_>, genesis: [u8; 32], batch: [u8; 5]) -> u32 {
     let nonce_for = Self::allocate_nonce(txn, genesis);
-    Self::set_nonce(txn, genesis, BATCH_CODE, batch, nonce_for);
+    Self::set_nonce(txn, genesis, BATCH_CODE, &batch, nonce_for);
     nonce_for
   }
   // TODO: The processor won't yield shares for this if the signing protocol aborts. We need to
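
With id taken as &[u8], one key-derivation path now covers both the 5-byte Batch ids and the 32-byte Plan ids, and because Batch and Plan entries use distinct code bytes, mixing id lengths under the same prefix stays unambiguous. A standalone sketch of the concatenation; the real code routes it through D::key with the domain prefixes shown above:

// Sketch only, not the repo's Db API: the per-item portion of the nonce key is
// genesis || code || id, where id may now be 5 or 32 bytes long.
fn item_key(genesis: [u8; 32], code: u8, id: &[u8]) -> Vec<u8> {
  [genesis.as_slice(), [code].as_ref(), id].concat()
}
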
@@ -60,10 +54,10 @@ impl<D: Db> NonceDecider<D> {
   pub fn selected_for_signing_batch(
     txn: &mut D::Transaction<'_>,
     genesis: [u8; 32],
-    batch: [u8; 32],
+    batch: [u8; 5],
   ) {
     let nonce_for = Self::allocate_nonce(txn, genesis);
-    Self::set_nonce(txn, genesis, BATCH_SIGNING_CODE, batch, nonce_for);
+    Self::set_nonce(txn, genesis, BATCH_SIGNING_CODE, &batch, nonce_for);
   }

   pub fn handle_substrate_block(
@@ -74,7 +68,7 @@ impl<D: Db> NonceDecider<D> {
     let mut res = Vec::with_capacity(plans.len());
     for plan in plans {
       let nonce_for = Self::allocate_nonce(txn, genesis);
-      Self::set_nonce(txn, genesis, PLAN_CODE, *plan, nonce_for);
+      Self::set_nonce(txn, genesis, PLAN_CODE, plan, nonce_for);
       res.push(nonce_for);
     }
     res
@@ -86,7 +80,7 @@ impl<D: Db> NonceDecider<D> {
     plan: [u8; 32],
   ) {
     let nonce_for = Self::allocate_nonce(txn, genesis);
-    Self::set_nonce(txn, genesis, PLAN_SIGNING_CODE, plan, nonce_for);
+    Self::set_nonce(txn, genesis, PLAN_SIGNING_CODE, &plan, nonce_for);
   }

   pub fn nonce<G: Get>(getter: &G, genesis: [u8; 32], tx: &Transaction) -> Option<Option<u32>> {
@@ -109,20 +103,20 @@ impl<D: Db> NonceDecider<D> {

       Transaction::BatchPreprocess(data) => {
         assert_eq!(data.attempt, 0);
-        Some(Self::db_nonce(getter, genesis, BATCH_CODE, data.plan))
+        Some(Self::db_nonce(getter, genesis, BATCH_CODE, &data.plan))
       }
       Transaction::BatchShare(data) => {
         assert_eq!(data.attempt, 0);
-        Some(Self::db_nonce(getter, genesis, BATCH_SIGNING_CODE, data.plan))
+        Some(Self::db_nonce(getter, genesis, BATCH_SIGNING_CODE, &data.plan))
       }

       Transaction::SignPreprocess(data) => {
         assert_eq!(data.attempt, 0);
-        Some(Self::db_nonce(getter, genesis, PLAN_CODE, data.plan))
+        Some(Self::db_nonce(getter, genesis, PLAN_CODE, &data.plan))
       }
       Transaction::SignShare(data) => {
         assert_eq!(data.attempt, 0);
-        Some(Self::db_nonce(getter, genesis, PLAN_SIGNING_CODE, data.plan))
+        Some(Self::db_nonce(getter, genesis, PLAN_SIGNING_CODE, &data.plan))
       }

       Transaction::SignCompleted { .. } => None,
@@ -35,10 +35,10 @@ pub enum RecognizedIdType {
 }

 pub(crate) trait RIDTrait<FRid>:
-  Clone + Fn(ValidatorSet, [u8; 32], RecognizedIdType, [u8; 32], u32) -> FRid
+  Clone + Fn(ValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>, u32) -> FRid
 {
 }
-impl<FRid, F: Clone + Fn(ValidatorSet, [u8; 32], RecognizedIdType, [u8; 32], u32) -> FRid>
+impl<FRid, F: Clone + Fn(ValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>, u32) -> FRid>
   RIDTrait<FRid> for F
 {
 }
@@ -106,15 +106,22 @@ pub mod sign {
 }

 pub mod coordinator {
-  use super::{sign::SignId, *};
+  use super::*;

+  #[derive(Clone, PartialEq, Eq, Hash, Debug, Zeroize, Encode, Decode, Serialize, Deserialize)]
+  pub struct BatchSignId {
+    pub key: [u8; 32],
+    pub id: [u8; 5],
+    pub attempt: u32,
+  }
+
   #[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
   pub enum CoordinatorMessage {
     // Uses Vec<u8> instead of [u8; 64] since serde Deserialize isn't implemented for [u8; 64]
-    BatchPreprocesses { id: SignId, preprocesses: HashMap<Participant, Vec<u8>> },
-    BatchShares { id: SignId, shares: HashMap<Participant, [u8; 32]> },
+    BatchPreprocesses { id: BatchSignId, preprocesses: HashMap<Participant, Vec<u8>> },
+    BatchShares { id: BatchSignId, shares: HashMap<Participant, [u8; 32]> },
     // Re-attempt a batch signing protocol.
-    BatchReattempt { id: SignId },
+    BatchReattempt { id: BatchSignId },
   }

   impl CoordinatorMessage {
|
||||||
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, Encode, Decode, Serialize, Deserialize)]
|
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, Encode, Decode, Serialize, Deserialize)]
|
||||||
pub enum ProcessorMessage {
|
pub enum ProcessorMessage {
|
||||||
SubstrateBlockAck { network: NetworkId, block: u64, plans: Vec<PlanMeta> },
|
SubstrateBlockAck { network: NetworkId, block: u64, plans: Vec<PlanMeta> },
|
||||||
BatchPreprocess { id: SignId, block: BlockHash, preprocesses: Vec<Vec<u8>> },
|
BatchPreprocess { id: BatchSignId, block: BlockHash, preprocesses: Vec<Vec<u8>> },
|
||||||
BatchShare { id: SignId, shares: Vec<[u8; 32]> },
|
BatchShare { id: BatchSignId, shares: Vec<[u8; 32]> },
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -359,7 +366,7 @@ impl ProcessorMessage {
       coordinator::ProcessorMessage::SubstrateBlockAck { network, block, .. } => {
         (0, (network, block).encode())
       }
-      // Unique since SignId
+      // Unique since BatchSignId
       coordinator::ProcessorMessage::BatchPreprocess { id, .. } => (1, id.encode()),
       coordinator::ProcessorMessage::BatchShare { id, .. } => (2, id.encode()),
     };
@@ -226,7 +226,7 @@ impl Monero {
       // This isn't entirely accurate as Bulletproof TXs will have a higher weight than their
       // serialization length
       // It's likely 'good enough'
-      // TODO: Improve
+      // TODO2: Improve
       fees.push(tx.rct_signatures.base.fee / u64::try_from(tx.serialize().len()).unwrap());
     }
     fees.sort();
@@ -3,7 +3,6 @@ use std::collections::{VecDeque, HashMap};

 use rand_core::OsRng;

-use transcript::{Transcript, RecommendedTranscript};
 use ciphersuite::group::GroupEncoding;
 use frost::{
   curve::Ristretto,
@@ -24,20 +23,12 @@ use serai_client::{
   in_instructions::primitives::{Batch, SignedBatch, batch_message},
 };

-use messages::{sign::SignId, coordinator::*};
+use messages::coordinator::*;
 use crate::{Get, DbTxn, Db};

 // Generate an ID unique to a Batch
-// TODO: Fork SignId to BatchSignId in order to just use the 5-byte encoding, not the hash of the
-// 5-byte encoding
-fn sign_id(network: NetworkId, id: u32) -> [u8; 32] {
-  let mut transcript = RecommendedTranscript::new(b"Serai Processor Batch Sign ID");
-  transcript.append_message(b"network", network.encode());
-  transcript.append_message(b"id", id.to_le_bytes());
-
-  let mut res = [0; 32];
-  res.copy_from_slice(&transcript.challenge(b"id")[.. 32]);
-  res
+fn batch_sign_id(network: NetworkId, id: u32) -> [u8; 5] {
+  (network, id).encode().try_into().unwrap()
 }

 #[derive(Debug)]
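
The deleted helper hashed (network, id) into 32 bytes via a transcript; its replacement keeps the 5-byte SCALE encoding itself, which is already unique per (network, batch number) pair, exactly as the removed TODO suggested. The new function in isolation, with imports stated as assumptions (the workspace's `scale` alias for parity-scale-codec and NetworkId re-exported through serai_client, both already used by this file):

use scale::Encode;
use serai_client::primitives::NetworkId;

// As in the diff: NetworkId encodes to one byte and u32 to four, so the Vec is always
// exactly 5 bytes long and the try_into cannot fail.
fn batch_sign_id(network: NetworkId, id: u32) -> [u8; 5] {
  (network, id).encode().try_into().unwrap()
}

fn main() {
  let id = batch_sign_id(NetworkId::Monero, 5);
  assert_eq!(&id[1 ..], &5u32.to_le_bytes()); // the tail is the little-endian batch number
}
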
@@ -53,23 +44,23 @@ impl<D: Db> SubstrateSignerDb<D> {
     D::key(b"SUBSTRATE_SIGNER", dst, key)
   }

-  fn completed_key(id: [u8; 32]) -> Vec<u8> {
+  fn completed_key(id: [u8; 5]) -> Vec<u8> {
     Self::sign_key(b"completed", id)
   }
-  fn complete(txn: &mut D::Transaction<'_>, id: [u8; 32]) {
+  fn complete(txn: &mut D::Transaction<'_>, id: [u8; 5]) {
     txn.put(Self::completed_key(id), []);
   }
-  fn completed<G: Get>(getter: &G, id: [u8; 32]) -> bool {
+  fn completed<G: Get>(getter: &G, id: [u8; 5]) -> bool {
     getter.get(Self::completed_key(id)).is_some()
   }

-  fn attempt_key(id: &SignId) -> Vec<u8> {
+  fn attempt_key(id: &BatchSignId) -> Vec<u8> {
     Self::sign_key(b"attempt", id.encode())
   }
-  fn attempt(txn: &mut D::Transaction<'_>, id: &SignId) {
+  fn attempt(txn: &mut D::Transaction<'_>, id: &BatchSignId) {
     txn.put(Self::attempt_key(id), []);
   }
-  fn has_attempt<G: Get>(getter: &G, id: &SignId) -> bool {
+  fn has_attempt<G: Get>(getter: &G, id: &BatchSignId) -> bool {
     getter.get(Self::attempt_key(id)).is_some()
   }

@@ -89,14 +80,14 @@ pub struct SubstrateSigner<D: Db> {
   network: NetworkId,
   keys: Vec<ThresholdKeys<Ristretto>>,

-  signable: HashMap<[u8; 32], Batch>,
-  attempt: HashMap<[u8; 32], u32>,
+  signable: HashMap<[u8; 5], Batch>,
+  attempt: HashMap<[u8; 5], u32>,
   #[allow(clippy::type_complexity)]
   preprocessing:
-    HashMap<[u8; 32], (Vec<AlgorithmSignMachine<Ristretto, Schnorrkel>>, Vec<Preprocess>)>,
+    HashMap<[u8; 5], (Vec<AlgorithmSignMachine<Ristretto, Schnorrkel>>, Vec<Preprocess>)>,
   #[allow(clippy::type_complexity)]
   signing:
-    HashMap<[u8; 32], (AlgorithmSignatureMachine<Ristretto, Schnorrkel>, Vec<SignatureShare>)>,
+    HashMap<[u8; 5], (AlgorithmSignatureMachine<Ristretto, Schnorrkel>, Vec<SignatureShare>)>,

   pub events: VecDeque<SubstrateSignerEvent>,
 }
@@ -129,7 +120,7 @@ impl<D: Db> SubstrateSigner<D> {
     }
   }

-  fn verify_id(&self, id: &SignId) -> Result<(), ()> {
+  fn verify_id(&self, id: &BatchSignId) -> Result<(), ()> {
     // Check the attempt lines up
     match self.attempt.get(&id.id) {
       // If we don't have an attempt logged, it's because the coordinator is faulty OR because we
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn attempt(&mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], attempt: u32) {
|
async fn attempt(&mut self, txn: &mut D::Transaction<'_>, id: [u8; 5], attempt: u32) {
|
||||||
// See above commentary for why this doesn't emit SignedBatch
|
// See above commentary for why this doesn't emit SignedBatch
|
||||||
if SubstrateSignerDb::<D>::completed(txn, id) {
|
if SubstrateSignerDb::<D>::completed(txn, id) {
|
||||||
return;
|
return;
|
||||||
|
@ -189,7 +180,7 @@ impl<D: Db> SubstrateSigner<D> {
|
||||||
// Update the attempt number
|
// Update the attempt number
|
||||||
self.attempt.insert(id, attempt);
|
self.attempt.insert(id, attempt);
|
||||||
|
|
||||||
let id = SignId { key: self.keys[0].group_key().to_bytes().to_vec(), id, attempt };
|
let id = BatchSignId { key: self.keys[0].group_key().to_bytes(), id, attempt };
|
||||||
info!("signing batch {} #{}", hex::encode(id.id), id.attempt);
|
info!("signing batch {} #{}", hex::encode(id.id), id.attempt);
|
||||||
|
|
||||||
// If we reboot mid-sign, the current design has us abort all signs and wait for latter
|
// If we reboot mid-sign, the current design has us abort all signs and wait for latter
|
||||||
|
@ -241,7 +232,7 @@ impl<D: Db> SubstrateSigner<D> {
|
||||||
|
|
||||||
pub async fn sign(&mut self, txn: &mut D::Transaction<'_>, batch: Batch) {
|
pub async fn sign(&mut self, txn: &mut D::Transaction<'_>, batch: Batch) {
|
||||||
debug_assert_eq!(self.network, batch.network);
|
debug_assert_eq!(self.network, batch.network);
|
||||||
let id = sign_id(batch.network, batch.id);
|
let id = batch_sign_id(batch.network, batch.id);
|
||||||
if SubstrateSignerDb::<D>::completed(txn, id) {
|
if SubstrateSignerDb::<D>::completed(txn, id) {
|
||||||
debug!("Sign batch order for ID we've already completed signing");
|
debug!("Sign batch order for ID we've already completed signing");
|
||||||
// See batch_signed for commentary on why this simply returns
|
// See batch_signed for commentary on why this simply returns
|
||||||
|
@ -400,7 +391,7 @@ impl<D: Db> SubstrateSigner<D> {
|
||||||
// block behind it, which will trigger starting the Batch
|
// block behind it, which will trigger starting the Batch
|
||||||
// TODO: There is a race condition between the Scanner recognizing the block and the Batch
|
// TODO: There is a race condition between the Scanner recognizing the block and the Batch
|
||||||
// having signing started
|
// having signing started
|
||||||
let sign_id = sign_id(self.network, id);
|
let sign_id = batch_sign_id(self.network, id);
|
||||||
|
|
||||||
// Stop trying to sign for this batch
|
// Stop trying to sign for this batch
|
||||||
SubstrateSignerDb::<D>::complete(txn, sign_id);
|
SubstrateSignerDb::<D>::complete(txn, sign_id);
|
||||||
|
|
|
@@ -13,9 +13,10 @@ use sp_application_crypto::{RuntimePublic, sr25519::Public};

 use serai_db::{DbTxn, Db, MemDb};

+use scale::Encode;
 use serai_client::{primitives::*, in_instructions::primitives::*};

-use messages::{sign::SignId, coordinator::*};
+use messages::coordinator::*;
 use crate::substrate_signer::{SubstrateSignerEvent, SubstrateSigner};

 #[tokio::test]
@@ -26,11 +27,6 @@ async fn test_substrate_signer() {

   let id: u32 = 5;
   let block = BlockHash([0xaa; 32]);
-  let mut actual_id = SignId {
-    key: keys.values().next().unwrap().group_key().to_bytes().to_vec(),
-    id: [0; 32],
-    attempt: 0,
-  };

   let batch = Batch {
     network: NetworkId::Monero,
@@ -48,6 +44,12 @@ async fn test_substrate_signer() {
     ],
   };

+  let actual_id = BatchSignId {
+    key: keys.values().next().unwrap().group_key().to_bytes(),
+    id: (batch.network, batch.id).encode().try_into().unwrap(),
+    attempt: 0,
+  };
+
   let mut signers = HashMap::new();
   let mut dbs = HashMap::new();
   let mut t = 0;
@@ -88,9 +90,6 @@ async fn test_substrate_signer() {
       preprocesses: mut these_preprocesses,
     }) = signers.get_mut(&i).unwrap().events.pop_front().unwrap()
     {
-      if actual_id.id == [0; 32] {
-        actual_id.id = id.id;
-      }
       assert_eq!(id, actual_id);
       assert_eq!(batch_block, block);
       assert_eq!(these_preprocesses.len(), 1);
@@ -23,7 +23,7 @@ use serai_client::{
     InInstructionsEvent,
   },
 };
-use messages::{sign::SignId, SubstrateContext, CoordinatorMessage};
+use messages::{coordinator::BatchSignId, SubstrateContext, CoordinatorMessage};

 use crate::{*, tests::*};

@@ -33,10 +33,10 @@ pub async fn batch(
   substrate_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   batch: Batch,
 ) -> u64 {
-  let mut id = [0; 32];
+  let mut id = [0; 5];
   OsRng.fill_bytes(&mut id);
-  let id = SignId {
-    key: (<Ristretto as Ciphersuite>::generator() * **substrate_key).to_bytes().to_vec(),
+  let id = BatchSignId {
+    key: (<Ristretto as Ciphersuite>::generator() * **substrate_key).to_bytes(),
     id,
     attempt: 0,
   };
@@ -89,7 +89,7 @@ pub async fn key_gen<C: Ciphersuite>(
       (
         participant_is[usize::from(l)],
         vec![
-          u8::try_from(u16::try_from(participant_is[i]).unwrap()).unwrap(),
+          u8::try_from(u16::from(participant_is[i])).unwrap(),
           u8::try_from(u16::from(participant_is[usize::from(l)])).unwrap(),
         ],
       )
@@ -402,8 +402,7 @@ impl Wallet {
           view_pair.address(Network::Mainnet, AddressSpec::Standard),
         )
         .unwrap()
-        .try_into()
-        .unwrap(),
+        .into(),
       )
       .unwrap()
   }
@@ -5,8 +5,9 @@ use std::{

 use dkg::{Participant, tests::clone_without};

-use messages::{coordinator::PlanMeta, sign::SignId, SubstrateContext};
+use messages::{coordinator::*, SubstrateContext};

+use scale::Encode;
 use serai_client::{
   primitives::{
     BlockHash, Amount, Balance, crypto::RuntimePublic, PublicKey, SeraiAddress, NetworkId,
|
@ -25,8 +26,13 @@ pub(crate) async fn recv_batch_preprocesses(
|
||||||
substrate_key: &[u8; 32],
|
substrate_key: &[u8; 32],
|
||||||
batch: &Batch,
|
batch: &Batch,
|
||||||
attempt: u32,
|
attempt: u32,
|
||||||
) -> (SignId, HashMap<Participant, Vec<u8>>) {
|
) -> (BatchSignId, HashMap<Participant, Vec<u8>>) {
|
||||||
let mut id = None;
|
let id = BatchSignId {
|
||||||
|
key: *substrate_key,
|
||||||
|
id: (batch.network, batch.id).encode().try_into().unwrap(),
|
||||||
|
attempt,
|
||||||
|
};
|
||||||
|
|
||||||
let mut block = None;
|
let mut block = None;
|
||||||
let mut preprocesses = HashMap::new();
|
let mut preprocesses = HashMap::new();
|
||||||
for (i, coordinator) in coordinators.iter_mut().enumerate() {
|
for (i, coordinator) in coordinators.iter_mut().enumerate() {
|
||||||
|
@@ -51,13 +57,10 @@ pub(crate) async fn recv_batch_preprocesses(
         preprocesses: mut these_preprocesses,
       },
     ) => {
-      if id.is_none() {
-        assert_eq!(&this_id.key, substrate_key);
-        assert_eq!(this_id.attempt, attempt);
-        id = Some(this_id.clone());
+      assert_eq!(this_id, id);
+      if block.is_none() {
         block = Some(this_block);
       }
-      assert_eq!(&this_id, id.as_ref().unwrap());
       assert_eq!(&this_block, block.as_ref().unwrap());

       assert_eq!(these_preprocesses.len(), 1);
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
(id.unwrap(), preprocesses)
|
(id, preprocesses)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) async fn sign_batch(
|
pub(crate) async fn sign_batch(
|
||||||
coordinators: &mut [Coordinator],
|
coordinators: &mut [Coordinator],
|
||||||
key: [u8; 32],
|
key: [u8; 32],
|
||||||
id: SignId,
|
id: BatchSignId,
|
||||||
preprocesses: HashMap<Participant, Vec<u8>>,
|
preprocesses: HashMap<Participant, Vec<u8>>,
|
||||||
) -> SignedBatch {
|
) -> SignedBatch {
|
||||||
assert_eq!(preprocesses.len(), THRESHOLD);
|
assert_eq!(preprocesses.len(), THRESHOLD);
|
||||||
|
|