Support multiple key shares per validator (#416)

* Update the coordinator to assign key shares by weight, rather than one per validator

Participants are now identified by their starting index. While this compiles,
the following is unimplemented (see the sketch after this list):

1) A conversion for DKG `i` values. It assumes the threshold `i` values used
will be identical for the MuSig signature used to confirm the DKG.
2) Expansion from compressed values to full values before forwarding to the
processor.
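
A minimal sketch of the indexing scheme (a hypothetical helper, not the actual
`TributarySpec` API): a validator with weight `w` now owns `w` contiguous
1-based participant indices and is addressed by the first of them.

```rust
// Hypothetical illustration of weight-based participant indexing; not code
// from this PR
fn index_ranges(weights: &[u16]) -> Vec<(u16, u16)> {
    let mut start = 1;
    let mut ranges = vec![];
    for &w in weights {
        ranges.push((start, start + w)); // half-open: start .. start + w
        start += w;
    }
    ranges
}

fn main() {
    // Weights [2, 1, 3] yield ranges [1, 3), [3, 4), [4, 7): the second
    // validator is "participant 3" and the third is "participant 4"
    assert_eq!(index_ranges(&[2, 1, 3]), vec![(1, 3), (3, 4), (4, 7)]);
}
```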

* Add a fn to the DkgConfirmer to convert `i` values as needed

Also removes TODOs regarding Serai ensuring validator key uniqueness +
validity. The current infra achieves both.

* Have the Tributary DB track participation by shares, not by count

* Prevent a node from obtaining 34% of the maximum amount of key shares

This is mainly intended to set a bound on message sizes in the coordinator.
Message sizes are amplified by the amount of key shares held, so an upper bound
on that amount lets the coordinator derive its size constants. While that upper
bound could be 150, that'd be unreasonable and would increase the potential for
DoS attacks.
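
The arithmetic behind the cap, mirroring the `tx_size_limit` test added later
in this PR (a sketch assuming `MAX_KEY_SHARES_PER_SET = 150`):

```rust
const MAX_KEY_SHARES_PER_SET: u32 = 150; // assumed value

fn max_key_shares_per_individual() -> u32 {
    // The DKG threshold, t = ceil(2n / 3) + 1, fixes the coefficient count
    let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1; // 101
    MAX_KEY_SHARES_PER_SET - max_dkg_coefficients // 49, just under 34% of 150
}

fn main() {
    assert_eq!(max_key_shares_per_individual(), 49);
}
```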

* Correct the mechanism to detect if sufficient accumulation has occurred

It used to check if the latest accumulation exactly hit the required threshold.
Now that accumulations may jump past the required threshold, the correct
mechanism is to check that the threshold wasn't previously met and is now met.
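
A minimal sketch of the corrected check (the names mirror the `set_data` change
later in this diff):

```rust
// Accumulation now advances by a signer's full share count, so it can jump
// past `needed` rather than landing on it exactly
fn newly_crossed(prior_received: u16, now_received: u16, needed: u16) -> bool {
    (prior_received < needed) && (now_received >= needed)
}

#[test]
fn crossing_fires_exactly_once() {
    assert!(newly_crossed(3, 5, 4)); // jumped from 3 past a threshold of 4
    assert!(!newly_crossed(5, 7, 4)); // already met; doesn't fire a second time
}
```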

* Finish updating the coordinator to handle a multiple key share per validator environment

* Adjust strategy re: preventing nonce reuse in DKG Confirmer

* Add TODOs regarding dropped transactions, add possible TODO fix

* Update tests/coordinator

This doesn't add new multi-key-share tests; it solely updates the existing
single-key-share tests to compile and run, with the necessary fixes to the
coordinator.

* Update processor key_gen to handle generating multiple key shares at once

* Update SubstrateSigner

* Update signer, clippy

* Update processor tests

* Update processor docker tests
Luke Parker 2023-11-04 19:26:13 -04:00 committed by GitHub
parent 5970a455d0
commit e05b77d830
28 changed files with 866 additions and 437 deletions

View file

@ -114,8 +114,9 @@ impl<D: Db> MainDb<D> {
network: NetworkId,
id_type: RecognizedIdType,
id: [u8; 32],
preprocess: Vec<u8>,
preprocess: Vec<Vec<u8>>,
) {
let preprocess = preprocess.encode();
let key = Self::first_preprocess_key(network, id_type, id);
if let Some(existing) = txn.get(&key) {
assert_eq!(existing, preprocess, "saved a distinct first preprocess");
@ -128,8 +129,10 @@ impl<D: Db> MainDb<D> {
network: NetworkId,
id_type: RecognizedIdType,
id: [u8; 32],
) -> Option<Vec<u8>> {
getter.get(Self::first_preprocess_key(network, id_type, id))
) -> Option<Vec<Vec<u8>>> {
getter
.get(Self::first_preprocess_key(network, id_type, id))
.map(|bytes| Vec::<_>::decode(&mut bytes.as_slice()).unwrap())
}
fn last_received_batch_key(network: NetworkId) -> Vec<u8> {

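Since the first preprocesses are now a `Vec<Vec<u8>>` (one per key share), the
hunk above SCALE-encodes them into a single DB value. A minimal round-trip
sketch, assuming `scale` is parity-scale-codec as elsewhere in this repo:

```rust
use scale::{Decode, Encode}; // parity-scale-codec

fn round_trip() {
    // One preprocess per key share held, stored as one value so the
    // "saved a distinct first preprocess" assert compares the whole set
    let preprocesses: Vec<Vec<u8>> = vec![vec![1, 2, 3], vec![4, 5, 6]];
    let bytes = preprocesses.encode();
    assert_eq!(Vec::<Vec<u8>>::decode(&mut bytes.as_slice()).unwrap(), preprocesses);
}
```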
View file

@ -101,19 +101,16 @@ async fn add_tributary<D: Db, Pro: Processors, P: P2p>(
// If we're rebooting, we'll re-fire this message
// This is safe due to the message-queue deduplicating based off the intent system
let set = spec.set();
let our_i = spec
.i(Ristretto::generator() * key.deref())
.expect("adding a tributary for a set we aren't in set for");
processors
.send(
set.network,
processor_messages::key_gen::CoordinatorMessage::GenerateKey {
id: processor_messages::key_gen::KeyGenId { set, attempt: 0 },
params: frost::ThresholdParams::new(
spec.t(),
spec.n(),
spec
.i(Ristretto::generator() * key.deref())
.expect("adding a tributary for a set we aren't in set for"),
)
.unwrap(),
params: frost::ThresholdParams::new(spec.t(), spec.n(), our_i.start).unwrap(),
shares: u16::from(our_i.end) - u16::from(our_i.start),
},
)
.await;
@ -426,18 +423,29 @@ async fn handle_processor_message<D: Db, P: P2p>(
// Create a MuSig-based machine to inform Substrate of this key generation
let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, id.attempt);
let our_i = spec
.i(pub_key)
.expect("processor message to DKG for a session we aren't a validator in");
// `tx_shares` needs to be done here as while it can be serialized from the HashMap
// without further context, it can't be deserialized without context
let mut tx_shares = Vec::with_capacity(shares.len());
for i in 1 ..= spec.n() {
let i = Participant::new(i).unwrap();
if i ==
spec
.i(pub_key)
.expect("processor message to DKG for a session we aren't a validator in")
{
if our_i.contains(&i) {
for shares in &shares {
if shares.contains_key(&i) {
panic!("processor sent us our own shares");
}
}
continue;
}
tx_shares
.push(shares.remove(&i).expect("processor didn't send share for another validator"));
tx_shares.push(vec![]);
for shares in &mut shares {
tx_shares.last_mut().unwrap().push(
shares.remove(&i).expect("processor didn't send share for another validator"),
);
}
}
vec![Transaction::DkgShares {
@ -474,14 +482,14 @@ async fn handle_processor_message<D: Db, P: P2p>(
}
},
ProcessorMessage::Sign(msg) => match msg {
sign::ProcessorMessage::Preprocess { id, preprocess } => {
sign::ProcessorMessage::Preprocess { id, preprocesses } => {
if id.attempt == 0 {
MainDb::<D>::save_first_preprocess(
&mut txn,
network,
RecognizedIdType::Plan,
id.id,
preprocess,
preprocesses,
);
vec![]
@ -489,17 +497,19 @@ async fn handle_processor_message<D: Db, P: P2p>(
vec![Transaction::SignPreprocess(SignData {
plan: id.id,
attempt: id.attempt,
data: preprocess,
data: preprocesses,
signed: Transaction::empty_signed(),
})]
}
}
sign::ProcessorMessage::Share { id, share } => vec![Transaction::SignShare(SignData {
plan: id.id,
attempt: id.attempt,
data: share,
signed: Transaction::empty_signed(),
})],
sign::ProcessorMessage::Share { id, shares } => {
vec![Transaction::SignShare(SignData {
plan: id.id,
attempt: id.attempt,
data: shares,
signed: Transaction::empty_signed(),
})]
}
sign::ProcessorMessage::Completed { key: _, id, tx } => {
let r = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
#[allow(non_snake_case)]
@ -522,7 +532,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
},
ProcessorMessage::Coordinator(inner_msg) => match inner_msg {
coordinator::ProcessorMessage::SubstrateBlockAck { .. } => unreachable!(),
coordinator::ProcessorMessage::BatchPreprocess { id, block, preprocess } => {
coordinator::ProcessorMessage::BatchPreprocess { id, block, preprocesses } => {
log::info!(
"informed of batch (sign ID {}, attempt {}) for block {}",
hex::encode(id.id),
@ -538,7 +548,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
spec.set().network,
RecognizedIdType::Batch,
id.id,
preprocess,
preprocesses,
);
// If this is the new key's first Batch, only create this TX once we verify all
@ -550,6 +560,10 @@ async fn handle_processor_message<D: Db, P: P2p>(
if last_received != 0 {
// Decrease by 1, to get the ID of the Batch prior to this Batch
let prior_sets_last_batch = last_received - 1;
// TODO: If we're looping here, we're not handling the messages we need to in order
// to create the Batch we're looking for
// Don't have the processor yield the handover batch until the batch before is
// acknowledged on-chain?
loop {
let successfully_verified = substrate::verify_published_batches::<D>(
&mut txn,
@ -598,16 +612,16 @@ async fn handle_processor_message<D: Db, P: P2p>(
vec![Transaction::BatchPreprocess(SignData {
plan: id.id,
attempt: id.attempt,
data: preprocess,
data: preprocesses,
signed: Transaction::empty_signed(),
})]
}
}
coordinator::ProcessorMessage::BatchShare { id, share } => {
coordinator::ProcessorMessage::BatchShare { id, shares } => {
vec![Transaction::BatchShare(SignData {
plan: id.id,
attempt: id.attempt,
data: share.to_vec(),
data: shares.into_iter().map(|share| share.to_vec()).collect(),
signed: Transaction::empty_signed(),
})]
}

View file

@ -79,7 +79,7 @@ async fn handle_new_set<D: Db>(
.await?
.expect("validator selected for set yet didn't have an allocation")
.0;
set_data.push((participant, allocation / allocation_per_key_share));
set_data.push((participant, u16::try_from(allocation / allocation_per_key_share).unwrap()));
}
amortize_excess_key_shares(&mut set_data);
set_data

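The weight computation above in miniature (illustrative numbers;
`allocation_per_key_share` here is hypothetical, not Serai's actual constant):

```rust
fn key_shares(allocation: u64, allocation_per_key_share: u64) -> u16 {
    // Whole key shares only; any collective excess is then trimmed by
    // amortize_excess_key_shares
    u16::try_from(allocation / allocation_per_key_share).unwrap()
}

fn main() {
    // 250k allocated at 100k per key share yields a weight of 2
    assert_eq!(key_shares(250_000, 100_000), 2);
}
```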
View file

@ -47,7 +47,8 @@ async fn dkg_test() {
let mut commitments = vec![0; 256];
OsRng.fill_bytes(&mut commitments);
let mut tx = Transaction::DkgCommitments(attempt, commitments, Transaction::empty_signed());
let mut tx =
Transaction::DkgCommitments(attempt, vec![commitments], Transaction::empty_signed());
tx.sign(&mut OsRng, spec.genesis(), key, 0);
txs.push(tx);
}
@ -69,7 +70,7 @@ async fn dkg_test() {
.enumerate()
.map(|(i, tx)| {
if let Transaction::DkgCommitments(_, commitments, _) = tx {
(Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments.clone())
(Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone())
} else {
panic!("txs had non-commitments");
}
@ -165,7 +166,7 @@ async fn dkg_test() {
if i != k {
let mut share = vec![0; 256];
OsRng.fill_bytes(&mut share);
shares.push(share);
shares.push(vec![share]);
}
}
@ -213,7 +214,7 @@ async fn dkg_test() {
let shares_for = |i: usize| {
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
id: KeyGenId { set: spec.set(), attempt: 0 },
shares: txs
shares: vec![txs
.iter()
.enumerate()
.filter_map(|(l, tx)| {
@ -224,14 +225,14 @@ async fn dkg_test() {
let relative_i = i - (if i > l { 1 } else { 0 });
Some((
Participant::new((l + 1).try_into().unwrap()).unwrap(),
shares[relative_i].clone(),
shares[relative_i][0].clone(),
))
}
} else {
panic!("txs had non-shares");
}
})
.collect::<HashMap<_, _>>(),
.collect::<HashMap<_, _>>()],
})
};

View file

@ -36,7 +36,13 @@ fn random_sign_data<R: RngCore>(rng: &mut R) -> SignData {
plan,
attempt: random_u32(&mut OsRng),
data: random_vec(&mut OsRng, 512),
data: {
let mut res = vec![];
for _ in 0 .. ((rng.next_u64() % 255) + 1) {
res.push(random_vec(&mut OsRng, 512));
}
res
},
signed: random_signed(&mut OsRng),
}
@ -46,6 +52,32 @@ fn test_read_write<RW: Eq + Debug + ReadWrite>(value: RW) {
assert_eq!(value, RW::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap());
}
#[test]
fn tx_size_limit() {
use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, MAX_KEY_LEN};
use tributary::TRANSACTION_SIZE_LIMIT;
let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1;
let max_key_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients;
// Handwave the DKG Commitments size as the size of the commitments to the coefficients and
// 1024 bytes for all overhead
let handwaved_dkg_commitments_size = (max_dkg_coefficients * MAX_KEY_LEN) + 1024;
assert!(
u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
(handwaved_dkg_commitments_size * max_key_shares_per_individual)
);
// Encryption key, PoP (2 elements), message
let elements_per_share = 4;
let handwaved_dkg_shares_size =
(elements_per_share * MAX_KEY_LEN * MAX_KEY_SHARES_PER_SET) + 1024;
assert!(
u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
(handwaved_dkg_shares_size * max_key_shares_per_individual)
);
}
#[test]
fn serialize_sign_data() {
test_read_write(random_sign_data(&mut OsRng));
@ -53,23 +85,37 @@ fn serialize_sign_data() {
#[test]
fn serialize_transaction() {
test_read_write(Transaction::DkgCommitments(
random_u32(&mut OsRng),
random_vec(&mut OsRng, 512),
random_signed(&mut OsRng),
));
{
let mut commitments = vec![random_vec(&mut OsRng, 512)];
for _ in 0 .. (OsRng.next_u64() % 100) {
let mut temp = commitments[0].clone();
OsRng.fill_bytes(&mut temp);
commitments.push(temp);
}
test_read_write(Transaction::DkgCommitments(
random_u32(&mut OsRng),
commitments,
random_signed(&mut OsRng),
));
}
{
// This supports a variable share length, yet share length is expected to be constant among
// shares
let share_len = usize::try_from(OsRng.next_u64() % 512).unwrap();
// This supports a variable share length, and variable amount of sent shares, yet share length
// and sent shares is expected to be constant among recipients
let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap();
let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap();
// Create a valid vec of shares
let mut shares = vec![];
// Create up to 512 participants
for _ in 0 .. (OsRng.next_u64() % 512) {
let mut share = vec![0; share_len];
OsRng.fill_bytes(&mut share);
shares.push(share);
// Create up to 150 participants
for _ in 0 .. ((OsRng.next_u64() % 150) + 1) {
// Give each sender multiple shares
let mut sender_shares = vec![];
for _ in 0 .. amount_of_shares {
let mut share = vec![0; share_len];
OsRng.fill_bytes(&mut share);
sender_shares.push(share);
}
shares.push(sender_shares);
}
test_read_write(Transaction::DkgShares {

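Plugging assumed constants into the `tx_size_limit` bounds above
(`MAX_KEY_SHARES_PER_SET = 150`, `MAX_KEY_LEN = 96`,
`TRANSACTION_SIZE_LIMIT = 3_000_000`; none confirmed by this diff) shows the
shares transaction is the binding constraint:

```rust
const MAX_KEY_SHARES_PER_SET: u32 = 150; // assumed
const MAX_KEY_LEN: u32 = 96; // assumed
const TRANSACTION_SIZE_LIMIT: u32 = 3_000_000;

fn main() {
    let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1; // 101
    let max_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients; // 49
    let commitments = ((max_dkg_coefficients * MAX_KEY_LEN) + 1024) * max_shares_per_individual;
    let shares = ((4 * MAX_KEY_LEN * MAX_KEY_SHARES_PER_SET) + 1024) * max_shares_per_individual;
    assert_eq!(commitments, 525_280); // comfortably under the limit
    assert_eq!(shares, 2_872_576); // the binding constraint, still under 3 MB
    assert!((commitments <= TRANSACTION_SIZE_LIMIT) && (shares <= TRANSACTION_SIZE_LIMIT));
}
```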
View file

@ -40,7 +40,7 @@ async fn tx_test() {
// Create the TX with a null signature so we can get its sig hash
let block_before_tx = tributaries[sender].1.tip().await;
let mut tx =
Transaction::DkgCommitments(attempt, commitments.clone(), Transaction::empty_signed());
Transaction::DkgCommitments(attempt, vec![commitments.clone()], Transaction::empty_signed());
tx.sign(&mut OsRng, spec.genesis(), &key, 0);
assert_eq!(tributaries[sender].1.add_transaction(tx.clone()).await, Ok(true));

View file

@ -220,22 +220,23 @@ impl<D: Db> TributaryDb<D> {
) -> Option<Vec<u8>> {
getter.get(Self::data_key(genesis, data_spec, signer))
}
pub fn set_data(
fn set_data(
txn: &mut D::Transaction<'_>,
genesis: [u8; 32],
data_spec: &DataSpecification,
signer: <Ristretto as Ciphersuite>::G,
signer_shares: u16,
data: &[u8],
) -> u16 {
) -> (u16, u16) {
let received_key = Self::data_received_key(genesis, data_spec);
let mut received =
let prior_received =
u16::from_le_bytes(txn.get(&received_key).unwrap_or(vec![0; 2]).try_into().unwrap());
received += 1;
let received = prior_received + signer_shares;
txn.put(received_key, received.to_le_bytes());
txn.put(Self::data_key(genesis, data_spec, signer), data);
received
(prior_received, received)
}
fn event_key(id: &[u8], index: u32) -> Vec<u8> {
@ -273,17 +274,22 @@ impl<D: Db> TributaryState<D> {
if TributaryDb::<D>::data(txn, spec.genesis(), data_spec, signer).is_some() {
panic!("accumulating data for a participant multiple times");
}
let received = TributaryDb::<D>::set_data(txn, spec.genesis(), data_spec, signer, data);
let signer_shares = {
let signer_i =
spec.i(signer).expect("transaction signed by a non-validator for this tributary");
u16::from(signer_i.end) - u16::from(signer_i.start)
};
let (prior_received, now_received) =
TributaryDb::<D>::set_data(txn, spec.genesis(), data_spec, signer, signer_shares, data);
// If we have all the needed commitments/preprocesses/shares, tell the processor
// TODO: This needs to be coded by weight, not by validator count
let needed = if data_spec.topic == Topic::Dkg { spec.n() } else { spec.t() };
if received == needed {
if (prior_received < needed) && (now_received >= needed) {
return Accumulation::Ready({
let mut data = HashMap::new();
for validator in spec.validators().iter().map(|validator| validator.0) {
data.insert(
spec.i(validator).unwrap(),
spec.i(validator).unwrap().start,
if let Some(data) = TributaryDb::<D>::data(txn, spec.genesis(), data_spec, validator) {
data
} else {
@ -298,7 +304,8 @@ impl<D: Db> TributaryState<D> {
.remove(
&spec
.i(Ristretto::generator() * our_key.deref())
.expect("handling a message for a Tributary we aren't part of"),
.expect("handling a message for a Tributary we aren't part of")
.start,
)
.is_some()
{

View file

@ -66,30 +66,43 @@ use crate::tributary::TributarySpec;
1) The local view of received messages is static
2) The local process doesn't rebuild after a byzantine fault produces multiple blockchains
We assume the former. The latter is deemed acceptable but sub-optimal.
We assume the former. We can prevent the latter (TODO) by:
The benefit for this behavior is that on a validator's infrastructure collapsing, they can
successfully rebuild on a new system.
1) Defining a per-build entropy, used so long as a DB is used.
2) Checking the initially used commitments for the DKG align with the per-build entropy.
TODO: Replace this with entropy. If a validator happens to have their infrastructure fail at this
exact moment, they should just be kicked out and accept the loss. The risk of losing a private
key on rebuild, by a feature meant to enable rebuild, can't be successfully argued for.
If a rebuild occurs, which is the only way we could follow a distinct blockchain, our entropy
will change (preventing nonce reuse).
Not only do we need to use randomly selected entropy, we need to confirm our local preprocess
matches the on-chain preprocess before actually publishing our shares.
This will allow a validator to still participate in DKGs within a single build, even if they have
spontaneous reboots, and on collapse triggering a rebuild, they don't lose safety.
We also need to review how we're handling Processor preprocesses and likely implement the same
on-chain-preprocess-matches-presumed-preprocess check before publishing shares (though a delay of
the re-attempt protocol's trigger length would also be sufficient).
TODO: We also need to review how we're handling Processor preprocesses and likely implement the
same on-chain-preprocess-matches-presumed-preprocess check before publishing shares.
*/
pub(crate) struct DkgConfirmer;
impl DkgConfirmer {
// Convert the passed in HashMap, which uses the validators' start index for their `s` threshold
// shares, to the indexes needed for MuSig
fn from_threshold_i_to_musig_i(
spec: &TributarySpec,
mut old_map: HashMap<Participant, Vec<u8>>,
) -> HashMap<Participant, Vec<u8>> {
let mut new_map = HashMap::new();
for (new_i, validator) in spec.validators().into_iter().enumerate() {
let threshold_i = spec.i(validator.0).unwrap();
if let Some(value) = old_map.remove(&threshold_i.start) {
new_map.insert(Participant::new(u16::try_from(new_i + 1).unwrap()).unwrap(), value);
}
}
new_map
}
fn preprocess_internal(
spec: &TributarySpec,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
attempt: u32,
) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
// TODO: Does Substrate already have a validator-uniqueness check?
let validators = spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
let context = musig_context(spec.set());
@ -127,7 +140,7 @@ impl DkgConfirmer {
key_pair: &KeyPair,
) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
let machine = Self::preprocess_internal(spec, key, attempt).0;
let preprocesses = preprocesses
let preprocesses = Self::from_threshold_i_to_musig_i(spec, preprocesses)
.into_iter()
.map(|(p, preprocess)| {
machine
@ -173,7 +186,7 @@ impl DkgConfirmer {
.expect("trying to complete a machine which failed to preprocess")
.0;
let shares = shares
let shares = Self::from_threshold_i_to_musig_i(spec, shares)
.into_iter()
.map(|(p, share)| {
machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p)

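To illustrate `from_threshold_i_to_musig_i` (hypothetical weights, not a real
validator set): threshold indices are weight-based starting positions, while
MuSig, with one key per validator, numbers validators densely.

```rust
use std::collections::HashMap;

// `start` mirrors TributarySpec::i(validator).start; the MuSig index is simply
// the validator's 1-based position in the set
fn threshold_to_musig(weights: &[u16]) -> HashMap<u16, u16> {
    let mut map = HashMap::new();
    let mut start = 1;
    for (musig_i, &weight) in weights.iter().enumerate() {
        map.insert(start, u16::try_from(musig_i + 1).unwrap());
        start += weight;
    }
    map
}

fn main() {
    // Weights [2, 1, 3]: threshold starts 1, 3, 4 map to MuSig indices 1, 2, 3
    let map = threshold_to_musig(&[2, 1, 3]);
    assert_eq!((map[&1], map[&3], map[&4]), (1, 2, 3));
}
```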
View file

@ -1,10 +1,12 @@
use core::{ops::Deref, future::Future};
use std::collections::HashMap;
use zeroize::Zeroizing;
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::dkg::Participant;
use scale::{Encode, Decode};
use serai_client::{
Signature,
validator_sets::primitives::{ValidatorSet, KeyPair},
@ -142,16 +144,53 @@ pub(crate) async fn handle_application_tx<
TributaryState::<D>::accumulate(txn, key, spec, data_spec, signed.signer, &bytes)
};
fn check_sign_data_len<D: Db>(
txn: &mut D::Transaction<'_>,
spec: &TributarySpec,
signer: <Ristretto as Ciphersuite>::G,
len: usize,
) -> Result<(), ()> {
let signer_i = spec.i(signer).unwrap();
if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) {
fatal_slash::<D>(
txn,
spec.genesis(),
signer.to_bytes(),
"signer published a distinct amount of sign data than they had shares",
);
Err(())?;
}
Ok(())
}
fn unflatten(spec: &TributarySpec, data: &mut HashMap<Participant, Vec<u8>>) {
for (validator, _) in spec.validators() {
let range = spec.i(validator).unwrap();
let Some(all_segments) = data.remove(&range.start) else {
continue;
};
let mut data_vec = Vec::<_>::decode(&mut all_segments.as_slice()).unwrap();
for i in u16::from(range.start) .. u16::from(range.end) {
let i = Participant::new(i).unwrap();
data.insert(i, data_vec.remove(0));
}
}
}
match tx {
Transaction::DkgCommitments(attempt, bytes, signed) => {
Transaction::DkgCommitments(attempt, commitments, signed) => {
let Ok(_) = check_sign_data_len::<D>(txn, spec, signed.signer, commitments.len()) else {
return;
};
match handle(
txn,
&DataSpecification { topic: Topic::Dkg, label: DKG_COMMITMENTS, attempt },
bytes,
commitments.encode(),
&signed,
) {
Accumulation::Ready(DataSet::Participating(commitments)) => {
Accumulation::Ready(DataSet::Participating(mut commitments)) => {
log::info!("got all DkgCommitments for {}", hex::encode(genesis));
unflatten(spec, &mut commitments);
processors
.send(
spec.set().network,
@ -170,29 +209,59 @@ pub(crate) async fn handle_application_tx<
}
Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => {
if shares.len() != (usize::from(spec.n()) - 1) {
fatal_slash::<D>(txn, genesis, signed.signer.to_bytes(), "invalid amount of DKG shares");
return;
}
let sender_i = spec
.i(signed.signer)
.expect("transaction added to tributary by signer who isn't a participant");
let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start);
if shares.len() != (usize::from(spec.n() - sender_is_len)) {
fatal_slash::<D>(txn, genesis, signed.signer.to_bytes(), "invalid amount of DKG shares");
return;
}
for shares in &shares {
if shares.len() != usize::from(sender_is_len) {
fatal_slash::<D>(
txn,
genesis,
signed.signer.to_bytes(),
"invalid amount of DKG shares by key shares",
);
return;
}
}
// Only save our share's bytes
let our_i = spec
.i(Ristretto::generator() * key.deref())
.expect("in a tributary we're not a validator for");
let bytes = if sender_i == our_i {
let our_shares = if sender_i == our_i {
vec![]
} else {
// 1-indexed to 0-indexed, handling the omission of the sender's own data
let relative_i = usize::from(u16::from(our_i) - 1) -
(if u16::from(our_i) > u16::from(sender_i) { 1 } else { 0 });
// Safe since we length-checked shares
shares.swap_remove(relative_i)
// 1-indexed to 0-indexed
let mut our_i_pos = u16::from(our_i.start) - 1;
// Handle the omission of the sender's own data
if u16::from(our_i.start) > u16::from(sender_i.start) {
our_i_pos -= sender_is_len;
}
let our_i_pos = usize::from(our_i_pos);
let shares = shares
.drain(
our_i_pos .. (our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))),
)
.collect::<Vec<_>>();
// Transpose from our shares -> sender shares -> shares to
// sender shares -> our shares -> shares
let mut transposed = vec![vec![]; shares[0].len()];
for shares in shares {
for (sender_index, share) in shares.into_iter().enumerate() {
transposed[sender_index].push(share);
}
}
transposed
};
// Drop shares as it's been mutated into invalidity
drop(shares);
let confirmation_nonces = handle(
@ -204,7 +273,7 @@ pub(crate) async fn handle_application_tx<
match handle(
txn,
&DataSpecification { topic: Topic::Dkg, label: DKG_SHARES, attempt },
bytes,
our_shares.encode(),
&signed,
) {
Accumulation::Ready(DataSet::Participating(shares)) => {
@ -217,12 +286,36 @@ pub(crate) async fn handle_application_tx<
};
TributaryDb::<D>::save_confirmation_nonces(txn, genesis, attempt, confirmation_nonces);
// shares is a HashMap<Participant, Vec<Vec<Vec<u8>>>>, with the values representing:
// - Each of the sender's shares
// - Each of our shares
// - Each share
// We need a Vec<HashMap<Participant, Vec<u8>>>, with the outer being each of ours
let mut expanded_shares = vec![];
for (sender_start_i, shares) in shares {
let shares: Vec<Vec<Vec<u8>>> = Vec::<_>::decode(&mut shares.as_slice()).unwrap();
for (sender_i_offset, our_shares) in shares.into_iter().enumerate() {
for (our_share_i, our_share) in our_shares.into_iter().enumerate() {
if expanded_shares.len() <= our_share_i {
expanded_shares.push(HashMap::new());
}
expanded_shares[our_share_i].insert(
Participant::new(
u16::from(sender_start_i) + u16::try_from(sender_i_offset).unwrap(),
)
.unwrap(),
our_share,
);
}
}
}
processors
.send(
spec.set().network,
key_gen::CoordinatorMessage::Shares {
id: KeyGenId { set: spec.set(), attempt },
shares,
shares: expanded_shares,
},
)
.await;
@ -294,6 +387,9 @@ pub(crate) async fn handle_application_tx<
}
Transaction::BatchPreprocess(data) => {
let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
return;
};
match handle(
txn,
&DataSpecification {
@ -301,10 +397,11 @@ pub(crate) async fn handle_application_tx<
label: BATCH_PREPROCESS,
attempt: data.attempt,
},
data.data,
data.data.encode(),
&data.signed,
) {
Accumulation::Ready(DataSet::Participating(preprocesses)) => {
Accumulation::Ready(DataSet::Participating(mut preprocesses)) => {
unflatten(spec, &mut preprocesses);
NonceDecider::<D>::selected_for_signing_batch(txn, genesis, data.plan);
let key = TributaryDb::<D>::key_pair(txn, spec.set()).unwrap().0 .0.to_vec();
processors
@ -322,6 +419,9 @@ pub(crate) async fn handle_application_tx<
}
}
Transaction::BatchShare(data) => {
let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
return;
};
match handle(
txn,
&DataSpecification {
@ -329,10 +429,11 @@ pub(crate) async fn handle_application_tx<
label: BATCH_SHARE,
attempt: data.attempt,
},
data.data,
data.data.encode(),
&data.signed,
) {
Accumulation::Ready(DataSet::Participating(shares)) => {
Accumulation::Ready(DataSet::Participating(mut shares)) => {
unflatten(spec, &mut shares);
let key = TributaryDb::<D>::key_pair(txn, spec.set()).unwrap().0 .0.to_vec();
processors
.send(
@ -353,6 +454,9 @@ pub(crate) async fn handle_application_tx<
}
Transaction::SignPreprocess(data) => {
let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
return;
};
let key_pair = TributaryDb::<D>::key_pair(txn, spec.set());
match handle(
txn,
@ -361,10 +465,11 @@ pub(crate) async fn handle_application_tx<
label: SIGN_PREPROCESS,
attempt: data.attempt,
},
data.data,
data.data.encode(),
&data.signed,
) {
Accumulation::Ready(DataSet::Participating(preprocesses)) => {
Accumulation::Ready(DataSet::Participating(mut preprocesses)) => {
unflatten(spec, &mut preprocesses);
NonceDecider::<D>::selected_for_signing_plan(txn, genesis, data.plan);
processors
.send(
@ -388,6 +493,9 @@ pub(crate) async fn handle_application_tx<
}
}
Transaction::SignShare(data) => {
let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
return;
};
let key_pair = TributaryDb::<D>::key_pair(txn, spec.set());
match handle(
txn,
@ -396,10 +504,11 @@ pub(crate) async fn handle_application_tx<
label: SIGN_SHARE,
attempt: data.attempt,
},
data.data,
data.data.encode(),
&data.signed,
) {
Accumulation::Ready(DataSet::Participating(shares)) => {
Accumulation::Ready(DataSet::Participating(mut shares)) => {
unflatten(spec, &mut shares);
processors
.send(
spec.set().network,

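The share-matrix transposition in the `DkgShares` handler above, in miniature
(integers stand in for encrypted share blobs):

```rust
// Input is indexed [our key share][sender key share]; output is
// [sender key share][our key share], matching the handler's `transposed`
fn transpose(shares: Vec<Vec<u32>>) -> Vec<Vec<u32>> {
    let mut transposed = vec![vec![]; shares[0].len()];
    for row in shares {
        for (sender_index, share) in row.into_iter().enumerate() {
            transposed[sender_index].push(share);
        }
    }
    transposed
}

fn main() {
    // Two key shares of ours, from a sender who also holds two key shares
    assert_eq!(transpose(vec![vec![11, 12], vec![21, 22]]), vec![vec![11, 21], vec![12, 22]]);
}
```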
View file

@ -1,4 +1,4 @@
use core::ops::Deref;
use core::ops::{Deref, Range};
use std::io::{self, Read, Write};
use zeroize::Zeroizing;
@ -24,7 +24,8 @@ use serai_client::{
#[rustfmt::skip]
use tributary::{
ReadWrite,
transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait}
transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait},
TRANSACTION_SIZE_LIMIT,
};
mod db;
@ -45,7 +46,7 @@ pub struct TributarySpec {
serai_block: [u8; 32],
start_time: u64,
set: ValidatorSet,
validators: Vec<(<Ristretto as Ciphersuite>::G, u64)>,
validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>,
}
impl TributarySpec {
@ -53,12 +54,10 @@ impl TributarySpec {
serai_block: [u8; 32],
start_time: u64,
set: ValidatorSet,
set_participants: Vec<(PublicKey, u64)>,
set_participants: Vec<(PublicKey, u16)>,
) -> TributarySpec {
let mut validators = vec![];
for (participant, shares) in set_participants {
// TODO: Ban invalid keys from being validators on the Serai side
// (make coordinator key a session key?)
let participant = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut participant.0.as_ref())
.expect("invalid key registered as participant");
validators.push((participant, shares));
@ -88,31 +87,29 @@ impl TributarySpec {
}
pub fn n(&self) -> u16 {
// TODO: Support multiple key shares
// self.validators.iter().map(|(_, weight)| u16::try_from(weight).unwrap()).sum()
self.validators().len().try_into().unwrap()
self.validators.iter().map(|(_, weight)| weight).sum()
}
pub fn t(&self) -> u16 {
((2 * self.n()) / 3) + 1
}
pub fn i(&self, key: <Ristretto as Ciphersuite>::G) -> Option<Participant> {
pub fn i(&self, key: <Ristretto as Ciphersuite>::G) -> Option<Range<Participant>> {
let mut i = 1;
// TODO: Support multiple key shares
for (validator, _weight) in &self.validators {
for (validator, weight) in &self.validators {
if validator == &key {
// return (i .. (i + weight)).to_vec();
return Some(Participant::new(i).unwrap());
return Some(Range {
start: Participant::new(i).unwrap(),
end: Participant::new(i + weight).unwrap(),
});
}
// i += weight;
i += 1;
i += weight;
}
None
}
pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {
self.validators.clone()
self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect()
}
pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
@ -160,9 +157,9 @@ impl TributarySpec {
let mut validators = Vec::with_capacity(validators_len);
for _ in 0 .. validators_len {
let key = Ristretto::read_G(reader)?;
let mut bond = [0; 8];
reader.read_exact(&mut bond)?;
validators.push((key, u64::from_le_bytes(bond)));
let mut weight = [0; 2];
reader.read_exact(&mut weight)?;
validators.push((key, u16::from_le_bytes(weight)));
}
Ok(Self { serai_block, start_time, set: ValidatorSet { session, network }, validators })
@ -174,7 +171,7 @@ pub struct SignData {
pub plan: [u8; 32],
pub attempt: u32,
pub data: Vec<u8>,
pub data: Vec<Vec<u8>>,
pub signed: Signed,
}
@ -189,11 +186,20 @@ impl ReadWrite for SignData {
let attempt = u32::from_le_bytes(attempt);
let data = {
let mut data_len = [0; 2];
reader.read_exact(&mut data_len)?;
let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))];
reader.read_exact(&mut data)?;
data
let mut data_pieces = [0];
reader.read_exact(&mut data_pieces)?;
if data_pieces[0] == 0 {
Err(io::Error::new(io::ErrorKind::Other, "zero pieces of data in SignData"))?;
}
let mut all_data = vec![];
for _ in 0 .. data_pieces[0] {
let mut data_len = [0; 2];
reader.read_exact(&mut data_len)?;
let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))];
reader.read_exact(&mut data)?;
all_data.push(data);
}
all_data
};
let signed = Signed::read(reader)?;
@ -205,16 +211,21 @@ impl ReadWrite for SignData {
writer.write_all(&self.plan)?;
writer.write_all(&self.attempt.to_le_bytes())?;
if self.data.len() > u16::MAX.into() {
// Currently, the largest sign item would be a Monero transaction
// It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a
// key image and proof (96 bytes)
// Even with all of that, we could support 227 inputs in a single TX
// Monero is limited to ~120 inputs per TX
Err(io::Error::new(io::ErrorKind::Other, "signing data exceeded 65535 bytes"))?;
writer.write_all(&[u8::try_from(self.data.len()).unwrap()])?;
for data in &self.data {
if data.len() > u16::MAX.into() {
// Currently, the largest individual preprocess is a Monero transaction
// It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a
// key image and proof (96 bytes)
// Even with all of that, we could support 227 inputs in a single TX
// Monero is limited to ~120 inputs per TX
//
// Bitcoin has a much higher input count of 520, yet it only uses 64 bytes per preprocess
Err(io::Error::new(io::ErrorKind::Other, "signing data exceeded 65535 bytes"))?;
}
writer.write_all(&u16::try_from(data.len()).unwrap().to_le_bytes())?;
writer.write_all(data)?;
}
writer.write_all(&u16::try_from(self.data.len()).unwrap().to_le_bytes())?;
writer.write_all(&self.data)?;
self.signed.write(writer)
}
@ -223,10 +234,11 @@ impl ReadWrite for SignData {
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Transaction {
// Once this completes successfully, no more instances should be created.
DkgCommitments(u32, Vec<u8>, Signed),
DkgCommitments(u32, Vec<Vec<u8>>, Signed),
DkgShares {
attempt: u32,
shares: Vec<Vec<u8>>,
// Receiving Participant, Sending Participant, Share
shares: Vec<Vec<Vec<u8>>>,
confirmation_nonces: [u8; 64],
signed: Signed,
},
@ -273,10 +285,27 @@ impl ReadWrite for Transaction {
let attempt = u32::from_le_bytes(attempt);
let commitments = {
let mut commitments_len = [0; 2];
let mut commitments_len = [0; 1];
reader.read_exact(&mut commitments_len)?;
let mut commitments = vec![0; usize::from(u16::from_le_bytes(commitments_len))];
reader.read_exact(&mut commitments)?;
let commitments_len = usize::from(commitments_len[0]);
if commitments_len == 0 {
Err(io::Error::new(io::ErrorKind::Other, "zero commitments in DkgCommitments"))?;
}
let mut each_commitments_len = [0; 2];
reader.read_exact(&mut each_commitments_len)?;
let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len));
if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT {
Err(io::Error::new(
io::ErrorKind::Other,
"commitments present in transaction exceeded transaction size limit",
))?;
}
let mut commitments = vec![vec![]; commitments_len];
for commitments in &mut commitments {
*commitments = vec![0; each_commitments_len];
reader.read_exact(commitments)?;
}
commitments
};
@ -291,20 +320,27 @@ impl ReadWrite for Transaction {
let attempt = u32::from_le_bytes(attempt);
let shares = {
let mut share_quantity = [0; 2];
let mut share_quantity = [0; 1];
reader.read_exact(&mut share_quantity)?;
let mut key_share_quantity = [0; 1];
reader.read_exact(&mut key_share_quantity)?;
let mut share_len = [0; 2];
reader.read_exact(&mut share_len)?;
let share_len = usize::from(u16::from_le_bytes(share_len));
let mut shares = vec![];
for _ in 0 .. u16::from_le_bytes(share_quantity) {
let mut share = vec![0; share_len];
reader.read_exact(&mut share)?;
shares.push(share);
let mut all_shares = vec![];
for _ in 0 .. share_quantity[0] {
let mut shares = vec![];
for _ in 0 .. key_share_quantity[0] {
let mut share = vec![0; share_len];
reader.read_exact(&mut share)?;
shares.push(share);
}
all_shares.push(shares);
}
shares
all_shares
};
let mut confirmation_nonces = [0; 64];
@ -372,12 +408,22 @@ impl ReadWrite for Transaction {
Transaction::DkgCommitments(attempt, commitments, signed) => {
writer.write_all(&[0])?;
writer.write_all(&attempt.to_le_bytes())?;
if commitments.len() > u16::MAX.into() {
// t commitments and an encryption key mean a u16 is fine until a threshold > 2000 occurs
Err(io::Error::new(io::ErrorKind::Other, "dkg commitments exceeded 65535 bytes"))?;
if commitments.is_empty() {
Err(io::Error::new(io::ErrorKind::Other, "zero commitments in DkgCommitments"))?
}
writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?;
for commitments_i in commitments {
if commitments_i.len() != commitments[0].len() {
Err(io::Error::new(
io::ErrorKind::Other,
"commitments of differing sizes in DkgCommitments",
))?
}
}
writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?;
for commitments in commitments {
writer.write_all(commitments)?;
}
writer.write_all(&u16::try_from(commitments.len()).unwrap().to_le_bytes())?;
writer.write_all(commitments)?;
signed.write(writer)
}
@ -385,14 +431,12 @@ impl ReadWrite for Transaction {
writer.write_all(&[1])?;
writer.write_all(&attempt.to_le_bytes())?;
// `shares` is a Vec which maps to a HashMap<Participant, Vec<u8>> for any legitimate
// `DkgShares`. Since Participant has a range of 1 ..= u16::MAX, the length must be <
// u16::MAX. The only way for this to not be true if we were malicious, or if we read a
// `DkgShares` with a `shares.len() > u16::MAX`. The former is assumed untrue. The latter
// is impossible since we'll only read up to u16::MAX items.
writer.write_all(&u16::try_from(shares.len()).unwrap().to_le_bytes())?;
let share_len = shares.first().map(|share| share.len()).unwrap_or(0);
// `shares` is a Vec which is supposed to map to a HashMap<Participant, Vec<u8>>. Since we
// bound participants to 150, this conversion is safe if a valid in-memory transaction.
writer.write_all(&[u8::try_from(shares.len()).unwrap()])?;
// This assumes at least one share is being sent to another party
writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?;
let share_len = shares[0][0].len();
// For BLS12-381 G2, this would be:
// - A 32-byte share
// - A 96-byte ephemeral key
@ -400,9 +444,12 @@ impl ReadWrite for Transaction {
// Hence why this has to be u16
writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?;
for share in shares {
assert_eq!(share.len(), share_len, "shares were of variable length");
writer.write_all(share)?;
for these_shares in shares {
assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable");
for share in these_shares {
assert_eq!(share.len(), share_len, "sent shares were of variable length");
writer.write_all(share)?;
}
}
writer.write_all(confirmation_nonces)?;
@ -487,8 +534,10 @@ impl TransactionTrait for Transaction {
fn verify(&self) -> Result<(), TransactionError> {
if let Transaction::BatchShare(data) = self {
if data.data.len() != 32 {
Err(TransactionError::InvalidContent)?;
for data in &data.data {
if data.len() != 32 {
Err(TransactionError::InvalidContent)?;
}
}
}

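For reference, the wire layout implied by the new `SignData` read/write above,
sketched as a standalone writer (the `Signed` suffix is omitted; this is a
reading of the diff, not code from it):

```rust
use std::io::{self, Write};

// Layout: plan [32] || attempt [4, LE u32] || piece count [1, non-zero u8] ||
// per piece: length [2, LE u16] || bytes, then the Signed suffix
fn write_sign_data<W: Write>(
    w: &mut W,
    plan: &[u8; 32],
    attempt: u32,
    data: &[Vec<u8>], // one entry per key share held by the signer
) -> io::Result<()> {
    w.write_all(plan)?;
    w.write_all(&attempt.to_le_bytes())?;
    // A u8 count suffices: entries track key shares, bounded well below 256
    w.write_all(&[u8::try_from(data.len()).unwrap()])?;
    for piece in data {
        w.write_all(&u16::try_from(piece.len()).unwrap().to_le_bytes())?;
        w.write_all(piece)?;
    }
    Ok(())
}
```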
View file

@ -54,6 +54,9 @@ impl<D: Db> NonceDecider<D> {
Self::set_nonce(txn, genesis, BATCH_CODE, batch, nonce_for);
nonce_for
}
// TODO: The processor won't yield shares for this if the signing protocol aborts. We need to
// detect when we're expecting shares for an aborted protocol and insert a dummy transaction
// there.
pub fn selected_for_signing_batch(
txn: &mut D::Transaction<'_>,
genesis: [u8; 32],
@ -76,6 +79,7 @@ impl<D: Db> NonceDecider<D> {
}
res
}
// TODO: Same TODO as selected_for_signing_batch
pub fn selected_for_signing_plan(
txn: &mut D::Transaction<'_>,
genesis: [u8; 32],

View file

@ -47,13 +47,13 @@ pub(crate) use crate::tendermint::*;
pub mod tests;
/// Size limit for an individual transaction.
pub const TRANSACTION_SIZE_LIMIT: usize = 50_000;
pub const TRANSACTION_SIZE_LIMIT: usize = 3_000_000;
/// Amount of transactions a single account may have in the mempool.
pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
/// Block size limit.
// This targets a growth limit of roughly 5 GB a day, under load, in order to prevent a malicious
// This targets a growth limit of roughly 45 GB a day, under load, in order to prevent a malicious
// participant from flooding disks and causing out of space errors in other processes.
pub const BLOCK_SIZE_LIMIT: usize = 350_000;
pub const BLOCK_SIZE_LIMIT: usize = 3_001_000;
pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
pub(crate) const BLOCK_MESSAGE: u8 = 1;

View file

@ -33,11 +33,11 @@ pub mod key_gen {
pub enum CoordinatorMessage {
// Instructs the Processor to begin the key generation process.
// TODO: Should this be moved under Substrate?
GenerateKey { id: KeyGenId, params: ThresholdParams },
GenerateKey { id: KeyGenId, params: ThresholdParams, shares: u16 },
// Received commitments for the specified key generation protocol.
Commitments { id: KeyGenId, commitments: HashMap<Participant, Vec<u8>> },
// Received shares for the specified key generation protocol.
Shares { id: KeyGenId, shares: HashMap<Participant, Vec<u8>> },
Shares { id: KeyGenId, shares: Vec<HashMap<Participant, Vec<u8>>> },
}
impl CoordinatorMessage {
@ -49,9 +49,9 @@ pub mod key_gen {
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
pub enum ProcessorMessage {
// Created commitments for the specified key generation protocol.
Commitments { id: KeyGenId, commitments: Vec<u8> },
Commitments { id: KeyGenId, commitments: Vec<Vec<u8>> },
// Created shares for the specified key generation protocol.
Shares { id: KeyGenId, shares: HashMap<Participant, Vec<u8>> },
Shares { id: KeyGenId, shares: Vec<HashMap<Participant, Vec<u8>>> },
// Resulting keys from the specified key generation protocol.
GeneratedKeyPair { id: KeyGenId, substrate_key: [u8; 32], network_key: Vec<u8> },
}
@ -97,9 +97,9 @@ pub mod sign {
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, Encode, Decode, Serialize, Deserialize)]
pub enum ProcessorMessage {
// Created preprocess for the specified signing protocol.
Preprocess { id: SignId, preprocess: Vec<u8> },
Preprocess { id: SignId, preprocesses: Vec<Vec<u8>> },
// Signed share for the specified signing protocol.
Share { id: SignId, share: Vec<u8> },
Share { id: SignId, shares: Vec<Vec<u8>> },
// Completed a signing protocol already.
Completed { key: Vec<u8>, id: [u8; 32], tx: Vec<u8> },
}
@ -148,8 +148,8 @@ pub mod coordinator {
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, Encode, Decode, Serialize, Deserialize)]
pub enum ProcessorMessage {
SubstrateBlockAck { network: NetworkId, block: u64, plans: Vec<PlanMeta> },
BatchPreprocess { id: SignId, block: BlockHash, preprocess: Vec<u8> },
BatchShare { id: SignId, share: [u8; 32] },
BatchPreprocess { id: SignId, block: BlockHash, preprocesses: Vec<Vec<u8>> },
BatchShare { id: SignId, shares: Vec<[u8; 32]> },
}
}

View file

@ -23,8 +23,8 @@ use crate::{Get, DbTxn, Db, networks::Network};
#[derive(Debug)]
pub struct KeyConfirmed<C: Ciphersuite> {
pub substrate_keys: ThresholdKeys<Ristretto>,
pub network_keys: ThresholdKeys<C>,
pub substrate_keys: Vec<ThresholdKeys<Ristretto>>,
pub network_keys: Vec<ThresholdKeys<C>>,
}
#[derive(Clone, Debug)]
@ -37,10 +37,15 @@ impl<N: Network, D: Db> KeyGenDb<N, D> {
fn params_key(set: &ValidatorSet) -> Vec<u8> {
Self::key_gen_key(b"params", set.encode())
}
fn save_params(txn: &mut D::Transaction<'_>, set: &ValidatorSet, params: &ThresholdParams) {
txn.put(Self::params_key(set), bincode::serialize(params).unwrap());
fn save_params(
txn: &mut D::Transaction<'_>,
set: &ValidatorSet,
params: &ThresholdParams,
shares: u16,
) {
txn.put(Self::params_key(set), bincode::serialize(&(params, shares)).unwrap());
}
fn params<G: Get>(getter: &G, set: &ValidatorSet) -> Option<ThresholdParams> {
fn params<G: Get>(getter: &G, set: &ValidatorSet) -> Option<(ThresholdParams, u16)> {
getter.get(Self::params_key(set)).map(|bytes| bincode::deserialize(&bytes).unwrap())
}
@ -70,17 +75,23 @@ impl<N: Network, D: Db> KeyGenDb<N, D> {
fn save_keys(
txn: &mut D::Transaction<'_>,
id: &KeyGenId,
substrate_keys: &ThresholdCore<Ristretto>,
network_keys: &ThresholdKeys<N::Curve>,
substrate_keys: &[ThresholdCore<Ristretto>],
network_keys: &[ThresholdKeys<N::Curve>],
) {
let mut keys = substrate_keys.serialize();
keys.extend(network_keys.serialize().iter());
let mut keys = Zeroizing::new(vec![]);
for (substrate_keys, network_keys) in substrate_keys.iter().zip(network_keys) {
keys.extend(substrate_keys.serialize().as_slice());
keys.extend(network_keys.serialize().as_slice());
}
txn.put(
Self::generated_keys_key(
id.set,
(&substrate_keys.group_key().to_bytes(), network_keys.group_key().to_bytes().as_ref()),
(
&substrate_keys[0].group_key().to_bytes(),
network_keys[0].group_key().to_bytes().as_ref(),
),
),
keys,
&keys,
);
}
@ -91,54 +102,62 @@ impl<N: Network, D: Db> KeyGenDb<N, D> {
fn read_keys<G: Get>(
getter: &G,
key: &[u8],
) -> Option<(Vec<u8>, (ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>))> {
) -> Option<(Vec<u8>, (Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>))> {
let keys_vec = getter.get(key)?;
let mut keys_ref: &[u8] = keys_vec.as_ref();
let substrate_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap());
let mut network_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap());
N::tweak_keys(&mut network_keys);
let mut substrate_keys = vec![];
let mut network_keys = vec![];
while !keys_ref.is_empty() {
substrate_keys.push(ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap()));
let mut these_network_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap());
N::tweak_keys(&mut these_network_keys);
network_keys.push(these_network_keys);
}
Some((keys_vec, (substrate_keys, network_keys)))
}
fn confirm_keys(
txn: &mut D::Transaction<'_>,
set: ValidatorSet,
key_pair: KeyPair,
) -> (ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>) {
) -> (Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>) {
let (keys_vec, keys) =
Self::read_keys(txn, &Self::generated_keys_key(set, (&key_pair.0 .0, key_pair.1.as_ref())))
.unwrap();
assert_eq!(key_pair.0 .0, keys.0.group_key().to_bytes());
assert_eq!(key_pair.0 .0, keys.0[0].group_key().to_bytes());
assert_eq!(
{
let network_key: &[u8] = key_pair.1.as_ref();
network_key
},
keys.1.group_key().to_bytes().as_ref(),
keys.1[0].group_key().to_bytes().as_ref(),
);
txn.put(Self::keys_key(&keys.1.group_key()), keys_vec);
txn.put(Self::keys_key(&keys.1[0].group_key()), keys_vec);
keys
}
#[allow(clippy::type_complexity)]
fn keys<G: Get>(
getter: &G,
key: &<N::Curve as Ciphersuite>::G,
) -> Option<(ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>)> {
) -> Option<(Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>)> {
let res = Self::read_keys(getter, &Self::keys_key(key))?.1;
assert_eq!(&res.1.group_key(), key);
assert_eq!(&res.1[0].group_key(), key);
Some(res)
}
}
/// Coded so if the processor spontaneously reboots, one of two paths occur:
/// 1) It either didn't send its response, so the attempt will be aborted
/// 2) It did send its response, and has locally saved enough data to continue
type SecretShareMachines<N> =
Vec<(SecretShareMachine<Ristretto>, SecretShareMachine<<N as Network>::Curve>)>;
type KeyMachines<N> = Vec<(KeyMachine<Ristretto>, KeyMachine<<N as Network>::Curve>)>;
#[derive(Debug)]
pub struct KeyGen<N: Network, D: Db> {
db: D,
entropy: Zeroizing<[u8; 32]>,
active_commit:
HashMap<ValidatorSet, (SecretShareMachine<Ristretto>, SecretShareMachine<N::Curve>)>,
active_share: HashMap<ValidatorSet, (KeyMachine<Ristretto>, KeyMachine<N::Curve>)>,
active_commit: HashMap<ValidatorSet, (SecretShareMachines<N>, Vec<Vec<u8>>)>,
#[allow(clippy::type_complexity)]
active_share: HashMap<ValidatorSet, (KeyMachines<N>, Vec<HashMap<Participant, Vec<u8>>>)>,
}
impl<N: Network, D: Db> KeyGen<N, D> {
@ -152,10 +171,11 @@ impl<N: Network, D: Db> KeyGen<N, D> {
KeyGenDb::<N, D>::params(&self.db, set).is_some()
}
#[allow(clippy::type_complexity)]
pub fn keys(
&self,
key: &<N::Curve as Ciphersuite>::G,
) -> Option<(ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>)> {
) -> Option<(Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>)> {
// This is safe, despite not having a txn, since it's a static value
// The only concern is it may not be set when expected, or it may be set unexpectedly
//
@ -191,58 +211,35 @@ impl<N: Network, D: Db> KeyGen<N, D> {
let secret_shares_rng = |id| rng(b"Key Gen Secret Shares", id);
let share_rng = |id| rng(b"Key Gen Share", id);
let key_gen_machines = |id, params| {
let key_gen_machines = |id, params: ThresholdParams, shares| {
let mut rng = coefficients_rng(id);
let substrate = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng);
let network = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng);
((substrate.0, network.0), (substrate.1, network.1))
let mut machines = vec![];
let mut commitments = vec![];
for s in 0 .. shares {
let params = ThresholdParams::new(
params.t(),
params.n(),
Participant::new(u16::from(params.i()) + s).unwrap(),
)
.unwrap();
let substrate = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng);
let network = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng);
machines.push((substrate.0, network.0));
let mut serialized = vec![];
substrate.1.write(&mut serialized).unwrap();
network.1.write(&mut serialized).unwrap();
commitments.push(serialized);
}
(machines, commitments)
};
match msg {
CoordinatorMessage::GenerateKey { id, params } => {
info!("Generating new key. ID: {:?} Params: {:?}", id, params);
// Remove old attempts
if self.active_commit.remove(&id.set).is_none() &&
self.active_share.remove(&id.set).is_none()
{
// If we haven't handled this set before, save the params
KeyGenDb::<N, D>::save_params(txn, &id.set, &params);
}
let (machines, commitments) = key_gen_machines(id, params);
let mut serialized = commitments.0.serialize();
serialized.extend(commitments.1.serialize());
self.active_commit.insert(id.set, machines);
ProcessorMessage::Commitments { id, commitments: serialized }
}
CoordinatorMessage::Commitments { id, commitments } => {
info!("Received commitments for {:?}", id);
if self.active_share.contains_key(&id.set) {
// We should've been told of a new attempt before receiving commitments again
// The coordinator is either missing messages or repeating itself
// Either way, it's faulty
panic!("commitments when already handled commitments");
}
let params = KeyGenDb::<N, D>::params(txn, &id.set).unwrap();
// Unwrap the machines, rebuilding them if we didn't have them in our cache
// We won't if the processor rebooted
// This *may* be inconsistent if we receive a KeyGen for attempt x, then commitments for
// attempt y
// The coordinator is trusted to be proper in this regard
let machines =
self.active_commit.remove(&id.set).unwrap_or_else(|| key_gen_machines(id, params).0);
let secret_share_machines =
|id,
params: ThresholdParams,
(machines, our_commitments): (SecretShareMachines<N>, Vec<Vec<u8>>),
commitments: HashMap<Participant, Vec<u8>>| {
let mut rng = secret_shares_rng(id);
let mut commitments_ref: HashMap<Participant, &[u8]> =
commitments.iter().map(|(i, commitments)| (*i, commitments.as_ref())).collect();
#[allow(clippy::type_complexity)]
fn handle_machine<C: Ciphersuite>(
rng: &mut ChaCha20Rng,
@ -269,26 +266,88 @@ impl<N: Network, D: Db> KeyGen<N, D> {
}
}
let (substrate_machine, mut substrate_shares) =
handle_machine::<Ristretto>(&mut rng, params, machines.0, &mut commitments_ref);
let (network_machine, network_shares) =
handle_machine(&mut rng, params, machines.1, &mut commitments_ref);
for (_, commitments) in commitments_ref {
if !commitments.is_empty() {
todo!("malicious signer: extra bytes");
let mut key_machines = vec![];
let mut shares = vec![];
for (m, (substrate_machine, network_machine)) in machines.into_iter().enumerate() {
let mut commitments_ref: HashMap<Participant, &[u8]> =
commitments.iter().map(|(i, commitments)| (*i, commitments.as_ref())).collect();
for (i, our_commitments) in our_commitments.iter().enumerate() {
if m != i {
assert!(commitments_ref
.insert(
Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(),
our_commitments.as_ref(),
)
.is_none());
}
}
let (substrate_machine, mut substrate_shares) =
handle_machine::<Ristretto>(&mut rng, params, substrate_machine, &mut commitments_ref);
let (network_machine, network_shares) =
handle_machine(&mut rng, params, network_machine, &mut commitments_ref);
key_machines.push((substrate_machine, network_machine));
for (_, commitments) in commitments_ref {
if !commitments.is_empty() {
todo!("malicious signer: extra bytes");
}
}
let mut these_shares: HashMap<_, _> =
substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect();
for (i, share) in these_shares.iter_mut() {
share.extend(network_shares[i].serialize());
}
shares.push(these_shares);
}
(key_machines, shares)
};
match msg {
CoordinatorMessage::GenerateKey { id, params, shares } => {
info!("Generating new key. ID: {id:?} Params: {params:?} Shares: {shares}");
// Remove old attempts
if self.active_commit.remove(&id.set).is_none() &&
self.active_share.remove(&id.set).is_none()
{
// If we haven't handled this set before, save the params
KeyGenDb::<N, D>::save_params(txn, &id.set, &params, shares);
}
self.active_share.insert(id.set, (substrate_machine, network_machine));
let (machines, commitments) = key_gen_machines(id, params, shares);
self.active_commit.insert(id.set, (machines, commitments.clone()));
let mut shares: HashMap<_, _> =
substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect();
for (i, share) in shares.iter_mut() {
share.extend(network_shares[i].serialize());
ProcessorMessage::Commitments { id, commitments }
}
CoordinatorMessage::Commitments { id, commitments } => {
info!("Received commitments for {:?}", id);
if self.active_share.contains_key(&id.set) {
// We should've been told of a new attempt before receiving commitments again
// The coordinator is either missing messages or repeating itself
// Either way, it's faulty
panic!("commitments when already handled commitments");
}
let (params, share_quantity) = KeyGenDb::<N, D>::params(txn, &id.set).unwrap();
// Unwrap the machines, rebuilding them if we didn't have them in our cache
// We won't if the processor rebooted
// This *may* be inconsistent if we receive a KeyGen for attempt x, then commitments for
// attempt y
// The coordinator is trusted to be proper in this regard
let prior = self
.active_commit
.remove(&id.set)
.unwrap_or_else(|| key_gen_machines(id, params, share_quantity));
KeyGenDb::<N, D>::save_commitments(txn, &id, &commitments);
let (machines, shares) = secret_share_machines(id, params, prior, commitments);
self.active_share.insert(id.set, (machines, shares.clone()));
ProcessorMessage::Shares { id, shares }
}
@ -296,48 +355,16 @@ impl<N: Network, D: Db> KeyGen<N, D> {
CoordinatorMessage::Shares { id, shares } => {
info!("Received shares for {:?}", id);
let params = KeyGenDb::<N, D>::params(txn, &id.set).unwrap();
let (params, share_quantity) = KeyGenDb::<N, D>::params(txn, &id.set).unwrap();
// Same commentary on inconsistency as above exists
let machines = self.active_share.remove(&id.set).unwrap_or_else(|| {
let machines = key_gen_machines(id, params).0;
let mut rng = secret_shares_rng(id);
let commitments = KeyGenDb::<N, D>::commitments(txn, &id);
let mut commitments_ref: HashMap<Participant, &[u8]> =
commitments.iter().map(|(i, commitments)| (*i, commitments.as_ref())).collect();
fn parse_commitments<C: Ciphersuite>(
params: ThresholdParams,
commitments_ref: &mut HashMap<Participant, &[u8]>,
) -> HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>> {
commitments_ref
.iter_mut()
.map(|(i, commitments)| {
(*i, EncryptionKeyMessage::<C, Commitments<C>>::read(commitments, params).unwrap())
})
.collect()
}
(
machines
.0
.generate_secret_shares(&mut rng, parse_commitments(params, &mut commitments_ref))
.unwrap()
.0,
machines
.1
.generate_secret_shares(&mut rng, parse_commitments(params, &mut commitments_ref))
.unwrap()
.0,
)
let (machines, our_shares) = self.active_share.remove(&id.set).unwrap_or_else(|| {
let prior = key_gen_machines(id, params, share_quantity);
secret_share_machines(id, params, prior, KeyGenDb::<N, D>::commitments(txn, &id))
});
let mut rng = share_rng(id);
let mut shares_ref: HashMap<Participant, &[u8]> =
shares.iter().map(|(i, shares)| (*i, shares.as_ref())).collect();
fn handle_machine<C: Ciphersuite>(
rng: &mut ChaCha20Rng,
params: ThresholdParams,
@ -364,24 +391,58 @@ impl<N: Network, D: Db> KeyGen<N, D> {
.complete()
}
let substrate_keys = handle_machine(&mut rng, params, machines.0, &mut shares_ref);
let network_keys = handle_machine(&mut rng, params, machines.1, &mut shares_ref);
for (_, shares) in shares_ref {
if !shares.is_empty() {
todo!("malicious signer: extra bytes");
let mut substrate_keys = vec![];
let mut network_keys = vec![];
for (m, machines) in machines.into_iter().enumerate() {
let mut shares_ref: HashMap<Participant, &[u8]> =
shares[m].iter().map(|(i, shares)| (*i, shares.as_ref())).collect();
for (i, our_shares) in our_shares.iter().enumerate() {
if m != i {
assert!(shares_ref
.insert(
Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(),
our_shares
[&Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap()]
.as_ref(),
)
.is_none());
}
}
let these_substrate_keys = handle_machine(&mut rng, params, machines.0, &mut shares_ref);
let these_network_keys = handle_machine(&mut rng, params, machines.1, &mut shares_ref);
for (_, shares) in shares_ref {
if !shares.is_empty() {
todo!("malicious signer: extra bytes");
}
}
let mut these_network_keys = ThresholdKeys::new(these_network_keys);
N::tweak_keys(&mut these_network_keys);
substrate_keys.push(these_substrate_keys);
network_keys.push(these_network_keys);
}
let mut network_keys = ThresholdKeys::new(network_keys);
N::tweak_keys(&mut network_keys);
let mut generated_substrate_key = None;
let mut generated_network_key = None;
for keys in substrate_keys.iter().zip(&network_keys) {
if generated_substrate_key.is_none() {
generated_substrate_key = Some(keys.0.group_key());
generated_network_key = Some(keys.1.group_key());
} else {
assert_eq!(generated_substrate_key, Some(keys.0.group_key()));
assert_eq!(generated_network_key, Some(keys.1.group_key()));
}
}
KeyGenDb::<N, D>::save_keys(txn, &id, &substrate_keys, &network_keys);
ProcessorMessage::GeneratedKeyPair {
id,
substrate_key: substrate_keys.group_key().to_bytes(),
network_key: network_keys.group_key().to_bytes().as_ref().to_vec(),
substrate_key: generated_substrate_key.unwrap().to_bytes(),
network_key: generated_network_key.unwrap().to_bytes().as_ref().to_vec(),
}
}
}
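The share-stitching loop above is the heart of multi-share support: a validator whose first key share has Participant index i also acts as participants i+1 .. i+n, so while finishing machine m it must be handed the shares its own sibling machines generated for participant i + m, keyed by each sibling's index. A minimal, self-contained sketch of that index arithmetic (sibling_shares, start, and the byte-vector share type are illustrative, not the crate's API):

use std::collections::HashMap;

// Sketch only: `start` is our first participant index (1-based), `n` how many
// key shares we hold, `m` which of our machines is being finished.
// `our_shares[j]` maps recipient participant -> share bytes from our machine j.
fn sibling_shares(
  start: u16,
  n: u16,
  m: u16,
  our_shares: &[HashMap<u16, Vec<u8>>],
) -> HashMap<u16, Vec<u8>> {
  let mut merged = HashMap::new();
  for j in 0 .. n {
    if j != m {
      // Our machine j generated a share for participant start + m; machine m
      // receives it under the sender's index, start + j.
      let share = our_shares[usize::from(j)][&(start + m)].clone();
      assert!(merged.insert(start + j, share).is_none());
    }
  }
  merged
}

The consistency loop that follows then holds by construction: every machine is a participant in the same DKG session, so each must derive the same group key.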
@ -393,12 +454,12 @@ impl<N: Network, D: Db> KeyGen<N, D> {
set: ValidatorSet,
key_pair: KeyPair,
) -> KeyConfirmed<N::Curve> {
let (substrate_keys, network_keys) = KeyGenDb::<N, D>::confirm_keys(txn, set, key_pair);
let (substrate_keys, network_keys) = KeyGenDb::<N, D>::confirm_keys(txn, set, key_pair.clone());
info!(
"Confirmed key pair {} {} for set {:?}",
hex::encode(substrate_keys.group_key().to_bytes()),
hex::encode(network_keys.group_key().to_bytes()),
hex::encode(key_pair.0),
hex::encode(key_pair.1),
set,
);

View file

@ -424,7 +424,7 @@ async fn boot<N: Network, D: Db>(
for (i, key) in current_keys.iter().enumerate() {
let Some((substrate_keys, network_keys)) = key_gen.keys(key) else { continue };
let network_key = network_keys.group_key();
let network_key = network_keys[0].group_key();
// If this is the oldest key, load the SubstrateSigner for it as the active SubstrateSigner
// The new key only takes responsibility once the old key is fully deprecated

View file

@ -142,23 +142,26 @@ impl<N: Network, D: Db> SignerDb<N, D> {
}
}
type PreprocessFor<N> = <<N as Network>::TransactionMachine as PreprocessMachine>::Preprocess;
type SignMachineFor<N> = <<N as Network>::TransactionMachine as PreprocessMachine>::SignMachine;
type SignatureShareFor<N> =
<SignMachineFor<N> as SignMachine<<N as Network>::Transaction>>::SignatureShare;
type SignatureMachineFor<N> =
<SignMachineFor<N> as SignMachine<<N as Network>::Transaction>>::SignatureMachine;
pub struct Signer<N: Network, D: Db> {
db: PhantomData<D>,
network: N,
keys: ThresholdKeys<N::Curve>,
keys: Vec<ThresholdKeys<N::Curve>>,
signable: HashMap<[u8; 32], N::SignableTransaction>,
attempt: HashMap<[u8; 32], u32>,
preprocessing: HashMap<[u8; 32], <N::TransactionMachine as PreprocessMachine>::SignMachine>,
#[allow(clippy::type_complexity)]
signing: HashMap<
[u8; 32],
<
<N::TransactionMachine as PreprocessMachine>::SignMachine as SignMachine<N::Transaction>
>::SignatureMachine,
>,
preprocessing: HashMap<[u8; 32], (Vec<SignMachineFor<N>>, Vec<PreprocessFor<N>>)>,
#[allow(clippy::type_complexity)]
signing: HashMap<[u8; 32], (SignatureMachineFor<N>, Vec<SignatureShareFor<N>>)>,
pub events: VecDeque<SignerEvent<N>>,
}
@ -194,7 +197,8 @@ impl<N: Network, D: Db> Signer<N, D> {
tokio::time::sleep(core::time::Duration::from_secs(5 * 60)).await;
}
}
pub fn new(network: N, keys: ThresholdKeys<N::Curve>) -> Signer<N, D> {
pub fn new(network: N, keys: Vec<ThresholdKeys<N::Curve>>) -> Signer<N, D> {
assert!(!keys.is_empty());
Signer {
db: PhantomData,
@ -329,7 +333,7 @@ impl<N: Network, D: Db> Signer<N, D> {
assert!(!SignerDb::<N, D>::completions(txn, id).is_empty());
info!(
"signer {} informed of the eventuality completion for plan {}, {}",
hex::encode(self.keys.group_key().to_bytes()),
hex::encode(self.keys[0].group_key().to_bytes()),
hex::encode(id),
"which we already marked as completed",
);
@ -370,7 +374,7 @@ impl<N: Network, D: Db> Signer<N, D> {
// Update the attempt number
self.attempt.insert(id, attempt);
let id = SignId { key: self.keys.group_key().to_bytes().as_ref().to_vec(), id, attempt };
let id = SignId { key: self.keys[0].group_key().to_bytes().as_ref().to_vec(), id, attempt };
info!("signing for {} #{}", hex::encode(id.id), id.attempt);
@ -398,25 +402,34 @@ impl<N: Network, D: Db> Signer<N, D> {
SignerDb::<N, D>::attempt(txn, &id);
// Attempt to create the TX
let machine = match self.network.attempt_send(self.keys.clone(), tx).await {
Err(e) => {
error!("failed to attempt {}, #{}: {:?}", hex::encode(id.id), id.attempt, e);
return;
}
Ok(machine) => machine,
};
let mut machines = vec![];
let mut preprocesses = vec![];
let mut serialized_preprocesses = vec![];
for keys in &self.keys {
let machine = match self.network.attempt_send(keys.clone(), tx.clone()).await {
Err(e) => {
error!("failed to attempt {}, #{}: {:?}", hex::encode(id.id), id.attempt, e);
return;
}
Ok(machine) => machine,
};
// TODO: Use a seeded RNG here so we don't produce distinct messages with the same intent
// This is also needed so we don't preprocess, send preprocess, reboot before ack'ing the
// message, send distinct preprocess, and then attempt a signing session premised on the former
// with the latter
let (machine, preprocess) = machine.preprocess(&mut OsRng);
self.preprocessing.insert(id.id, machine);
// TODO: Use a seeded RNG here so we don't produce distinct messages with the same intent
// This also prevents the following failure: preprocess, send the preprocess, reboot
// before ack'ing the message, send a distinct preprocess, and then attempt a signing
// session premised on the first preprocess while peers hold the second
let (machine, preprocess) = machine.preprocess(&mut OsRng);
machines.push(machine);
serialized_preprocesses.push(preprocess.serialize());
preprocesses.push(preprocess);
}
self.preprocessing.insert(id.id, (machines, preprocesses));
// Broadcast our preprocess
self.events.push_back(SignerEvent::ProcessorMessage(ProcessorMessage::Preprocess {
id,
preprocess: preprocess.serialize(),
preprocesses: serialized_preprocesses,
}));
}
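One hedged sketch of what the seeded-RNG TODO could look like (assuming the sha2 and rand_chacha crates; preprocess_rng is hypothetical, not this commit's code): derive the preprocess RNG from the key, sign ID, and attempt, so a rebooted node re-derives the same preprocess rather than sending a distinct one. Deterministic nonces are only sound if the seed binds everything that distinguishes attempts and no seed is ever used to sign twice. The same applies to the identical TODO in SubstrateSigner below.

use sha2::{Digest, Sha256};
use rand_core::SeedableRng;
use rand_chacha::ChaCha20Rng;

// Hypothetical helper, not part of this commit: a domain-separated hash of
// everything identifying this attempt seeds the preprocess RNG, so a reboot
// reproduces the same preprocess instead of producing a distinct one.
fn preprocess_rng(key: &[u8], id: [u8; 32], attempt: u32) -> ChaCha20Rng {
  let mut hasher = Sha256::new();
  hasher.update(b"signer preprocess"); // domain separator
  hasher.update(key);
  hasher.update(id);
  hasher.update(attempt.to_le_bytes());
  ChaCha20Rng::from_seed(hasher.finalize().into())
}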
@ -448,7 +461,7 @@ impl<N: Network, D: Db> Signer<N, D> {
return;
}
let machine = match self.preprocessing.remove(&id.id) {
let (machines, our_preprocesses) = match self.preprocessing.remove(&id.id) {
// Either rebooted or RPC error, or some invariant
None => {
warn!(
@ -464,7 +477,7 @@ impl<N: Network, D: Db> Signer<N, D> {
.drain()
.map(|(l, preprocess)| {
let mut preprocess_ref = preprocess.as_ref();
let res = machine
let res = machines[0]
.read_preprocess::<&[u8]>(&mut preprocess_ref)
.map(|preprocess| (l, preprocess));
if !preprocess_ref.is_empty() {
@ -472,23 +485,41 @@ impl<N: Network, D: Db> Signer<N, D> {
}
res
})
.collect::<Result<_, _>>()
.collect::<Result<HashMap<_, _>, _>>()
{
Ok(preprocesses) => preprocesses,
Err(e) => todo!("malicious signer: {:?}", e),
};
// Use an empty message, as expected of TransactionMachines
let (machine, share) = match machine.sign(preprocesses, &[]) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
};
self.signing.insert(id.id, machine);
// Only keep a single machine as we only need one to get the signature
let mut signature_machine = None;
let mut shares = vec![];
let mut serialized_shares = vec![];
for (m, machine) in machines.into_iter().enumerate() {
let mut preprocesses = preprocesses.clone();
for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() {
if i != m {
assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none());
}
}
// Broadcast our share
// Use an empty message, as expected of TransactionMachines
let (machine, share) = match machine.sign(preprocesses, &[]) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
};
if m == 0 {
signature_machine = Some(machine);
}
serialized_shares.push(share.serialize());
shares.push(share);
}
self.signing.insert(id.id, (signature_machine.unwrap(), shares));
// Broadcast our shares
self.events.push_back(SignerEvent::ProcessorMessage(ProcessorMessage::Share {
id,
share: share.serialize(),
shares: serialized_shares,
}));
}
@ -497,7 +528,7 @@ impl<N: Network, D: Db> Signer<N, D> {
return;
}
let machine = match self.signing.remove(&id.id) {
let (machine, our_shares) = match self.signing.remove(&id.id) {
// Rebooted, RPC error, or some invariant
None => {
// If preprocessing has this ID, it means we were never sent the preprocess by the
@ -515,7 +546,7 @@ impl<N: Network, D: Db> Signer<N, D> {
Some(machine) => machine,
};
let shares = match shares
let mut shares = match shares
.drain()
.map(|(l, share)| {
let mut share_ref = share.as_ref();
@ -525,12 +556,16 @@ impl<N: Network, D: Db> Signer<N, D> {
}
res
})
.collect::<Result<_, _>>()
.collect::<Result<HashMap<_, _>, _>>()
{
Ok(shares) => shares,
Err(e) => todo!("malicious signer: {:?}", e),
};
for (i, our_share) in our_shares.into_iter().enumerate().skip(1) {
assert!(shares.insert(self.keys[i].params().i(), our_share).is_none());
}
let tx = match machine.complete(shares) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
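The skip(1) above reflects that the kept SignatureMachine is machine 0's, whose own share was already consumed by its sign call; completion only needs our remaining shares merged in, keyed by each sibling key share's participant index. A self-contained sketch (merge_own_shares and start are illustrative names):

use std::collections::HashMap;

// Sketch: `start` is our first participant index; `our_shares[0]` belongs to
// the kept machine itself, so only indices 1.. are merged in.
fn merge_own_shares(
  start: u16,
  our_shares: Vec<Vec<u8>>,
  shares: &mut HashMap<u16, Vec<u8>>,
) {
  for (i, share) in our_shares.into_iter().enumerate().skip(1) {
    assert!(shares.insert(start + u16::try_from(i).unwrap(), share).is_none());
  }
}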

View file

@ -8,6 +8,7 @@ use ciphersuite::group::GroupEncoding;
use frost::{
curve::Ristretto,
ThresholdKeys,
algorithm::Algorithm,
sign::{
Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine,
AlgorithmSignMachine, AlgorithmSignatureMachine,
@ -77,16 +78,25 @@ impl<D: Db> SubstrateSignerDb<D> {
}
}
type Preprocess = <AlgorithmMachine<Ristretto, Schnorrkel> as PreprocessMachine>::Preprocess;
type SignatureShare = <AlgorithmSignMachine<Ristretto, Schnorrkel> as SignMachine<
<Schnorrkel as Algorithm<Ristretto>>::Signature,
>>::SignatureShare;
pub struct SubstrateSigner<D: Db> {
db: PhantomData<D>,
network: NetworkId,
keys: ThresholdKeys<Ristretto>,
keys: Vec<ThresholdKeys<Ristretto>>,
signable: HashMap<[u8; 32], Batch>,
attempt: HashMap<[u8; 32], u32>,
preprocessing: HashMap<[u8; 32], AlgorithmSignMachine<Ristretto, Schnorrkel>>,
signing: HashMap<[u8; 32], AlgorithmSignatureMachine<Ristretto, Schnorrkel>>,
#[allow(clippy::type_complexity)]
preprocessing:
HashMap<[u8; 32], (Vec<AlgorithmSignMachine<Ristretto, Schnorrkel>>, Vec<Preprocess>)>,
#[allow(clippy::type_complexity)]
signing:
HashMap<[u8; 32], (AlgorithmSignatureMachine<Ristretto, Schnorrkel>, Vec<SignatureShare>)>,
pub events: VecDeque<SubstrateSignerEvent>,
}
@ -102,7 +112,8 @@ impl<D: Db> fmt::Debug for SubstrateSigner<D> {
}
impl<D: Db> SubstrateSigner<D> {
pub fn new(network: NetworkId, keys: ThresholdKeys<Ristretto>) -> SubstrateSigner<D> {
pub fn new(network: NetworkId, keys: Vec<ThresholdKeys<Ristretto>>) -> SubstrateSigner<D> {
assert!(!keys.is_empty());
SubstrateSigner {
db: PhantomData,
@ -178,7 +189,7 @@ impl<D: Db> SubstrateSigner<D> {
// Update the attempt number
self.attempt.insert(id, attempt);
let id = SignId { key: self.keys.group_key().to_bytes().to_vec(), id, attempt };
let id = SignId { key: self.keys[0].group_key().to_bytes().to_vec(), id, attempt };
info!("signing batch {} #{}", hex::encode(id.id), id.attempt);
// If we reboot mid-sign, the current design has us abort all signs and wait for latter
@ -204,19 +215,27 @@ impl<D: Db> SubstrateSigner<D> {
SubstrateSignerDb::<D>::attempt(txn, &id);
// b"substrate" is a literal from sp-core
let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), self.keys.clone());
let mut machines = vec![];
let mut preprocesses = vec![];
let mut serialized_preprocesses = vec![];
for keys in &self.keys {
// b"substrate" is a literal from sp-core
let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys.clone());
// TODO: Use a seeded RNG here so we don't produce distinct messages with the same intent
// This is also needed so we don't preprocess, send preprocess, reboot before ack'ing the
// message, send distinct preprocess, and then attempt a signing session premised on the former
// with the latter
let (machine, preprocess) = machine.preprocess(&mut OsRng);
self.preprocessing.insert(id.id, machine);
// TODO: Use a seeded RNG here so we don't produce distinct messages with the same intent
// This also prevents the following failure: preprocess, send the preprocess, reboot
// before ack'ing the message, send a distinct preprocess, and then attempt a signing
// session premised on the first preprocess while peers hold the second
let (machine, preprocess) = machine.preprocess(&mut OsRng);
machines.push(machine);
serialized_preprocesses.push(preprocess.serialize());
preprocesses.push(preprocess);
}
self.preprocessing.insert(id.id, (machines, preprocesses));
// Broadcast our preprocess
// Broadcast our preprocesses
self.events.push_back(SubstrateSignerEvent::ProcessorMessage(
ProcessorMessage::BatchPreprocess { id, block, preprocess: preprocess.serialize() },
ProcessorMessage::BatchPreprocess { id, block, preprocesses: serialized_preprocesses },
));
}
@ -240,23 +259,23 @@ impl<D: Db> SubstrateSigner<D> {
return;
}
let machine = match self.preprocessing.remove(&id.id) {
let (machines, our_preprocesses) = match self.preprocessing.remove(&id.id) {
// Either rebooted or RPC error, or some invariant
None => {
warn!(
"not preprocessing for {}. this is an error if we didn't reboot",
hex::encode(id.id)
hex::encode(id.id),
);
return;
}
Some(machine) => machine,
Some(preprocess) => preprocess,
};
let preprocesses = match preprocesses
.drain()
.map(|(l, preprocess)| {
let mut preprocess_ref = preprocess.as_ref();
let res = machine
let res = machines[0]
.read_preprocess::<&[u8]>(&mut preprocess_ref)
.map(|preprocess| (l, preprocess));
if !preprocess_ref.is_empty() {
@ -264,24 +283,44 @@ impl<D: Db> SubstrateSigner<D> {
}
res
})
.collect::<Result<_, _>>()
.collect::<Result<HashMap<_, _>, _>>()
{
Ok(preprocesses) => preprocesses,
Err(e) => todo!("malicious signer: {:?}", e),
};
let (machine, share) =
match machine.sign(preprocesses, &batch_message(&self.signable[&id.id])) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
};
self.signing.insert(id.id, machine);
// Only keep a single machine as we only need one to get the signature
let mut signature_machine = None;
let mut shares = vec![];
let mut serialized_shares = vec![];
for (m, machine) in machines.into_iter().enumerate() {
let mut preprocesses = preprocesses.clone();
for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() {
if i != m {
assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none());
}
}
// Broadcast our share
let mut share_bytes = [0; 32];
share_bytes.copy_from_slice(&share.serialize());
let (machine, share) =
match machine.sign(preprocesses, &batch_message(&self.signable[&id.id])) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
};
if m == 0 {
signature_machine = Some(machine);
}
let mut share_bytes = [0; 32];
share_bytes.copy_from_slice(&share.serialize());
serialized_shares.push(share_bytes);
shares.push(share);
}
self.signing.insert(id.id, (signature_machine.unwrap(), shares));
// Broadcast our shares
self.events.push_back(SubstrateSignerEvent::ProcessorMessage(
ProcessorMessage::BatchShare { id, share: share_bytes },
ProcessorMessage::BatchShare { id, shares: serialized_shares },
));
}
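The copy into a fixed-width array works because Schnorrkel signature shares are Ristretto scalars, which serialize to exactly 32 bytes; that is what lets BatchShare carry [u8; 32] values instead of Vec<u8>. A minimal sketch of the conversion (to_share_bytes is an illustrative name):

// Panics if the serialized share isn't exactly 32 bytes, which for a
// Ristretto scalar it always is.
fn to_share_bytes(serialized: &[u8]) -> [u8; 32] {
  let mut share_bytes = [0; 32];
  share_bytes.copy_from_slice(serialized);
  share_bytes
}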
@ -290,7 +329,7 @@ impl<D: Db> SubstrateSigner<D> {
return;
}
let machine = match self.signing.remove(&id.id) {
let (machine, our_shares) = match self.signing.remove(&id.id) {
// Rebooted, RPC error, or some invariant
None => {
// If preprocessing has this ID, it means we were never sent the preprocess by the
@ -305,10 +344,10 @@ impl<D: Db> SubstrateSigner<D> {
);
return;
}
Some(machine) => machine,
Some(signing) => signing,
};
let shares = match shares
let mut shares = match shares
.drain()
.map(|(l, share)| {
let mut share_ref = share.as_ref();
@ -318,12 +357,16 @@ impl<D: Db> SubstrateSigner<D> {
}
res
})
.collect::<Result<_, _>>()
.collect::<Result<HashMap<_, _>, _>>()
{
Ok(shares) => shares,
Err(e) => todo!("malicious signer: {:?}", e),
};
for (i, our_share) in our_shares.into_iter().enumerate().skip(1) {
assert!(shares.insert(self.keys[i].params().i(), our_share).is_none());
}
let sig = match machine.complete(shares) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),

View file

@ -41,19 +41,22 @@ pub async fn test_key_gen<N: Network>() {
for i in 1 ..= 5 {
let key_gen = key_gens.get_mut(&i).unwrap();
let mut txn = dbs.get_mut(&i).unwrap().txn();
if let ProcessorMessage::Commitments { id, commitments } = key_gen
if let ProcessorMessage::Commitments { id, mut commitments } = key_gen
.handle(
&mut txn,
CoordinatorMessage::GenerateKey {
id: ID,
params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap())
.unwrap(),
shares: 1,
},
)
.await
{
assert_eq!(id, ID);
all_commitments.insert(Participant::new(u16::try_from(i).unwrap()).unwrap(), commitments);
assert_eq!(commitments.len(), 1);
all_commitments
.insert(Participant::new(u16::try_from(i).unwrap()).unwrap(), commitments.swap_remove(0));
} else {
panic!("didn't get commitments back");
}
@ -75,7 +78,7 @@ pub async fn test_key_gen<N: Network>() {
let key_gen = key_gens.get_mut(&i).unwrap();
let mut txn = dbs.get_mut(&i).unwrap().txn();
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
if let ProcessorMessage::Shares { id, shares } = key_gen
if let ProcessorMessage::Shares { id, mut shares } = key_gen
.handle(
&mut txn,
CoordinatorMessage::Commitments {
@ -86,7 +89,8 @@ pub async fn test_key_gen<N: Network>() {
.await
{
assert_eq!(id, ID);
all_shares.insert(i, shares);
assert_eq!(shares.len(), 1);
all_shares.insert(i, shares.swap_remove(0));
} else {
panic!("didn't get shares back");
}
@ -107,10 +111,10 @@ pub async fn test_key_gen<N: Network>() {
&mut txn,
CoordinatorMessage::Shares {
id: ID,
shares: all_shares
shares: vec![all_shares
.iter()
.filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) })
.collect(),
.collect()],
},
)
.await
@ -134,11 +138,16 @@ pub async fn test_key_gen<N: Network>() {
for i in 1 ..= 5 {
let key_gen = key_gens.get_mut(&i).unwrap();
let mut txn = dbs.get_mut(&i).unwrap().txn();
let KeyConfirmed { substrate_keys, network_keys } = key_gen
let KeyConfirmed { mut substrate_keys, mut network_keys } = key_gen
.confirm(&mut txn, ID.set, (sr25519::Public(res.0), res.1.clone().try_into().unwrap()))
.await;
txn.commit();
assert_eq!(substrate_keys.len(), 1);
let substrate_keys = substrate_keys.swap_remove(0);
assert_eq!(network_keys.len(), 1);
let network_keys = network_keys.swap_remove(0);
let params =
ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()).unwrap();
assert_eq!(substrate_keys.params(), params);

View file

@ -45,7 +45,7 @@ pub async fn sign<N: Network>(
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
let keys = keys.remove(&i).unwrap();
t = keys.params().t();
signers.insert(i, Signer::<_, MemDb>::new(network.clone(), keys));
signers.insert(i, Signer::<_, MemDb>::new(network.clone(), vec![keys]));
dbs.insert(i, MemDb::new());
}
drop(keys);
@ -74,12 +74,15 @@ pub async fn sign<N: Network>(
let mut preprocesses = HashMap::new();
for i in 1 ..= signers.len() {
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
if let SignerEvent::ProcessorMessage(ProcessorMessage::Preprocess { id, preprocess }) =
signers.get_mut(&i).unwrap().events.pop_front().unwrap()
if let SignerEvent::ProcessorMessage(ProcessorMessage::Preprocess {
id,
preprocesses: mut these_preprocesses,
}) = signers.get_mut(&i).unwrap().events.pop_front().unwrap()
{
assert_eq!(id, actual_id);
assert_eq!(these_preprocesses.len(), 1);
if signing_set.contains(&i) {
preprocesses.insert(i, preprocess);
preprocesses.insert(i, these_preprocesses.swap_remove(0));
}
} else {
panic!("didn't get preprocess back");
@ -102,11 +105,12 @@ pub async fn sign<N: Network>(
.await;
txn.commit();
if let SignerEvent::ProcessorMessage(ProcessorMessage::Share { id, share }) =
if let SignerEvent::ProcessorMessage(ProcessorMessage::Share { id, shares: mut these_shares }) =
signers.get_mut(i).unwrap().events.pop_front().unwrap()
{
assert_eq!(id, actual_id);
shares.insert(*i, share);
assert_eq!(these_shares.len(), 1);
shares.insert(*i, these_shares.swap_remove(0));
} else {
panic!("didn't get share back");
}

View file

@ -56,7 +56,7 @@ async fn test_substrate_signer() {
let keys = keys.get(&i).unwrap().clone();
t = keys.params().t();
let mut signer = SubstrateSigner::<MemDb>::new(NetworkId::Monero, keys);
let mut signer = SubstrateSigner::<MemDb>::new(NetworkId::Monero, vec![keys]);
let mut db = MemDb::new();
let mut txn = db.txn();
signer.sign(&mut txn, batch.clone()).await;
@ -85,7 +85,7 @@ async fn test_substrate_signer() {
if let SubstrateSignerEvent::ProcessorMessage(ProcessorMessage::BatchPreprocess {
id,
block: batch_block,
preprocess,
preprocesses: mut these_preprocesses,
}) = signers.get_mut(&i).unwrap().events.pop_front().unwrap()
{
if actual_id.id == [0; 32] {
@ -93,8 +93,9 @@ async fn test_substrate_signer() {
}
assert_eq!(id, actual_id);
assert_eq!(batch_block, block);
assert_eq!(these_preprocesses.len(), 1);
if signing_set.contains(&i) {
preprocesses.insert(i, preprocess);
preprocesses.insert(i, these_preprocesses.swap_remove(0));
}
} else {
panic!("didn't get preprocess back");
@ -117,11 +118,14 @@ async fn test_substrate_signer() {
.await;
txn.commit();
if let SubstrateSignerEvent::ProcessorMessage(ProcessorMessage::BatchShare { id, share }) =
signers.get_mut(i).unwrap().events.pop_front().unwrap()
if let SubstrateSignerEvent::ProcessorMessage(ProcessorMessage::BatchShare {
id,
shares: mut these_shares,
}) = signers.get_mut(i).unwrap().events.pop_front().unwrap()
{
assert_eq!(id, actual_id);
shares.insert(*i, share);
assert_eq!(these_shares.len(), 1);
shares.insert(*i, these_shares.swap_remove(0));
} else {
panic!("didn't get share back");
}

View file

@ -355,6 +355,8 @@ pub mod pallet {
NotEnoughAllocated,
/// Allocation would cause the validator set to no longer achieve fault tolerance.
AllocationWouldRemoveFaultTolerance,
/// Allocation would cause the validator set to never be able to achieve fault tolerance.
AllocationWouldPreventFaultTolerance,
/// Deallocation would remove the participant from the set, despite the validator not
/// specifying so.
DeallocationWouldRemoveParticipant,
@ -410,6 +412,7 @@ pub mod pallet {
system_address(b"validator-sets").into()
}
// is_bft returns whether the network is able to survive any single node becoming byzantine.
fn is_bft(network: NetworkId) -> bool {
let allocation_per_key_share = AllocationPerKeyShare::<T>::get(network).unwrap().0;
@ -454,6 +457,7 @@ pub mod pallet {
let increased_key_shares =
(old_allocation / allocation_per_key_share) < (new_allocation / allocation_per_key_share);
// Check if the net was able to handle any single node becoming byzantine before this change
let mut was_bft = None;
if increased_key_shares {
was_bft = Some(Self::is_bft(network));
@ -463,12 +467,19 @@ pub mod pallet {
Self::set_allocation(network, account, Amount(new_allocation));
Self::deposit_event(Event::AllocationIncreased { validator: account, network, amount });
// Error if the net can no longer handle any single node becoming byzantine
if let Some(was_bft) = was_bft {
if was_bft && (!Self::is_bft(network)) {
Err(Error::<T>::AllocationWouldRemoveFaultTolerance)?;
}
}
// The above is_bft calls are only used to check a BFT net doesn't become non-BFT
// Check here if this call would prevent a non-BFT net from *ever* becoming BFT
if (new_allocation / allocation_per_key_share) >= (MAX_KEY_SHARES_PER_SET / 3).into() {
Err(Error::<T>::AllocationWouldPreventFaultTolerance)?;
}
if InSet::<T>::contains_key(Self::in_set_key(network, account)) {
TotalAllocatedStake::<T>::set(
network,
@ -739,6 +750,7 @@ pub mod pallet {
Err(Error::InsufficientAllocation) |
Err(Error::NotEnoughAllocated) |
Err(Error::AllocationWouldRemoveFaultTolerance) |
Err(Error::AllocationWouldPreventFaultTolerance) |
Err(Error::DeallocationWouldRemoveParticipant) |
Err(Error::DeallocationWouldRemoveFaultTolerance) |
Err(Error::NonExistentDeallocation) |
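A worked instance of the new bound (constants from this commit; allocation_allowed is an illustrative reduction of the pallet check): with MAX_KEY_SHARES_PER_SET = 150, any allocation reaching 150 / 3 = 50 key shares is rejected, capping a single validator at 49 shares, i.e. under a third of even a full set.

const MAX_KEY_SHARES_PER_SET: u32 = 150;

// Mirrors the check above: reject once an allocation reaches a third of the
// maximum key shares, so fault tolerance remains achievable.
fn allocation_allowed(new_allocation: u64, allocation_per_key_share: u64) -> bool {
  (new_allocation / allocation_per_key_share) < u64::from(MAX_KEY_SHARES_PER_SET / 3)
}

fn main() {
  assert!(allocation_allowed(49, 1)); // 49 key shares: allowed
  assert!(!allocation_allowed(50, 1)); // 50 key shares: AllocationWouldPreventFaultTolerance
}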

View file

@ -18,7 +18,7 @@ use serai_primitives::NetworkId;
/// The maximum amount of key shares per set.
pub const MAX_KEY_SHARES_PER_SET: u32 = 150;
// Support keys up to 96 bytes (BLS12-381 G2).
const MAX_KEY_LEN: u32 = 96;
pub const MAX_KEY_LEN: u32 = 96;
/// The type used to identify a specific session of validators.
#[derive(
@ -97,10 +97,12 @@ pub fn set_keys_message(set: &ValidatorSet, key_pair: &KeyPair) -> Vec<u8> {
/// maximum.
///
/// Reduction occurs by reducing each validator in a reverse round-robin.
pub fn amortize_excess_key_shares(validators: &mut [(Public, u64)]) {
let total_key_shares = validators.iter().map(|(_, shares)| shares).sum::<u64>();
for i in
0 .. usize::try_from(total_key_shares.saturating_sub(MAX_KEY_SHARES_PER_SET.into())).unwrap()
pub fn amortize_excess_key_shares(validators: &mut [(Public, u16)]) {
let total_key_shares = validators.iter().map(|(_, shares)| shares).sum::<u16>();
for i in 0 .. usize::try_from(
total_key_shares.saturating_sub(u16::try_from(MAX_KEY_SHARES_PER_SET).unwrap()),
)
.unwrap()
{
validators[validators.len() - ((i % validators.len()) + 1)].1 -= 1;
}
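A self-contained usage sketch of the reverse round-robin above (u8 ids stand in for Public keys and the cap is passed explicitly): a set totalling 153 key shares sheds 3, one each from the tail validators.

// Mirror of amortize_excess_key_shares, simplified for illustration.
fn amortize(validators: &mut [(u8, u16)], cap: u16) {
  let total = validators.iter().map(|(_, shares)| shares).sum::<u16>();
  for i in 0 .. usize::from(total.saturating_sub(cap)) {
    validators[validators.len() - ((i % validators.len()) + 1)].1 -= 1;
  }
}

fn main() {
  let mut validators = [(b'a', 50u16), (b'b', 52), (b'c', 51)];
  amortize(&mut validators, 150);
  // 153 - 150 = 3 reductions walk the tail: c, then b, then a.
  assert_eq!(validators, [(b'a', 49), (b'b', 51), (b'c', 50)]);
}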

View file

@ -60,7 +60,7 @@ pub async fn batch(
.send_message(messages::coordinator::ProcessorMessage::BatchPreprocess {
id: id.clone(),
block: batch.block,
preprocess: [processor_is[i]; 64].to_vec(),
preprocesses: vec![[processor_is[i]; 64].to_vec()],
})
.await;
}
@ -74,7 +74,7 @@ pub async fn batch(
.send_message(messages::coordinator::ProcessorMessage::BatchPreprocess {
id: id.clone(),
block: batch.block,
preprocess: [processor_is[excluded_signer]; 64].to_vec(),
preprocesses: vec![[processor_is[excluded_signer]; 64].to_vec()],
})
.await;
@ -131,7 +131,7 @@ pub async fn batch(
processor
.send_message(messages::coordinator::ProcessorMessage::BatchShare {
id: id.clone(),
share: [u8::try_from(u16::from(i)).unwrap(); 32],
shares: vec![[u8::try_from(u16::from(i)).unwrap(); 32]],
})
.await;
}

View file

@ -51,14 +51,15 @@ pub async fn key_gen<C: Ciphersuite>(
u16::try_from(COORDINATORS).unwrap(),
participant_is[i],
)
.unwrap()
.unwrap(),
shares: 1,
})
);
processor
.send_message(messages::key_gen::ProcessorMessage::Commitments {
id,
commitments: vec![u8::try_from(u16::from(participant_is[i])).unwrap()],
commitments: vec![vec![u8::try_from(u16::from(participant_is[i])).unwrap()]],
})
.await;
}
@ -96,7 +97,9 @@ pub async fn key_gen<C: Ciphersuite>(
.collect::<HashMap<_, _>>();
shares.remove(&participant_is[i]);
processor.send_message(messages::key_gen::ProcessorMessage::Shares { id, shares }).await;
processor
.send_message(messages::key_gen::ProcessorMessage::Shares { id, shares: vec![shares] })
.await;
}
let substrate_priv_key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
@ -128,7 +131,7 @@ pub async fn key_gen<C: Ciphersuite>(
})
.collect::<HashMap<_, _>>();
shares.remove(&i);
shares
vec![shares]
},
})
);

View file

@ -52,7 +52,7 @@ pub async fn sign<C: Ciphersuite>(
processor
.send_message(messages::sign::ProcessorMessage::Preprocess {
id: id.clone(),
preprocess: [processor_is[i]; 64].to_vec(),
preprocesses: vec![[processor_is[i]; 64].to_vec()],
})
.await;
}
@ -65,7 +65,7 @@ pub async fn sign<C: Ciphersuite>(
processors[excluded_signer]
.send_message(messages::sign::ProcessorMessage::Preprocess {
id: id.clone(),
preprocess: [processor_is[excluded_signer]; 64].to_vec(),
preprocesses: vec![[processor_is[excluded_signer]; 64].to_vec()],
})
.await;
@ -120,7 +120,7 @@ pub async fn sign<C: Ciphersuite>(
processor
.send_message(messages::sign::ProcessorMessage::Share {
id: id.clone(),
share: vec![u8::try_from(u16::from(i)).unwrap(); 32],
shares: vec![vec![u8::try_from(u16::from(i)).unwrap(); 32]],
})
.await;
}

View file

@ -48,7 +48,7 @@ pub(crate) async fn recv_batch_preprocesses(
messages::coordinator::ProcessorMessage::BatchPreprocess {
id: this_id,
block: this_block,
preprocess,
preprocesses: mut these_preprocesses,
},
) => {
if id.is_none() {
@ -60,7 +60,8 @@ pub(crate) async fn recv_batch_preprocesses(
assert_eq!(&this_id, id.as_ref().unwrap());
assert_eq!(&this_block, block.as_ref().unwrap());
preprocesses.insert(i, preprocess);
assert_eq!(these_preprocesses.len(), 1);
preprocesses.insert(i, these_preprocesses.swap_remove(0));
}
_ => panic!("processor didn't send batch preprocess"),
}
@ -107,10 +108,14 @@ pub(crate) async fn sign_batch(
if preprocesses.contains_key(&i) {
match coordinator.recv_message().await {
messages::ProcessorMessage::Coordinator(
messages::coordinator::ProcessorMessage::BatchShare { id: this_id, share },
messages::coordinator::ProcessorMessage::BatchShare {
id: this_id,
shares: mut these_shares,
},
) => {
assert_eq!(&this_id, &id);
shares.insert(i, share);
assert_eq!(these_shares.len(), 1);
shares.insert(i, these_shares.swap_remove(0));
}
_ => panic!("processor didn't send batch share"),
}

View file

@ -46,14 +46,16 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator], network: NetworkId
participant,
)
.unwrap(),
shares: 1,
},
|participant, msg| match msg {
messages::key_gen::ProcessorMessage::Commitments {
id: this_id,
commitments: these_commitments,
commitments: mut these_commitments,
} => {
assert_eq!(this_id, id);
commitments.insert(participant, these_commitments);
assert_eq!(these_commitments.len(), 1);
commitments.insert(participant, these_commitments.swap_remove(0));
}
_ => panic!("processor didn't return Commitments in response to GenerateKey"),
},
@ -69,9 +71,10 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator], network: NetworkId
commitments: clone_without(&commitments, &participant),
},
|participant, msg| match msg {
messages::key_gen::ProcessorMessage::Shares { id: this_id, shares: these_shares } => {
messages::key_gen::ProcessorMessage::Shares { id: this_id, shares: mut these_shares } => {
assert_eq!(this_id, id);
shares.insert(participant, these_shares);
assert_eq!(these_shares.len(), 1);
shares.insert(participant, these_shares.swap_remove(0));
}
_ => panic!("processor didn't return Shares in response to GenerateKey"),
},
@ -85,12 +88,12 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator], network: NetworkId
coordinators,
|participant| messages::key_gen::CoordinatorMessage::Shares {
id,
shares: shares
shares: vec![shares
.iter()
.filter_map(|(this_participant, shares)| {
shares.get(&participant).cloned().map(|share| (*this_participant, share))
})
.collect(),
.collect()],
},
|_, msg| match msg {
messages::key_gen::ProcessorMessage::GeneratedKeyPair {

View file

@ -30,7 +30,7 @@ pub(crate) async fn recv_sign_preprocesses(
match msg {
messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Preprocess {
id: this_id,
preprocess,
preprocesses: mut these_preprocesses,
}) => {
if id.is_none() {
assert_eq!(&this_id.key, &key);
@ -39,7 +39,8 @@ pub(crate) async fn recv_sign_preprocesses(
}
assert_eq!(&this_id, id.as_ref().unwrap());
preprocesses.insert(i, preprocess);
assert_eq!(these_preprocesses.len(), 1);
preprocesses.insert(i, these_preprocesses.swap_remove(0));
}
_ => panic!("processor didn't send sign preprocess"),
}
@ -87,10 +88,11 @@ pub(crate) async fn sign_tx(
match coordinator.recv_message().await {
messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Share {
id: this_id,
share,
shares: mut these_shares,
}) => {
assert_eq!(&this_id, &id);
shares.insert(i, share);
assert_eq!(these_shares.len(), 1);
shares.insert(i, these_shares.swap_remove(0));
}
_ => panic!("processor didn't send TX shares"),
}