use core::{marker::PhantomData, fmt};
use std::collections::{VecDeque, HashMap};

use rand_core::OsRng;

use transcript::{Transcript, RecommendedTranscript};

use ciphersuite::group::GroupEncoding;
use frost::{
  curve::Ristretto,
  ThresholdKeys,
  algorithm::Algorithm,
  sign::{
    Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine,
    AlgorithmSignMachine, AlgorithmSignatureMachine,
  },
};
use frost_schnorrkel::Schnorrkel;

use log::{info, debug, warn};

use scale::Encode;

use serai_client::{
  primitives::NetworkId,
  in_instructions::primitives::{Batch, SignedBatch, batch_message},
};

use messages::{sign::SignId, coordinator::*};
use crate::{Get, DbTxn, Db};

// Generate an ID unique to a Batch
// TODO: Fork SignId to BatchSignId in order to just use the 5-byte encoding, not the hash of the
// 5-byte encoding
fn sign_id(network: NetworkId, id: u32) -> [u8; 32] {
  let mut transcript = RecommendedTranscript::new(b"Serai Processor Batch Sign ID");
  transcript.append_message(b"network", network.encode());
  transcript.append_message(b"id", id.to_le_bytes());

  let mut res = [0; 32];
  res.copy_from_slice(&transcript.challenge(b"id")[.. 32]);
  res
}
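
// E.g. sign_id(NetworkId::Bitcoin, 2) and sign_id(NetworkId::Monero, 2) should yield distinct
// IDs despite the shared batch number, as the network is bound into the transcript (the
// NetworkId values here are purely illustrative)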

#[derive(Debug)]
pub enum SubstrateSignerEvent {
  ProcessorMessage(ProcessorMessage),
  SignedBatch(SignedBatch),
}

#[derive(Debug)]
struct SubstrateSignerDb<D: Db>(D);
impl<D: Db> SubstrateSignerDb<D> {
  fn sign_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
    D::key(b"SUBSTRATE_SIGNER", dst, key)
  }

  fn completed_key(id: [u8; 32]) -> Vec<u8> {
    Self::sign_key(b"completed", id)
  }
  fn complete(txn: &mut D::Transaction<'_>, id: [u8; 32]) {
    txn.put(Self::completed_key(id), []);
  }
  fn completed<G: Get>(getter: &G, id: [u8; 32]) -> bool {
    getter.get(Self::completed_key(id)).is_some()
  }

  fn attempt_key(id: &SignId) -> Vec<u8> {
    Self::sign_key(b"attempt", id.encode())
  }
  fn attempt(txn: &mut D::Transaction<'_>, id: &SignId) {
    txn.put(Self::attempt_key(id), []);
  }
  fn has_attempt<G: Get>(getter: &G, id: &SignId) -> bool {
    getter.get(Self::attempt_key(id)).is_some()
  }

  fn save_batch(txn: &mut D::Transaction<'_>, batch: &SignedBatch) {
    txn.put(Self::sign_key(b"batch", batch.batch.block), batch.encode());
  }
}
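
// All of the above keys accordingly live under the b"SUBSTRATE_SIGNER" domain, partitioned by
// the b"completed", b"attempt", and b"batch" sub-domains, with the exact byte layout left to
// however D::key composes its arguments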

type Preprocess = <AlgorithmMachine<Ristretto, Schnorrkel> as PreprocessMachine>::Preprocess;
type SignatureShare = <AlgorithmSignMachine<Ristretto, Schnorrkel> as SignMachine<
  <Schnorrkel as Algorithm<Ristretto>>::Signature,
>>::SignatureShare;
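
// These aliases solely exist for brevity, as spelling out the associated types at every use
// site would be unwieldy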

pub struct SubstrateSigner<D: Db> {
  db: PhantomData<D>,

  network: NetworkId,
  keys: Vec<ThresholdKeys<Ristretto>>,

  signable: HashMap<[u8; 32], Batch>,
  attempt: HashMap<[u8; 32], u32>,
  #[allow(clippy::type_complexity)]
  preprocessing:
    HashMap<[u8; 32], (Vec<AlgorithmSignMachine<Ristretto, Schnorrkel>>, Vec<Preprocess>)>,
  #[allow(clippy::type_complexity)]
  signing:
    HashMap<[u8; 32], (AlgorithmSignatureMachine<Ristretto, Schnorrkel>, Vec<SignatureShare>)>,

  pub events: VecDeque<SubstrateSignerEvent>,
}
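
// A batch moves through signable -> preprocessing -> signing, keyed by its sign ID, with
// attempt tracking which iteration of the protocol is currently active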

impl<D: Db> fmt::Debug for SubstrateSigner<D> {
  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    fmt
      .debug_struct("SubstrateSigner")
      .field("signable", &self.signable)
      .field("attempt", &self.attempt)
      .finish_non_exhaustive()
  }
}

impl<D: Db> SubstrateSigner<D> {
  pub fn new(network: NetworkId, keys: Vec<ThresholdKeys<Ristretto>>) -> SubstrateSigner<D> {
    assert!(!keys.is_empty());
    SubstrateSigner {
      db: PhantomData,

      network,
      keys,

      signable: HashMap::new(),
      attempt: HashMap::new(),
      preprocessing: HashMap::new(),
      signing: HashMap::new(),

      events: VecDeque::new(),
    }
  }
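
  // Construction is infallible so long as at least one key share is provided, e.g.
  // (illustrative only, with `MemDb` being a hypothetical in-memory Db implementation):
  //   let mut signer = SubstrateSigner::<MemDb>::new(NetworkId::Bitcoin, key_shares);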

  fn verify_id(&self, id: &SignId) -> Result<(), ()> {
    // Check the attempt lines up
    match self.attempt.get(&id.id) {
      // If we don't have an attempt logged, it's because the coordinator is faulty OR because we
      // rebooted OR we detected the signed batch on chain
      // The latter is the expected flow for batches not actively being participated in
      None => {
        warn!("not attempting batch {} #{}", hex::encode(id.id), id.attempt);
        Err(())?;
      }
      Some(attempt) => {
        if attempt != &id.attempt {
          warn!(
            "sent signing data for batch {} #{} yet we have attempt #{}",
            hex::encode(id.id),
            id.attempt,
            attempt
          );
          Err(())?;
        }
      }
    }

    Ok(())
  }

  async fn attempt(&mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], attempt: u32) {
    // See above commentary for why this doesn't emit SignedBatch
    if SubstrateSignerDb::<D>::completed(txn, id) {
      return;
    }

    // Check if we're already working on this attempt
    if let Some(curr_attempt) = self.attempt.get(&id) {
      if curr_attempt >= &attempt {
        warn!(
          "told to attempt {} #{} yet we're already working on {}",
          hex::encode(id),
          attempt,
          curr_attempt
        );
        return;
      }
    }

    // Start this attempt
    let block = if let Some(batch) = self.signable.get(&id) {
      batch.block
    } else {
      warn!("told to attempt signing a batch we aren't currently signing for");
      return;
    };

    // Delete any existing machines
    self.preprocessing.remove(&id);
    self.signing.remove(&id);

    // Update the attempt number
    self.attempt.insert(id, attempt);

    let id = SignId { key: self.keys[0].group_key().to_bytes().to_vec(), id, attempt };
    info!("signing batch {} #{}", hex::encode(id.id), id.attempt);

    // If we reboot mid-sign, the current design has us abort all signs and wait for latter
    // attempts/new signing protocols
    // This is distinct from the DKG which will continue DKG sessions, even on reboot
    // This is because signing is tolerant of failures of up to 1/3rd of the group
    // The DKG requires 100% participation
    // While we could apply similar tricks as the DKG (a seeded RNG) to achieve support for
    // reboots, it's not worth the complexity when messing up here leaks our secret share
    //
    // Despite this, on reboot, we'll get told of active signing items, and may be in this
    // branch again for something we've already attempted
    //
    // Only run if this hasn't already been attempted
    if SubstrateSignerDb::<D>::has_attempt(txn, &id) {
      warn!(
        "already attempted batch {}, attempt #{}. this is an error if we didn't reboot",
        hex::encode(id.id),
        id.attempt
      );
      return;
    }

    SubstrateSignerDb::<D>::attempt(txn, &id);

    let mut machines = vec![];
    let mut preprocesses = vec![];
    let mut serialized_preprocesses = vec![];
    for keys in &self.keys {
      // b"substrate" is a literal from sp-core
      let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys.clone());

      // TODO: Use a seeded RNG here so we don't produce distinct messages with the same intent
      // This is also needed so we don't preprocess, send preprocess, reboot before ack'ing the
      // message, send distinct preprocess, and then attempt a signing session premised on the
      // former with the latter
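      //
      // A seeded construction might roughly be the following sketch (ChaCha20Rng, and deriving
      // the seed from a transcript over the sign ID, are illustrative assumptions, not part of
      // this code):
      //   let mut seed = [0; 32];
      //   seed.copy_from_slice(&transcript.challenge(b"preprocess seed")[.. 32]);
      //   let (machine, preprocess) = machine.preprocess(&mut ChaCha20Rng::from_seed(seed));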
      let (machine, preprocess) = machine.preprocess(&mut OsRng);
      machines.push(machine);
      serialized_preprocesses.push(preprocess.serialize());
      preprocesses.push(preprocess);
    }
    self.preprocessing.insert(id.id, (machines, preprocesses));

    // Broadcast our preprocesses
    self.events.push_back(SubstrateSignerEvent::ProcessorMessage(
      ProcessorMessage::BatchPreprocess { id, block, preprocesses: serialized_preprocesses },
    ));
  }
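
  // After each call into this signer, the caller is expected to drain self.events, forwarding
  // ProcessorMessages to the coordinator and persisting any SignedBatch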

  pub async fn sign(&mut self, txn: &mut D::Transaction<'_>, batch: Batch) {
    debug_assert_eq!(self.network, batch.network);
    let id = sign_id(batch.network, batch.id);
    if SubstrateSignerDb::<D>::completed(txn, id) {
      debug!("Sign batch order for ID we've already completed signing");
      // See batch_signed for commentary on why this simply returns
      return;
    }

    self.signable.insert(id, batch);
    self.attempt(txn, id, 0).await;
  }

  pub async fn handle(&mut self, txn: &mut D::Transaction<'_>, msg: CoordinatorMessage) {
    match msg {
      CoordinatorMessage::BatchPreprocesses { id, mut preprocesses } => {
        if self.verify_id(&id).is_err() {
          return;
        }

        let (machines, our_preprocesses) = match self.preprocessing.remove(&id.id) {
          // Either rebooted or RPC error, or some invariant
          None => {
            warn!(
              "not preprocessing for {}. this is an error if we didn't reboot",
              hex::encode(id.id),
            );
            return;
          }
          Some(preprocess) => preprocess,
        };
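
        // The following read uses machines[0] solely because every machine shares a wire format
        // for preprocesses; any of our machines could perform the deserialization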

        let preprocesses = match preprocesses
          .drain()
          .map(|(l, preprocess)| {
            let mut preprocess_ref = preprocess.as_ref();
            let res = machines[0]
              .read_preprocess::<&[u8]>(&mut preprocess_ref)
              .map(|preprocess| (l, preprocess));
            if !preprocess_ref.is_empty() {
              todo!("malicious signer: extra bytes");
            }
            res
          })
          .collect::<Result<HashMap<_, _>, _>>()
        {
          Ok(preprocesses) => preprocesses,
          Err(e) => todo!("malicious signer: {:?}", e),
        };

        // Only keep a single machine as we only need one to get the signature
        let mut signature_machine = None;
        let mut shares = vec![];
        let mut serialized_shares = vec![];
        for (m, machine) in machines.into_iter().enumerate() {
          let mut preprocesses = preprocesses.clone();
          for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() {
            if i != m {
              assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none());
            }
          }

          let (machine, share) =
            match machine.sign(preprocesses, &batch_message(&self.signable[&id.id])) {
              Ok(res) => res,
              Err(e) => todo!("malicious signer: {:?}", e),
            };
          if m == 0 {
            signature_machine = Some(machine);
          }

          let mut share_bytes = [0; 32];
          share_bytes.copy_from_slice(&share.serialize());
          serialized_shares.push(share_bytes);

          shares.push(share);
        }
        self.signing.insert(id.id, (signature_machine.unwrap(), shares));
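
        // In the above loop, machine m is handed every preprocess except its own (which it
        // tracks internally), so each share is produced over an identical preprocess set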

        // Broadcast our shares
        self.events.push_back(SubstrateSignerEvent::ProcessorMessage(
          ProcessorMessage::BatchShare { id, shares: serialized_shares },
        ));
      }

      CoordinatorMessage::BatchShares { id, mut shares } => {
        if self.verify_id(&id).is_err() {
          return;
        }

        let (machine, our_shares) = match self.signing.remove(&id.id) {
          // Rebooted, RPC error, or some invariant
          None => {
            // If preprocessing has this ID, it means we were never sent the preprocess by the
            // coordinator
            if self.preprocessing.contains_key(&id.id) {
              panic!("never preprocessed yet signing?");
            }

            warn!(
              "not preprocessing for {}. this is an error if we didn't reboot",
              hex::encode(id.id)
            );
            return;
          }
          Some(signing) => signing,
        };
|
|
|
|
|
Support multiple key shares per validator (#416)
* Update the coordinator to give key shares based on weight, not based on existence
Participants are now identified by their starting index. While this compiles,
the following is unimplemented:
1) A conversion for DKG `i` values. It assumes the threshold `i` values used
will be identical for the MuSig signature used to confirm the DKG.
2) Expansion from compressed values to full values before forwarding to the
processor.
* Add a fn to the DkgConfirmer to convert `i` values as needed
Also removes TODOs regarding Serai ensuring validator key uniqueness +
validity. The current infra achieves both.
* Have the Tributary DB track participation by shares, not by count
* Prevent a node from obtaining 34% of the maximum amount of key shares
This is actually mainly intended to set a bound on message sizes in the
coordinator. Message sizes are amplified by the amount of key shares held, so
setting an upper bound on said amount lets it determine constants. While that
upper bound could be 150, that'd be unreasonable and increase the potential for
DoS attacks.
* Correct the mechanism to detect if sufficient accumulation has occured
It used to check if the latest accumulation hit the required threshold. Now,
accumulations may jump past the required threshold. The required mechanism is
to check the threshold wasn't prior met and is now met.
* Finish updating the coordinator to handle a multiple key share per validator environment
* Adjust stategy re: preventing noce reuse in DKG Confirmer
* Add TODOs regarding dropped transactions, add possible TODO fix
* Update tests/coordinator
This doesn't add new multi-key-share tests, it solely updates the existing
single key-share tests to compile and run, with the necessary fixes to the
coordinator.
* Update processor key_gen to handle generating multiple key shares at once
* Update SubstrateSigner
* Update signer, clippy
* Update processor tests
* Update processor docker tests
2023-11-04 23:26:13 +00:00
|
|
|
let mut shares = match shares
|
2023-04-10 15:11:46 +00:00
|
|
|
.drain()
|
|
|
|
.map(|(l, share)| {
|
2023-04-20 19:45:32 +00:00
|
|
|
let mut share_ref = share.as_ref();
|
|
|
|
let res = machine.read_share::<&[u8]>(&mut share_ref).map(|share| (l, share));
|
|
|
|
if !share_ref.is_empty() {
|
|
|
|
todo!("malicious signer: extra bytes");
|
|
|
|
}
|
|
|
|
res
|
2023-04-10 15:11:46 +00:00
|
|
|
})
          .collect::<Result<HashMap<_, _>, _>>()
        {
          Ok(shares) => shares,
          Err(e) => todo!("malicious signer: {:?}", e),
        };
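
        // A validator may hold multiple key shares (shares are assigned by weight, not one per
        // validator). The machine above already accounts for our first key's share, so splice
        // in the shares from our remaining keys, hence the skip(1)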
        for (i, our_share) in our_shares.into_iter().enumerate().skip(1) {
          assert!(shares.insert(self.keys[i].params().i(), our_share).is_none());
        }
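
        // complete() aggregates the shares into the final Schnorrkel signature, returning an
        // Err identifying the faulty participant if any share fails to verify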
        let sig = match machine.complete(shares) {
          Ok(res) => res,
          Err(e) => todo!("malicious signer: {:?}", e),
        };

        info!("signed batch {} with attempt #{}", hex::encode(id.id), id.attempt);

        let batch =
          SignedBatch { batch: self.signable.remove(&id.id).unwrap(), signature: sig.into() };

        // Save the batch in case it's needed for recovery
        SubstrateSignerDb::<D>::save_batch(txn, &batch);
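        // Mark the ID complete; presumably checked so reattempts for an already-signed batch
        // can be disregarded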
        SubstrateSignerDb::<D>::complete(txn, id.id);

        // Stop trying to sign for this batch
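        // attempt must still have an entry; preprocessing and signing must not, as the machine
        // was removed from signing above and its preprocess was consumed when signing began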
        assert!(self.attempt.remove(&id.id).is_some());
        assert!(self.preprocessing.remove(&id.id).is_none());
        assert!(self.signing.remove(&id.id).is_none());

        self.events.push_back(SubstrateSignerEvent::SignedBatch(batch));
      }
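
      // A reattempt re-runs the signing protocol for this batch under a fresh attempt number,
      // typically issued when the current attempt fails to complete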
      CoordinatorMessage::BatchReattempt { id } => {
        self.attempt(txn, id.id, id.attempt).await;
      }
    }
  }
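
  /// Marks the batch with the given ID as signed and included on Substrate, clearing all
  /// state used to sign it. A minimal, hypothetical call site (the real caller lives
  /// elsewhere in the processor) might look like:
  /// ```ignore
  /// substrate_signer.batch_signed(&mut txn, batch_id);
  /// ```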
  pub fn batch_signed(&mut self, txn: &mut D::Transaction<'_>, id: u32) {
    // Safe since SubstrateSigner won't be told of the completion until the Scanner recognizes
    // the block behind it, which will trigger starting the Batch
    // TODO: There is a race condition between the Scanner recognizing the block and the Batch
    // having signing started
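
    // sign_id deterministically derives the signing ID from (network, batch ID), matching the
    // ID assigned when signing began, so the removals below target the right entries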
    let sign_id = sign_id(self.network, id);

    // Stop trying to sign for this batch
    SubstrateSignerDb::<D>::complete(txn, sign_id);

    self.signable.remove(&sign_id);
    self.attempt.remove(&sign_id);
    self.preprocessing.remove(&sign_id);
    self.signing.remove(&sign_id);

    // This doesn't emit SignedBatch because it doesn't have access to the SignedBatch
    // This function is expected to only be called once Substrate acknowledges this block,
    // which means its batch must have been signed
    // While a successive batch's signing would also cause this block to be acknowledged,
    // Substrate guarantees a batch's ordered inclusion

    // This also doesn't emit any further events since all mutation from the Batch being signed
    // happens on the substrate::CoordinatorMessage::SubstrateBlock message (which SignedBatch is
    // meant to end up triggering)
  }
}