Route blame between Processor and Coordinator (#427)

* Have processor report errors during the DKG to the coordinator

* Add RemoveParticipant, InvalidDkgShare to coordinator

* Route DKG blame around coordinator

* Allow public construction of AdditionalBlameMachine

Necessary for upcoming work on handling DKG blame in the processor and
coordinator.

Additionally fixes a publicly reachable panic when commitments parsed with one
ThresholdParams are used in a machine using a different ThresholdParams (a minimal
sketch of the added length check appears just before the diff).

Renames InvalidProofOfKnowledge to InvalidCommitments.

* Remove unused error from dleq

* Implement support for VerifyBlame in the processor

* Have coordinator send the processor share message relevant to Blame

* Remove desync between processors reporting InvalidShare and ones reporting GeneratedKeyPair

* Route blame on sign between processor and coordinator

Doesn't yet act on it in coordinator.

* Move txn usage as needed for stable Rust to build

* Correct InvalidDkgShare serialization
Luke Parker authored on 2023-11-12 07:24:41 -05:00 (committed via GitHub)
parent d015ee96a3, commit 54f1929078
18 changed files with 931 additions and 281 deletions
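For context, a minimal sketch of the length check that closes the panic described in the commit message. This is an illustration rather than the crate's exact code; the names (DkgError, Participant, Ciphersuite) follow the dkg crate, and the check mirrors the one added to verify_r1 in the diff below.

use ciphersuite::Ciphersuite;
use dkg::{DkgError, Participant};

// Every participant must have sent exactly `t` commitments, where `t` comes from *this*
// machine's ThresholdParams. Commitments parsed under a different ThresholdParams may have
// another length, and the machine could previously hit an out-of-bounds panic; now the
// sender is reported as having invalid commitments instead.
fn check_commitment_count<C: Ciphersuite>(
  t: u16,
  sender: Participant,
  commitments: &[C::G],
) -> Result<(), DkgError<()>> {
  if commitments.len() != usize::from(t) {
    return Err(DkgError::InvalidCommitments(sender));
  }
  Ok(())
}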


@@ -181,12 +181,18 @@ async fn handle_processor_message<D: Db, P: P2p>(
       // in-set, making the Tributary relevant
       ProcessorMessage::KeyGen(inner_msg) => match inner_msg {
         key_gen::ProcessorMessage::Commitments { id, .. } => Some(id.set.session),
+        key_gen::ProcessorMessage::InvalidCommitments { id, .. } => Some(id.set.session),
         key_gen::ProcessorMessage::Shares { id, .. } => Some(id.set.session),
+        key_gen::ProcessorMessage::InvalidShare { id, .. } => Some(id.set.session),
         key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => Some(id.set.session),
+        key_gen::ProcessorMessage::Blame { id, .. } => Some(id.set.session),
       },
       // TODO: Review replacing key with Session in messages?
       ProcessorMessage::Sign(inner_msg) => match inner_msg {
-        // We'll only receive Preprocess and Share if we're actively signing
+        // We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing
+        sign::ProcessorMessage::InvalidParticipant { id, .. } => {
+          Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
+        }
         sign::ProcessorMessage::Preprocess { id, .. } => {
           Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
         }
@@ -261,6 +267,9 @@ async fn handle_processor_message<D: Db, P: P2p>(
           None
         }
         // We'll only fire these if we are the Substrate signer, making the Tributary relevant
+        coordinator::ProcessorMessage::InvalidParticipant { id, .. } => {
+          Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
+        }
         coordinator::ProcessorMessage::BatchPreprocess { id, .. } => {
           Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
         }
@@ -419,6 +428,15 @@ async fn handle_processor_message<D: Db, P: P2p>(
       key_gen::ProcessorMessage::Commitments { id, commitments } => {
         vec![Transaction::DkgCommitments(id.attempt, commitments, Transaction::empty_signed())]
       }
+      key_gen::ProcessorMessage::InvalidCommitments { id: _, faulty } => {
+        // This doesn't need the ID since it's a Provided transaction which everyone will provide
+        // With this provision comes explicit ordering (with regards to other RemoveParticipant
+        // transactions) and group consensus
+        // Accordingly, this can't be replayed
+        // It could be included on-chain early/late with regards to the chain's active attempt,
+        // which attempt scheduling is written to avoid
+        vec![Transaction::RemoveParticipant(faulty)]
+      }
       key_gen::ProcessorMessage::Shares { id, mut shares } => {
         // Create a MuSig-based machine to inform Substrate of this key generation
         let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, id.attempt);
@@ -427,6 +445,9 @@ async fn handle_processor_message<D: Db, P: P2p>(
           .i(pub_key)
           .expect("processor message to DKG for a session we aren't a validator in");
+        // TODO: This is [receiver_share][sender_share] and is later transposed to
+        // [sender_share][receiver_share]. Make this [sender_share][receiver_share] from the
+        // start?
         // `tx_shares` needs to be done here as while it can be serialized from the HashMap
         // without further context, it can't be deserialized without context
         let mut tx_shares = Vec::with_capacity(shares.len());
@@ -455,10 +476,38 @@ async fn handle_processor_message<D: Db, P: P2p>(
           signed: Transaction::empty_signed(),
         }]
       }
+      key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => {
+        assert_eq!(
+          id.set.network, msg.network,
+          "processor claimed to be a different network than it was for in InvalidShare",
+        );
+
+        // Check if the MuSig signature had any errors as if so, we need to provide
+        // RemoveParticipant
+        // As for the safety of calling error_generating_key_pair, the processor is presumed
+        // to only send InvalidShare or GeneratedKeyPair for a given attempt
+        let mut txs = if let Some(faulty) =
+          crate::tributary::error_generating_key_pair::<D, _>(&txn, key, spec, id.attempt)
+        {
+          vec![Transaction::RemoveParticipant(faulty)]
+        } else {
+          vec![]
+        };
+
+        txs.push(Transaction::InvalidDkgShare {
+          attempt: id.attempt,
+          accuser,
+          faulty,
+          blame,
+          signed: Transaction::empty_signed(),
+        });
+
+        txs
+      }
       key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => {
         assert_eq!(
           id.set.network, msg.network,
-          "processor claimed to be a different network than it was for GeneratedKeyPair",
+          "processor claimed to be a different network than it was for in GeneratedKeyPair",
         );
         // TODO2: Also check the other KeyGenId fields
@@ -476,12 +525,24 @@ async fn handle_processor_message<D: Db, P: P2p>(
             vec![Transaction::DkgConfirmed(id.attempt, share, Transaction::empty_signed())]
           }
           Err(p) => {
-            todo!("participant {p:?} sent invalid DKG confirmation preprocesses")
+            vec![Transaction::RemoveParticipant(p)]
           }
         }
       }
+      key_gen::ProcessorMessage::Blame { id, participant } => {
+        assert_eq!(
+          id.set.network, msg.network,
+          "processor claimed to be a different network than it was for in Blame",
+        );
+        vec![Transaction::RemoveParticipant(participant)]
+      }
     },
     ProcessorMessage::Sign(msg) => match msg {
+      sign::ProcessorMessage::InvalidParticipant { .. } => {
+        // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal
+        // slash) and censor transactions (yet don't explicitly ban)
+        vec![]
+      }
       sign::ProcessorMessage::Preprocess { id, preprocesses } => {
         if id.attempt == 0 {
           MainDb::<D>::save_first_preprocess(
@@ -532,6 +593,11 @@ async fn handle_processor_message<D: Db, P: P2p>(
     },
     ProcessorMessage::Coordinator(inner_msg) => match inner_msg {
       coordinator::ProcessorMessage::SubstrateBlockAck { .. } => unreachable!(),
+      coordinator::ProcessorMessage::InvalidParticipant { .. } => {
+        // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal
+        // slash) and censor transactions (yet don't explicitly ban)
+        vec![]
+      }
       coordinator::ProcessorMessage::BatchPreprocess { id, block, preprocesses } => {
         log::info!(
           "informed of batch (sign ID {}, attempt {}) for block {}",


@@ -88,6 +88,11 @@ fn serialize_sign_data() {
 #[test]
 fn serialize_transaction() {
+  test_read_write(Transaction::RemoveParticipant(
+    frost::Participant::new(u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1))
+      .unwrap(),
+  ));
+
   {
     let mut commitments = vec![random_vec(&mut OsRng, 512)];
     for _ in 0 .. (OsRng.next_u64() % 100) {
@@ -133,6 +138,26 @@ fn serialize_transaction() {
     });
   }
 
+  for i in 0 .. 2 {
+    test_read_write(Transaction::InvalidDkgShare {
+      attempt: random_u32(&mut OsRng),
+      accuser: frost::Participant::new(
+        u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
+      )
+      .unwrap(),
+      faulty: frost::Participant::new(
+        u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
+      )
+      .unwrap(),
+      blame: if i == 0 {
+        None
+      } else {
+        Some(random_vec(&mut OsRng, 500)).filter(|blame| !blame.is_empty())
+      },
+      signed: random_signed(&mut OsRng),
+    });
+  }
+
   test_read_write(Transaction::DkgConfirmed(
     random_u32(&mut OsRng),
     {


@@ -87,11 +87,14 @@ impl<D: Db> TributaryDb<D> {
   fn fatal_slashes_key(genesis: [u8; 32]) -> Vec<u8> {
     Self::tributary_key(b"fatal_slashes", genesis)
   }
-  fn fatally_slashed_key(account: [u8; 32]) -> Vec<u8> {
-    Self::tributary_key(b"fatally_slashed", account)
+  fn fatally_slashed_key(genesis: [u8; 32], account: [u8; 32]) -> Vec<u8> {
+    Self::tributary_key(b"fatally_slashed", (genesis, account).encode())
+  }
+  pub fn is_fatally_slashed<G: Get>(getter: &G, genesis: [u8; 32], account: [u8; 32]) -> bool {
+    getter.get(Self::fatally_slashed_key(genesis, account)).is_some()
   }
   pub fn set_fatally_slashed(txn: &mut D::Transaction<'_>, genesis: [u8; 32], account: [u8; 32]) {
-    txn.put(Self::fatally_slashed_key(account), []);
+    txn.put(Self::fatally_slashed_key(genesis, account), []);
 
     let key = Self::fatal_slashes_key(genesis);
     let mut existing = txn.get(&key).unwrap_or(vec![]);
@@ -105,6 +108,27 @@ impl<D: Db> TributaryDb<D> {
     txn.put(key, existing);
   }
 
+  fn share_for_blame_key(genesis: &[u8], from: Participant, to: Participant) -> Vec<u8> {
+    Self::tributary_key(b"share_for_blame", (genesis, u16::from(from), u16::from(to)).encode())
+  }
+  pub fn save_share_for_blame(
+    txn: &mut D::Transaction<'_>,
+    genesis: &[u8],
+    from: Participant,
+    to: Participant,
+    share: &[u8],
+  ) {
+    txn.put(Self::share_for_blame_key(genesis, from, to), share);
+  }
+  pub fn share_for_blame<G: Get>(
+    getter: &G,
+    genesis: &[u8],
+    from: Participant,
+    to: Participant,
+  ) -> Option<Vec<u8>> {
+    getter.get(Self::share_for_blame_key(genesis, from, to))
+  }
+
   // The plan IDs associated with a Substrate block
   fn plan_ids_key(genesis: &[u8], block: u64) -> Vec<u8> {
     Self::tributary_key(b"plan_ids", [genesis, block.to_le_bytes().as_ref()].concat())


@@ -1,20 +1,20 @@
 use core::{ops::Deref, future::Future};
 use std::collections::HashMap;
 
-use zeroize::Zeroizing;
+use zeroize::{Zeroize, Zeroizing};
 
 use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
 use frost::dkg::Participant;
 
 use scale::{Encode, Decode};
 use serai_client::{
-  Signature,
+  Public, Signature,
   validator_sets::primitives::{ValidatorSet, KeyPair},
   subxt::utils::Encoded,
   SeraiValidatorSets,
 };
 
-use tributary::Signed;
+use tributary::{Signed, TransactionKind, TransactionTrait};
 
 use processor_messages::{
   key_gen::{self, KeyGenId},
@@ -22,7 +22,7 @@ use processor_messages::{
   sign::{self, SignId},
 };
 
-use serai_db::Db;
+use serai_db::{Get, Db};
 
 use crate::{
   processors::Processors,
@@ -56,7 +56,33 @@ pub fn dkg_confirmation_nonces(
   DkgConfirmer::preprocess(spec, key, attempt)
 }
 
-#[allow(clippy::needless_pass_by_ref_mut)]
+// If there's an error generating a key pair, return any errors which would've occured when
+// executing the DkgConfirmer in order to stay in sync with those who did.
+//
+// The caller must ensure only error_generating_key_pair or generated_key_pair is called for a
+// given attempt.
+pub fn error_generating_key_pair<D: Db, G: Get>(
+  getter: &G,
+  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
+  spec: &TributarySpec,
+  attempt: u32,
+) -> Option<Participant> {
+  let preprocesses =
+    TributaryDb::<D>::confirmation_nonces(getter, spec.genesis(), attempt).unwrap();
+
+  // Sign a key pair which can't be valid
+  // (0xff used as 0 would be the Ristretto identity point, 0-length for the network key)
+  let key_pair = (Public([0xff; 32]), vec![0xffu8; 0].try_into().unwrap());
+  match DkgConfirmer::share(spec, key, attempt, preprocesses, &key_pair) {
+    Ok(mut share) => {
+      // Zeroize the share to ensure it's not accessed
+      share.zeroize();
+      None
+    }
+    Err(p) => Some(p),
+  }
+}
+
 pub fn generated_key_pair<D: Db>(
   txn: &mut D::Transaction<'_>,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
@@ -69,7 +95,7 @@ pub fn generated_key_pair<D: Db>(
   DkgConfirmer::share(spec, key, attempt, preprocesses, key_pair)
 }
 
-pub(crate) fn fatal_slash<D: Db>(
+pub(super) fn fatal_slash<D: Db>(
   txn: &mut D::Transaction<'_>,
   genesis: [u8; 32],
   account: [u8; 32],
@@ -78,6 +104,33 @@ pub(crate) fn fatal_slash<D: Db>(
   log::warn!("fatally slashing {}. reason: {}", hex::encode(account), reason);
   TributaryDb::<D>::set_fatally_slashed(txn, genesis, account);
   // TODO: disconnect the node from network/ban from further participation in all Tributaries
+  // TODO: If during DKG, trigger a re-attempt
+}
+
+// TODO: Once Substrate confirms a key, we need to rotate our validator set OR form a second
+// Tributary post-DKG
+// https://github.com/serai-dex/serai/issues/426
+fn fatal_slash_with_participant_index<D: Db>(
+  spec: &TributarySpec,
+  txn: &mut <D as Db>::Transaction<'_>,
+  i: Participant,
+  reason: &str,
+) {
+  // Resolve from Participant to <Ristretto as Ciphersuite>::G
+  let i = u16::from(i);
+  let mut validator = None;
+  for (potential, _) in spec.validators() {
+    let v_i = spec.i(potential).unwrap();
+    if (u16::from(v_i.start) <= i) && (i < u16::from(v_i.end)) {
+      validator = Some(potential);
+      break;
+    }
+  }
+  let validator = validator.unwrap();
+
+  fatal_slash::<D>(txn, spec.genesis(), validator.to_bytes(), reason);
 }
 
 pub(crate) async fn handle_application_tx<
@@ -98,6 +151,15 @@ pub(crate) async fn handle_application_tx<
 ) {
   let genesis = spec.genesis();
 
+  // Don't handle transactions from fatally slashed participants
+  // TODO: Because fatally slashed participants can still publish onto the blockchain, they have
+  // a notable DoS ability
+  if let TransactionKind::Signed(signed) = tx.kind() {
+    if TributaryDb::<D>::is_fatally_slashed(txn, genesis, signed.signer.to_bytes()) {
+      return;
+    }
+  }
+
   let handle = |txn: &mut <D as Db>::Transaction<'_>,
                 data_spec: &DataSpecification,
                 bytes: Vec<u8>,
@@ -178,6 +240,9 @@ pub(crate) async fn handle_application_tx<
   }
 
   match tx {
+    Transaction::RemoveParticipant(i) => {
+      fatal_slash_with_participant_index::<D>(spec, txn, i, "RemoveParticipant Provided TX")
+    }
     Transaction::DkgCommitments(attempt, commitments, signed) => {
       let Ok(_) = check_sign_data_len::<D>(txn, spec, signed.signer, commitments.len()) else {
         return;
@@ -230,7 +295,28 @@ pub(crate) async fn handle_application_tx<
         }
       }
 
-      // Only save our share's bytes
+      // Save each share as needed for blame
+      {
+        let from = spec.i(signed.signer).unwrap();
+        for (to, shares) in shares.iter().enumerate() {
+          // 0-indexed (the enumeration) to 1-indexed (Participant)
+          let mut to = u16::try_from(to).unwrap() + 1;
+          // Adjust for the omission of the sender's own shares
+          if to >= u16::from(from.start) {
+            to += u16::from(from.end) - u16::from(from.start);
+          }
+          let to = Participant::new(to).unwrap();
+
+          for (sender_share, share) in shares.iter().enumerate() {
+            let from =
+              Participant::new(u16::from(from.start) + u16::try_from(sender_share).unwrap())
+                .unwrap();
+            TributaryDb::<D>::save_share_for_blame(txn, &genesis, from, to, share);
+          }
+        }
+      }
+
+      // Filter down to only our share's bytes for handle
       let our_i = spec
         .i(Ristretto::generator() * key.deref())
         .expect("in a tributary we're not a validator for");
@@ -327,6 +413,49 @@ pub(crate) async fn handle_application_tx<
       }
     }
 
+    // TODO: Only accept one of either InvalidDkgShare/DkgConfirmed per signer
+    // TODO: Ban self-accusals
+    Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
+      let range = spec.i(signed.signer).unwrap();
+      if (u16::from(accuser) < u16::from(range.start)) ||
+        (u16::from(range.end) <= u16::from(accuser))
+      {
+        fatal_slash::<D>(
+          txn,
+          genesis,
+          signed.signer.to_bytes(),
+          "accused with a Participant index which wasn't theirs",
+        );
+        return;
+      }
+
+      if !((u16::from(range.start) <= u16::from(faulty)) &&
+        (u16::from(faulty) < u16::from(range.end)))
+      {
+        fatal_slash::<D>(
+          txn,
+          genesis,
+          signed.signer.to_bytes(),
+          "accused self of having an InvalidDkgShare",
+        );
+        return;
+      }
+
+      let share = TributaryDb::<D>::share_for_blame(txn, &genesis, accuser, faulty).unwrap();
+      processors
+        .send(
+          spec.set().network,
+          key_gen::CoordinatorMessage::VerifyBlame {
+            id: KeyGenId { set: spec.set(), attempt },
+            accuser,
+            accused: faulty,
+            share,
+            blame,
+          },
+        )
+        .await;
+    }
+
     Transaction::DkgConfirmed(attempt, shares, signed) => {
       match handle(
         txn,
@@ -347,11 +476,14 @@ pub(crate) async fn handle_application_tx<
             "(including us) fires DkgConfirmed, yet no confirming key pair"
          )
        });
-        let Ok(sig) = DkgConfirmer::complete(spec, key, attempt, preprocesses, &key_pair, shares)
-        else {
-          // TODO: Full slash
-          todo!();
-        };
+        let sig =
+          match DkgConfirmer::complete(spec, key, attempt, preprocesses, &key_pair, shares) {
+            Ok(sig) => sig,
+            Err(p) => {
+              fatal_slash_with_participant_index::<D>(spec, txn, p, "invalid DkgConfirmer share");
+              return;
+            }
+          };
 
         publish_serai_tx(
           spec.set(),
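The index arithmetic in the save-for-blame loop above is subtle: the shares a signer publishes omit the signer's own entries, so enumeration indices at or past the signer's range shift by the range's width. A small, self-contained sketch of that mapping (a hypothetical helper, not code from the commit):

// Maps a 0-indexed position in a sender's published share list to the 1-indexed Participant
// receiving that share, given the sender occupies Participants sender_start .. sender_end
// (a half-open range, as with TributarySpec::i). The sender's own indices are skipped.
fn receiver_for_index(index: u16, sender_start: u16, sender_end: u16) -> u16 {
  // 0-indexed (the enumeration) to 1-indexed (Participant)
  let mut to = index + 1;
  // Adjust for the omission of the sender's own shares
  if to >= sender_start {
    to += sender_end - sender_start;
  }
  to
}

// With a sender occupying Participants 2 and 3 (sender_start = 2, sender_end = 4):
// index 0 -> Participant 1, index 1 -> Participant 4, index 2 -> Participant 5.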


@@ -233,6 +233,8 @@ impl<const N: usize> ReadWrite for SignData<N> {
 #[derive(Clone, PartialEq, Eq, Debug)]
 pub enum Transaction {
+  RemoveParticipant(Participant),
+
   // Once this completes successfully, no more instances should be created.
   DkgCommitments(u32, Vec<Vec<u8>>, Signed),
   DkgShares {
@@ -242,6 +244,13 @@ pub enum Transaction {
     confirmation_nonces: [u8; 64],
     signed: Signed,
   },
+  InvalidDkgShare {
+    attempt: u32,
+    accuser: Participant,
+    faulty: Participant,
+    blame: Option<Vec<u8>>,
+    signed: Signed,
+  },
   DkgConfirmed(u32, [u8; 32], Signed),
 
   // When we have synchrony on a batch, we can allow signing it
@@ -279,7 +288,15 @@ impl ReadWrite for Transaction {
     reader.read_exact(&mut kind)?;
 
     match kind[0] {
-      0 => {
+      0 => Ok(Transaction::RemoveParticipant({
+        let mut participant = [0; 2];
+        reader.read_exact(&mut participant)?;
+        Participant::new(u16::from_le_bytes(participant)).ok_or_else(|| {
+          io::Error::new(io::ErrorKind::Other, "invalid participant in RemoveParticipant")
+        })?
+      })),
+
+      1 => {
         let mut attempt = [0; 4];
         reader.read_exact(&mut attempt)?;
         let attempt = u32::from_le_bytes(attempt);
@@ -314,7 +331,7 @@ impl ReadWrite for Transaction {
         Ok(Transaction::DkgCommitments(attempt, commitments, signed))
       }
 
-      1 => {
+      2 => {
         let mut attempt = [0; 4];
         reader.read_exact(&mut attempt)?;
         let attempt = u32::from_le_bytes(attempt);
@@ -351,7 +368,40 @@ impl ReadWrite for Transaction {
         Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed })
       }
 
-      2 => {
+      3 => {
+        let mut attempt = [0; 4];
+        reader.read_exact(&mut attempt)?;
+        let attempt = u32::from_le_bytes(attempt);
+
+        let mut accuser = [0; 2];
+        reader.read_exact(&mut accuser)?;
+        let accuser = Participant::new(u16::from_le_bytes(accuser)).ok_or_else(|| {
+          io::Error::new(io::ErrorKind::Other, "invalid participant in InvalidDkgShare")
+        })?;
+
+        let mut faulty = [0; 2];
+        reader.read_exact(&mut faulty)?;
+        let faulty = Participant::new(u16::from_le_bytes(faulty)).ok_or_else(|| {
+          io::Error::new(io::ErrorKind::Other, "invalid participant in InvalidDkgShare")
+        })?;
+
+        let mut blame_len = [0; 2];
+        reader.read_exact(&mut blame_len)?;
+        let mut blame = vec![0; u16::from_le_bytes(blame_len).into()];
+        reader.read_exact(&mut blame)?;
+
+        let signed = Signed::read(reader)?;
+
+        Ok(Transaction::InvalidDkgShare {
+          attempt,
+          accuser,
+          faulty,
+          blame: Some(blame).filter(|blame| !blame.is_empty()),
+          signed,
+        })
+      }
+
+      4 => {
         let mut attempt = [0; 4];
         reader.read_exact(&mut attempt)?;
         let attempt = u32::from_le_bytes(attempt);
@@ -364,7 +414,7 @@ impl ReadWrite for Transaction {
         Ok(Transaction::DkgConfirmed(attempt, confirmation_share, signed))
       }
 
-      3 => {
+      5 => {
         let mut block = [0; 32];
         reader.read_exact(&mut block)?;
         let mut batch = [0; 5];
@@ -372,19 +422,19 @@ impl ReadWrite for Transaction {
         Ok(Transaction::Batch(block, batch))
       }
 
-      4 => {
+      6 => {
         let mut block = [0; 8];
         reader.read_exact(&mut block)?;
         Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block)))
       }
 
-      5 => SignData::read(reader).map(Transaction::BatchPreprocess),
-      6 => SignData::read(reader).map(Transaction::BatchShare),
+      7 => SignData::read(reader).map(Transaction::BatchPreprocess),
+      8 => SignData::read(reader).map(Transaction::BatchShare),
 
-      7 => SignData::read(reader).map(Transaction::SignPreprocess),
-      8 => SignData::read(reader).map(Transaction::SignShare),
+      9 => SignData::read(reader).map(Transaction::SignPreprocess),
+      10 => SignData::read(reader).map(Transaction::SignShare),
 
-      9 => {
+      11 => {
         let mut plan = [0; 32];
         reader.read_exact(&mut plan)?;
@@ -405,8 +455,13 @@ impl ReadWrite for Transaction {
   fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
     match self {
-      Transaction::DkgCommitments(attempt, commitments, signed) => {
+      Transaction::RemoveParticipant(i) => {
         writer.write_all(&[0])?;
+        writer.write_all(&u16::from(*i).to_le_bytes())
+      }
+
+      Transaction::DkgCommitments(attempt, commitments, signed) => {
+        writer.write_all(&[1])?;
         writer.write_all(&attempt.to_le_bytes())?;
         if commitments.is_empty() {
           Err(io::Error::new(io::ErrorKind::Other, "zero commitments in DkgCommitments"))?
@@ -428,7 +483,7 @@ impl ReadWrite for Transaction {
       }
 
       Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => {
-        writer.write_all(&[1])?;
+        writer.write_all(&[2])?;
         writer.write_all(&attempt.to_le_bytes())?;
 
         // `shares` is a Vec which is supposed to map to a HashMap<Pariticpant, Vec<u8>>. Since we
@@ -456,43 +511,59 @@ impl ReadWrite for Transaction {
         signed.write(writer)
       }
 
+      Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
+        writer.write_all(&[3])?;
+        writer.write_all(&attempt.to_le_bytes())?;
+        writer.write_all(&u16::from(*accuser).to_le_bytes())?;
+        writer.write_all(&u16::from(*faulty).to_le_bytes())?;
+
+        // Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length
+        assert!(blame.as_ref().map(|blame| blame.len()).unwrap_or(1) != 0);
+        let blame_len =
+          u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB");
+        writer.write_all(&blame_len.to_le_bytes())?;
+        writer.write_all(blame.as_ref().unwrap_or(&vec![]))?;
+
+        signed.write(writer)
+      }
+
       Transaction::DkgConfirmed(attempt, share, signed) => {
-        writer.write_all(&[2])?;
+        writer.write_all(&[4])?;
         writer.write_all(&attempt.to_le_bytes())?;
         writer.write_all(share)?;
         signed.write(writer)
       }
 
       Transaction::Batch(block, batch) => {
-        writer.write_all(&[3])?;
+        writer.write_all(&[5])?;
         writer.write_all(block)?;
         writer.write_all(batch)
       }
 
       Transaction::SubstrateBlock(block) => {
-        writer.write_all(&[4])?;
+        writer.write_all(&[6])?;
         writer.write_all(&block.to_le_bytes())
       }
 
       Transaction::BatchPreprocess(data) => {
-        writer.write_all(&[5])?;
+        writer.write_all(&[7])?;
         data.write(writer)
       }
 
       Transaction::BatchShare(data) => {
-        writer.write_all(&[6])?;
+        writer.write_all(&[8])?;
         data.write(writer)
       }
 
       Transaction::SignPreprocess(data) => {
-        writer.write_all(&[7])?;
+        writer.write_all(&[9])?;
         data.write(writer)
       }
 
       Transaction::SignShare(data) => {
-        writer.write_all(&[8])?;
+        writer.write_all(&[10])?;
         data.write(writer)
       }
 
       Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => {
-        writer.write_all(&[9])?;
+        writer.write_all(&[11])?;
         writer.write_all(plan)?;
         writer
           .write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceed 255 bytes")])?;
@@ -507,8 +578,11 @@ impl ReadWrite for Transaction {
 impl TransactionTrait for Transaction {
   fn kind(&self) -> TransactionKind<'_> {
     match self {
+      Transaction::RemoveParticipant(_) => TransactionKind::Provided("remove"),
+
       Transaction::DkgCommitments(_, _, signed) => TransactionKind::Signed(signed),
       Transaction::DkgShares { signed, .. } => TransactionKind::Signed(signed),
+      Transaction::InvalidDkgShare { signed, .. } => TransactionKind::Signed(signed),
       Transaction::DkgConfirmed(_, _, signed) => TransactionKind::Signed(signed),
 
       Transaction::Batch(_, _) => TransactionKind::Provided("batch"),
@@ -574,8 +648,11 @@ impl Transaction {
   ) {
     fn signed(tx: &mut Transaction) -> &mut Signed {
       match tx {
+        Transaction::RemoveParticipant(_) => panic!("signing RemoveParticipant"),
+
         Transaction::DkgCommitments(_, _, ref mut signed) => signed,
         Transaction::DkgShares { ref mut signed, .. } => signed,
+        Transaction::InvalidDkgShare { ref mut signed, .. } => signed,
         Transaction::DkgConfirmed(_, _, ref mut signed) => signed,
 
         Transaction::Batch(_, _) => panic!("signing Batch"),
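The corrected InvalidDkgShare serialization hinges on one rule: a zero-length blame is never written, and on read an empty blame is flattened back to None, so Some(vec![]) and None share one wire form. A hedged, standalone illustration of that rule (not the crate's ReadWrite implementation):

// Writes the optional blame as a u16 little-endian length prefix followed by the bytes.
fn write_blame(blame: &Option<Vec<u8>>) -> Vec<u8> {
  // No actual blame is expected to be 0-length, so Some(vec![]) collapses to the None encoding
  let blame = blame.as_deref().unwrap_or(&[]);
  let len = u16::try_from(blame.len()).expect("blame exceeded 64 KB");
  let mut out = len.to_le_bytes().to_vec();
  out.extend_from_slice(blame);
  out
}

// Reads it back, mapping an empty blame to None so the round-trip is canonical.
// Assumes `bytes` is at least as long as the encoded length claims.
fn read_blame(bytes: &[u8]) -> Option<Vec<u8>> {
  let len = usize::from(u16::from_le_bytes([bytes[0], bytes[1]]));
  Some(bytes[2 .. 2 + len].to_vec()).filter(|blame| !blame.is_empty())
}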


@@ -85,6 +85,8 @@ impl<D: Db> NonceDecider<D> {
   pub fn nonce<G: Get>(getter: &G, genesis: [u8; 32], tx: &Transaction) -> Option<Option<u32>> {
     match tx {
+      Transaction::RemoveParticipant(_) => None,
+
       Transaction::DkgCommitments(attempt, _, _) => {
         assert_eq!(*attempt, 0);
         Some(Some(0))
@@ -93,6 +95,12 @@ impl<D: Db> NonceDecider<D> {
         assert_eq!(*attempt, 0);
         Some(Some(1))
       }
+      // InvalidDkgShare and DkgConfirmed share a nonce due to the expected existence of only one
+      // on-chain
+      Transaction::InvalidDkgShare { attempt, .. } => {
+        assert_eq!(*attempt, 0);
+        Some(Some(2))
+      }
       Transaction::DkgConfirmed(attempt, _, _) => {
         assert_eq!(*attempt, 0);
         Some(Some(2))


@@ -341,7 +341,7 @@ pub(crate) enum DecryptionError {
 #[derive(Clone)]
 pub(crate) struct Encryption<C: Ciphersuite> {
   context: String,
-  i: Participant,
+  i: Option<Participant>,
   enc_key: Zeroizing<C::F>,
   enc_pub_key: C::G,
   enc_keys: HashMap<Participant, C::G>,
@@ -370,7 +370,11 @@ impl<C: Ciphersuite> Zeroize for Encryption<C> {
 }
 
 impl<C: Ciphersuite> Encryption<C> {
-  pub(crate) fn new<R: RngCore + CryptoRng>(context: String, i: Participant, rng: &mut R) -> Self {
+  pub(crate) fn new<R: RngCore + CryptoRng>(
+    context: String,
+    i: Option<Participant>,
+    rng: &mut R,
+  ) -> Self {
     let enc_key = Zeroizing::new(C::random_nonzero_F(rng));
     Self {
       context,
@@ -404,7 +408,7 @@ impl<C: Ciphersuite> Encryption<C> {
     participant: Participant,
     msg: Zeroizing<E>,
   ) -> EncryptedMessage<C, E> {
-    encrypt(rng, &self.context, self.i, self.enc_keys[&participant], msg)
+    encrypt(rng, &self.context, self.i.unwrap(), self.enc_keys[&participant], msg)
   }
 
   pub(crate) fn decrypt<R: RngCore + CryptoRng, I: Copy + Zeroize, E: Encryptable>(


@@ -133,7 +133,7 @@ impl<C: Ciphersuite> KeyGenMachine<C> {
     );
 
     // Additionally create an encryption mechanism to protect the secret shares
-    let encryption = Encryption::new(self.context.clone(), self.params.i, rng);
+    let encryption = Encryption::new(self.context.clone(), Some(self.params.i), rng);
 
     // Step 4: Broadcast
     let msg =
@@ -249,35 +249,38 @@ impl<C: Ciphersuite> SecretShareMachine<C> {
   fn verify_r1<R: RngCore + CryptoRng>(
     &mut self,
     rng: &mut R,
-    mut commitments: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
+    mut commitment_msgs: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
   ) -> Result<HashMap<Participant, Vec<C::G>>, FrostError<C>> {
     validate_map(
-      &commitments,
+      &commitment_msgs,
       &(1 ..= self.params.n()).map(Participant).collect::<Vec<_>>(),
       self.params.i(),
     )?;
 
-    let mut batch = BatchVerifier::<Participant, C::G>::new(commitments.len());
-    let mut commitments = commitments
-      .drain()
-      .map(|(l, msg)| {
-        let mut msg = self.encryption.register(l, msg);
-        // Step 5: Validate each proof of knowledge
-        // This is solely the prep step for the latter batch verification
-        msg.sig.batch_verify(
-          rng,
-          &mut batch,
-          l,
-          msg.commitments[0],
-          challenge::<C>(&self.context, l, msg.sig.R.to_bytes().as_ref(), &msg.cached_msg),
-        );
-        (l, msg.commitments.drain(..).collect::<Vec<_>>())
-      })
-      .collect::<HashMap<_, _>>();
-    batch.verify_vartime_with_vartime_blame().map_err(FrostError::InvalidProofOfKnowledge)?;
+    let mut batch = BatchVerifier::<Participant, C::G>::new(commitment_msgs.len());
+    let mut commitments = HashMap::new();
+    for l in (1 ..= self.params.n()).map(Participant) {
+      let Some(msg) = commitment_msgs.remove(&l) else { continue };
+      let mut msg = self.encryption.register(l, msg);
+      if msg.commitments.len() != self.params.t().into() {
+        Err(FrostError::InvalidCommitments(l))?;
+      }
+      // Step 5: Validate each proof of knowledge
+      // This is solely the prep step for the latter batch verification
+      msg.sig.batch_verify(
+        rng,
+        &mut batch,
+        l,
+        msg.commitments[0],
+        challenge::<C>(&self.context, l, msg.sig.R.to_bytes().as_ref(), &msg.cached_msg),
+      );
+      commitments.insert(l, msg.commitments.drain(..).collect::<Vec<_>>());
+    }
+    batch.verify_vartime_with_vartime_blame().map_err(FrostError::InvalidCommitments)?;
 
     commitments.insert(self.params.i, self.our_commitments.drain(..).collect());
     Ok(commitments)
@@ -470,12 +473,12 @@ impl<C: Ciphersuite> KeyMachine<C> {
     Ok(BlameMachine {
       commitments,
       encryption,
-      result: ThresholdCore {
+      result: Some(ThresholdCore {
         params,
         secret_share: secret,
         group_key: stripes[0],
         verification_shares,
-      },
+      }),
     })
   }
 }
@@ -484,7 +487,7 @@ impl<C: Ciphersuite> KeyMachine<C> {
 pub struct BlameMachine<C: Ciphersuite> {
   commitments: HashMap<Participant, Vec<C::G>>,
   encryption: Encryption<C>,
-  result: ThresholdCore<C>,
+  result: Option<ThresholdCore<C>>,
 }
 
 impl<C: Ciphersuite> fmt::Debug for BlameMachine<C> {
@@ -518,7 +521,7 @@ impl<C: Ciphersuite> BlameMachine<C> {
   /// tooling to do so. This function is solely intended to force users to acknowledge they're
   /// completing the protocol, not processing any blame.
   pub fn complete(self) -> ThresholdCore<C> {
-    self.result
+    self.result.unwrap()
   }
 
   fn blame_internal(
@@ -585,6 +588,32 @@ impl<C: Ciphersuite> BlameMachine<C> {
 #[derive(Debug, Zeroize)]
 pub struct AdditionalBlameMachine<C: Ciphersuite>(BlameMachine<C>);
 impl<C: Ciphersuite> AdditionalBlameMachine<C> {
+  /// Create an AdditionalBlameMachine capable of evaluating Blame regardless of if the caller was
+  /// a member in the DKG protocol.
+  ///
+  /// Takes in the parameters for the DKG protocol and all of the participant's commitment
+  /// messages.
+  ///
+  /// This constructor assumes the full validity of the commitment messages. They must be fully
+  /// authenticated as having come from the supposed party and verified as valid. Usage of invalid
+  /// commitments is considered undefined behavior, and may cause everything from inaccurate blame
+  /// to panics.
+  pub fn new<R: RngCore + CryptoRng>(
+    rng: &mut R,
+    context: String,
+    n: u16,
+    mut commitment_msgs: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
+  ) -> Result<Self, FrostError<C>> {
+    let mut commitments = HashMap::new();
+    let mut encryption = Encryption::new(context, None, rng);
+    for i in 1 ..= n {
+      let i = Participant::new(i).unwrap();
+      let Some(msg) = commitment_msgs.remove(&i) else { Err(DkgError::MissingParticipant(i))? };
+      commitments.insert(i, encryption.register(i, msg).commitments);
+    }
+    Ok(AdditionalBlameMachine(BlameMachine { commitments, encryption, result: None }))
+  }
+
   /// Given an accusation of fault, determine the faulty party (either the sender, who sent an
   /// invalid secret share, or the receiver, who claimed a valid secret share was invalid).
   ///
@@ -596,7 +625,7 @@ impl<C: Ciphersuite> AdditionalBlameMachine<C> {
   /// the caller's job to ensure they're unique in order to prevent multiple instances of blame
   /// over a single incident.
   pub fn blame(
-    self,
+    &self,
     sender: Participant,
     recipient: Participant,
     msg: EncryptedMessage<C, SecretShare<C::F>>,


@@ -94,7 +94,7 @@ pub enum DkgError<B: Clone + PartialEq + Eq + Debug> {
   /// An invalid proof of knowledge was provided.
   #[cfg_attr(feature = "std", error("invalid proof of knowledge (participant {0})"))]
-  InvalidProofOfKnowledge(Participant),
+  InvalidCommitments(Participant),
   /// An invalid DKG share was provided.
   #[cfg_attr(feature = "std", error("invalid share (participant {participant}, blame {blame})"))]
   InvalidShare { participant: Participant, blame: Option<B> },


@@ -109,7 +109,7 @@ where
         &[C1::generator(), C2::generator()],
         &[original_shares[&i], proof.share],
       )
-      .map_err(|_| DkgError::InvalidProofOfKnowledge(i))?;
+      .map_err(|_| DkgError::InvalidCommitments(i))?;
 
       verification_shares.insert(i, proof.share);
     }


@@ -6,7 +6,7 @@ use ciphersuite::Ciphersuite;
 use crate::{
   Participant, ThresholdParams, ThresholdCore,
-  frost::{KeyGenMachine, SecretShare, KeyMachine},
+  frost::{Commitments, KeyGenMachine, SecretShare, KeyMachine},
   encryption::{EncryptionKeyMessage, EncryptedMessage},
   tests::{THRESHOLD, PARTICIPANTS, clone_without},
 };
@@ -17,12 +17,13 @@ type FrostSecretShares<C> = HashMap<Participant, FrostEncryptedMessage<C>>;
 const CONTEXT: &str = "DKG Test Key Generation";
 
-// Commit, then return enc key and shares
+// Commit, then return commitment messages, enc keys, and shares
 #[allow(clippy::type_complexity)]
 fn commit_enc_keys_and_shares<R: RngCore + CryptoRng, C: Ciphersuite>(
   rng: &mut R,
 ) -> (
   HashMap<Participant, KeyMachine<C>>,
+  HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
   HashMap<Participant, C::G>,
   HashMap<Participant, FrostSecretShares<C>>,
 ) {
@@ -68,7 +69,7 @@ fn commit_enc_keys_and_shares<R: RngCore + CryptoRng, C: Ciphersuite>(
     })
     .collect::<HashMap<_, _>>();
 
-  (machines, enc_keys, secret_shares)
+  (machines, commitments, enc_keys, secret_shares)
 }
 
 fn generate_secret_shares<C: Ciphersuite>(
@@ -89,7 +90,7 @@ fn generate_secret_shares<C: Ciphersuite>(
 pub fn frost_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
   rng: &mut R,
 ) -> HashMap<Participant, ThresholdCore<C>> {
-  let (mut machines, _, secret_shares) = commit_enc_keys_and_shares::<_, C>(rng);
+  let (mut machines, _, _, secret_shares) = commit_enc_keys_and_shares::<_, C>(rng);
 
   let mut verification_shares = None;
   let mut group_key = None;
@@ -122,7 +123,11 @@ mod literal {
   use ciphersuite::Ristretto;
 
-  use crate::{DkgError, encryption::EncryptionKeyProof, frost::BlameMachine};
+  use crate::{
+    DkgError,
+    encryption::EncryptionKeyProof,
+    frost::{BlameMachine, AdditionalBlameMachine},
+  };
 
   use super::*;
@@ -130,6 +135,7 @@ mod literal {
   const TWO: Participant = Participant(2);
 
   fn test_blame(
+    commitment_msgs: HashMap<Participant, EncryptionKeyMessage<Ristretto, Commitments<Ristretto>>>,
     machines: Vec<BlameMachine<Ristretto>>,
     msg: FrostEncryptedMessage<Ristretto>,
     blame: Option<EncryptionKeyProof<Ristretto>>,
@@ -139,13 +145,26 @@ mod literal {
       assert_eq!(blamed, ONE);
       // Verify additional blame also works
       assert_eq!(additional.blame(ONE, TWO, msg.clone(), blame.clone()), ONE);
+
+      // Verify machines constructed with AdditionalBlameMachine::new work
+      assert_eq!(
+        AdditionalBlameMachine::new(
+          &mut OsRng,
+          CONTEXT.to_string(),
+          PARTICIPANTS,
+          commitment_msgs.clone()
+        )
+        .unwrap()
+        .blame(ONE, TWO, msg.clone(), blame.clone()),
+        ONE,
+      );
     }
   }
 
   // TODO: Write a macro which expands to the following
   #[test]
   fn invalid_encryption_pop_blame() {
-    let (mut machines, _, mut secret_shares) =
+    let (mut machines, commitment_msgs, _, mut secret_shares) =
       commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
 
     // Mutate the PoP of the encrypted message from 1 to 2
@@ -169,12 +188,12 @@ mod literal {
       })
       .collect::<Vec<_>>();
 
-    test_blame(machines, secret_shares[&ONE][&TWO].clone(), blame.unwrap());
+    test_blame(commitment_msgs, machines, secret_shares[&ONE][&TWO].clone(), blame.unwrap());
   }
 
   #[test]
   fn invalid_ecdh_blame() {
-    let (mut machines, _, mut secret_shares) =
+    let (mut machines, commitment_msgs, _, mut secret_shares) =
       commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
 
     // Mutate the share to trigger a blame event
@@ -209,13 +228,13 @@ mod literal {
      .collect::<Vec<_>>();
     blame.as_mut().unwrap().as_mut().unwrap().invalidate_key();
 
-    test_blame(machines, secret_shares[&TWO][&ONE].clone(), blame.unwrap());
+    test_blame(commitment_msgs, machines, secret_shares[&TWO][&ONE].clone(), blame.unwrap());
   }
 
   // This should be largely equivalent to the prior test
   #[test]
   fn invalid_dleq_blame() {
-    let (mut machines, _, mut secret_shares) =
+    let (mut machines, commitment_msgs, _, mut secret_shares) =
       commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
 
     secret_shares
@@ -244,12 +263,12 @@ mod literal {
      .collect::<Vec<_>>();
     blame.as_mut().unwrap().as_mut().unwrap().invalidate_dleq();
 
-    test_blame(machines, secret_shares[&TWO][&ONE].clone(), blame.unwrap());
+    test_blame(commitment_msgs, machines, secret_shares[&TWO][&ONE].clone(), blame.unwrap());
   }
 
   #[test]
   fn invalid_share_serialization_blame() {
-    let (mut machines, enc_keys, mut secret_shares) =
+    let (mut machines, commitment_msgs, enc_keys, mut secret_shares) =
       commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
 
     secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_serialization(
@@ -277,12 +296,12 @@ mod literal {
       })
       .collect::<Vec<_>>();
 
-    test_blame(machines, secret_shares[&ONE][&TWO].clone(), blame.unwrap());
+    test_blame(commitment_msgs, machines, secret_shares[&ONE][&TWO].clone(), blame.unwrap());
   }
 
   #[test]
   fn invalid_share_value_blame() {
-    let (mut machines, enc_keys, mut secret_shares) =
+    let (mut machines, commitment_msgs, enc_keys, mut secret_shares) =
       commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
 
     secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_value(
@@ -310,6 +329,6 @@ mod literal {
       })
      .collect::<Vec<_>>();
 
-    test_blame(machines, secret_shares[&ONE][&TWO].clone(), blame.unwrap());
+    test_blame(commitment_msgs, machines, secret_shares[&ONE][&TWO].clone(), blame.unwrap());
   }
 }


@@ -95,9 +95,6 @@ impl<G: PrimeGroup> Generators<G> {
 /// Error for cross-group DLEq proofs.
 #[derive(Error, PartialEq, Eq, Debug)]
 pub enum DLEqError {
-  /// Invalid proof of knowledge.
-  #[error("invalid proof of knowledge")]
-  InvalidProofOfKnowledge,
   /// Invalid proof length.
   #[error("invalid proof length")]
   InvalidProofLength,

docs/DKG Exclusions.md (new file, 23 lines)

Upon an issue with the DKG, the honest validators must remove the malicious
validators. Ideally, a threshold signature would be used, yet that would require
a threshold key (which would require authentication by a MuSig signature). A
MuSig signature which specifies the signing set (or rather, the excluded
signers) achieves the most efficiency.
While that resolves the on-chain behavior, the Tributary also has to perform
exclusion. This has the following forms:
1) Rejecting further transactions (required)
2) Rejecting further participation in Tendermint
With regards to rejecting further participation in Tendermint, it's *ideal* to
remove the validator from the list of validators. Each validator removed from
participation, yet not from the list of validators, increases the likelihood of
the network failing to form consensus.
With regards to the economic security, an honest 67% may remove a faulty
(explicitly or simply offline) 33%, letting 67% of the remaining 67% (4/9ths)
take control of the associated private keys. In such a case, the malicious
parties are defined as the 4/9ths of validators with access to the private key
and the 33% removed (who together form >67% of the originally intended
validator set and have presumably provided enough stake to cover losses).
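A hypothetical sketch of the set selection this document describes (names and types are illustrative, not from the commit): the honest validators agree on the excluded signers, and everyone derives the same reduced MuSig signing set from the original validator ordering.

use std::collections::HashSet;

// Keys are the validators' 32-byte public keys. Order must follow the original validator set so
// every honest validator derives the same MuSig key from the same list of remaining signers.
fn musig_signing_set(validators: &[[u8; 32]], excluded: &HashSet<[u8; 32]>) -> Vec<[u8; 32]> {
  validators.iter().copied().filter(|validator| !excluded.contains(validator)).collect()
}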


@ -33,11 +33,29 @@ pub mod key_gen {
pub enum CoordinatorMessage { pub enum CoordinatorMessage {
// Instructs the Processor to begin the key generation process. // Instructs the Processor to begin the key generation process.
// TODO: Should this be moved under Substrate? // TODO: Should this be moved under Substrate?
GenerateKey { id: KeyGenId, params: ThresholdParams, shares: u16 }, GenerateKey {
id: KeyGenId,
params: ThresholdParams,
shares: u16,
},
// Received commitments for the specified key generation protocol. // Received commitments for the specified key generation protocol.
Commitments { id: KeyGenId, commitments: HashMap<Participant, Vec<u8>> }, Commitments {
id: KeyGenId,
commitments: HashMap<Participant, Vec<u8>>,
},
// Received shares for the specified key generation protocol. // Received shares for the specified key generation protocol.
Shares { id: KeyGenId, shares: Vec<HashMap<Participant, Vec<u8>>> }, Shares {
id: KeyGenId,
shares: Vec<HashMap<Participant, Vec<u8>>>,
},
/// Instruction to verify a blame accusation.
VerifyBlame {
id: KeyGenId,
accuser: Participant,
accused: Participant,
share: Vec<u8>,
blame: Option<Vec<u8>>,
},
} }
impl CoordinatorMessage { impl CoordinatorMessage {
@ -49,11 +67,39 @@ pub mod key_gen {
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] #[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
pub enum ProcessorMessage { pub enum ProcessorMessage {
// Created commitments for the specified key generation protocol. // Created commitments for the specified key generation protocol.
Commitments { id: KeyGenId, commitments: Vec<Vec<u8>> }, Commitments {
id: KeyGenId,
commitments: Vec<Vec<u8>>,
},
// Participant published invalid commitments.
InvalidCommitments {
id: KeyGenId,
faulty: Participant,
},
// Created shares for the specified key generation protocol. // Created shares for the specified key generation protocol.
Shares { id: KeyGenId, shares: Vec<HashMap<Participant, Vec<u8>>> }, Shares {
id: KeyGenId,
shares: Vec<HashMap<Participant, Vec<u8>>>,
},
// Participant published an invalid share.
#[rustfmt::skip]
InvalidShare {
id: KeyGenId,
accuser: Participant,
faulty: Participant,
blame: Option<Vec<u8>>,
},
// Resulting keys from the specified key generation protocol. // Resulting keys from the specified key generation protocol.
GeneratedKeyPair { id: KeyGenId, substrate_key: [u8; 32], network_key: Vec<u8> }, GeneratedKeyPair {
id: KeyGenId,
substrate_key: [u8; 32],
network_key: Vec<u8>,
},
// Blame this participant.
Blame {
id: KeyGenId,
participant: Participant,
},
} }
} }
@ -94,8 +140,10 @@ pub mod sign {
} }
} }
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, Encode, Decode, Serialize, Deserialize)] #[derive(Clone, PartialEq, Eq, Debug, Zeroize, Serialize, Deserialize)]
pub enum ProcessorMessage { pub enum ProcessorMessage {
// Participant sent an invalid message during the sign protocol.
InvalidParticipant { id: SignId, participant: Participant },
// Created preprocess for the specified signing protocol. // Created preprocess for the specified signing protocol.
Preprocess { id: SignId, preprocesses: Vec<Vec<u8>> }, Preprocess { id: SignId, preprocesses: Vec<Vec<u8>> },
// Signed share for the specified signing protocol. // Signed share for the specified signing protocol.
@ -152,9 +200,10 @@ pub mod coordinator {
pub id: [u8; 32], pub id: [u8; 32],
} }
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, Encode, Decode, Serialize, Deserialize)] #[derive(Clone, PartialEq, Eq, Debug, Zeroize, Serialize, Deserialize)]
pub enum ProcessorMessage { pub enum ProcessorMessage {
SubstrateBlockAck { network: NetworkId, block: u64, plans: Vec<PlanMeta> }, SubstrateBlockAck { network: NetworkId, block: u64, plans: Vec<PlanMeta> },
InvalidParticipant { id: BatchSignId, participant: Participant },
BatchPreprocess { id: BatchSignId, block: BlockHash, preprocesses: Vec<Vec<u8>> }, BatchPreprocess { id: BatchSignId, block: BlockHash, preprocesses: Vec<Vec<u8>> },
BatchShare { id: BatchSignId, shares: Vec<[u8; 32]> }, BatchShare { id: BatchSignId, shares: Vec<[u8; 32]> },
} }
@ -275,6 +324,7 @@ impl CoordinatorMessage {
key_gen::CoordinatorMessage::GenerateKey { id, .. } => (0, id), key_gen::CoordinatorMessage::GenerateKey { id, .. } => (0, id),
key_gen::CoordinatorMessage::Commitments { id, .. } => (1, id), key_gen::CoordinatorMessage::Commitments { id, .. } => (1, id),
key_gen::CoordinatorMessage::Shares { id, .. } => (2, id), key_gen::CoordinatorMessage::Shares { id, .. } => (2, id),
key_gen::CoordinatorMessage::VerifyBlame { id, .. } => (3, id),
}; };
let mut res = vec![COORDINATOR_UID, TYPE_KEY_GEN_UID, sub]; let mut res = vec![COORDINATOR_UID, TYPE_KEY_GEN_UID, sub];
@ -340,8 +390,11 @@ impl ProcessorMessage {
let (sub, id) = match msg { let (sub, id) = match msg {
// Unique since KeyGenId // Unique since KeyGenId
key_gen::ProcessorMessage::Commitments { id, .. } => (0, id), key_gen::ProcessorMessage::Commitments { id, .. } => (0, id),
key_gen::ProcessorMessage::Shares { id, .. } => (1, id), key_gen::ProcessorMessage::InvalidCommitments { id, .. } => (1, id),
key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => (2, id), key_gen::ProcessorMessage::Shares { id, .. } => (2, id),
key_gen::ProcessorMessage::InvalidShare { id, .. } => (3, id),
key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => (4, id),
key_gen::ProcessorMessage::Blame { id, .. } => (5, id),
}; };
let mut res = vec![PROCESSSOR_UID, TYPE_KEY_GEN_UID, sub]; let mut res = vec![PROCESSSOR_UID, TYPE_KEY_GEN_UID, sub];
@ -351,10 +404,11 @@ impl ProcessorMessage {
ProcessorMessage::Sign(msg) => { ProcessorMessage::Sign(msg) => {
let (sub, id) = match msg { let (sub, id) = match msg {
// Unique since SignId // Unique since SignId
sign::ProcessorMessage::Preprocess { id, .. } => (0, id.encode()), sign::ProcessorMessage::InvalidParticipant { id, .. } => (0, id.encode()),
sign::ProcessorMessage::Share { id, .. } => (1, id.encode()), sign::ProcessorMessage::Preprocess { id, .. } => (1, id.encode()),
sign::ProcessorMessage::Share { id, .. } => (2, id.encode()),
// Unique since a processor will only sign a TX once // Unique since a processor will only sign a TX once
sign::ProcessorMessage::Completed { id, .. } => (2, id.to_vec()), sign::ProcessorMessage::Completed { id, .. } => (3, id.to_vec()),
}; };
let mut res = vec![PROCESSSOR_UID, TYPE_SIGN_UID, sub]; let mut res = vec![PROCESSSOR_UID, TYPE_SIGN_UID, sub];
@@ -367,8 +421,9 @@ impl ProcessorMessage {
(0, (network, block).encode()) (0, (network, block).encode())
} }
// Unique since BatchSignId // Unique since BatchSignId
coordinator::ProcessorMessage::BatchPreprocess { id, .. } => (1, id.encode()), coordinator::ProcessorMessage::InvalidParticipant { id, .. } => (1, id.encode()),
coordinator::ProcessorMessage::BatchShare { id, .. } => (2, id.encode()), coordinator::ProcessorMessage::BatchPreprocess { id, .. } => (2, id.encode()),
coordinator::ProcessorMessage::BatchShare { id, .. } => (3, id.encode()),
}; };
let mut res = vec![PROCESSSOR_UID, TYPE_COORDINATOR_UID, sub]; let mut res = vec![PROCESSSOR_UID, TYPE_COORDINATOR_UID, sub];
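For readers skimming the intent-key hunks above: each message's unique ID (used to deduplicate and order queued messages) is built from a source UID, a message-type UID, a per-variant sub-index, and the message's own id. The sketch below is a minimal, self-contained illustration of that construction with placeholder constants and a simplified enum, not the crate's actual definitions; the point it shows is that every new variant (InvalidCommitments, InvalidShare, Blame, InvalidParticipant) claims a sub-index no other variant of its type uses, which is exactly what the renumbering above preserves.

```rust
// Illustrative sketch of intent-key construction; the UIDs and enum are
// placeholders, not the actual serai message definitions.
const PROCESSOR_UID: u8 = 0;
const TYPE_KEY_GEN_UID: u8 = 2;

enum KeyGenMsg {
    Commitments(Vec<u8>),
    InvalidCommitments(Vec<u8>),
    Shares(Vec<u8>),
    InvalidShare(Vec<u8>),
    GeneratedKeyPair(Vec<u8>),
    Blame(Vec<u8>),
}

fn intent(msg: &KeyGenMsg) -> Vec<u8> {
    // Each variant gets a distinct sub-index so two different messages for the
    // same id never share an intent key (which would cause one to be dropped
    // as a duplicate).
    let (sub, id) = match msg {
        KeyGenMsg::Commitments(id) => (0u8, id),
        KeyGenMsg::InvalidCommitments(id) => (1, id),
        KeyGenMsg::Shares(id) => (2, id),
        KeyGenMsg::InvalidShare(id) => (3, id),
        KeyGenMsg::GeneratedKeyPair(id) => (4, id),
        KeyGenMsg::Blame(id) => (5, id),
    };
    let mut res = vec![PROCESSOR_UID, TYPE_KEY_GEN_UID, sub];
    res.extend(id);
    res
}
```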

View file

@@ -9,7 +9,9 @@ use transcript::{Transcript, RecommendedTranscript};
use ciphersuite::group::GroupEncoding; use ciphersuite::group::GroupEncoding;
use frost::{ use frost::{
curve::{Ciphersuite, Ristretto}, curve::{Ciphersuite, Ristretto},
dkg::{Participant, ThresholdParams, ThresholdCore, ThresholdKeys, encryption::*, frost::*}, dkg::{
DkgError, Participant, ThresholdParams, ThresholdCore, ThresholdKeys, encryption::*, frost::*,
},
}; };
use log::info; use log::info;
@@ -28,7 +30,7 @@ pub struct KeyConfirmed<C: Ciphersuite> {
create_db!( create_db!(
KeyGenDb { KeyGenDb {
ParamsDb: (key: &ValidatorSet) -> (ThresholdParams, u16), ParamsDb: (set: &ValidatorSet) -> (ThresholdParams, u16),
// Not scoped to the set since that'd have later attempts overwrite former ones // Not scoped to the set since that'd have later attempts overwrite former ones
// A former attempt may become the finalized attempt, even if not in a timely manner // A former attempt may become the finalized attempt, even if not in a timely manner
// Overwriting its commitments would be accordingly poor // Overwriting its commitments would be accordingly poor
@@ -155,18 +157,20 @@ impl<N: Network, D: Db> KeyGen<N, D> {
txn: &mut D::Transaction<'_>, txn: &mut D::Transaction<'_>,
msg: CoordinatorMessage, msg: CoordinatorMessage,
) -> ProcessorMessage { ) -> ProcessorMessage {
let context = |id: &KeyGenId| { const SUBSTRATE_KEY_CONTEXT: &str = "substrate";
const NETWORK_KEY_CONTEXT: &str = "network";
let context = |id: &KeyGenId, key| {
// TODO2: Also embed the chain ID/genesis block // TODO2: Also embed the chain ID/genesis block
format!( format!(
"Serai Key Gen. Session: {:?}, Network: {:?}, Attempt: {}", "Serai Key Gen. Session: {:?}, Network: {:?}, Attempt: {}, Key: {}",
id.set.session, id.set.network, id.attempt id.set.session, id.set.network, id.attempt, key,
) )
}; };
let rng = |label, id: KeyGenId| { let rng = |label, id: KeyGenId| {
let mut transcript = RecommendedTranscript::new(label); let mut transcript = RecommendedTranscript::new(label);
transcript.append_message(b"entropy", &self.entropy); transcript.append_message(b"entropy", &self.entropy);
transcript.append_message(b"context", context(&id)); transcript.append_message(b"context", context(&id, "rng"));
ChaCha20Rng::from_seed(transcript.rng_seed(b"rng")) ChaCha20Rng::from_seed(transcript.rng_seed(b"rng"))
}; };
let coefficients_rng = |id| rng(b"Key Gen Coefficients", id); let coefficients_rng = |id| rng(b"Key Gen Coefficients", id);
@@ -184,8 +188,10 @@ impl<N: Network, D: Db> KeyGen<N, D> {
Participant::new(u16::from(params.i()) + s).unwrap(), Participant::new(u16::from(params.i()) + s).unwrap(),
) )
.unwrap(); .unwrap();
let substrate = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng); let substrate = KeyGenMachine::new(params, context(&id, SUBSTRATE_KEY_CONTEXT))
let network = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng); .generate_coefficients(&mut rng);
let network = KeyGenMachine::new(params, context(&id, NETWORK_KEY_CONTEXT))
.generate_coefficients(&mut rng);
machines.push((substrate.0, network.0)); machines.push((substrate.0, network.0));
let mut serialized = vec![]; let mut serialized = vec![];
substrate.1.write(&mut serialized).unwrap(); substrate.1.write(&mut serialized).unwrap();
@@ -195,76 +201,91 @@ impl<N: Network, D: Db> KeyGen<N, D> {
(machines, commitments) (machines, commitments)
}; };
let secret_share_machines = let secret_share_machines = |id,
|id, params: ThresholdParams,
params: ThresholdParams, machines: SecretShareMachines<N>,
(machines, our_commitments): (SecretShareMachines<N>, Vec<Vec<u8>>), commitments: HashMap<Participant, Vec<u8>>|
commitments: HashMap<Participant, Vec<u8>>| { -> Result<_, ProcessorMessage> {
let mut rng = secret_shares_rng(id); let mut rng = secret_shares_rng(id);
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
fn handle_machine<C: Ciphersuite>( fn handle_machine<C: Ciphersuite>(
rng: &mut ChaCha20Rng, rng: &mut ChaCha20Rng,
params: ThresholdParams, id: KeyGenId,
machine: SecretShareMachine<C>, machine: SecretShareMachine<C>,
commitments_ref: &mut HashMap<Participant, &[u8]>, commitments: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
) -> (KeyMachine<C>, HashMap<Participant, EncryptedMessage<C, SecretShare<C::F>>>) { ) -> Result<
// Parse the commitments (KeyMachine<C>, HashMap<Participant, EncryptedMessage<C, SecretShare<C::F>>>),
let parsed = match commitments_ref ProcessorMessage,
.iter_mut() > {
.map(|(i, commitments)| { match machine.generate_secret_shares(rng, commitments) {
EncryptionKeyMessage::<C, Commitments<C>>::read(commitments, params) Ok(res) => Ok(res),
.map(|commitments| (*i, commitments)) Err(e) => match e {
}) DkgError::ZeroParameter(_, _) |
.collect() DkgError::InvalidThreshold(_, _) |
{ DkgError::InvalidParticipant(_, _) |
Ok(commitments) => commitments, DkgError::InvalidSigningSet |
Err(e) => todo!("malicious signer: {:?}", e), DkgError::InvalidShare { .. } => unreachable!("{e:?}"),
}; DkgError::InvalidParticipantQuantity(_, _) |
DkgError::DuplicatedParticipant(_) |
match machine.generate_secret_shares(rng, parsed) { DkgError::MissingParticipant(_) => {
Ok(res) => res, panic!("coordinator sent invalid DKG commitments: {e:?}")
Err(e) => todo!("malicious signer: {:?}", e),
}
}
let mut key_machines = vec![];
let mut shares = vec![];
for (m, (substrate_machine, network_machine)) in machines.into_iter().enumerate() {
let mut commitments_ref: HashMap<Participant, &[u8]> =
commitments.iter().map(|(i, commitments)| (*i, commitments.as_ref())).collect();
for (i, our_commitments) in our_commitments.iter().enumerate() {
if m != i {
assert!(commitments_ref
.insert(
Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(),
our_commitments.as_ref(),
)
.is_none());
} }
} DkgError::InvalidCommitments(i) => {
Err(ProcessorMessage::InvalidCommitments { id, faulty: i })?
let (substrate_machine, mut substrate_shares) =
handle_machine::<Ristretto>(&mut rng, params, substrate_machine, &mut commitments_ref);
let (network_machine, network_shares) =
handle_machine(&mut rng, params, network_machine, &mut commitments_ref);
key_machines.push((substrate_machine, network_machine));
for (_, commitments) in commitments_ref {
if !commitments.is_empty() {
todo!("malicious signer: extra bytes");
} }
} },
let mut these_shares: HashMap<_, _> =
substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect();
for (i, share) in these_shares.iter_mut() {
share.extend(network_shares[i].serialize());
}
shares.push(these_shares);
} }
(key_machines, shares) }
};
let mut substrate_commitments = HashMap::new();
let mut network_commitments = HashMap::new();
for i in 1 ..= params.n() {
let i = Participant::new(i).unwrap();
let mut commitments = commitments[&i].as_slice();
substrate_commitments.insert(
i,
EncryptionKeyMessage::<Ristretto, Commitments<Ristretto>>::read(&mut commitments, params)
.map_err(|_| ProcessorMessage::InvalidCommitments { id, faulty: i })?,
);
network_commitments.insert(
i,
EncryptionKeyMessage::<N::Curve, Commitments<N::Curve>>::read(&mut commitments, params)
.map_err(|_| ProcessorMessage::InvalidCommitments { id, faulty: i })?,
);
if !commitments.is_empty() {
// Malicious Participant included extra bytes in their commitments
// (a potential DoS attack)
Err(ProcessorMessage::InvalidCommitments { id, faulty: i })?;
}
}
let mut key_machines = vec![];
let mut shares = vec![];
for (m, (substrate_machine, network_machine)) in machines.into_iter().enumerate() {
let actual_i = Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap();
let mut substrate_commitments = substrate_commitments.clone();
substrate_commitments.remove(&actual_i);
let (substrate_machine, mut substrate_shares) =
handle_machine::<Ristretto>(&mut rng, id, substrate_machine, substrate_commitments)?;
let mut network_commitments = network_commitments.clone();
network_commitments.remove(&actual_i);
let (network_machine, network_shares) =
handle_machine(&mut rng, id, network_machine, network_commitments.clone())?;
key_machines.push((substrate_machine, network_machine));
let mut these_shares: HashMap<_, _> =
substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect();
for (i, share) in these_shares.iter_mut() {
share.extend(network_shares[i].serialize());
}
shares.push(these_shares);
}
Ok((key_machines, shares))
};
match msg { match msg {
CoordinatorMessage::GenerateKey { id, params, shares } => { CoordinatorMessage::GenerateKey { id, params, shares } => {
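The rewritten commitment handling above parses every participant's buffer up front, reading both curves' commitment messages from the same slice and flagging the sender with InvalidCommitments on a parse failure or on trailing bytes. A simplified, self-contained sketch of that loop follows; the length-prefixed reader and the types are placeholders for EncryptionKeyMessage::read and the dkg crate's types.

```rust
use std::collections::HashMap;

// Illustrative only: Participant and the wire format below are stand-ins.
type Participant = u16;

enum Fault {
    InvalidCommitments { faulty: Participant },
}

// Stand-in for EncryptionKeyMessage::read: consume one length-prefixed blob.
fn read_one<'a>(buf: &mut &'a [u8]) -> Result<&'a [u8], ()> {
    let data = *buf;
    let (&len, rest) = data.split_first().ok_or(())?;
    if rest.len() < usize::from(len) {
        return Err(());
    }
    let (msg, remaining) = rest.split_at(usize::from(len));
    *buf = remaining;
    Ok(msg)
}

fn parse_commitments(
    commitments: &HashMap<Participant, Vec<u8>>,
) -> Result<HashMap<Participant, (Vec<u8>, Vec<u8>)>, Fault> {
    let mut parsed = HashMap::new();
    for (&i, raw) in commitments {
        let mut buf = raw.as_slice();
        // Both curves' commitments are read, in order, from the same buffer.
        let substrate = read_one(&mut buf).map_err(|()| Fault::InvalidCommitments { faulty: i })?;
        let network = read_one(&mut buf).map_err(|()| Fault::InvalidCommitments { faulty: i })?;
        // Trailing bytes also mark the sender as faulty (a cheap bloat/DoS vector).
        if !buf.is_empty() {
            return Err(Fault::InvalidCommitments { faulty: i });
        }
        parsed.insert(i, (substrate.to_vec(), network.to_vec()));
    }
    Ok(parsed)
}
```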
@@ -284,7 +305,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
ProcessorMessage::Commitments { id, commitments } ProcessorMessage::Commitments { id, commitments }
} }
CoordinatorMessage::Commitments { id, commitments } => { CoordinatorMessage::Commitments { id, mut commitments } => {
info!("Received commitments for {:?}", id); info!("Received commitments for {:?}", id);
if self.active_share.contains_key(&id.set) { if self.active_share.contains_key(&id.set) {
@@ -301,17 +322,29 @@ impl<N: Network, D: Db> KeyGen<N, D> {
// This *may* be inconsistent if we receive a KeyGen for attempt x, then commitments for // This *may* be inconsistent if we receive a KeyGen for attempt x, then commitments for
// attempt y // attempt y
// The coordinator is trusted to be proper in this regard // The coordinator is trusted to be proper in this regard
let prior = self let (prior, our_commitments) = self
.active_commit .active_commit
.remove(&id.set) .remove(&id.set)
.unwrap_or_else(|| key_gen_machines(id, params, share_quantity)); .unwrap_or_else(|| key_gen_machines(id, params, share_quantity));
for (i, our_commitments) in our_commitments.into_iter().enumerate() {
assert!(commitments
.insert(
Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(),
our_commitments,
)
.is_none());
}
CommitmentsDb::set(txn, &id, &commitments); CommitmentsDb::set(txn, &id, &commitments);
let (machines, shares) = secret_share_machines(id, params, prior, commitments);
self.active_share.insert(id.set, (machines, shares.clone())); match secret_share_machines(id, params, prior, commitments) {
Ok((machines, shares)) => {
ProcessorMessage::Shares { id, shares } self.active_share.insert(id.set, (machines, shares.clone()));
ProcessorMessage::Shares { id, shares }
}
Err(e) => e,
}
} }
CoordinatorMessage::Shares { id, shares } => { CoordinatorMessage::Shares { id, shares } => {
@@ -321,36 +354,70 @@ impl<N: Network, D: Db> KeyGen<N, D> {
// Same commentary on inconsistency as above exists // Same commentary on inconsistency as above exists
let (machines, our_shares) = self.active_share.remove(&id.set).unwrap_or_else(|| { let (machines, our_shares) = self.active_share.remove(&id.set).unwrap_or_else(|| {
let prior = key_gen_machines(id, params, share_quantity); let prior = key_gen_machines(id, params, share_quantity).0;
secret_share_machines(id, params, prior, CommitmentsDb::get(txn, &id).unwrap()) let (machines, shares) =
secret_share_machines(id, params, prior, CommitmentsDb::get(txn, &id).unwrap())
.expect("got Shares for a key gen which faulted");
(machines, shares)
}); });
let mut rng = share_rng(id); let mut rng = share_rng(id);
fn handle_machine<C: Ciphersuite>( fn handle_machine<C: Ciphersuite>(
rng: &mut ChaCha20Rng, rng: &mut ChaCha20Rng,
id: KeyGenId,
// These are the params of our first share, not this machine's shares
params: ThresholdParams, params: ThresholdParams,
m: usize,
machine: KeyMachine<C>, machine: KeyMachine<C>,
shares_ref: &mut HashMap<Participant, &[u8]>, shares_ref: &mut HashMap<Participant, &[u8]>,
) -> ThresholdCore<C> { ) -> Result<ThresholdCore<C>, ProcessorMessage> {
// Parse the shares let params = ThresholdParams::new(
let shares = match shares_ref params.t(),
.iter_mut() params.n(),
.map(|(i, share)| { Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap(),
EncryptedMessage::<C, SecretShare<C::F>>::read(share, params).map(|share| (*i, share)) )
}) .unwrap();
.collect()
{
Ok(shares) => shares,
Err(e) => todo!("malicious signer: {:?}", e),
};
// TODO2: Handle the blame machine properly // Parse the shares
(match machine.calculate_share(rng, shares) { let mut shares = HashMap::new();
Ok(res) => res, for i in 1 ..= params.n() {
Err(e) => todo!("malicious signer: {:?}", e), let i = Participant::new(i).unwrap();
}) let Some(share) = shares_ref.get_mut(&i) else { continue };
.complete() shares.insert(
i,
EncryptedMessage::<C, SecretShare<C::F>>::read(share, params).map_err(|_| {
ProcessorMessage::InvalidShare { id, accuser: params.i(), faulty: i, blame: None }
})?,
);
}
Ok(
(match machine.calculate_share(rng, shares) {
Ok(res) => res,
Err(e) => match e {
DkgError::ZeroParameter(_, _) |
DkgError::InvalidThreshold(_, _) |
DkgError::InvalidParticipant(_, _) |
DkgError::InvalidSigningSet |
DkgError::InvalidCommitments(_) => unreachable!("{e:?}"),
DkgError::InvalidParticipantQuantity(_, _) |
DkgError::DuplicatedParticipant(_) |
DkgError::MissingParticipant(_) => {
panic!("coordinator sent invalid DKG shares: {e:?}")
}
DkgError::InvalidShare { participant, blame } => {
Err(ProcessorMessage::InvalidShare {
id,
accuser: params.i(),
faulty: participant,
blame: blame.map(|blame| blame.serialize()),
})?
}
},
})
.complete(),
)
} }
let mut substrate_keys = vec![]; let mut substrate_keys = vec![];
@@ -371,12 +438,27 @@ impl<N: Network, D: Db> KeyGen<N, D> {
} }
} }
let these_substrate_keys = handle_machine(&mut rng, params, machines.0, &mut shares_ref); let these_substrate_keys =
let these_network_keys = handle_machine(&mut rng, params, machines.1, &mut shares_ref); match handle_machine(&mut rng, id, params, m, machines.0, &mut shares_ref) {
Ok(keys) => keys,
Err(msg) => return msg,
};
let these_network_keys =
match handle_machine(&mut rng, id, params, m, machines.1, &mut shares_ref) {
Ok(keys) => keys,
Err(msg) => return msg,
};
for (_, shares) in shares_ref { for i in 1 ..= params.n() {
let i = Participant::new(i).unwrap();
let Some(shares) = shares_ref.get(&i) else { continue };
if !shares.is_empty() { if !shares.is_empty() {
todo!("malicious signer: extra bytes"); return ProcessorMessage::InvalidShare {
id,
accuser: these_substrate_keys.params().i(),
faulty: i,
blame: None,
};
} }
} }
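The Shares handling above distinguishes three classes of DKG failure: errors that can only arise from local misuse of the API (unreachable), errors that imply the coordinator relayed a malformed share set (panic), and InvalidShare, the only error attributable to a remote participant, which becomes an accusation carrying any decryption proof. The sketch below is a trimmed, hypothetical mirror of that triage with stand-in types, not the dkg crate's actual DkgError.

```rust
// Hypothetical, simplified mirror of the error triage around calculate_share.
type Participant = u16;

enum DkgError {
    // Only reachable through local misuse of the DKG API.
    InvalidSigningSet,
    // Implies the coordinator sent an incomplete or malformed share set.
    MissingParticipant(Participant),
    // The only error attributable to a specific remote participant.
    InvalidShare { participant: Participant, blame: Option<Vec<u8>> },
}

enum ProcessorMessage {
    InvalidShare { accuser: Participant, faulty: Participant, blame: Option<Vec<u8>> },
}

fn triage(our_i: Participant, e: DkgError) -> ProcessorMessage {
    match e {
        // We constructed the machine ourselves, so this can't happen.
        DkgError::InvalidSigningSet => unreachable!("local DKG misuse"),
        // The coordinator is trusted to relay a complete, well-formed share set.
        DkgError::MissingParticipant(i) => panic!("coordinator omitted shares for {i}"),
        // A peer sent a share which doesn't verify: accuse them, forwarding any
        // decryption proof so others can re-verify the accusation.
        DkgError::InvalidShare { participant, blame } => {
            ProcessorMessage::InvalidShare { accuser: our_i, faulty: participant, blame }
        }
    }
}
```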
@@ -407,6 +489,70 @@ impl<N: Network, D: Db> KeyGen<N, D> {
network_key: generated_network_key.unwrap().to_bytes().as_ref().to_vec(), network_key: generated_network_key.unwrap().to_bytes().as_ref().to_vec(),
} }
} }
CoordinatorMessage::VerifyBlame { id, accuser, accused, share, blame } => {
let params = ParamsDb::get(txn, &id.set).unwrap().0;
let mut share_ref = share.as_slice();
let Ok(substrate_share) = EncryptedMessage::<
Ristretto,
SecretShare<<Ristretto as Ciphersuite>::F>,
>::read(&mut share_ref, params) else {
return ProcessorMessage::Blame { id, participant: accused };
};
let Ok(network_share) = EncryptedMessage::<
N::Curve,
SecretShare<<N::Curve as Ciphersuite>::F>,
>::read(&mut share_ref, params) else {
return ProcessorMessage::Blame { id, participant: accused };
};
if !share_ref.is_empty() {
return ProcessorMessage::Blame { id, participant: accused };
}
let mut substrate_commitment_msgs = HashMap::new();
let mut network_commitment_msgs = HashMap::new();
let commitments = CommitmentsDb::get(txn, &id).unwrap();
for (i, commitments) in commitments {
let mut commitments = commitments.as_slice();
substrate_commitment_msgs
.insert(i, EncryptionKeyMessage::<_, _>::read(&mut commitments, params).unwrap());
network_commitment_msgs
.insert(i, EncryptionKeyMessage::<_, _>::read(&mut commitments, params).unwrap());
}
// There is a mild DoS here where someone with a valid blame bloats it to the maximum size
// Given the ambiguity, and the limited potential for DoS (this being called means *someone* is
// getting fatally slashed), there's no need to ensure the blame is minimal
let substrate_blame =
blame.clone().and_then(|blame| EncryptionKeyProof::read(&mut blame.as_slice()).ok());
let network_blame =
blame.clone().and_then(|blame| EncryptionKeyProof::read(&mut blame.as_slice()).ok());
let substrate_blame = AdditionalBlameMachine::new(
&mut rand_core::OsRng,
context(&id, SUBSTRATE_KEY_CONTEXT),
params.n(),
substrate_commitment_msgs,
)
.unwrap()
.blame(accuser, accused, substrate_share, substrate_blame);
let network_blame = AdditionalBlameMachine::new(
&mut rand_core::OsRng,
context(&id, NETWORK_KEY_CONTEXT),
params.n(),
network_commitment_msgs,
)
.unwrap()
.blame(accuser, accused, network_share, network_blame);
// If the accused was blamed for either, mark them as at fault
if (substrate_blame == accused) || (network_blame == accused) {
return ProcessorMessage::Blame { id, participant: accused };
}
ProcessorMessage::Blame { id, participant: accuser }
}
} }
} }
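Putting the new VerifyBlame arm in one place: the processor first re-reads the accused's share (a parse failure, or trailing bytes, immediately faults the accused), then rebuilds an AdditionalBlameMachine per key from the stored commitments and asks each who is at fault; only if neither machine faults the accused does the blame land on the accuser. The sketch below condenses that decision with the DKG machinery abstracted behind closures; it's an illustration of the control flow, not the real AdditionalBlameMachine API.

```rust
// Condensed sketch of the VerifyBlame decision. Each closure stands in for
// AdditionalBlameMachine::blame over one curve and returns the participant it
// holds at fault.
type Participant = u16;

fn verify_blame(
    accuser: Participant,
    accused: Participant,
    share_parses: bool,
    substrate_blame: impl FnOnce(Participant, Participant) -> Participant,
    network_blame: impl FnOnce(Participant, Participant) -> Participant,
) -> Participant {
    // An unparseable share (or trailing bytes) is immediately the accused's fault.
    if !share_parses {
        return accused;
    }
    // Run the blame machine for both keys; if either faults the accused, they're
    // at fault. If neither does, the accusation was bogus and the accuser is
    // faulted instead.
    let substrate = substrate_blame(accuser, accused);
    let network = network_blame(accuser, accused);
    if (substrate == accused) || (network == accused) {
        accused
    } else {
        accuser
    }
}
```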

View file

@@ -938,7 +938,8 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
} }
// Save the plans created while scanning // Save the plans created while scanning
// TODO: Should we combine all of these plans? // TODO: Should we combine all of these plans to reduce the fees incurred from their
// execution? They're refunds and forwards. Neither should need isolated Plans/Eventualities.
MultisigsDb::<N, D>::set_plans_from_scanning(txn, block_number, plans); MultisigsDb::<N, D>::set_plans_from_scanning(txn, block_number, plans);
// If any outputs were delayed, append them into this block // If any outputs were delayed, append them into this block

View file

@@ -5,7 +5,7 @@ use rand_core::OsRng;
use ciphersuite::group::GroupEncoding; use ciphersuite::group::GroupEncoding;
use frost::{ use frost::{
ThresholdKeys, ThresholdKeys, FrostError,
sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine}, sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine},
}; };
@@ -470,7 +470,7 @@ impl<N: Network, D: Db> Signer<N, D> {
msg: CoordinatorMessage, msg: CoordinatorMessage,
) -> Option<ProcessorMessage> { ) -> Option<ProcessorMessage> {
match msg { match msg {
CoordinatorMessage::Preprocesses { id, mut preprocesses } => { CoordinatorMessage::Preprocesses { id, preprocesses } => {
if self.verify_id(&id).is_err() { if self.verify_id(&id).is_err() {
return None; return None;
} }
@@ -487,23 +487,22 @@ impl<N: Network, D: Db> Signer<N, D> {
Some(machine) => machine, Some(machine) => machine,
}; };
let preprocesses = match preprocesses let mut parsed = HashMap::new();
.drain() for l in {
.map(|(l, preprocess)| { let mut keys = preprocesses.keys().cloned().collect::<Vec<_>>();
let mut preprocess_ref = preprocess.as_ref(); keys.sort();
let res = machines[0] keys
.read_preprocess::<&[u8]>(&mut preprocess_ref) } {
.map(|preprocess| (l, preprocess)); let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice();
if !preprocess_ref.is_empty() { let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else {
todo!("malicious signer: extra bytes"); return Some(ProcessorMessage::InvalidParticipant { id, participant: l });
} };
res if !preprocess_ref.is_empty() {
}) return Some(ProcessorMessage::InvalidParticipant { id, participant: l });
.collect::<Result<HashMap<_, _>, _>>() }
{ parsed.insert(l, res);
Ok(preprocesses) => preprocesses, }
Err(e) => todo!("malicious signer: {:?}", e), let preprocesses = parsed;
};
// Only keep a single machine as we only need one to get the signature // Only keep a single machine as we only need one to get the signature
let mut signature_machine = None; let mut signature_machine = None;
@@ -520,7 +519,18 @@ impl<N: Network, D: Db> Signer<N, D> {
// Use an empty message, as expected of TransactionMachines // Use an empty message, as expected of TransactionMachines
let (machine, share) = match machine.sign(preprocesses, &[]) { let (machine, share) = match machine.sign(preprocesses, &[]) {
Ok(res) => res, Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e), Err(e) => match e {
FrostError::InternalError(_) |
FrostError::InvalidParticipant(_, _) |
FrostError::InvalidSigningSet(_) |
FrostError::InvalidParticipantQuantity(_, _) |
FrostError::DuplicatedParticipant(_) |
FrostError::MissingParticipant(_) => unreachable!(),
FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {
return Some(ProcessorMessage::InvalidParticipant { id, participant: l })
}
},
}; };
if m == 0 { if m == 0 {
signature_machine = Some(machine); signature_machine = Some(machine);
@@ -534,7 +544,7 @@ impl<N: Network, D: Db> Signer<N, D> {
Some(ProcessorMessage::Share { id, shares: serialized_shares }) Some(ProcessorMessage::Share { id, shares: serialized_shares })
} }
CoordinatorMessage::Shares { id, mut shares } => { CoordinatorMessage::Shares { id, shares } => {
if self.verify_id(&id).is_err() { if self.verify_id(&id).is_err() {
return None; return None;
} }
@@ -557,21 +567,22 @@ impl<N: Network, D: Db> Signer<N, D> {
Some(machine) => machine, Some(machine) => machine,
}; };
let mut shares = match shares let mut parsed = HashMap::new();
.drain() for l in {
.map(|(l, share)| { let mut keys = shares.keys().cloned().collect::<Vec<_>>();
let mut share_ref = share.as_ref(); keys.sort();
let res = machine.read_share::<&[u8]>(&mut share_ref).map(|share| (l, share)); keys
if !share_ref.is_empty() { } {
todo!("malicious signer: extra bytes"); let mut share_ref = shares.get(&l).unwrap().as_slice();
} let Ok(res) = machine.read_share(&mut share_ref) else {
res return Some(ProcessorMessage::InvalidParticipant { id, participant: l });
}) };
.collect::<Result<HashMap<_, _>, _>>() if !share_ref.is_empty() {
{ return Some(ProcessorMessage::InvalidParticipant { id, participant: l });
Ok(shares) => shares, }
Err(e) => todo!("malicious signer: {:?}", e), parsed.insert(l, res);
}; }
let mut shares = parsed;
for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { for (i, our_share) in our_shares.into_iter().enumerate().skip(1) {
assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); assert!(shares.insert(self.keys[i].params().i(), our_share).is_none());
@@ -579,7 +590,18 @@ impl<N: Network, D: Db> Signer<N, D> {
let tx = match machine.complete(shares) { let tx = match machine.complete(shares) {
Ok(res) => res, Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e), Err(e) => match e {
FrostError::InternalError(_) |
FrostError::InvalidParticipant(_, _) |
FrostError::InvalidSigningSet(_) |
FrostError::InvalidParticipantQuantity(_, _) |
FrostError::DuplicatedParticipant(_) |
FrostError::MissingParticipant(_) => unreachable!(),
FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {
return Some(ProcessorMessage::InvalidParticipant { id, participant: l })
}
},
}; };
// Save the transaction in case it's needed for recovery // Save the transaction in case it's needed for recovery
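The signer's parse loops above replace `drain()` with iteration over sorted participant keys. Since the first malformed preprocess or share aborts with InvalidParticipant, a deterministic order means every honest signer reports the same faulty participant for the same input set. A self-contained sketch of the pattern, with the reader closure standing in for read_preprocess/read_share:

```rust
use std::collections::HashMap;

// Illustrative sketch of the new parse loop; Participant is a stand-in.
type Participant = u16;

enum Outcome {
    InvalidParticipant(Participant),
    Parsed(HashMap<Participant, Vec<u8>>),
}

fn parse_messages(
    messages: &HashMap<Participant, Vec<u8>>,
    read_one: impl Fn(&mut &[u8]) -> Result<Vec<u8>, ()>,
) -> Outcome {
    let mut parsed = HashMap::new();
    // Iterate in sorted participant order so every honest signer which hits a
    // malformed message reports the *same* faulty participant.
    let mut keys = messages.keys().copied().collect::<Vec<_>>();
    keys.sort();
    for l in keys {
        let mut buf = messages[&l].as_slice();
        let Ok(res) = read_one(&mut buf) else {
            return Outcome::InvalidParticipant(l);
        };
        // Trailing bytes are treated the same as an unparseable message.
        if !buf.is_empty() {
            return Outcome::InvalidParticipant(l);
        }
        parsed.insert(l, res);
    }
    Outcome::Parsed(parsed)
}
```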

View file

@@ -6,7 +6,7 @@ use rand_core::OsRng;
use ciphersuite::group::GroupEncoding; use ciphersuite::group::GroupEncoding;
use frost::{ use frost::{
curve::Ristretto, curve::Ristretto,
ThresholdKeys, ThresholdKeys, FrostError,
algorithm::Algorithm, algorithm::Algorithm,
sign::{ sign::{
Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine,
@@ -246,7 +246,7 @@ impl<D: Db> SubstrateSigner<D> {
msg: CoordinatorMessage, msg: CoordinatorMessage,
) -> Option<messages::ProcessorMessage> { ) -> Option<messages::ProcessorMessage> {
match msg { match msg {
CoordinatorMessage::BatchPreprocesses { id, mut preprocesses } => { CoordinatorMessage::BatchPreprocesses { id, preprocesses } => {
if self.verify_id(&id).is_err() { if self.verify_id(&id).is_err() {
return None; return None;
} }
@@ -263,23 +263,22 @@ impl<D: Db> SubstrateSigner<D> {
Some(preprocess) => preprocess, Some(preprocess) => preprocess,
}; };
let preprocesses = match preprocesses let mut parsed = HashMap::new();
.drain() for l in {
.map(|(l, preprocess)| { let mut keys = preprocesses.keys().cloned().collect::<Vec<_>>();
let mut preprocess_ref = preprocess.as_ref(); keys.sort();
let res = machines[0] keys
.read_preprocess::<&[u8]>(&mut preprocess_ref) } {
.map(|preprocess| (l, preprocess)); let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice();
if !preprocess_ref.is_empty() { let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else {
todo!("malicious signer: extra bytes"); return Some((ProcessorMessage::InvalidParticipant { id, participant: l }).into());
} };
res if !preprocess_ref.is_empty() {
}) return Some((ProcessorMessage::InvalidParticipant { id, participant: l }).into());
.collect::<Result<HashMap<_, _>, _>>() }
{ parsed.insert(l, res);
Ok(preprocesses) => preprocesses, }
Err(e) => todo!("malicious signer: {:?}", e), let preprocesses = parsed;
};
// Only keep a single machine as we only need one to get the signature // Only keep a single machine as we only need one to get the signature
let mut signature_machine = None; let mut signature_machine = None;
@@ -296,7 +295,18 @@ impl<D: Db> SubstrateSigner<D> {
let (machine, share) = let (machine, share) =
match machine.sign(preprocesses, &batch_message(&self.signable[&id.id])) { match machine.sign(preprocesses, &batch_message(&self.signable[&id.id])) {
Ok(res) => res, Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e), Err(e) => match e {
FrostError::InternalError(_) |
FrostError::InvalidParticipant(_, _) |
FrostError::InvalidSigningSet(_) |
FrostError::InvalidParticipantQuantity(_, _) |
FrostError::DuplicatedParticipant(_) |
FrostError::MissingParticipant(_) => unreachable!(),
FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {
return Some((ProcessorMessage::InvalidParticipant { id, participant: l }).into())
}
},
}; };
if m == 0 { if m == 0 {
signature_machine = Some(machine); signature_machine = Some(machine);
@@ -314,7 +324,7 @@ impl<D: Db> SubstrateSigner<D> {
Some((ProcessorMessage::BatchShare { id, shares: serialized_shares }).into()) Some((ProcessorMessage::BatchShare { id, shares: serialized_shares }).into())
} }
CoordinatorMessage::BatchShares { id, mut shares } => { CoordinatorMessage::BatchShares { id, shares } => {
if self.verify_id(&id).is_err() { if self.verify_id(&id).is_err() {
return None; return None;
} }
@@ -337,21 +347,22 @@ impl<D: Db> SubstrateSigner<D> {
Some(signing) => signing, Some(signing) => signing,
}; };
let mut shares = match shares let mut parsed = HashMap::new();
.drain() for l in {
.map(|(l, share)| { let mut keys = shares.keys().cloned().collect::<Vec<_>>();
let mut share_ref = share.as_ref(); keys.sort();
let res = machine.read_share::<&[u8]>(&mut share_ref).map(|share| (l, share)); keys
if !share_ref.is_empty() { } {
todo!("malicious signer: extra bytes"); let mut share_ref = shares.get(&l).unwrap().as_slice();
} let Ok(res) = machine.read_share(&mut share_ref) else {
res return Some((ProcessorMessage::InvalidParticipant { id, participant: l }).into());
}) };
.collect::<Result<HashMap<_, _>, _>>() if !share_ref.is_empty() {
{ return Some((ProcessorMessage::InvalidParticipant { id, participant: l }).into());
Ok(shares) => shares, }
Err(e) => todo!("malicious signer: {:?}", e), parsed.insert(l, res);
}; }
let mut shares = parsed;
for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { for (i, our_share) in our_shares.into_iter().enumerate().skip(1) {
assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); assert!(shares.insert(self.keys[i].params().i(), our_share).is_none());
@@ -359,7 +370,18 @@ impl<D: Db> SubstrateSigner<D> {
let sig = match machine.complete(shares) { let sig = match machine.complete(shares) {
Ok(res) => res, Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e), Err(e) => match e {
FrostError::InternalError(_) |
FrostError::InvalidParticipant(_, _) |
FrostError::InvalidSigningSet(_) |
FrostError::InvalidParticipantQuantity(_, _) |
FrostError::DuplicatedParticipant(_) |
FrostError::MissingParticipant(_) => unreachable!(),
FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {
return Some((ProcessorMessage::InvalidParticipant { id, participant: l }).into())
}
},
}; };
info!("signed batch {} with attempt #{}", hex::encode(id.id), id.attempt); info!("signed batch {} with attempt #{}", hex::encode(id.id), id.attempt);