diff --git a/coins/monero/src/ringct/clsag/multisig.rs b/coins/monero/src/ringct/clsag/multisig.rs index f7b24314..cec1cbd6 100644 --- a/coins/monero/src/ringct/clsag/multisig.rs +++ b/coins/monero/src/ringct/clsag/multisig.rs @@ -83,7 +83,7 @@ pub struct ClsagAddendum { impl WriteAddendum for ClsagAddendum { fn write(&self, writer: &mut W) -> io::Result<()> { writer.write_all(self.key_image.compress().to_bytes().as_ref())?; - self.dleq.serialize(writer) + self.dleq.write(writer) } } @@ -197,7 +197,7 @@ impl Algorithm for ClsagMultisig { Err(io::Error::new(io::ErrorKind::Other, "non-canonical key image"))?; } - Ok(ClsagAddendum { key_image: xH, dleq: DLEqProof::::deserialize(reader)? }) + Ok(ClsagAddendum { key_image: xH, dleq: DLEqProof::::read(reader)? }) } fn process_addendum( diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index 2e8d8030..7dcb6f13 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -4,7 +4,6 @@ use std::{ collections::HashMap, }; -use zeroize::Zeroizing; use rand_core::{RngCore, CryptoRng, SeedableRng}; use rand_chacha::ChaCha20Rng; @@ -217,18 +216,14 @@ impl SignMachine for TransactionSignMachine { type SignatureShare = Vec>; type SignatureMachine = TransactionSignatureMachine; - fn cache(self) -> Zeroizing { + fn cache(self) -> CachedPreprocess { unimplemented!( "Monero transactions don't support caching their preprocesses due to {}", "being already bound to a specific transaction" ); } - fn from_cache( - _: (), - _: ThresholdKeys, - _: Zeroizing, - ) -> Result { + fn from_cache(_: (), _: ThresholdKeys, _: CachedPreprocess) -> Result { unimplemented!( "Monero transactions don't support caching their preprocesses due to {}", "being already bound to a specific transaction" diff --git a/crypto/dkg/src/encryption.rs b/crypto/dkg/src/encryption.rs index 08a56fe9..341f033d 100644 --- a/crypto/dkg/src/encryption.rs +++ b/crypto/dkg/src/encryption.rs @@ -1,10 +1,12 @@ -use core::{hash::Hash, fmt::Debug}; +use core::fmt::Debug; use std::{ ops::Deref, io::{self, Read, Write}, collections::HashMap, }; +use thiserror::Error; + use zeroize::{Zeroize, Zeroizing}; use rand_core::{RngCore, CryptoRng}; @@ -13,12 +15,17 @@ use chacha20::{ Key as Cc20Key, Nonce as Cc20Iv, ChaCha20, }; -use group::GroupEncoding; - -use ciphersuite::Ciphersuite; - use transcript::{Transcript, RecommendedTranscript}; +#[cfg(test)] +use group::ff::Field; +use group::GroupEncoding; +use ciphersuite::Ciphersuite; +use multiexp::BatchVerifier; + +use schnorr::SchnorrSignature; +use dleq::DLEqProof; + use crate::ThresholdParams; pub trait ReadWrite: Sized { @@ -35,6 +42,7 @@ pub trait ReadWrite: Sized { pub trait Message: Clone + PartialEq + Eq + Debug + Zeroize + ReadWrite {} impl Message for M {} +/// Wraps a message with a key to use for encryption in the future. 
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)] pub struct EncryptionKeyMessage { msg: M, @@ -57,20 +65,115 @@ impl EncryptionKeyMessage { self.write(&mut buf).unwrap(); buf } + + // Used by tests + pub(crate) fn enc_key(&self) -> C::G { + self.enc_key + } } -pub trait Encryptable: Clone + AsMut<[u8]> + Zeroize + ReadWrite {} -impl + Zeroize + ReadWrite> Encryptable for E {} -#[derive(Clone, Zeroize)] -pub struct EncryptedMessage(Zeroizing); +pub trait Encryptable: Clone + AsRef<[u8]> + AsMut<[u8]> + Zeroize + ReadWrite {} +impl + AsMut<[u8]> + Zeroize + ReadWrite> Encryptable for E {} -impl EncryptedMessage { +/// An encrypted message, with a per-message encryption key enabling revealing specific messages +/// without side effects. +#[derive(Clone, Zeroize)] +pub struct EncryptedMessage { + key: C::G, + // Also include a proof-of-possession for the key. + // If this proof-of-possession wasn't here, Eve could observe Alice encrypt to Bob with key X, + // then send Bob a message also claiming to use X. + // While Eve's message would fail to meaningfully decrypt, Bob would then use this to create a + // blame argument against Eve. When they do, they'd reveal bX, revealing Alice's message to Bob. + // This is a massive side effect which could break some protocols, in the worst case. + // While Eve can still reuse their own keys, causing Bob to leak all messages by revealing for + // any single one, that's effectively Eve revealing themselves, and not considered relevant. + pop: SchnorrSignature, + msg: Zeroizing, +} + +fn ecdh(private: &Zeroizing, public: C::G) -> Zeroizing { + Zeroizing::new(public * private.deref()) +} + +fn cipher(dst: &'static [u8], ecdh: &Zeroizing) -> ChaCha20 { + // Ideally, we'd box this transcript with ZAlloc, yet that's only possible on nightly + // TODO: https://github.com/serai-dex/serai/issues/151 + let mut transcript = RecommendedTranscript::new(b"DKG Encryption v0.2"); + transcript.domain_separate(dst); + + let mut ecdh = ecdh.to_bytes(); + transcript.append_message(b"shared_key", ecdh.as_ref()); + ecdh.as_mut().zeroize(); + + let zeroize = |buf: &mut [u8]| buf.zeroize(); + + let mut key = Cc20Key::default(); + let mut challenge = transcript.challenge(b"key"); + key.copy_from_slice(&challenge[.. 32]); + zeroize(challenge.as_mut()); + + // The RecommendedTranscript isn't vulnerable to length extension attacks, yet if it was, + // it'd make sense to clone it (and fork it) just to hedge against that + let mut iv = Cc20Iv::default(); + let mut challenge = transcript.challenge(b"iv"); + iv.copy_from_slice(&challenge[.. 12]); + zeroize(challenge.as_mut()); + + // Same commentary as the transcript regarding ZAlloc + // TODO: https://github.com/serai-dex/serai/issues/151 + let res = ChaCha20::new(&key, &iv); + zeroize(key.as_mut()); + zeroize(iv.as_mut()); + res +} + +fn encrypt( + rng: &mut R, + dst: &'static [u8], + from: u16, + to: C::G, + mut msg: Zeroizing, +) -> EncryptedMessage { + /* + The following code could be used to replace the requirement on an RNG here. + It's just currently not an issue to require taking in an RNG here. 
+ let last = self.last_enc_key.to_bytes(); + self.last_enc_key = C::hash_to_F(b"encryption_base", last.as_ref()); + let key = C::hash_to_F(b"encryption_key", last.as_ref()); + last.as_mut().zeroize(); + */ + + let key = Zeroizing::new(C::random_nonzero_F(rng)); + cipher::(dst, &ecdh::(&key, to)).apply_keystream(msg.as_mut().as_mut()); + + let pub_key = C::generator() * key.deref(); + let nonce = Zeroizing::new(C::random_nonzero_F(rng)); + let pub_nonce = C::generator() * nonce.deref(); + EncryptedMessage { + key: pub_key, + pop: SchnorrSignature::sign( + &key, + nonce, + pop_challenge::(pub_nonce, pub_key, from, msg.deref().as_ref()), + ), + msg, + } +} + +impl EncryptedMessage { pub fn read(reader: &mut R, params: ThresholdParams) -> io::Result { - Ok(Self(Zeroizing::new(E::read(reader, params)?))) + Ok(Self { + key: C::read_G(reader)?, + pop: SchnorrSignature::::read(reader)?, + msg: Zeroizing::new(E::read(reader, params)?), + }) } pub fn write(&self, writer: &mut W) -> io::Result<()> { - self.0.write(writer) + writer.write_all(self.key.to_bytes().as_ref())?; + self.pop.write(writer)?; + self.msg.write(writer) } pub fn serialize(&self) -> Vec { @@ -78,17 +181,150 @@ impl EncryptedMessage { self.write(&mut buf).unwrap(); buf } + + #[cfg(test)] + pub(crate) fn invalidate_pop(&mut self) { + self.pop.s += C::F::one(); + } + + #[cfg(test)] + pub(crate) fn invalidate_msg(&mut self, rng: &mut R, from: u16) { + // Invalidate the message by specifying a new key/Schnorr PoP + // This will cause all initial checks to pass, yet a decrypt to gibberish + let key = Zeroizing::new(C::random_nonzero_F(rng)); + let pub_key = C::generator() * key.deref(); + let nonce = Zeroizing::new(C::random_nonzero_F(rng)); + let pub_nonce = C::generator() * nonce.deref(); + self.key = pub_key; + self.pop = SchnorrSignature::sign( + &key, + nonce, + pop_challenge::(pub_nonce, pub_key, from, self.msg.deref().as_ref()), + ); + } + + // Assumes the encrypted message is a secret share. + #[cfg(test)] + pub(crate) fn invalidate_share_serialization( + &mut self, + rng: &mut R, + dst: &'static [u8], + from: u16, + to: C::G, + ) { + use group::ff::PrimeField; + + let mut repr = ::Repr::default(); + for b in repr.as_mut().iter_mut() { + *b = 255; + } + // Tries to guarantee the above assumption. + assert_eq!(repr.as_ref().len(), self.msg.as_ref().len()); + // Checks that this isn't over a field where this is somehow valid + assert!(!bool::from(C::F::from_repr(repr).is_some())); + + self.msg.as_mut().as_mut().copy_from_slice(repr.as_ref()); + *self = encrypt(rng, dst, from, to, self.msg.clone()); + } + + // Assumes the encrypted message is a secret share. + #[cfg(test)] + pub(crate) fn invalidate_share_value( + &mut self, + rng: &mut R, + dst: &'static [u8], + from: u16, + to: C::G, + ) { + use group::ff::PrimeField; + + // Assumes the share isn't randomly 1 + let repr = C::F::one().to_repr(); + self.msg.as_mut().as_mut().copy_from_slice(repr.as_ref()); + *self = encrypt(rng, dst, from, to, self.msg.clone()); + } } +/// A proof that the provided point is the legitimately derived shared key for some message. +#[derive(Clone, PartialEq, Eq, Debug, Zeroize)] +pub struct EncryptionKeyProof { + key: Zeroizing, + dleq: DLEqProof, +} + +impl EncryptionKeyProof { + pub fn read(reader: &mut R) -> io::Result { + Ok(Self { key: Zeroizing::new(C::read_G(reader)?), dleq: DLEqProof::read(reader)? 
}) + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(self.key.to_bytes().as_ref())?; + self.dleq.write(writer) + } + + pub fn serialize(&self) -> Vec { + let mut buf = vec![]; + self.write(&mut buf).unwrap(); + buf + } + + #[cfg(test)] + pub(crate) fn invalidate_key(&mut self) { + *self.key += C::generator(); + } + + #[cfg(test)] + pub(crate) fn invalidate_dleq(&mut self) { + let mut buf = vec![]; + self.dleq.write(&mut buf).unwrap(); + // Adds one to c since this is serialized c, s + // Adding one to c will leave a validly serialized c + // Adding one to s may leave an invalidly serialized s + buf[0] = buf[0].wrapping_add(1); + self.dleq = DLEqProof::read::<&[u8]>(&mut buf.as_ref()).unwrap(); + } +} + +// This doesn't need to take the msg. It just doesn't hurt as an extra layer. +// This still doesn't mean the DKG offers an authenticated channel. The per-message keys have no +// root of trust other than their existence in the assumed-to-exist external authenticated channel. +fn pop_challenge(nonce: C::G, key: C::G, sender: u16, msg: &[u8]) -> C::F { + let mut transcript = RecommendedTranscript::new(b"DKG Encryption Key Proof of Possession v0.2"); + transcript.append_message(b"nonce", nonce.to_bytes()); + transcript.append_message(b"key", key.to_bytes()); + // This is sufficient to prevent the attack this is meant to stop + transcript.append_message(b"sender", sender.to_le_bytes()); + // This, as written above, doesn't hurt + transcript.append_message(b"message", msg); + // While this is a PoK and a PoP, it's called a PoP here since the important part is its owner + // Elsewhere, where we use the term PoK, the important part is that it isn't some inverse, with + // an unknown to anyone discrete log, breaking the system + C::hash_to_F(b"DKG-encryption-proof_of_possession", &transcript.challenge(b"schnorr")) +} + +fn encryption_key_transcript() -> RecommendedTranscript { + RecommendedTranscript::new(b"DKG Encryption Key Correctness Proof v0.2") +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)] +pub(crate) enum DecryptionError { + #[error("accused provided an invalid signature")] + InvalidSignature, + #[error("accuser provided an invalid decryption key")] + InvalidProof, +} + +// A simple box for managing encryption. 
#[derive(Clone)] -pub(crate) struct Encryption { +pub(crate) struct Encryption { dst: &'static [u8], + i: u16, enc_key: Zeroizing, enc_pub_key: C::G, - enc_keys: HashMap, + enc_keys: HashMap, } -impl Zeroize for Encryption { +impl Zeroize for Encryption { fn zeroize(&mut self) { self.enc_key.zeroize(); self.enc_pub_key.zeroize(); @@ -98,10 +334,16 @@ impl Zeroize for Encryption { } } -impl Encryption { - pub(crate) fn new(dst: &'static [u8], rng: &mut R) -> Self { +impl Encryption { + pub(crate) fn new(dst: &'static [u8], i: u16, rng: &mut R) -> Self { let enc_key = Zeroizing::new(C::random_nonzero_F(rng)); - Self { dst, enc_pub_key: C::generator() * enc_key.deref(), enc_key, enc_keys: HashMap::new() } + Self { + dst, + i, + enc_pub_key: C::generator() * enc_key.deref(), + enc_key, + enc_keys: HashMap::new(), + } } pub(crate) fn registration(&self, msg: M) -> EncryptionKeyMessage { @@ -110,7 +352,7 @@ impl Encryption { pub(crate) fn register( &mut self, - participant: Id, + participant: u16, msg: EncryptionKeyMessage, ) -> M { if self.enc_keys.contains_key(&participant) { @@ -120,62 +362,81 @@ impl Encryption { msg.msg } - fn cipher(&self, participant: Id, encrypt: bool) -> ChaCha20 { - // Ideally, we'd box this transcript with ZAlloc, yet that's only possible on nightly - // TODO: https://github.com/serai-dex/serai/issues/151 - let mut transcript = RecommendedTranscript::new(b"DKG Encryption v0.2"); - transcript.domain_separate(self.dst); + pub(crate) fn encrypt( + &self, + rng: &mut R, + participant: u16, + msg: Zeroizing, + ) -> EncryptedMessage { + encrypt(rng, self.dst, self.i, self.enc_keys[&participant], msg) + } - let other = self.enc_keys[&participant]; - if encrypt { - transcript.append_message(b"sender", self.enc_pub_key.to_bytes()); - transcript.append_message(b"receiver", other.to_bytes()); - } else { - transcript.append_message(b"sender", other.to_bytes()); - transcript.append_message(b"receiver", self.enc_pub_key.to_bytes()); + pub(crate) fn decrypt( + &self, + rng: &mut R, + batch: &mut BatchVerifier, + // Uses a distinct batch ID so if this batch verifier is reused, we know it's the PoP aspect + // which failed, and therefore to use None for the blame + batch_id: I, + from: u16, + mut msg: EncryptedMessage, + ) -> (Zeroizing, EncryptionKeyProof) { + msg.pop.batch_verify( + rng, + batch, + batch_id, + msg.key, + pop_challenge::(msg.pop.R, msg.key, from, msg.msg.deref().as_ref()), + ); + + let key = ecdh::(&self.enc_key, msg.key); + cipher::(self.dst, &key).apply_keystream(msg.msg.as_mut().as_mut()); + ( + msg.msg, + EncryptionKeyProof { + key, + dleq: DLEqProof::prove( + rng, + &mut encryption_key_transcript(), + &[C::generator(), msg.key], + &self.enc_key, + ), + }, + ) + } + + // Given a message, the intended decryptor, and a proof for its key, decrypt the message. + // Returns an error if the signature or the provided decryption key is invalid.
+ pub(crate) fn decrypt_with_proof( + &self, + from: u16, + decryptor: u16, + mut msg: EncryptedMessage, + // There's no encryption key proof if the accusation is of an invalid signature + proof: Option>, + ) -> Result, DecryptionError> { + if !msg + .pop + .verify(msg.key, pop_challenge::(msg.pop.R, msg.key, from, msg.msg.deref().as_ref())) + { + Err(DecryptionError::InvalidSignature)?; } - let mut shared = Zeroizing::new(other * self.enc_key.deref()).deref().to_bytes(); - transcript.append_message(b"shared_key", shared.as_ref()); - shared.as_mut().zeroize(); + if let Some(proof) = proof { + // Verify this is the decryption key for this message + proof + .dleq + .verify( + &mut encryption_key_transcript(), + &[C::generator(), msg.key], + &[self.enc_keys[&decryptor], *proof.key], + ) + .map_err(|_| DecryptionError::InvalidProof)?; - let zeroize = |buf: &mut [u8]| buf.zeroize(); - - let mut key = Cc20Key::default(); - let mut challenge = transcript.challenge(b"key"); - key.copy_from_slice(&challenge[.. 32]); - zeroize(challenge.as_mut()); - - // The RecommendedTranscript isn't vulnerable to length extension attacks, yet if it was, - // it'd make sense to clone it (and fork it) just to hedge against that - let mut iv = Cc20Iv::default(); - let mut challenge = transcript.challenge(b"iv"); - iv.copy_from_slice(&challenge[.. 12]); - zeroize(challenge.as_mut()); - - // Same commentary as the transcript regarding ZAlloc - // TODO: https://github.com/serai-dex/serai/issues/151 - let res = ChaCha20::new(&key, &iv); - zeroize(key.as_mut()); - zeroize(iv.as_mut()); - res - } - - pub(crate) fn encrypt( - &self, - participant: Id, - mut msg: Zeroizing, - ) -> EncryptedMessage { - self.cipher(participant, true).apply_keystream(msg.as_mut().as_mut()); - EncryptedMessage(msg) - } - - pub(crate) fn decrypt( - &self, - participant: Id, - mut msg: EncryptedMessage, - ) -> Zeroizing { - self.cipher(participant, false).apply_keystream(msg.0.as_mut().as_mut()); - msg.0 + cipher::(self.dst, &proof.key).apply_keystream(msg.msg.as_mut().as_mut()); + Ok(msg.msg) + } else { + Err(DecryptionError::InvalidProof) + } } } diff --git a/crypto/dkg/src/frost.rs b/crypto/dkg/src/frost.rs index 8bcd6bea..efbea287 100644 --- a/crypto/dkg/src/frost.rs +++ b/crypto/dkg/src/frost.rs @@ -1,6 +1,9 @@ -use std::{ +use core::{ marker::PhantomData, ops::Deref, + fmt::{Debug, Formatter}, +}; +use std::{ io::{self, Read, Write}, collections::HashMap, }; @@ -13,7 +16,7 @@ use transcript::{Transcript, RecommendedTranscript}; use group::{ ff::{Field, PrimeField}, - GroupEncoding, + Group, GroupEncoding, }; use ciphersuite::Ciphersuite; use multiexp::{multiexp_vartime, BatchVerifier}; @@ -22,9 +25,14 @@ use schnorr::SchnorrSignature; use crate::{ DkgError, ThresholdParams, ThresholdCore, validate_map, - encryption::{ReadWrite, EncryptionKeyMessage, EncryptedMessage, Encryption}, + encryption::{ + ReadWrite, EncryptionKeyMessage, EncryptedMessage, Encryption, EncryptionKeyProof, + DecryptionError, + }, }; +type FrostError = DkgError>; + #[allow(non_snake_case)] fn challenge(context: &str, l: u16, R: &[u8], Am: &[u8]) -> C::F { let mut transcript = RecommendedTranscript::new(b"DKG FROST v0.2"); @@ -33,10 +41,15 @@ fn challenge(context: &str, l: u16, R: &[u8], Am: &[u8]) -> C::F transcript.append_message(b"participant", l.to_le_bytes()); transcript.append_message(b"nonce", R); transcript.append_message(b"commitments", Am); - C::hash_to_F(b"PoK 0", &transcript.challenge(b"challenge")) + C::hash_to_F(b"DKG-FROST-proof_of_knowledge-0", 
&transcript.challenge(b"schnorr")) } -/// Commitments message to be broadcast to all other parties. +/// The commitments message, intended to be broadcast to all other parties. +/// Every participant should only provide one set of commitments to all parties. +/// If any participant sends multiple sets of commitments, they are faulty and should be presumed +/// malicious. +/// As this library does not handle networking, it is also unable to detect if any participant is +/// so faulty. That responsibility lies with the caller. #[derive(Clone, PartialEq, Eq, Debug, Zeroize)] pub struct Commitments { commitments: Vec, @@ -119,7 +132,7 @@ impl KeyGenMachine { ); // Additionally create an encryption mechanism to protect the secret shares - let encryption = Encryption::new(b"FROST", rng); + let encryption = Encryption::new(b"FROST", self.params.i, rng); // Step 4: Broadcast let msg = @@ -149,19 +162,39 @@ fn polynomial(coefficients: &[Zeroizing], l: u16) -> share } -/// Secret share to be sent to the party it's intended for over an authenticated channel. -#[derive(Clone, PartialEq, Eq, Debug)] +/// The secret share message, to be sent to the party it's intended for over an authenticated +/// channel. +/// If any participant sends multiple secret shares to another participant, they are faulty. // This should presumably be written as SecretShare(Zeroizing). // It's unfortunately not possible as F::Repr doesn't have Zeroize as a bound. // The encryption system also explicitly uses Zeroizing so it can ensure anything being // encrypted is within Zeroizing. Accordingly, internally having Zeroizing would be redundant. #[derive(Clone, PartialEq, Eq)] pub struct SecretShare(F::Repr); +impl AsRef<[u8]> for SecretShare { fn as_ref(&self) -> &[u8] { self.0.as_ref() } } impl AsMut<[u8]> for SecretShare { fn as_mut(&mut self) -> &mut [u8] { self.0.as_mut() } } +impl Debug for SecretShare { fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), core::fmt::Error> { fmt.debug_struct("SecretShare").finish_non_exhaustive() } } impl Zeroize for SecretShare { fn zeroize(&mut self) { self.0.as_mut().zeroize() } } +// Still manually implement ZeroizeOnDrop to ensure these don't stick around. +// We could replace Zeroizing with a bound M: ZeroizeOnDrop. +// Doing so would potentially fail to highlight the expected behavior with these and remove a layer +// of depth. impl Drop for SecretShare { fn drop(&mut self) { self.zeroize(); @@ -188,7 +221,7 @@ pub struct SecretShareMachine { context: String, coefficients: Vec>, our_commitments: Vec, - encryption: Encryption, + encryption: Encryption, } impl SecretShareMachine { @@ -198,7 +231,7 @@ impl SecretShareMachine { &mut self, rng: &mut R, mut commitments: HashMap>>, - ) -> Result>, DkgError> { + ) -> Result>, FrostError> { validate_map(&commitments, &(1 ..= self.params.n()).collect::>(), self.params.i())?; let mut batch = BatchVerifier::::new(commitments.len()); @@ -221,21 +254,23 @@ impl SecretShareMachine { }) .collect::>(); - batch.verify_with_vartime_blame().map_err(DkgError::InvalidProofOfKnowledge)?; + batch.verify_with_vartime_blame().map_err(FrostError::InvalidProofOfKnowledge)?; commitments.insert(self.params.i, self.our_commitments.drain(..).collect()); Ok(commitments) } /// Continue generating a key. - /// Takes in everyone else's commitments. Returns a HashMap of secret shares to be sent over - /// authenticated channels to their relevant counterparties. + /// Takes in everyone else's commitments. 
Returns a HashMap of encrypted secret shares to be sent + /// over authenticated channels to their relevant counterparties. + /// If any participant sends multiple secret shares to another participant, they are faulty. #[allow(clippy::type_complexity)] pub fn generate_secret_shares( mut self, rng: &mut R, commitments: HashMap>>, - ) -> Result<(KeyMachine, HashMap>>), DkgError> { + ) -> Result<(KeyMachine, HashMap>>), FrostError> + { let commitments = self.verify_r1(&mut *rng, commitments)?; // Step 1: Generate secret shares for all other parties @@ -250,7 +285,7 @@ impl SecretShareMachine { let mut share = polynomial(&self.coefficients, l); let share_bytes = Zeroizing::new(SecretShare::(share.to_repr())); share.zeroize(); - res.insert(l, self.encryption.encrypt(l, share_bytes)); + res.insert(l, self.encryption.encrypt(rng, l, share_bytes)); } // Calculate our own share @@ -264,13 +299,18 @@ impl SecretShareMachine { } } -/// Final step of the key generation protocol. +/// Advancement of the secret share state machine protocol. +/// This machine will 'complete' the protocol, from a local perspective, and can be the last +/// interactive component. In order to be secure, the parties must confirm having successfully +/// completed the protocol (an effort out of scope for this library), yet this is modelled by one +/// more state transition. pub struct KeyMachine { params: ThresholdParams, secret: Zeroizing, commitments: HashMap>, - encryption: Encryption, + encryption: Encryption, } + impl Zeroize for KeyMachine { fn zeroize(&mut self) { self.params.zeroize(); @@ -281,59 +321,84 @@ impl Zeroize for KeyMachine { self.encryption.zeroize(); } } -impl Drop for KeyMachine { - fn drop(&mut self) { - self.zeroize() - } + +// Calculate the exponent for a given participant and apply it to a series of commitments +// Initially used with the actual commitments to verify the secret share, later used with +// stripes to generate the verification shares +fn exponential(i: u16, values: &[C::G]) -> Vec<(C::F, C::G)> { + let i = C::F::from(i.into()); + let mut res = Vec::with_capacity(values.len()); + (0 .. values.len()).into_iter().fold(C::F::one(), |exp, l| { + res.push((exp, values[l])); + exp * i + }); + res +} + +fn share_verification_statements( + target: u16, + commitments: &[C::G], + mut share: Zeroizing, +) -> Vec<(C::F, C::G)> { + // This can be insecurely linearized from n * t to just n using the below sums for a given + // stripe. Doing so uses naive addition which is subject to malleability. The only way to + // ensure that malleability isn't present is to use this n * t algorithm, which runs + // per sender and not as an aggregate of all senders, which also enables blame + let mut values = exponential::(target, commitments); + + // Perform the share multiplication outside of the multiexp to minimize stack copying + // While the multiexp BatchVerifier does zeroize its flattened multiexp, and itself, it still + // converts whatever we give to an iterator and then builds a Vec internally, welcoming copies + let neg_share_pub = C::generator() * -*share; + share.zeroize(); + values.push((C::F::one(), neg_share_pub)); + + values +} + +#[derive(Clone, Copy, Hash, Debug, Zeroize)] +enum BatchId { + Decryption(u16), + Share(u16), } -impl ZeroizeOnDrop for KeyMachine {} impl KeyMachine { - /// Complete key generation. - /// Takes in everyone elses' shares submitted to us. Returns a ThresholdCore object representing - /// the generated keys. 
Successful protocol completion MUST be confirmed by all parties before - /// these keys may be safely used. - pub fn complete( + /// Calculate our share given the shares sent to us. + /// Returns a BlameMachine usable to determine if faults in the protocol occurred. + /// Will error on, and return a blame proof for, the first-observed case of faulty behavior. + pub fn calculate_share( mut self, rng: &mut R, - mut shares: HashMap>>, - ) -> Result, DkgError> { + mut shares: HashMap>>, + ) -> Result, FrostError> { validate_map(&shares, &(1 ..= self.params.n()).collect::>(), self.params.i())?; - // Calculate the exponent for a given participant and apply it to a series of commitments - // Initially used with the actual commitments to verify the secret share, later used with - // stripes to generate the verification shares - let exponential = |i: u16, values: &[_]| { - let i = C::F::from(i.into()); - let mut res = Vec::with_capacity(self.params.t().into()); - (0 .. usize::from(self.params.t())).into_iter().fold(C::F::one(), |exp, l| { - res.push((exp, values[l])); - exp * i - }); - res - }; - let mut batch = BatchVerifier::new(shares.len()); + let mut blames = HashMap::new(); for (l, share_bytes) in shares.drain() { - let mut share_bytes = self.encryption.decrypt(l, share_bytes); - let mut share = Zeroizing::new( - Option::::from(C::F::from_repr(share_bytes.0)).ok_or(DkgError::InvalidShare(l))?, - ); + let (mut share_bytes, blame) = + self.encryption.decrypt(rng, &mut batch, BatchId::Decryption(l), l, share_bytes); + let share = + Zeroizing::new(Option::::from(C::F::from_repr(share_bytes.0)).ok_or_else(|| { + FrostError::InvalidShare { participant: l, blame: Some(blame.clone()) } + })?); share_bytes.zeroize(); *self.secret += share.deref(); - // This can be insecurely linearized from n * t to just n using the below sums for a given - // stripe. Doing so uses naive addition which is subject to malleability. The only way to - // ensure that malleability isn't present is to use this n * t algorithm, which runs - // per sender and not as an aggregate of all senders, which also enables blame - let mut values = exponential(self.params.i, &self.commitments[&l]); - // multiexp will Zeroize this when it's done with it - values.push((-*share.deref(), C::generator())); - share.zeroize(); - - batch.queue(rng, l, values); + blames.insert(l, blame); + batch.queue( + rng, + BatchId::Share(l), + share_verification_statements::(self.params.i(), &self.commitments[&l], share), + ); } - batch.verify_with_vartime_blame().map_err(DkgError::InvalidShare)?; + batch.verify_with_vartime_blame().map_err(|id| { + let (l, blame) = match id { + BatchId::Decryption(l) => (l, None), + BatchId::Share(l) => (l, Some(blames.remove(&l).unwrap())), + }; + FrostError::InvalidShare { participant: l, blame } + })?; // Stripe commitments per t and sum them in advance. 
Calculating verification shares relies on // these sums so preprocessing them is a massive speedup @@ -352,16 +417,136 @@ impl KeyMachine { if i == self.params.i() { C::generator() * self.secret.deref() } else { - multiexp_vartime(&exponential(i, &stripes)) + multiexp_vartime(&exponential::(i, &stripes)) }, ); } - Ok(ThresholdCore { - params: self.params, - secret_share: self.secret.clone(), - group_key: stripes[0], - verification_shares, + let KeyMachine { commitments, encryption, params, secret } = self; + Ok(BlameMachine { + commitments, + encryption, + result: ThresholdCore { + params, + secret_share: secret, + group_key: stripes[0], + verification_shares, + }, }) } } + +pub struct BlameMachine { + commitments: HashMap>, + encryption: Encryption, + result: ThresholdCore, +} + +impl Zeroize for BlameMachine { + fn zeroize(&mut self) { + for (_, commitments) in self.commitments.iter_mut() { + commitments.zeroize(); + } + self.encryption.zeroize(); + self.result.zeroize(); + } +} + +impl BlameMachine { + /// Mark the protocol as having been successfully completed, returning the generated keys. + /// This should only be called after having confirmed, with all participants, successful + /// completion. + /// + /// Confirming successful completion is not necessarily as simple as everyone reporting their + /// completion. Everyone must also receive everyone's report of completion, entering into the + /// territory of consensus protocols. This library does not handle that nor does it provide any + /// tooling to do so. This function is solely intended to force users to acknowledge they're + /// completing the protocol, not processing any blame. + pub fn complete(self) -> ThresholdCore { + self.result + } + + fn blame_internal( + &self, + sender: u16, + recipient: u16, + msg: EncryptedMessage>, + proof: Option>, + ) -> u16 { + let share_bytes = match self.encryption.decrypt_with_proof(sender, recipient, msg, proof) { + Ok(share_bytes) => share_bytes, + // If there's an invalid signature, the sender did not send a properly formed message + Err(DecryptionError::InvalidSignature) => return sender, + // Decryption will fail if the provided ECDH key wasn't correct for the given message + Err(DecryptionError::InvalidProof) => return recipient, + }; + + let share = match Option::::from(C::F::from_repr(share_bytes.0)) { + Some(share) => share, + // If this isn't a valid scalar, the sender is faulty + None => return sender, + }; + + // If this isn't a valid share, the sender is faulty + if !bool::from( + multiexp_vartime(&share_verification_statements::( + recipient, + &self.commitments[&sender], + Zeroizing::new(share), + )) + .is_identity(), + ) { + return sender; + } + + // The share was canonical and valid + recipient + } + + /// Given an accusation of fault, determine the faulty party (either the sender, who sent an + /// invalid secret share, or the receiver, who claimed a valid secret share was invalid). No + /// matter which, prevent completion of the machine, forcing an abort of the protocol. + /// + /// The message should be a copy of the encrypted secret share from the accused sender to the + /// accusing recipient. This message must have been authenticated as actually having come from + /// the sender in question. + /// + /// In order to enable detecting multiple faults, an `AdditionalBlameMachine` is returned, which + /// can be used to determine further blame. These machines will process the same blame statements + /// multiple times, always identifying blame. 
It is the caller's job to ensure they're unique in + /// order to prevent multiple instances of blame over a single incident. + pub fn blame( + self, + sender: u16, + recipient: u16, + msg: EncryptedMessage>, + proof: Option>, + ) -> (AdditionalBlameMachine, u16) { + let faulty = self.blame_internal(sender, recipient, msg, proof); + (AdditionalBlameMachine(self), faulty) + } +} + +#[derive(Zeroize)] +pub struct AdditionalBlameMachine(BlameMachine); +impl AdditionalBlameMachine { + /// Given an accusation of fault, determine the faulty party (either the sender, who sent an + /// invalid secret share, or the receiver, who claimed a valid secret share was invalid). + /// + /// The message should be a copy of the encrypted secret share from the accused sender to the + /// accusing recipient. This message must have been authenticated as actually having come from + /// the sender in question. + /// + /// This will process the same blame statement multiple times, always identifying blame. It is + /// the caller's job to ensure they're unique in order to prevent multiple instances of blame + /// over a single incident. + pub fn blame( + self, + sender: u16, + recipient: u16, + msg: EncryptedMessage>, + proof: Option>, + ) -> u16 { + self.0.blame_internal(sender, recipient, msg, proof) + } +} diff --git a/crypto/dkg/src/lib.rs b/crypto/dkg/src/lib.rs index 2a8106d4..b858eec6 100644 --- a/crypto/dkg/src/lib.rs +++ b/crypto/dkg/src/lib.rs @@ -6,7 +6,10 @@ //! Additional utilities around them, such as promotion from one generator to another, are also //! provided. -use core::{fmt::Debug, ops::Deref}; +use core::{ + fmt::{Debug, Formatter}, + ops::Deref, +}; use std::{io::Read, sync::Arc, collections::HashMap}; use thiserror::Error; @@ -34,8 +37,8 @@ pub mod promote; pub mod tests; /// Various errors possible during key generation/signing. -#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)] -pub enum DkgError { +#[derive(Clone, PartialEq, Eq, Debug, Error)] +pub enum DkgError { #[error("a parameter was 0 (required {0}, participants {1})")] ZeroParameter(u16, u16), #[error("invalid amount of required participants (max {1}, got {0})")] @@ -54,19 +57,19 @@ pub enum DkgError { #[error("invalid proof of knowledge (participant {0})")] InvalidProofOfKnowledge(u16), - #[error("invalid share (participant {0})")] - InvalidShare(u16), + #[error("invalid share (participant {participant}, blame {blame})")] + InvalidShare { participant: u16, blame: Option }, #[error("internal error ({0})")] InternalError(&'static str), } // Validate a map of values to have the expected included participants -pub(crate) fn validate_map( +pub(crate) fn validate_map( map: &HashMap, included: &[u16], ours: u16, -) -> Result<(), DkgError> { +) -> Result<(), DkgError> { if (map.len() + 1) != included.len() { Err(DkgError::InvalidParticipantQuantity(included.len(), map.len() + 1))?; } @@ -100,7 +103,7 @@ pub struct ThresholdParams { } impl ThresholdParams { - pub fn new(t: u16, n: u16, i: u16) -> Result { + pub fn new(t: u16, n: u16, i: u16) -> Result> { if (t == 0) || (n == 0) { Err(DkgError::ZeroParameter(t, n))?; } @@ -149,7 +152,7 @@ pub fn lagrange(i: u16, included: &[u16]) -> F { /// Keys and verification shares generated by a DKG. /// Called core as they're expected to be wrapped into an Arc before usage in various operations. -#[derive(Clone, PartialEq, Eq, Debug)] +#[derive(Clone, PartialEq, Eq)] pub struct ThresholdCore { /// Threshold Parameters. 
params: ThresholdParams, @@ -162,6 +165,17 @@ pub struct ThresholdCore { verification_shares: HashMap, } +impl Debug for ThresholdCore { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), core::fmt::Error> { + fmt + .debug_struct("ThresholdCore") + .field("params", &self.params) + .field("group_key", &self.group_key) + .field("verification_shares", &self.verification_shares) + .finish_non_exhaustive() + } +} + impl Zeroize for ThresholdCore { fn zeroize(&mut self) { self.params.zeroize(); @@ -179,8 +193,12 @@ impl ThresholdCore { secret_share: Zeroizing, verification_shares: HashMap, ) -> ThresholdCore { - #[cfg(debug_assertions)] - validate_map(&verification_shares, &(0 ..= params.n).collect::>(), 0).unwrap(); + debug_assert!(validate_map::<_, ()>( + &verification_shares, + &(0 ..= params.n).collect::>(), + 0 + ) + .is_ok()); let t = (1 ..= params.t).collect::>(); ThresholdCore { @@ -220,15 +238,15 @@ impl ThresholdCore { serialized } - pub fn deserialize(reader: &mut R) -> Result, DkgError> { + pub fn deserialize(reader: &mut R) -> Result, DkgError<()>> { { let missing = DkgError::InternalError("ThresholdCore serialization is missing its curve"); let different = DkgError::InternalError("deserializing ThresholdCore for another curve"); let mut id_len = [0; 4]; - reader.read_exact(&mut id_len).map_err(|_| missing)?; + reader.read_exact(&mut id_len).map_err(|_| missing.clone())?; if u32::try_from(C::ID.len()).unwrap().to_be_bytes() != id_len { - Err(different)?; + Err(different.clone())?; } let mut id = vec![0; C::ID.len()]; @@ -273,27 +291,42 @@ impl ThresholdCore { /// Threshold keys usable for signing. #[derive(Clone, Debug, Zeroize)] pub struct ThresholdKeys { - /// Core keys. + // Core keys. + // If this is the last reference, the underlying keys will be dropped. When that happens, the + // private key present within it will be zeroed out (as it's within Zeroizing). #[zeroize(skip)] core: Arc>, - /// Offset applied to these keys. + // Offset applied to these keys. pub(crate) offset: Option, } /// View of keys passed to algorithm implementations. 
-#[derive(Clone, Zeroize)] +#[derive(Clone)] pub struct ThresholdView { offset: C::F, group_key: C::G, included: Vec, secret_share: Zeroizing, - #[zeroize(skip)] original_verification_shares: HashMap, - #[zeroize(skip)] verification_shares: HashMap, } +impl Zeroize for ThresholdView { + fn zeroize(&mut self) { + self.offset.zeroize(); + self.group_key.zeroize(); + self.included.zeroize(); + self.secret_share.zeroize(); + for (_, share) in self.original_verification_shares.iter_mut() { + share.zeroize(); + } + for (_, share) in self.verification_shares.iter_mut() { + share.zeroize(); + } + } +} + impl ThresholdKeys { pub fn new(core: ThresholdCore) -> ThresholdKeys { ThresholdKeys { core: Arc::new(core), offset: None } @@ -338,7 +371,7 @@ impl ThresholdKeys { self.core.serialize() } - pub fn view(&self, included: &[u16]) -> Result, DkgError> { + pub fn view(&self, included: &[u16]) -> Result, DkgError<()>> { if (included.len() < self.params().t.into()) || (usize::from(self.params().n) < included.len()) { Err(DkgError::InvalidSigningSet)?; diff --git a/crypto/dkg/src/promote.rs b/crypto/dkg/src/promote.rs index ded73b16..6645cb04 100644 --- a/crypto/dkg/src/promote.rs +++ b/crypto/dkg/src/promote.rs @@ -44,13 +44,13 @@ pub struct GeneratorProof { impl GeneratorProof { pub fn write(&self, writer: &mut W) -> io::Result<()> { writer.write_all(self.share.to_bytes().as_ref())?; - self.proof.serialize(writer) + self.proof.write(writer) } pub fn read(reader: &mut R) -> io::Result> { Ok(GeneratorProof { share: ::read_G(reader)?, - proof: DLEqProof::deserialize(reader)?, + proof: DLEqProof::read(reader)?, }) } @@ -98,7 +98,7 @@ where pub fn complete( self, proofs: &HashMap>, - ) -> Result, DkgError> { + ) -> Result, DkgError<()>> { let params = self.base.params(); validate_map(proofs, &(1 ..= params.n).collect::>(), params.i)?; diff --git a/crypto/dkg/src/tests/frost.rs b/crypto/dkg/src/tests/frost.rs index 6bbbb3e2..34579758 100644 --- a/crypto/dkg/src/tests/frost.rs +++ b/crypto/dkg/src/tests/frost.rs @@ -4,17 +4,23 @@ use rand_core::{RngCore, CryptoRng}; use crate::{ Ciphersuite, ThresholdParams, ThresholdCore, - frost::KeyGenMachine, + frost::{KeyGenMachine, SecretShare, KeyMachine}, encryption::{EncryptionKeyMessage, EncryptedMessage}, tests::{THRESHOLD, PARTICIPANTS, clone_without}, }; -/// Fully perform the FROST key generation algorithm. 
-pub fn frost_gen( +// Needed so rustfmt doesn't fail to format on line length issues +type FrostEncryptedMessage = EncryptedMessage::F>>; +type FrostSecretShares = HashMap>; + +// Commit, then return enc key and shares +#[allow(clippy::type_complexity)] +fn commit_enc_keys_and_shares( rng: &mut R, -) -> HashMap> { +) -> (HashMap>, HashMap, HashMap>) { let mut machines = HashMap::new(); let mut commitments = HashMap::new(); + let mut enc_keys = HashMap::new(); for i in 1 ..= PARTICIPANTS { let machine = KeyGenMachine::::new( ThresholdParams::new(THRESHOLD, PARTICIPANTS, i).unwrap(), @@ -31,10 +37,11 @@ pub fn frost_gen( ) .unwrap(), ); + enc_keys.insert(i, commitments[&i].enc_key()); } let mut secret_shares = HashMap::new(); - let mut machines = machines + let machines = machines .drain() .map(|(l, machine)| { let (machine, mut shares) = @@ -57,19 +64,36 @@ pub fn frost_gen( }) .collect::>(); + (machines, enc_keys, secret_shares) +} + +fn generate_secret_shares( + shares: &HashMap>, + recipient: u16, +) -> FrostSecretShares { + let mut our_secret_shares = HashMap::new(); + for (i, shares) in shares { + if recipient == *i { + continue; + } + our_secret_shares.insert(*i, shares[&recipient].clone()); + } + our_secret_shares +} + +/// Fully perform the FROST key generation algorithm. +pub fn frost_gen( + rng: &mut R, +) -> HashMap> { + let (mut machines, _, secret_shares) = commit_enc_keys_and_shares::<_, C>(rng); + let mut verification_shares = None; let mut group_key = None; machines .drain() .map(|(i, machine)| { - let mut our_secret_shares = HashMap::new(); - for (l, shares) in &secret_shares { - if i == *l { - continue; - } - our_secret_shares.insert(*l, shares[&i].clone()); - } - let these_keys = machine.complete(rng, our_secret_shares).unwrap(); + let our_secret_shares = generate_secret_shares(&secret_shares, i); + let these_keys = machine.calculate_share(rng, our_secret_shares).unwrap().complete(); // Verify the verification_shares are agreed upon if verification_shares.is_none() { @@ -87,3 +111,188 @@ pub fn frost_gen( }) .collect::>() } + +#[cfg(test)] +mod literal { + use rand_core::OsRng; + + use ciphersuite::Ristretto; + + use crate::{DkgError, encryption::EncryptionKeyProof, frost::BlameMachine}; + + use super::*; + + fn test_blame( + machines: Vec>, + msg: FrostEncryptedMessage, + blame: Option>, + ) { + for machine in machines { + let (additional, blamed) = machine.blame(1, 2, msg.clone(), blame.clone()); + assert_eq!(blamed, 1); + // Verify additional blame also works + assert_eq!(additional.blame(1, 2, msg.clone(), blame.clone()), 1); + } + } + + // TODO: Write a macro which expands to the following + #[test] + fn invalid_encryption_pop_blame() { + let (mut machines, _, mut secret_shares) = + commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng); + + // Mutate the PoP of the encrypted message from 1 to 2 + secret_shares.get_mut(&1).unwrap().get_mut(&2).unwrap().invalidate_pop(); + + let mut blame = None; + let machines = machines + .drain() + .filter_map(|(i, machine)| { + let our_secret_shares = generate_secret_shares(&secret_shares, i); + let machine = machine.calculate_share(&mut OsRng, our_secret_shares); + if i == 2 { + assert_eq!(machine.err(), Some(DkgError::InvalidShare { participant: 1, blame: None })); + // Explicitly declare we have a blame object, which happens to be None since invalid PoP + // is self-explainable + blame = Some(None); + None + } else { + Some(machine.unwrap()) + } + }) + .collect::>(); + + test_blame(machines, secret_shares[&1][&2].clone(), 
blame.unwrap()); + } + + #[test] + fn invalid_ecdh_blame() { + let (mut machines, _, mut secret_shares) = + commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng); + + // Mutate the share to trigger a blame event + // Mutates from 2 to 1, as 1 is expected to end up malicious for test_blame to pass + // While here, 2 is malicious, this is so 1 creates the blame proof + // We then malleate 1's blame proof, so 1 ends up malicious + // Doesn't simply invalidate the PoP as that won't have a blame statement + // By mutating the encrypted data, we do ensure a blame statement is created + secret_shares.get_mut(&2).unwrap().get_mut(&1).unwrap().invalidate_msg(&mut OsRng, 2); + + let mut blame = None; + let machines = machines + .drain() + .filter_map(|(i, machine)| { + let our_secret_shares = generate_secret_shares(&secret_shares, i); + let machine = machine.calculate_share(&mut OsRng, our_secret_shares); + if i == 1 { + blame = Some(match machine.err() { + Some(DkgError::InvalidShare { participant: 2, blame: Some(blame) }) => Some(blame), + _ => panic!(), + }); + None + } else { + Some(machine.unwrap()) + } + }) + .collect::>(); + + blame.as_mut().unwrap().as_mut().unwrap().invalidate_key(); + test_blame(machines, secret_shares[&2][&1].clone(), blame.unwrap()); + } + + // This should be largely equivalent to the prior test + #[test] + fn invalid_dleq_blame() { + let (mut machines, _, mut secret_shares) = + commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng); + + secret_shares.get_mut(&2).unwrap().get_mut(&1).unwrap().invalidate_msg(&mut OsRng, 2); + + let mut blame = None; + let machines = machines + .drain() + .filter_map(|(i, machine)| { + let our_secret_shares = generate_secret_shares(&secret_shares, i); + let machine = machine.calculate_share(&mut OsRng, our_secret_shares); + if i == 1 { + blame = Some(match machine.err() { + Some(DkgError::InvalidShare { participant: 2, blame: Some(blame) }) => Some(blame), + _ => panic!(), + }); + None + } else { + Some(machine.unwrap()) + } + }) + .collect::>(); + + blame.as_mut().unwrap().as_mut().unwrap().invalidate_dleq(); + test_blame(machines, secret_shares[&2][&1].clone(), blame.unwrap()); + } + + #[test] + fn invalid_share_serialization_blame() { + let (mut machines, enc_keys, mut secret_shares) = + commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng); + + secret_shares.get_mut(&1).unwrap().get_mut(&2).unwrap().invalidate_share_serialization( + &mut OsRng, + b"FROST", + 1, + enc_keys[&2], + ); + + let mut blame = None; + let machines = machines + .drain() + .filter_map(|(i, machine)| { + let our_secret_shares = generate_secret_shares(&secret_shares, i); + let machine = machine.calculate_share(&mut OsRng, our_secret_shares); + if i == 2 { + blame = Some(match machine.err() { + Some(DkgError::InvalidShare { participant: 1, blame: Some(blame) }) => Some(blame), + _ => panic!(), + }); + None + } else { + Some(machine.unwrap()) + } + }) + .collect::>(); + + test_blame(machines, secret_shares[&1][&2].clone(), blame.unwrap()); + } + + #[test] + fn invalid_share_value_blame() { + let (mut machines, enc_keys, mut secret_shares) = + commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng); + + secret_shares.get_mut(&1).unwrap().get_mut(&2).unwrap().invalidate_share_value( + &mut OsRng, + b"FROST", + 1, + enc_keys[&2], + ); + + let mut blame = None; + let machines = machines + .drain() + .filter_map(|(i, machine)| { + let our_secret_shares = generate_secret_shares(&secret_shares, i); + let machine = machine.calculate_share(&mut OsRng, our_secret_shares); + 
if i == 2 { + blame = Some(match machine.err() { + Some(DkgError::InvalidShare { participant: 1, blame: Some(blame) }) => Some(blame), + _ => panic!(), + }); + None + } else { + Some(machine.unwrap()) + } + }) + .collect::>(); + + test_blame(machines, secret_shares[&1][&2].clone(), blame.unwrap()); + } +} diff --git a/crypto/dleq/src/cross_group/aos.rs b/crypto/dleq/src/cross_group/aos.rs index 008b5f75..e59e8a2b 100644 --- a/crypto/dleq/src/cross_group/aos.rs +++ b/crypto/dleq/src/cross_group/aos.rs @@ -210,7 +210,7 @@ where } #[cfg(feature = "serialize")] - pub(crate) fn serialize(&self, w: &mut W) -> std::io::Result<()> { + pub(crate) fn write(&self, w: &mut W) -> std::io::Result<()> { #[allow(non_snake_case)] match self.Re_0 { Re::R(R0, R1) => { @@ -230,7 +230,7 @@ where #[allow(non_snake_case)] #[cfg(feature = "serialize")] - pub(crate) fn deserialize(r: &mut R, mut Re_0: Re) -> std::io::Result { + pub(crate) fn read(r: &mut R, mut Re_0: Re) -> std::io::Result { match Re_0 { Re::R(ref mut R0, ref mut R1) => { *R0 = read_point(r)?; diff --git a/crypto/dleq/src/cross_group/bits.rs b/crypto/dleq/src/cross_group/bits.rs index 4f14bda7..1995fad1 100644 --- a/crypto/dleq/src/cross_group/bits.rs +++ b/crypto/dleq/src/cross_group/bits.rs @@ -166,17 +166,17 @@ where } #[cfg(feature = "serialize")] - pub(crate) fn serialize(&self, w: &mut W) -> std::io::Result<()> { + pub(crate) fn write(&self, w: &mut W) -> std::io::Result<()> { w.write_all(self.commitments.0.to_bytes().as_ref())?; w.write_all(self.commitments.1.to_bytes().as_ref())?; - self.signature.serialize(w) + self.signature.write(w) } #[cfg(feature = "serialize")] - pub(crate) fn deserialize(r: &mut R) -> std::io::Result { + pub(crate) fn read(r: &mut R) -> std::io::Result { Ok(Bits { commitments: (read_point(r)?, read_point(r)?), - signature: Aos::deserialize(r, BitSignature::from(SIGNATURE).aos_form())?, + signature: Aos::read(r, BitSignature::from(SIGNATURE).aos_form())?, }) } } diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index b7ef05c2..e5095b45 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -367,36 +367,32 @@ where } #[cfg(feature = "serialize")] - pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { + pub fn write(&self, w: &mut W) -> std::io::Result<()> { for bit in &self.bits { - bit.serialize(w)?; + bit.write(w)?; } if let Some(bit) = &self.remainder { - bit.serialize(w)?; + bit.write(w)?; } - self.poks.0.serialize(w)?; - self.poks.1.serialize(w) + self.poks.0.write(w)?; + self.poks.1.write(w) } #[cfg(feature = "serialize")] - pub fn deserialize(r: &mut R) -> std::io::Result { + pub fn read(r: &mut R) -> std::io::Result { let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); let bits_per_group = BitSignature::from(SIGNATURE).bits(); let mut bits = Vec::with_capacity(capacity / bits_per_group); for _ in 0 .. (capacity / bits_per_group) { - bits.push(Bits::deserialize(r)?); + bits.push(Bits::read(r)?); } let mut remainder = None; if (capacity % bits_per_group) != 0 { - remainder = Some(Bits::deserialize(r)?); + remainder = Some(Bits::read(r)?); } - Ok(__DLEqProof { - bits, - remainder, - poks: (SchnorrPoK::deserialize(r)?, SchnorrPoK::deserialize(r)?), - }) + Ok(__DLEqProof { bits, remainder, poks: (SchnorrPoK::read(r)?, SchnorrPoK::read(r)?) 
}) } } diff --git a/crypto/dleq/src/cross_group/schnorr.rs b/crypto/dleq/src/cross_group/schnorr.rs index f6b79dc3..b1d61d5f 100644 --- a/crypto/dleq/src/cross_group/schnorr.rs +++ b/crypto/dleq/src/cross_group/schnorr.rs @@ -79,13 +79,13 @@ where } #[cfg(feature = "serialize")] - pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { + pub fn write(&self, w: &mut W) -> std::io::Result<()> { w.write_all(self.R.to_bytes().as_ref())?; w.write_all(self.s.to_repr().as_ref()) } #[cfg(feature = "serialize")] - pub fn deserialize(r: &mut R) -> std::io::Result> { + pub fn read(r: &mut R) -> std::io::Result> { Ok(SchnorrPoK { R: read_point(r)?, s: read_scalar(r)? }) } } diff --git a/crypto/dleq/src/lib.rs b/crypto/dleq/src/lib.rs index 459fe4c9..5372a2d5 100644 --- a/crypto/dleq/src/lib.rs +++ b/crypto/dleq/src/lib.rs @@ -90,10 +90,12 @@ impl DLEqProof { transcript.domain_separate(b"dleq"); for generator in generators { + // R, A Self::transcript(transcript, *generator, *generator * r.deref(), *generator * scalar.deref()); } let c = challenge(transcript); + // r + ca let s = (c * scalar.deref()) + r.deref(); DLEqProof { c, s } @@ -111,6 +113,9 @@ impl DLEqProof { transcript.domain_separate(b"dleq"); for (generator, point) in generators.iter().zip(points) { + // s = r + ca + // sG - cA = R + // R, A Self::transcript(transcript, *generator, (*generator * self.s) - (*point * self.c), *point); } @@ -122,13 +127,20 @@ impl DLEqProof { } #[cfg(feature = "serialize")] - pub fn serialize(&self, w: &mut W) -> io::Result<()> { + pub fn write(&self, w: &mut W) -> io::Result<()> { w.write_all(self.c.to_repr().as_ref())?; w.write_all(self.s.to_repr().as_ref()) } #[cfg(feature = "serialize")] - pub fn deserialize(r: &mut R) -> io::Result> { + pub fn read(r: &mut R) -> io::Result> { Ok(DLEqProof { c: read_scalar(r)?, s: read_scalar(r)? }) } + + #[cfg(feature = "serialize")] + pub fn serialize(&self) -> Vec { + let mut res = vec![]; + self.write(&mut res).unwrap(); + res + } } diff --git a/crypto/dleq/src/tests/cross_group/aos.rs b/crypto/dleq/src/tests/cross_group/aos.rs index 4fb43ca1..7971ba52 100644 --- a/crypto/dleq/src/tests/cross_group/aos.rs +++ b/crypto/dleq/src/tests/cross_group/aos.rs @@ -13,8 +13,8 @@ use crate::{ #[cfg(feature = "serialize")] fn test_aos_serialization(proof: Aos, Re_0: Re) { let mut buf = vec![]; - proof.serialize(&mut buf).unwrap(); - let deserialized = Aos::deserialize(&mut std::io::Cursor::new(buf), Re_0).unwrap(); + proof.write(&mut buf).unwrap(); + let deserialized = Aos::read::<&[u8]>(&mut buf.as_ref(), Re_0).unwrap(); assert_eq!(proof, deserialized); } diff --git a/crypto/dleq/src/tests/cross_group/mod.rs b/crypto/dleq/src/tests/cross_group/mod.rs index cabbe9ac..d0fa0fcb 100644 --- a/crypto/dleq/src/tests/cross_group/mod.rs +++ b/crypto/dleq/src/tests/cross_group/mod.rs @@ -60,8 +60,8 @@ macro_rules! verify_and_deserialize { #[cfg(feature = "serialize")] { let mut buf = vec![]; - $proof.serialize(&mut buf).unwrap(); - let deserialized = <$type>::deserialize(&mut std::io::Cursor::new(&buf)).unwrap(); + $proof.write(&mut buf).unwrap(); + let deserialized = <$type>::read::<&[u8]>(&mut buf.as_ref()).unwrap(); assert_eq!($proof, deserialized); } }; @@ -96,7 +96,7 @@ macro_rules! 
test_dleq { #[cfg(feature = "serialize")] { let mut buf = vec![]; - proofs[0].serialize(&mut buf).unwrap(); + proofs[0].write(&mut buf).unwrap(); println!("{} had a proof size of {} bytes", $str, buf.len()); } } diff --git a/crypto/dleq/src/tests/mod.rs b/crypto/dleq/src/tests/mod.rs index ac3ff11e..336c0a1b 100644 --- a/crypto/dleq/src/tests/mod.rs +++ b/crypto/dleq/src/tests/mod.rs @@ -88,9 +88,8 @@ fn test_dleq() { #[cfg(feature = "serialize")] { let mut buf = vec![]; - proof.serialize(&mut buf).unwrap(); - let deserialized = - DLEqProof::::deserialize(&mut std::io::Cursor::new(&buf)).unwrap(); + proof.write(&mut buf).unwrap(); + let deserialized = DLEqProof::::read::<&[u8]>(&mut buf.as_ref()).unwrap(); assert_eq!(proof, deserialized); } } diff --git a/crypto/frost/src/nonce.rs b/crypto/frost/src/nonce.rs index 2c009a46..9d8ac586 100644 --- a/crypto/frost/src/nonce.rs +++ b/crypto/frost/src/nonce.rs @@ -119,7 +119,7 @@ impl NonceCommitments { let mut dleqs = None; if generators.len() >= 2 { let mut verify = |i| -> io::Result<_> { - let dleq = DLEqProof::deserialize(reader)?; + let dleq = DLEqProof::read(reader)?; dleq .verify( &mut dleq_transcript::(context), @@ -140,8 +140,8 @@ impl NonceCommitments { generator.write(writer)?; } if let Some(dleqs) = &self.dleqs { - dleqs[0].serialize(writer)?; - dleqs[1].serialize(writer)?; + dleqs[0].write(writer)?; + dleqs[1].write(writer)?; } Ok(()) } @@ -184,7 +184,7 @@ impl Commitments { if let Some(dleqs) = &nonce.dleqs { let mut transcript_dleq = |label, dleq: &DLEqProof| { let mut buf = vec![]; - dleq.serialize(&mut buf).unwrap(); + dleq.write(&mut buf).unwrap(); t.append_message(label, &buf); }; transcript_dleq(b"dleq_D", &dleqs[0]); diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index 1a409c7f..183ddd6b 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -7,11 +7,14 @@ use std::{ use rand_core::{RngCore, CryptoRng, SeedableRng}; use rand_chacha::ChaCha20Rng; -use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing}; +use zeroize::{Zeroize, Zeroizing}; use transcript::Transcript; -use group::{ff::PrimeField, GroupEncoding}; +use group::{ + ff::{Field, PrimeField}, + GroupEncoding, +}; use multiexp::BatchVerifier; use crate::{ @@ -46,6 +49,7 @@ impl Writable for Vec { /// Pairing of an Algorithm with a ThresholdKeys instance and this specific signing set. #[derive(Clone, Zeroize)] pub struct Params> { + // Skips the algorithm due to being too large a bound to feasibly enforce on users #[zeroize(skip)] algorithm: A, keys: ThresholdKeys, @@ -78,8 +82,11 @@ impl Writable for Preprocess { /// A cached preprocess. A preprocess MUST only be used once. Reuse will enable third-party /// recovery of your private key share. Additionally, this MUST be handled with the same security /// as your private key share, as knowledge of it also enables recovery. -#[derive(Zeroize, ZeroizeOnDrop)] -pub struct CachedPreprocess(pub [u8; 32]); +// Directly exposes the [u8; 32] member to avoid needing to route through std::io interfaces. +// Still uses Zeroizing internally so when users grab it, they have a higher likelihood of +// appreciating how to handle it and don't immediately start copying it just by grabbing it. +#[derive(Zeroize)] +pub struct CachedPreprocess(pub Zeroizing<[u8; 32]>); /// Trait for the initial state machine of a two-round signing protocol. 
pub trait PreprocessMachine { @@ -110,11 +117,11 @@ impl> AlgorithmMachine { fn seeded_preprocess( self, - seed: Zeroizing, + seed: CachedPreprocess, ) -> (AlgorithmSignMachine, Preprocess) { let mut params = self.params; - let mut rng = ChaCha20Rng::from_seed(seed.0); + let mut rng = ChaCha20Rng::from_seed(*seed.0); // Get a challenge to the existing transcript for use when proving for the commitments let commitments_challenge = params.algorithm.transcript().challenge(b"commitments"); let (nonces, commitments) = Commitments::new::<_, A::Transcript>( @@ -153,7 +160,7 @@ impl> AlgorithmMachine { commitments_challenge: self.params.algorithm.transcript().challenge(b"commitments"), params: self.params, - seed: Zeroizing::new(CachedPreprocess([0; 32])), + seed: CachedPreprocess(Zeroizing::new([0; 32])), nonces, preprocess, @@ -174,7 +181,7 @@ impl> PreprocessMachine for AlgorithmMachine { self, rng: &mut R, ) -> (Self::SignMachine, Preprocess) { - let mut seed = Zeroizing::new(CachedPreprocess([0; 32])); + let mut seed = CachedPreprocess(Zeroizing::new([0; 32])); rng.fill_bytes(seed.0.as_mut()); self.seeded_preprocess(seed) } @@ -188,6 +195,12 @@ impl Writable for SignatureShare { writer.write_all(self.0.to_repr().as_ref()) } } +#[cfg(any(test, feature = "tests"))] +impl SignatureShare { + pub(crate) fn invalidate(&mut self) { + self.0 += C::F::one(); + } +} /// Trait for the second machine of a two-round signing protocol. pub trait SignMachine: Sized { @@ -206,14 +219,14 @@ pub trait SignMachine: Sized { /// of it enables recovery of your private key share. Third-party recovery of a cached preprocess /// also enables recovery of your private key share, so this MUST be treated with the same /// security as your private key share. - fn cache(self) -> Zeroizing; + fn cache(self) -> CachedPreprocess; /// Create a sign machine from a cached preprocess. After this, the preprocess should be fully /// deleted, as it must never be reused. It is fn from_cache( params: Self::Params, keys: Self::Keys, - cache: Zeroizing, + cache: CachedPreprocess, ) -> Result; /// Read a Preprocess message. Despite taking self, this does not save the preprocess. 
@@ -235,10 +248,11 @@ pub trait SignMachine<S>: Sized {
 #[derive(Zeroize)]
 pub struct AlgorithmSignMachine<C: Curve, A: Algorithm<C>> {
   params: Params<C, A>,
-  seed: Zeroizing<CachedPreprocess>,
+  seed: CachedPreprocess,
 
   commitments_challenge: <A::Transcript as Transcript>::Challenge,
   pub(crate) nonces: Vec<Nonce<C>>,
+  // Skips the preprocess, as Zeroize would be too large a bound to feasibly enforce on users
   #[zeroize(skip)]
   pub(crate) preprocess: Preprocess<C, A::Addendum>,
   pub(crate) blame_entropy: [u8; 32],
@@ -251,14 +265,14 @@ impl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachi
   type SignatureShare = SignatureShare<C>;
   type SignatureMachine = AlgorithmSignatureMachine<C, A>;
 
-  fn cache(self) -> Zeroizing<CachedPreprocess> {
+  fn cache(self) -> CachedPreprocess {
     self.seed
   }
 
   fn from_cache(
     algorithm: A,
     keys: ThresholdKeys<C>,
-    cache: Zeroizing<CachedPreprocess>,
+    cache: CachedPreprocess,
   ) -> Result<Self, FrostError> {
     let (machine, _) = AlgorithmMachine::new(algorithm, keys)?.seeded_preprocess(cache);
     Ok(machine)
diff --git a/crypto/frost/src/tests/mod.rs b/crypto/frost/src/tests/mod.rs
index 3e3cfd48..02dc0268 100644
--- a/crypto/frost/src/tests/mod.rs
+++ b/crypto/frost/src/tests/mod.rs
@@ -59,7 +59,9 @@ pub fn algorithm_machines>(
     .collect()
 }
 
-fn sign_internal<
+// Run the commit step and generate signature shares
+#[allow(clippy::type_complexity)]
+pub(crate) fn commit_and_shares<
   R: RngCore + CryptoRng,
   M: PreprocessMachine,
   F: FnMut(&mut R, &mut HashMap<u16, M::SignMachine>),
@@ -68,7 +70,10 @@ fn sign_internal<
   mut machines: HashMap<u16, M>,
   mut cache: F,
   msg: &[u8],
-) -> M::Signature {
+) -> (
+  HashMap<u16, <M::SignMachine as SignMachine<M::Signature>>::SignatureMachine>,
+  HashMap<u16, <M::SignMachine as SignMachine<M::Signature>>::SignatureShare>,
+) {
   let mut commitments = HashMap::new();
   let mut machines = machines
     .drain()
@@ -86,7 +91,7 @@ fn sign_internal<
   cache(rng, &mut machines);
 
   let mut shares = HashMap::new();
-  let mut machines = machines
+  let machines = machines
     .drain()
     .map(|(i, machine)| {
       let (machine, share) = machine.sign(clone_without(&commitments, &i), msg).unwrap();
@@ -99,6 +104,21 @@ fn sign_internal<
     })
     .collect::<HashMap<_, _>>();
 
+  (machines, shares)
+}
+
+fn sign_internal<
+  R: RngCore + CryptoRng,
+  M: PreprocessMachine,
+  F: FnMut(&mut R, &mut HashMap<u16, M::SignMachine>),
+>(
+  rng: &mut R,
+  machines: HashMap<u16, M>,
+  cache: F,
+  msg: &[u8],
+) -> M::Signature {
+  let (mut machines, shares) = commit_and_shares(rng, machines, cache, msg);
+
   let mut signature = None;
   for (i, machine) in machines.drain() {
     let sig = machine.complete(clone_without(&shares, &i)).unwrap();
diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs
index 3f6223c8..cc17121d 100644
--- a/crypto/frost/src/tests/vectors.rs
+++ b/crypto/frost/src/tests/vectors.rs
@@ -13,13 +13,13 @@ use dkg::tests::key_gen;
 
 use crate::{
   curve::Curve,
-  ThresholdCore, ThresholdKeys,
+  ThresholdCore, ThresholdKeys, FrostError,
   algorithm::{Schnorr, Hram},
   sign::{
     Nonce, GeneratorCommitments, NonceCommitments, Commitments, Writable, Preprocess,
     SignMachine, SignatureMachine, AlgorithmMachine,
   },
-  tests::{clone_without, recover_key, algorithm_machines, sign},
+  tests::{clone_without, recover_key, algorithm_machines, commit_and_shares, sign},
 };
 
 pub struct Vectors {
@@ -127,6 +127,27 @@ pub fn test_with_vectors>(
     assert!(sig.verify(keys[&1].group_key(), H::hram(&sig.R, &keys[&1].group_key(), MSG)));
   }
 
+  // Test blame on an invalid Schnorr signature share
+  {
+    let keys = key_gen(&mut *rng);
+    let machines = algorithm_machines(&mut *rng, Schnorr::<C, H>::new(), &keys);
+    const MSG: &[u8] = b"Hello, World!";
+
+    let (mut machines, mut shares) = commit_and_shares(&mut *rng, machines, |_, _| {}, MSG);
+    let faulty = *shares.keys().next().unwrap();
+    shares.get_mut(&faulty).unwrap().invalidate();
+
+    for (i, machine) in machines.drain() {
+      if i == faulty {
+        continue;
+      }
+      assert_eq!(
+        machine.complete(clone_without(&shares, &i)).err(),
+        Some(FrostError::InvalidShare(faulty))
+      );
+    }
+  }
+
   // Test against the vectors
   let keys = vectors_to_multisig_keys::<C>(&vectors);
   let group_key =
diff --git a/crypto/transcript/src/merlin.rs b/crypto/transcript/src/merlin.rs
index c42eee46..5fe7eb68 100644
--- a/crypto/transcript/src/merlin.rs
+++ b/crypto/transcript/src/merlin.rs
@@ -7,7 +7,7 @@ pub struct MerlinTranscript(pub merlin::Transcript);
 // Merlin doesn't implement Debug so provide a stub which won't panic
 impl Debug for MerlinTranscript {
   fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), core::fmt::Error> {
-    fmt.debug_struct("MerlinTranscript").finish()
+    fmt.debug_struct("MerlinTranscript").finish_non_exhaustive()
   }
 }
 
diff --git a/docs/cryptography/Distributed Key Generation.md b/docs/cryptography/Distributed Key Generation.md
index 6d6818a4..fae5ff90 100644
--- a/docs/cryptography/Distributed Key Generation.md
+++ b/docs/cryptography/Distributed Key Generation.md
@@ -9,7 +9,27 @@ This results in a two-round protocol.
 ### Encryption
 
 In order to protect the secret shares during communication, the `dkg` library
-additionally sends an encryption key. These encryption keys are used in an ECDH
-to derive a shared key. This key is then hashed to obtain two keys and IVs, one
-for sending and one for receiving, with the given counterparty. Chacha20 is used
-as the stream cipher.
+establishes a public key for encryption at the start of a given protocol.
+Every encrypted message (such as the secret shares) then includes a per-message
+encryption key. These two keys are used in an Elliptic-curve Diffie-Hellman
+exchange to derive a shared key. This shared key is then hashed to obtain a key
+and IV for a ChaCha20 stream cipher instance, whose keystream is xor'd against
+the message to encrypt it.
+
+### Blame
+
+Since each message has a distinct key attached, and accordingly a distinct
+shared key, it's possible to reveal the shared key for a specific message
+without revealing any other message's decryption keys. This is utilized when a
+participant misbehaves. A participant who receives an invalid encrypted message
+publishes its key, which it can do without concern for side effects. With the
+key published, all participants can decrypt the message and decide blame.
+
+While key reuse by a participant is considered equivalent to them revealing the
+messages themselves, and is therefore out of scope, there is an attack where a
+malicious adversary claims another participant's encryption key. Their message
+will fail to meaningfully decrypt, and the recipient will issue a blame
+statement. This blame statement, intended to expose the malicious adversary,
+also reveals the message sent by the participant whose key was co-opted. To
+resolve this, a proof-of-possession is included with each encrypted message,
+ensuring only those who actually hold a per-message key can claim to use it.
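For illustration only (not part of the commit): a minimal sketch of how a caller is presumably expected to drive the reworked `CachedPreprocess` flow shown above. It assumes the crate is consumed as `modular_frost`, that `AlgorithmSignMachine` is reachable via its `sign` module, and the function names here (`preprocess_then_cache`, `resume`) are hypothetical; broadcasting the serialized preprocess and persisting the cached seed are left to the caller.

```rust
use rand_core::{RngCore, CryptoRng};

use modular_frost::{
  curve::Curve,
  ThresholdKeys, FrostError,
  algorithm::Algorithm,
  sign::{
    Writable, CachedPreprocess, PreprocessMachine, SignMachine, AlgorithmMachine,
    AlgorithmSignMachine,
  },
};

// Run the preprocess round now, returning the serialized preprocess (to broadcast) and the
// 32-byte seed backing it. The seed MUST be handled with the same security as the key share
// and MUST only ever be used to resume this one signing session.
fn preprocess_then_cache<R: RngCore + CryptoRng, C: Curve, A: Algorithm<C>>(
  rng: &mut R,
  algorithm: A,
  keys: ThresholdKeys<C>,
) -> Result<(Vec<u8>, CachedPreprocess), FrostError> {
  let machine = AlgorithmMachine::new(algorithm, keys)?;
  let (sign_machine, preprocess) = machine.preprocess(rng);

  // Serialize the preprocess for broadcast to the other participants
  let mut broadcast = vec![];
  preprocess.write(&mut broadcast).unwrap();

  // cache() consumes the machine; the pub Zeroizing<[u8; 32]> member can now be persisted
  // directly, without routing through std::io
  Ok((broadcast, sign_machine.cache()))
}

// Later, rebuild the exact same sign machine from the cached seed. The cache must be deleted
// after this call, as reusing a preprocess lets third parties recover the key share.
fn resume<C: Curve, A: Algorithm<C>>(
  algorithm: A,
  keys: ThresholdKeys<C>,
  cache: CachedPreprocess,
) -> Result<AlgorithmSignMachine<C, A>, FrostError> {
  AlgorithmSignMachine::from_cache(algorithm, keys, cache)
}
```

Since `seeded_preprocess` re-derives the nonces deterministically from the cached ChaCha20 seed, resuming twice from the same cache amounts to nonce reuse, which is exactly the reuse the doc comments above warn enables recovery of the private key share.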