diff --git a/Cargo.lock b/Cargo.lock index edc46693..57e438de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4642,7 +4642,6 @@ dependencies = [ "dalek-ff-group", "digest 0.10.7", "dkg", - "dleq", "flexible-transcript", "hex", "minimal-ed448", @@ -4679,7 +4678,6 @@ dependencies = [ "curve25519-dalek", "dalek-ff-group", "digest_auth", - "dleq", "flexible-transcript", "group", "hex", diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index 9c78e431..357803c9 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -43,7 +43,6 @@ multiexp = { path = "../../crypto/multiexp", version = "0.4", default-features = # Needed for multisig transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["recommended"], optional = true } -dleq = { path = "../../crypto/dleq", version = "0.4", default-features = false, features = ["serialize"], optional = true } frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.8", default-features = false, features = ["ed25519"], optional = true } monero-generators = { path = "generators", version = "0.4", default-features = false } @@ -91,7 +90,6 @@ std = [ "multiexp/std", "transcript/std", - "dleq/std", "monero-generators/std", @@ -106,7 +104,7 @@ std = [ cache-distribution = ["async-lock"] http-rpc = ["digest_auth", "simple-request", "tokio"] -multisig = ["transcript", "frost", "dleq", "std"] +multisig = ["transcript", "frost", "std"] binaries = ["tokio/rt-multi-thread", "tokio/macros", "http-rpc"] experimental = [] diff --git a/coins/monero/src/ringct/clsag/mod.rs b/coins/monero/src/ringct/clsag/mod.rs index 3fe65254..042d964a 100644 --- a/coins/monero/src/ringct/clsag/mod.rs +++ b/coins/monero/src/ringct/clsag/mod.rs @@ -27,8 +27,6 @@ use crate::{ mod multisig; #[cfg(feature = "multisig")] pub use multisig::{ClsagDetails, ClsagAddendum, ClsagMultisig}; -#[cfg(feature = "multisig")] -pub(crate) use multisig::add_key_image_share; /// Errors returned when CLSAG signing fails. 
#[derive(Clone, Copy, PartialEq, Eq, Debug)] @@ -279,8 +277,10 @@ impl Clsag { nonce.deref() * hash_to_point(&inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0]), ); - clsag.s[usize::from(inputs[i].2.decoys.i)] = - (-((p * inputs[i].0.deref()) + c)) + nonce.deref(); + // Effectively r - cx, except cx is (c_p x) + (c_c z), where z is the delta between a ring + // member's commitment and our input commitment (which will only have a known discrete log + // over G if the amounts cancel out) + clsag.s[usize::from(inputs[i].2.decoys.i)] = nonce.deref() - ((p * inputs[i].0.deref()) + c); inputs[i].0.zeroize(); nonce.zeroize(); diff --git a/coins/monero/src/ringct/clsag/multisig.rs b/coins/monero/src/ringct/clsag/multisig.rs index 85748b78..e9234979 100644 --- a/coins/monero/src/ringct/clsag/multisig.rs +++ b/coins/monero/src/ringct/clsag/multisig.rs @@ -1,5 +1,8 @@ use core::{ops::Deref, fmt::Debug}; -use std_shims::io::{self, Read, Write}; +use std_shims::{ + io::{self, Read, Write}, + collections::HashMap, +}; use std::sync::{Arc, RwLock}; use rand_core::{RngCore, CryptoRng, SeedableRng}; @@ -9,11 +12,13 @@ use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing}; use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint}; -use group::{ff::Field, Group, GroupEncoding}; +use group::{ + ff::{Field, PrimeField}, + Group, GroupEncoding, +}; use transcript::{Transcript, RecommendedTranscript}; use dalek_ff_group as dfg; -use dleq::DLEqProof; use frost::{ dkg::lagrange, curve::Ed25519, @@ -26,10 +31,6 @@ use crate::ringct::{ clsag::{ClsagInput, Clsag}, }; -fn dleq_transcript() -> RecommendedTranscript { - RecommendedTranscript::new(b"monero_key_image_dleq") -} - impl ClsagInput { fn transcript(&self, transcript: &mut T) { // Doesn't domain separate as this is considered part of the larger CLSAG proof @@ -43,6 +44,7 @@ impl ClsagInput { // They're just an unreliable reference to this data which will be included in the message // if in use transcript.append_message(b"member", [u8::try_from(i).expect("ring size exceeded 255")]); + // This also transcripts the key image generator since it's derived from this key transcript.append_message(b"key", pair[0].compress().to_bytes()); transcript.append_message(b"commitment", pair[1].compress().to_bytes()) } @@ -70,13 +72,11 @@ impl ClsagDetails { #[derive(Clone, PartialEq, Eq, Zeroize, Debug)] pub struct ClsagAddendum { pub(crate) key_image: dfg::EdwardsPoint, - dleq: DLEqProof, } impl WriteAddendum for ClsagAddendum { fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(self.key_image.compress().to_bytes().as_ref())?; - self.dleq.write(writer) + writer.write_all(self.key_image.compress().to_bytes().as_ref()) } } @@ -97,9 +97,8 @@ pub struct ClsagMultisig { transcript: RecommendedTranscript, pub(crate) H: EdwardsPoint, - // Merged here as CLSAG needs it, passing it would be a mess, yet having it beforehand requires - // an extra round - image: EdwardsPoint, + key_image_shares: HashMap<[u8; 32], dfg::EdwardsPoint>, + image: Option, details: Arc>>, @@ -117,7 +116,8 @@ impl ClsagMultisig { transcript, H: hash_to_point(&output_key), - image: EdwardsPoint::identity(), + key_image_shares: HashMap::new(), + image: None, details, @@ -135,20 +135,6 @@ impl ClsagMultisig { } } -pub(crate) fn add_key_image_share( - image: &mut EdwardsPoint, - generator: EdwardsPoint, - offset: Scalar, - included: &[Participant], - participant: Participant, - share: EdwardsPoint, -) { - if image.is_identity().into() { - *image = generator * offset; - } - *image +=
share * lagrange::(participant, included).0; -} - impl Algorithm for ClsagMultisig { type Transcript = RecommendedTranscript; type Addendum = ClsagAddendum; @@ -160,23 +146,10 @@ impl Algorithm for ClsagMultisig { fn preprocess_addendum( &mut self, - rng: &mut R, + _rng: &mut R, keys: &ThresholdKeys, ) -> ClsagAddendum { - ClsagAddendum { - key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref(), - dleq: DLEqProof::prove( - rng, - // Doesn't take in a larger transcript object due to the usage of this - // Every prover would immediately write their own DLEq proof, when they can only do so in - // the proper order if they want to reach consensus - // It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to - // try to merge later in some form, when it should instead just merge xH (as it does) - &mut dleq_transcript(), - &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)], - keys.secret_share(), - ), - } + ClsagAddendum { key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref() } } fn read_addendum(&self, reader: &mut R) -> io::Result { @@ -190,7 +163,7 @@ impl Algorithm for ClsagMultisig { Err(io::Error::other("non-canonical key image"))?; } - Ok(ClsagAddendum { key_image: xH, dleq: DLEqProof::::read(reader)? }) + Ok(ClsagAddendum { key_image: xH }) } fn process_addendum( @@ -199,33 +172,29 @@ impl Algorithm for ClsagMultisig { l: Participant, addendum: ClsagAddendum, ) -> Result<(), FrostError> { - // TODO: This check is faulty if two shares are additive inverses of each other - if self.image.is_identity().into() { + if self.image.is_none() { self.transcript.domain_separate(b"CLSAG"); + // Transcript the ring self.input().transcript(&mut self.transcript); + // Transcript the mask self.transcript.append_message(b"mask", self.mask().to_bytes()); + + // Init the image to the offset + self.image = Some(dfg::EdwardsPoint(self.H) * view.offset()); } + // Transcript this participant's contribution self.transcript.append_message(b"participant", l.to_bytes()); - - addendum - .dleq - .verify( - &mut dleq_transcript(), - &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)], - &[view.original_verification_share(l), addendum.key_image], - ) - .map_err(|_| FrostError::InvalidPreprocess(l))?; - self.transcript.append_message(b"key_image_share", addendum.key_image.compress().to_bytes()); - add_key_image_share( - &mut self.image, - self.H, - view.offset().0, - view.included(), - l, - addendum.key_image.0, - ); + + // Accumulate the interpolated share + let interpolated_key_image_share = + addendum.key_image * lagrange::(l, view.included()); + *self.image.as_mut().unwrap() += interpolated_key_image_share; + + self + .key_image_shares + .insert(view.verification_share(l).to_bytes(), interpolated_key_image_share); Ok(()) } @@ -253,7 +222,7 @@ impl Algorithm for ClsagMultisig { #[allow(non_snake_case)] let (clsag, pseudo_out, p, c) = Clsag::sign_core( &mut rng, - &self.image, + &self.image.expect("verifying a share despite never processing any addendums").0, &self.input(), self.mask(), self.msg.as_ref().unwrap(), @@ -262,7 +231,8 @@ impl Algorithm for ClsagMultisig { ); self.interim = Some(Interim { p, c, clsag, pseudo_out }); - (-(dfg::Scalar(p) * view.secret_share().deref())) + nonces[0].deref() + // r - p x, where p is the challenge for the keys + *nonces[0] - dfg::Scalar(p) * view.secret_share().deref() } #[must_use] @@ -274,11 +244,13 @@ impl Algorithm for ClsagMultisig { ) -> Option { let interim = self.interim.as_ref().unwrap(); let mut 
clsag = interim.clsag.clone(); + // We produced shares as `r - p x`, yet the signature is `r - p x - c x` + // Subtract `c x` (saved as `c`) now clsag.s[usize::from(self.input().decoys.i)] = sum.0 - interim.c; if clsag .verify( &self.input().decoys.ring, - &self.image, + &self.image.expect("verifying a signature despite never processing any addendums").0, &interim.pseudo_out, self.msg.as_ref().unwrap(), ) @@ -296,10 +268,61 @@ share: dfg::Scalar, ) -> Result, ()> { let interim = self.interim.as_ref().unwrap(); - Ok(vec![ + + // For a share `r - p x`, the following two equalities should hold: + // - `(r - p x)G == R.0 - pV`, where `V = xG` + // - `(r - p x)H == R.1 - pK`, where `K = xH` (the key image share) + // + // This is effectively a discrete log equality proof for: + // V, K over G, H + // with nonces + // R.0, R.1 + // and solution + // s + // + // Which is a batch-verifiable rewrite of the traditional CP93 proof + // (and also writable as Generalized Schnorr Protocol) + // + // That means that given a proper challenge, this alone can be certainly argued to prove the + // key image share is well-formed and the provided signature so proves for that. + + // This is a bit funky as it doesn't prove the nonces are well-formed however. They're part of + // the prover data/transcript for a CP93/GSP proof, not part of the statement. This practically + // is fine, for a variety of reasons (given a consistent `x`, a consistent `r` can be + // extracted, and the nonces as used in CLSAG are also part of its prover data/transcript). + + let key_image_share = self.key_image_shares[&verification_share.to_bytes()]; + + // Hash every variable relevant here, using the hash output as the random weight + let mut weight_transcript = + RecommendedTranscript::new(b"monero-serai v0.1 ClsagMultisig::verify_share"); + weight_transcript.append_message(b"G", dfg::EdwardsPoint::generator().to_bytes()); + weight_transcript.append_message(b"H", self.H.to_bytes()); + weight_transcript.append_message(b"xG", verification_share.to_bytes()); + weight_transcript.append_message(b"xH", key_image_share.to_bytes()); + weight_transcript.append_message(b"rG", nonces[0][0].to_bytes()); + weight_transcript.append_message(b"rH", nonces[0][1].to_bytes()); + weight_transcript.append_message(b"c", dfg::Scalar(interim.p).to_repr()); + weight_transcript.append_message(b"s", share.to_repr()); + let weight = weight_transcript.challenge(b"weight"); + let weight = dfg::Scalar(Scalar::from_bytes_mod_order_wide(&weight.into())); + let part_one = vec![ (share, dfg::EdwardsPoint::generator()), - (dfg::Scalar(interim.p), verification_share), + // -(R.0 - pV) == -R.0 + pV (-dfg::Scalar::ONE, nonces[0][0]), - ]) + (dfg::Scalar(interim.p), verification_share), + ]; + + let mut part_two = vec![ + (weight * share, dfg::EdwardsPoint(self.H)), + // -(R.1 - pK) == -R.1 + pK + (-weight, nonces[0][1]), + (weight * dfg::Scalar(interim.p), key_image_share), + ]; + + let mut all = part_one; + all.append(&mut part_two); + Ok(all) } } diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index 02626e6a..a5be404a 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -18,6 +18,7 @@ use transcript::{Transcript, RecommendedTranscript}; use frost::{ curve::Ed25519, Participant, FrostError, ThresholdKeys, + dkg::lagrange, sign::{ Writable, Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine, SignatureMachine,
AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine, @@ -27,7 +28,7 @@ use frost::{ use crate::{ random_scalar, ringct::{ - clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig, add_key_image_share}, + clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig}, RctPrunable, }, transaction::{Input, Transaction}, @@ -261,8 +262,13 @@ impl SignMachine for TransactionSignMachine { included.push(self.i); included.sort_unstable(); - // Convert the unified commitments to a Vec of the individual commitments + // Start calculating the key images, as needed on the TX level let mut images = vec![EdwardsPoint::identity(); self.clsags.len()]; + for (image, (generator, offset)) in images.iter_mut().zip(&self.key_images) { + *image = generator * offset; + } + + // Convert the serialized nonce commitments to a parallelized Vec let mut commitments = (0 .. self.clsags.len()) .map(|c| { included @@ -291,14 +297,7 @@ impl SignMachine for TransactionSignMachine { // provides the easiest API overall, as this is where the TX is (which needs the key // images in its message), along with where the outputs are determined (where our // outputs may need these in order to guarantee uniqueness) - add_key_image_share( - &mut images[c], - self.key_images[c].0, - self.key_images[c].1, - &included, - *l, - preprocess.addendum.key_image.0, - ); + images[c] += preprocess.addendum.key_image.0 * lagrange::(*l, &included).0; Ok((*l, preprocess)) }) diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index 128a3667..b89d5290 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -38,7 +38,6 @@ ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["std", "batch"] } schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["std"] } -dleq = { path = "../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] } dkg = { path = "../dkg", version = "^0.5.1", default-features = false, features = ["std"] } diff --git a/crypto/frost/src/algorithm.rs b/crypto/frost/src/algorithm.rs index f2da59ea..0b0abd6c 100644 --- a/crypto/frost/src/algorithm.rs +++ b/crypto/frost/src/algorithm.rs @@ -39,6 +39,13 @@ pub trait Algorithm: Send + Sync + Clone { /// Obtain the list of nonces to generate, as specified by the generators to create commitments /// against per-nonce. + /// + /// The Algorithm is responsible for all transcripting of these nonce specifications/generators. + /// + /// The prover will be passed the commitments, and the commitments will be sent to all other + /// participants. No guarantees the commitments are internally consistent (have the same discrete + /// logarithm across generators) are made. Any Algorithm which specifies multiple generators for + /// a single nonce must handle that itself. fn nonces(&self) -> Vec>; /// Generate an addendum to FROST's preprocessing stage.
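Reviewer note, not part of the patch: the doc comment above is what licenses removing the nonce DLEq machinery in the next file — cross-generator consistency moves into the Algorithm, and for CLSAG it is carried by `verify_share`. Using the symbols from the comments in that function (V = xG the verification share, K = xH the key image share, R_0/R_1 the nonce commitments over G and H, written R.0/R.1 there, p the challenge component, s the share, w the transcript-derived weight), the statement the returned multiexp encodes can be written as:

```latex
\begin{align*}
  s &= r - p x \\
  sG &= R_0 - pV \qquad sH = R_1 - pK \\
  (sG - R_0 + pV) + w\,(sH - R_1 + pK) &= 0
\end{align*}
```

Since w is a hash of every term involved, the last equation holding for that w implies, except with negligible probability, that both per-generator relations hold — the same discrete log equality over G and H that the removed `DLEqProof` previously asserted for the key image share.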
diff --git a/crypto/frost/src/nonce.rs b/crypto/frost/src/nonce.rs index 8638baff..f76f9bc4 100644 --- a/crypto/frost/src/nonce.rs +++ b/crypto/frost/src/nonce.rs @@ -1,13 +1,9 @@ // FROST defines its nonce as sum(Di, Ei * bi) -// Monero needs not just the nonce over G however, yet also over H -// Then there is a signature (a modified Chaum Pedersen proof) using multiple nonces at once // -// Accordingly, in order for this library to be robust, it supports generating an arbitrary amount -// of nonces, each against an arbitrary list of generators +// In order for this library to be robust, it supports generating an arbitrary amount of nonces, +// each against an arbitrary list of generators // // Each nonce remains of the form (d, e) and made into a proper nonce with d + (e * b) -// When representations across multiple generators are provided, a DLEq proof is also provided to -// confirm their integrity use core::ops::Deref; use std::{ @@ -24,32 +20,8 @@ use transcript::Transcript; use ciphersuite::group::{ff::PrimeField, Group, GroupEncoding}; use multiexp::multiexp_vartime; -use dleq::MultiDLEqProof; - use crate::{curve::Curve, Participant}; -// Transcript used to aggregate binomial nonces for usage within a single DLEq proof. -fn aggregation_transcript(context: &[u8]) -> T { - let mut transcript = T::new(b"FROST DLEq Aggregation v0.5"); - transcript.append_message(b"context", context); - transcript -} - -// Every participant proves for their commitments at the start of the protocol -// These proofs are verified sequentially, requiring independent transcripts -// In order to make these transcripts more robust, the FROST transcript (at time of preprocess) is -// challenged in order to create a commitment to it, carried in each independent transcript -// (effectively forking the original transcript) -// -// For FROST, as defined by the IETF, this will do nothing (and this transcript will never even be -// constructed). For higher level protocols, the transcript may have contextual info these proofs -// will then be bound to -fn dleq_transcript(context: &[u8]) -> T { - let mut transcript = T::new(b"FROST Commitments DLEq v0.5"); - transcript.append_message(b"context", context); - transcript -} - // Each nonce is actually a pair of random scalars, notated as d, e under the FROST paper // This is considered a single nonce as r = d + be #[derive(Clone, Zeroize)] @@ -69,7 +41,7 @@ impl GeneratorCommitments { } } -// A single nonce's commitments and relevant proofs +// A single nonce's commitments #[derive(Clone, PartialEq, Eq)] pub(crate) struct NonceCommitments { // Called generators as these commitments are indexed by generator later on @@ -121,12 +93,6 @@ impl NonceCommitments { t.append_message(b"commitment_E", commitments.0[1].to_bytes()); } } - - fn aggregation_factor(&self, context: &[u8]) -> C::F { - let mut transcript = aggregation_transcript::(context); - self.transcript(&mut transcript); - ::hash_to_F(b"dleq_aggregation", transcript.challenge(b"binding").as_ref()) - } } /// Commitments for all the nonces across all their generators. 
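For readers following the nonce refactor (commentary, not part of the patch): a minimal sketch of the binomial nonce construction the comments above describe, with illustrative names rather than this crate's API. With the DLEq proof gone, nothing at this layer asserts that the per-generator commitments share a discrete logarithm; an Algorithm requesting multiple generators per nonce now has to check that itself.

```rust
use group::{ff::Field, Group};
use rand_core::{CryptoRng, RngCore};

// Hypothetical helper, not the crate's API: sample one (d, e) pair and commit to it
// under every requested generator. No cross-generator consistency proof is attached.
fn commit_pair<R: RngCore + CryptoRng, G: Group>(
  rng: &mut R,
  generators: &[G],
) -> ((G::Scalar, G::Scalar), Vec<(G, G)>) {
  let (d, e) = (G::Scalar::random(&mut *rng), G::Scalar::random(&mut *rng));
  // One (D, E) pair per generator
  let commitments = generators.iter().map(|g| (*g * d, *g * e)).collect();
  ((d, e), commitments)
}

// The pair only becomes the nonce actually signed with once the binding factor b is known:
// r = d + (b * e)
fn bind<F: Field>(d: F, e: F, b: F) -> F {
  d + (b * e)
}
```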
@@ -135,51 +101,26 @@ pub(crate) struct Commitments { // Called nonces as these commitments are indexed by nonce // So to get the commitments for the first nonce, it'd be commitments.nonces[0] pub(crate) nonces: Vec>, - // DLEq Proof proving that each set of commitments were generated using a single pair of discrete - // logarithms - pub(crate) dleq: Option>, } impl Commitments { - pub(crate) fn new( + pub(crate) fn new( rng: &mut R, secret_share: &Zeroizing, planned_nonces: &[Vec], - context: &[u8], ) -> (Vec>, Commitments) { let mut nonces = vec![]; let mut commitments = vec![]; - let mut dleq_generators = vec![]; - let mut dleq_nonces = vec![]; for generators in planned_nonces { let (nonce, these_commitments): (Nonce, _) = NonceCommitments::new(&mut *rng, secret_share, generators); - if generators.len() > 1 { - dleq_generators.push(generators.clone()); - dleq_nonces.push(Zeroizing::new( - (these_commitments.aggregation_factor::(context) * nonce.0[1].deref()) + - nonce.0[0].deref(), - )); - } - nonces.push(nonce); commitments.push(these_commitments); } - let dleq = if !dleq_generators.is_empty() { - Some(MultiDLEqProof::prove( - rng, - &mut dleq_transcript::(context), - &dleq_generators, - &dleq_nonces, - )) - } else { - None - }; - - (nonces, Commitments { nonces: commitments, dleq }) + (nonces, Commitments { nonces: commitments }) } pub(crate) fn transcript(&self, t: &mut T) { @@ -187,58 +128,20 @@ impl Commitments { for nonce in &self.nonces { nonce.transcript(t); } - - // Transcripting the DLEqs implicitly transcripts the exact generators used for the nonces in - // an exact order - // This means it shouldn't be possible for variadic generators to cause conflicts - if let Some(dleq) = &self.dleq { - t.append_message(b"dleq", dleq.serialize()); - } } - pub(crate) fn read( - reader: &mut R, - generators: &[Vec], - context: &[u8], - ) -> io::Result { + pub(crate) fn read(reader: &mut R, generators: &[Vec]) -> io::Result { let nonces = (0 .. 
generators.len()) .map(|i| NonceCommitments::read(reader, &generators[i])) .collect::>, _>>()?; - let mut dleq_generators = vec![]; - let mut dleq_nonces = vec![]; - for (generators, nonce) in generators.iter().cloned().zip(&nonces) { - if generators.len() > 1 { - let binding = nonce.aggregation_factor::(context); - let mut aggregated = vec![]; - for commitments in &nonce.generators { - aggregated.push(commitments.0[0] + (commitments.0[1] * binding)); - } - dleq_generators.push(generators); - dleq_nonces.push(aggregated); - } - } - - let dleq = if !dleq_generators.is_empty() { - let dleq = MultiDLEqProof::read(reader, dleq_generators.len())?; - dleq - .verify(&mut dleq_transcript::(context), &dleq_generators, &dleq_nonces) - .map_err(|_| io::Error::other("invalid DLEq proof"))?; - Some(dleq) - } else { - None - }; - - Ok(Commitments { nonces, dleq }) + Ok(Commitments { nonces }) } pub(crate) fn write(&self, writer: &mut W) -> io::Result<()> { for nonce in &self.nonces { nonce.write(writer)?; } - if let Some(dleq) = &self.dleq { - dleq.write(writer)?; - } Ok(()) } } diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index a716dc58..73ea0a7d 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -125,14 +125,8 @@ impl> AlgorithmMachine { let mut params = self.params; let mut rng = ChaCha20Rng::from_seed(*seed.0); - // Get a challenge to the existing transcript for use when proving for the commitments - let commitments_challenge = params.algorithm.transcript().challenge(b"commitments"); - let (nonces, commitments) = Commitments::new::<_, A::Transcript>( - &mut rng, - params.keys.secret_share(), - ¶ms.algorithm.nonces(), - commitments_challenge.as_ref(), - ); + let (nonces, commitments) = + Commitments::new::<_>(&mut rng, params.keys.secret_share(), ¶ms.algorithm.nonces()); let addendum = params.algorithm.preprocess_addendum(&mut rng, ¶ms.keys); let preprocess = Preprocess { commitments, addendum }; @@ -141,27 +135,18 @@ impl> AlgorithmMachine { let mut blame_entropy = [0; 32]; rng.fill_bytes(&mut blame_entropy); ( - AlgorithmSignMachine { - params, - seed, - commitments_challenge, - nonces, - preprocess: preprocess.clone(), - blame_entropy, - }, + AlgorithmSignMachine { params, seed, nonces, preprocess: preprocess.clone(), blame_entropy }, preprocess, ) } #[cfg(any(test, feature = "tests"))] pub(crate) fn unsafe_override_preprocess( - mut self, + self, nonces: Vec>, preprocess: Preprocess, ) -> AlgorithmSignMachine { AlgorithmSignMachine { - commitments_challenge: self.params.algorithm.transcript().challenge(b"commitments"), - params: self.params, seed: CachedPreprocess(Zeroizing::new([0; 32])), @@ -255,8 +240,6 @@ pub struct AlgorithmSignMachine> { params: Params, seed: CachedPreprocess, - #[zeroize(skip)] - commitments_challenge: ::Challenge, pub(crate) nonces: Vec>, // Skips the preprocess due to being too large a bound to feasibly enforce on users #[zeroize(skip)] @@ -285,11 +268,7 @@ impl> SignMachine for AlgorithmSignMachi fn read_preprocess(&self, reader: &mut R) -> io::Result { Ok(Preprocess { - commitments: Commitments::read::<_, A::Transcript>( - reader, - &self.params.algorithm.nonces(), - self.commitments_challenge.as_ref(), - )?, + commitments: Commitments::read::<_>(reader, &self.params.algorithm.nonces())?, addendum: self.params.algorithm.read_addendum(reader)?, }) } diff --git a/crypto/frost/src/tests/mod.rs b/crypto/frost/src/tests/mod.rs index e457c703..f93a5fbf 100644 --- a/crypto/frost/src/tests/mod.rs +++ b/crypto/frost/src/tests/mod.rs @@ 
-12,7 +12,7 @@ use crate::{ /// Tests for the nonce handling code. pub mod nonces; -use nonces::{test_multi_nonce, test_invalid_commitment, test_invalid_dleq_proof}; +use nonces::test_multi_nonce; /// Vectorized test suite to ensure consistency. pub mod vectors; @@ -267,6 +267,4 @@ pub fn test_ciphersuite>(rng: &mut test_schnorr_blame::(rng); test_multi_nonce::(rng); - test_invalid_commitment::(rng); - test_invalid_dleq_proof::(rng); } diff --git a/crypto/frost/src/tests/nonces.rs b/crypto/frost/src/tests/nonces.rs index ee060bef..7b1480e9 100644 --- a/crypto/frost/src/tests/nonces.rs +++ b/crypto/frost/src/tests/nonces.rs @@ -9,14 +9,12 @@ use transcript::{Transcript, RecommendedTranscript}; use ciphersuite::group::{ff::Field, Group, GroupEncoding}; -use dleq::MultiDLEqProof; pub use dkg::tests::{key_gen, recover_key}; use crate::{ Curve, Participant, ThresholdView, ThresholdKeys, FrostError, algorithm::Algorithm, - sign::{Writable, SignMachine}, - tests::{algorithm_machines, preprocess, sign}, + tests::{algorithm_machines, sign}, }; #[derive(Clone)] @@ -157,75 +155,3 @@ pub fn test_multi_nonce(rng: &mut R) { let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); sign(&mut *rng, &MultiNonce::::new(), keys.clone(), machines, &[]); } - -/// Test malleating a commitment for a nonce across generators causes the preprocess to error. -pub fn test_invalid_commitment(rng: &mut R) { - let keys = key_gen::(&mut *rng); - let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); - let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {}); - - // Select a random participant to give an invalid commitment - let participants = preprocesses.keys().collect::>(); - let faulty = *participants - [usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()]; - - // Grab their preprocess - let mut preprocess = preprocesses.remove(&faulty).unwrap(); - - // Mutate one of the commitments - let nonce = - preprocess.commitments.nonces.get_mut(usize::try_from(rng.next_u64()).unwrap() % 2).unwrap(); - let generators_len = nonce.generators.len(); - nonce.generators[usize::try_from(rng.next_u64()).unwrap() % generators_len].0 - [usize::try_from(rng.next_u64()).unwrap() % 2] = C::G::random(&mut *rng); - - // The commitments are validated at time of deserialization (read_preprocess) - // Accordingly, serialize it and read it again to make sure that errors - assert!(machines - .iter() - .next() - .unwrap() - .1 - .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref()) - .is_err()); -} - -/// Test malleating the DLEq proof for a preprocess causes it to error. 
-pub fn test_invalid_dleq_proof(rng: &mut R) { - let keys = key_gen::(&mut *rng); - let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); - let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {}); - - // Select a random participant to give an invalid DLEq proof - let participants = preprocesses.keys().collect::>(); - let faulty = *participants - [usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()]; - - // Invalidate it by replacing it with a completely different proof - let dlogs = [Zeroizing::new(C::F::random(&mut *rng)), Zeroizing::new(C::F::random(&mut *rng))]; - let mut preprocess = preprocesses.remove(&faulty).unwrap(); - preprocess.commitments.dleq = Some(MultiDLEqProof::prove( - &mut *rng, - &mut RecommendedTranscript::new(b"Invalid DLEq Proof"), - &nonces::(), - &dlogs, - )); - - assert!(machines - .iter() - .next() - .unwrap() - .1 - .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref()) - .is_err()); - - // Also test None for a proof will cause an error - preprocess.commitments.dleq = None; - assert!(machines - .iter() - .next() - .unwrap() - .1 - .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref()) - .is_err()); -} diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs index 3356a6cd..7be6478a 100644 --- a/crypto/frost/src/tests/vectors.rs +++ b/crypto/frost/src/tests/vectors.rs @@ -14,7 +14,7 @@ use ciphersuite::group::{ff::PrimeField, GroupEncoding}; use crate::{ curve::Curve, Participant, ThresholdCore, ThresholdKeys, - algorithm::{IetfTranscript, Hram, IetfSchnorr}, + algorithm::{Hram, IetfSchnorr}, sign::{ Writable, Nonce, GeneratorCommitments, NonceCommitments, Commitments, Preprocess, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, @@ -191,7 +191,6 @@ pub fn test_with_vectors>( nonces: vec![NonceCommitments { generators: vec![GeneratorCommitments(these_commitments)], }], - dleq: None, }, addendum: (), }; @@ -301,12 +300,8 @@ pub fn test_with_vectors>( } // Also test it at the Commitments level - let (generated_nonces, commitments) = Commitments::::new::<_, IetfTranscript>( - &mut TransparentRng(randomness), - &share, - &[vec![C::generator()]], - &[], - ); + let (generated_nonces, commitments) = + Commitments::::new::<_>(&mut TransparentRng(randomness), &share, &[vec![C::generator()]]); assert_eq!(generated_nonces.len(), 1); assert_eq!(generated_nonces[0].0, [nonces[0].clone(), nonces[1].clone()]);
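Closing note (commentary, not part of the patch): with `add_key_image_share` removed, both `ClsagMultisig::process_addendum` and the transaction sign machine assemble the key image the same way — seed it with the group key's offset over the key image generator, then accumulate each participant's Lagrange-interpolated share. A rough sketch of that shape, using a hypothetical helper name:

```rust
use curve25519_dalek::{edwards::EdwardsPoint, scalar::Scalar};

// Hypothetical helper mirroring the accumulation done in the diff: the key image is the
// offset term plus the sum of the interpolated key image shares.
fn assemble_key_image(
  generator: EdwardsPoint,              // H, the key image generator for this output key
  offset: Scalar,                       // the ThresholdKeys offset
  interpolated_shares: &[EdwardsPoint], // each x_l * H, already scaled by lagrange(l, included)
) -> EdwardsPoint {
  let mut image = generator * offset;
  for share in interpolated_shares {
    image += share;
  }
  image
}
```

The diff itself works with the dalek-ff-group wrappers (`dfg::EdwardsPoint`, `dfg::Scalar`), unwrapping with `.0` where curve25519-dalek types are needed.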