Merge branch 'develop' into tendermint

This commit is contained in:
Luke Parker 2022-10-29 05:10:21 -04:00
commit aec36377c0
No known key found for this signature in database
GPG key ID: F9F1386DB1E119B6
60 changed files with 2892 additions and 2145 deletions

121
Cargo.lock generated
View file

@ -43,7 +43,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8"
dependencies = [
"cfg-if",
"cipher",
"cipher 0.3.0",
"cpufeatures",
"opaque-debug 0.3.0",
]
@ -56,7 +56,7 @@ checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6"
dependencies = [
"aead",
"aes",
"cipher",
"cipher 0.3.0",
"ctr",
"ghash",
"subtle",
@ -818,11 +818,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6"
dependencies = [
"cfg-if",
"cipher",
"cipher 0.3.0",
"cpufeatures",
"zeroize",
]
[[package]]
name = "chacha20"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7fc89c7c5b9e7a02dfe45cd2367bae382f9ed31c61ca8debe5f827c420a2f08"
dependencies = [
"cfg-if",
"cipher 0.4.3",
"cpufeatures",
]
[[package]]
name = "chacha20poly1305"
version = "0.9.1"
@ -830,8 +841,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5"
dependencies = [
"aead",
"chacha20",
"cipher",
"chacha20 0.8.2",
"cipher 0.3.0",
"poly1305",
"zeroize",
]
@ -873,6 +884,36 @@ dependencies = [
"generic-array 0.14.6",
]
[[package]]
name = "cipher"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d1873270f8f7942c191139cb8a40fd228da6c3fd2fc376d7e92d47aa14aeb59e"
dependencies = [
"crypto-common",
"inout",
"zeroize",
]
[[package]]
name = "ciphersuite"
version = "0.1.1"
dependencies = [
"dalek-ff-group",
"digest 0.10.5",
"elliptic-curve",
"ff",
"group",
"k256",
"minimal-ed448",
"p256",
"rand_core 0.6.4",
"sha2 0.10.6",
"sha3",
"subtle",
"zeroize",
]
[[package]]
name = "clang-sys"
version = "1.4.0"
@ -1361,7 +1402,7 @@ version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea"
dependencies = [
"cipher",
"cipher 0.3.0",
]
[[package]]
@ -1612,9 +1653,29 @@ dependencies = [
"winapi",
]
[[package]]
name = "dkg"
version = "0.1.0"
dependencies = [
"chacha20 0.9.0",
"ciphersuite",
"digest 0.10.5",
"dleq",
"flexible-transcript",
"group",
"hex",
"hkdf",
"multiexp",
"rand_core 0.6.4",
"schnorr-signatures",
"subtle",
"thiserror",
"zeroize",
]
[[package]]
name = "dleq"
version = "0.1.1"
version = "0.1.2"
dependencies = [
"blake2",
"dalek-ff-group",
@ -2975,6 +3036,15 @@ version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0"
[[package]]
name = "hkdf"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437"
dependencies = [
"hmac 0.12.1",
]
[[package]]
name = "hmac"
version = "0.8.1"
@ -3454,6 +3524,15 @@ dependencies = [
"synstructure",
]
[[package]]
name = "inout"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5"
dependencies = [
"generic-array 0.14.6",
]
[[package]]
name = "instant"
version = "0.1.12"
@ -4492,23 +4571,23 @@ dependencies = [
[[package]]
name = "modular-frost"
version = "0.2.4"
version = "0.3.0"
dependencies = [
"chacha20 0.9.0",
"ciphersuite",
"dalek-ff-group",
"digest 0.10.5",
"dkg",
"dleq",
"elliptic-curve",
"ff",
"flexible-transcript",
"group",
"hex",
"k256",
"hkdf",
"minimal-ed448",
"multiexp",
"p256",
"rand_core 0.6.4",
"schnorr-signatures",
"serde_json",
"sha2 0.10.6",
"sha3",
"subtle",
"thiserror",
"zeroize",
@ -6389,7 +6468,7 @@ version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c0fbb5f676da676c260ba276a8f43a8dc67cf02d1438423aeb1c677a7212686"
dependencies = [
"cipher",
"cipher 0.3.0",
]
[[package]]
@ -7260,6 +7339,18 @@ dependencies = [
"windows-sys 0.36.1",
]
[[package]]
name = "schnorr-signatures"
version = "0.1.0"
dependencies = [
"ciphersuite",
"dalek-ff-group",
"group",
"multiexp",
"rand_core 0.6.4",
"zeroize",
]
[[package]]
name = "schnorrkel"
version = "0.9.1"

View file

@ -4,10 +4,13 @@ members = [
"crypto/dalek-ff-group",
"crypto/ed448",
"crypto/ciphersuite",
"crypto/multiexp",
"crypto/schnorr",
"crypto/dleq",
"crypto/dkg",
"crypto/frost",
"coins/ethereum",

View file

@ -24,7 +24,7 @@ sha3 = "0.10"
group = "0.12"
k256 = { version = "0.11", features = ["arithmetic", "keccak256", "ecdsa"] }
frost = { package = "modular-frost", path = "../../crypto/frost", features = ["secp256k1"] }
frost = { package = "modular-frost", path = "../../crypto/frost", features = ["secp256k1", "tests"] }
eyre = "0.6"

View file

@ -34,7 +34,7 @@ dalek-ff-group = { path = "../../crypto/dalek-ff-group", version = "0.1" }
multiexp = { path = "../../crypto/multiexp", version = "0.2", features = ["batch"] }
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.1", features = ["recommended"], optional = true }
frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.2", features = ["ed25519"], optional = true }
frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.3", features = ["ed25519"], optional = true }
dleq = { path = "../../crypto/dleq", version = "0.1", features = ["serialize"], optional = true }
monero-generators = { path = "generators", version = "0.1" }
@ -55,7 +55,7 @@ monero-generators = { path = "generators", version = "0.1" }
[dev-dependencies]
tokio = { version = "1", features = ["full"] }
frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.2", features = ["ed25519", "tests"] }
frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.3", features = ["ed25519", "tests"] }
[features]
multisig = ["rand_chacha", "blake2", "transcript", "frost", "dleq"]

View file

@ -1,73 +0,0 @@
use std::io::Read;
use thiserror::Error;
use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize;
use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};
use group::{Group, GroupEncoding};
use transcript::{Transcript, RecommendedTranscript};
use dalek_ff_group as dfg;
use dleq::DLEqProof;
/// Error returned by the multisig key image DLEq helpers.
#[derive(Clone, Error, Debug)]
pub(crate) enum MultisigError {
  /// The DLEq proof tying a key image share to a verification share was invalid.
  /// The contained u16 identifies the offending participant.
  #[error("invalid discrete log equality proof")]
  InvalidDLEqProof(u16),
}
/// Transcript used for the key image DLEq proofs.
// A fresh transcript is created per proof; see the comment in write_dleq for why no larger
// transcript object is threaded through.
fn transcript() -> RecommendedTranscript {
  RecommendedTranscript::new(b"monero_key_image_dleq")
}
/// Prove and serialize a DLEq proof that xG and xH share the discrete logarithm x.
///
/// Used so a signer can prove their key image share (xH) corresponds to their public share
/// (xG). The secret scalar x is zeroized before returning.
#[allow(non_snake_case)]
pub(crate) fn write_dleq<R: RngCore + CryptoRng>(
  rng: &mut R,
  H: EdwardsPoint,
  mut x: Scalar,
) -> Vec<u8> {
  let mut res = Vec::with_capacity(64);
  DLEqProof::prove(
    rng,
    // Doesn't take in a larger transcript object due to the usage of this
    // Every prover would immediately write their own DLEq proof, when they can only do so in
    // the proper order if they want to reach consensus
    // It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to try to
    // merge later in some form, when it should instead just merge xH (as it does)
    &mut transcript(),
    &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(H)],
    dfg::Scalar(x),
  )
  .serialize(&mut res)
  .unwrap();
  x.zeroize();
  res
}
/// Read and verify participant l's key image share and its DLEq proof.
///
/// Reads the claimed key image share xH from `serialized`, rejecting torsioned or
/// non-canonical encodings, then verifies the DLEq proof that it shares a discrete logarithm
/// with the participant's verification share xG. Returns the verified key image share.
#[allow(non_snake_case)]
pub(crate) fn read_dleq<Re: Read>(
  serialized: &mut Re,
  H: EdwardsPoint,
  l: u16,
  xG: dfg::EdwardsPoint,
) -> Result<dfg::EdwardsPoint, MultisigError> {
  let mut bytes = [0; 32];
  serialized.read_exact(&mut bytes).map_err(|_| MultisigError::InvalidDLEqProof(l))?;
  // dfg ensures the point is torsion free
  let xH = Option::<dfg::EdwardsPoint>::from(dfg::EdwardsPoint::from_bytes(&bytes))
    .ok_or(MultisigError::InvalidDLEqProof(l))?;
  // Ensure this is a canonical point (re-serialization must match the read bytes)
  if xH.to_bytes() != bytes {
    Err(MultisigError::InvalidDLEqProof(l))?;
  }

  DLEqProof::<dfg::EdwardsPoint>::deserialize(serialized)
    .map_err(|_| MultisigError::InvalidDLEqProof(l))?
    .verify(&mut transcript(), &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(H)], &[xG, xH])
    .map_err(|_| MultisigError::InvalidDLEqProof(l))?;

  Ok(xH)
}

View file

@ -33,9 +33,6 @@ use curve25519_dalek::{
pub use monero_generators::H;
#[cfg(feature = "multisig")]
pub(crate) mod frost;
mod serialize;
/// RingCT structs and functionality.

View file

@ -22,7 +22,7 @@ use crate::{
#[cfg(feature = "multisig")]
mod multisig;
#[cfg(feature = "multisig")]
pub use multisig::{ClsagDetails, ClsagMultisig};
pub use multisig::{ClsagDetails, ClsagAddendum, ClsagMultisig};
lazy_static! {
static ref INV_EIGHT: Scalar = Scalar::from(8u8).invert();

View file

@ -1,6 +1,6 @@
use core::fmt::Debug;
use std::{
io::Read,
io::{self, Read, Write},
sync::{Arc, RwLock},
};
@ -16,20 +16,26 @@ use curve25519_dalek::{
edwards::EdwardsPoint,
};
use group::Group;
use group::{Group, GroupEncoding};
use transcript::{Transcript, RecommendedTranscript};
use frost::{curve::Ed25519, FrostError, FrostView, algorithm::Algorithm};
use dalek_ff_group as dfg;
use crate::{
frost::{write_dleq, read_dleq},
ringct::{
hash_to_point,
clsag::{ClsagInput, Clsag},
},
use dleq::DLEqProof;
use frost::{
curve::Ed25519,
FrostError, ThresholdView,
algorithm::{WriteAddendum, Algorithm},
};
use crate::ringct::{
hash_to_point,
clsag::{ClsagInput, Clsag},
};
fn dleq_transcript() -> RecommendedTranscript {
RecommendedTranscript::new(b"monero_key_image_dleq")
}
impl ClsagInput {
fn transcript<T: Transcript>(&self, transcript: &mut T) {
// Doesn't domain separate as this is considered part of the larger CLSAG proof
@ -54,7 +60,7 @@ impl ClsagInput {
}
}
/// CLSAG Input and the mask to use for it.
/// CLSAG input and the mask to use for it.
#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)]
pub struct ClsagDetails {
input: ClsagInput,
@ -67,6 +73,20 @@ impl ClsagDetails {
}
}
/// Addendum produced during the FROST signing process with relevant data.
#[derive(Clone, PartialEq, Eq, Zeroize, Debug)]
pub struct ClsagAddendum {
pub(crate) key_image: dfg::EdwardsPoint,
dleq: DLEqProof<dfg::EdwardsPoint>,
}
impl WriteAddendum for ClsagAddendum {
fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(self.key_image.compress().to_bytes().as_ref())?;
self.dleq.serialize(writer)
}
}
#[allow(non_snake_case)]
#[derive(Clone, PartialEq, Eq, Debug)]
struct Interim {
@ -113,10 +133,6 @@ impl ClsagMultisig {
}
}
pub(crate) const fn serialized_len() -> usize {
32 + (2 * 32)
}
fn input(&self) -> ClsagInput {
(*self.details.read().unwrap()).as_ref().unwrap().input.clone()
}
@ -128,6 +144,7 @@ impl ClsagMultisig {
impl Algorithm<Ed25519> for ClsagMultisig {
type Transcript = RecommendedTranscript;
type Addendum = ClsagAddendum;
type Signature = (Clsag, EdwardsPoint);
fn nonces(&self) -> Vec<Vec<dfg::EdwardsPoint>> {
@ -137,19 +154,43 @@ impl Algorithm<Ed25519> for ClsagMultisig {
fn preprocess_addendum<R: RngCore + CryptoRng>(
&mut self,
rng: &mut R,
view: &FrostView<Ed25519>,
) -> Vec<u8> {
let mut serialized = Vec::with_capacity(Self::serialized_len());
serialized.extend((view.secret_share().0 * self.H).compress().to_bytes());
serialized.extend(write_dleq(rng, self.H, view.secret_share().0));
serialized
view: &ThresholdView<Ed25519>,
) -> ClsagAddendum {
ClsagAddendum {
key_image: dfg::EdwardsPoint(self.H * view.secret_share().0),
dleq: DLEqProof::prove(
rng,
// Doesn't take in a larger transcript object due to the usage of this
// Every prover would immediately write their own DLEq proof, when they can only do so in
// the proper order if they want to reach consensus
// It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to
// try to merge later in some form, when it should instead just merge xH (as it does)
&mut dleq_transcript(),
&[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)],
dfg::Scalar(view.secret_share().0),
),
}
}
fn process_addendum<Re: Read>(
fn read_addendum<R: Read>(&self, reader: &mut R) -> io::Result<ClsagAddendum> {
let mut bytes = [0; 32];
reader.read_exact(&mut bytes)?;
// dfg ensures the point is torsion free
let xH = Option::<dfg::EdwardsPoint>::from(dfg::EdwardsPoint::from_bytes(&bytes))
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "invalid key image"))?;
// Ensure this is a canonical point
if xH.to_bytes() != bytes {
Err(io::Error::new(io::ErrorKind::Other, "non-canonical key image"))?;
}
Ok(ClsagAddendum { key_image: xH, dleq: DLEqProof::<dfg::EdwardsPoint>::deserialize(reader)? })
}
fn process_addendum(
&mut self,
view: &FrostView<Ed25519>,
view: &ThresholdView<Ed25519>,
l: u16,
serialized: &mut Re,
addendum: ClsagAddendum,
) -> Result<(), FrostError> {
if self.image.is_identity() {
self.transcript.domain_separate(b"CLSAG");
@ -158,11 +199,20 @@ impl Algorithm<Ed25519> for ClsagMultisig {
}
self.transcript.append_message(b"participant", &l.to_be_bytes());
let image = read_dleq(serialized, self.H, l, view.verification_share(l))
.map_err(|_| FrostError::InvalidCommitment(l))?
.0;
self.transcript.append_message(b"key_image_share", image.compress().to_bytes().as_ref());
self.image += image;
addendum
.dleq
.verify(
&mut dleq_transcript(),
&[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)],
&[view.verification_share(l), addendum.key_image],
)
.map_err(|_| FrostError::InvalidPreprocess(l))?;
self
.transcript
.append_message(b"key_image_share", addendum.key_image.compress().to_bytes().as_ref());
self.image += addendum.key_image.0;
Ok(())
}
@ -173,7 +223,7 @@ impl Algorithm<Ed25519> for ClsagMultisig {
fn sign_share(
&mut self,
view: &FrostView<Ed25519>,
view: &ThresholdView<Ed25519>,
nonce_sums: &[Vec<dfg::EdwardsPoint>],
nonces: &[dfg::Scalar],
msg: &[u8],

View file

@ -19,10 +19,7 @@ use crate::{
},
};
#[cfg(feature = "multisig")]
use crate::{
frost::MultisigError,
ringct::clsag::{ClsagDetails, ClsagMultisig},
};
use crate::ringct::clsag::{ClsagDetails, ClsagMultisig};
#[cfg(feature = "multisig")]
use frost::tests::{key_gen, algorithm_machines, sign};
@ -79,7 +76,7 @@ fn clsag() {
#[cfg(feature = "multisig")]
#[test]
fn clsag_multisig() -> Result<(), MultisigError> {
fn clsag_multisig() {
let keys = key_gen::<_, Ed25519>(&mut OsRng);
let randomness = random_scalar(&mut OsRng);
@ -125,6 +122,4 @@ fn clsag_multisig() -> Result<(), MultisigError> {
),
&[1; 32],
);
Ok(())
}

View file

@ -1,5 +1,5 @@
use std::{
io::{Read, Cursor},
io::{self, Read},
sync::{Arc, RwLock},
collections::HashMap,
};
@ -7,26 +7,22 @@ use std::{
use rand_core::{RngCore, CryptoRng, SeedableRng};
use rand_chacha::ChaCha20Rng;
use curve25519_dalek::{
traits::Identity,
scalar::Scalar,
edwards::{EdwardsPoint, CompressedEdwardsY},
};
use curve25519_dalek::{traits::Identity, scalar::Scalar, edwards::EdwardsPoint};
use transcript::{Transcript, RecommendedTranscript};
use frost::{
curve::Ed25519,
FrostError, FrostKeys,
FrostError, ThresholdKeys,
sign::{
PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, AlgorithmSignMachine,
AlgorithmSignatureMachine,
Writable, Preprocess, SignatureShare, PreprocessMachine, SignMachine, SignatureMachine,
AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine,
},
};
use crate::{
random_scalar,
ringct::{
clsag::{ClsagInput, ClsagDetails, ClsagMultisig},
clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig},
RctPrunable,
},
transaction::{Input, Transaction},
@ -58,7 +54,7 @@ pub struct TransactionSignMachine {
inputs: Vec<Arc<RwLock<Option<ClsagDetails>>>>,
clsags: Vec<AlgorithmSignMachine<Ed25519, ClsagMultisig>>,
our_preprocess: Vec<u8>,
our_preprocess: Vec<Preprocess<Ed25519, ClsagAddendum>>,
}
pub struct TransactionSignatureMachine {
@ -72,7 +68,7 @@ impl SignableTransaction {
pub async fn multisig(
self,
rpc: &Rpc,
keys: FrostKeys<Ed25519>,
keys: ThresholdKeys<Ed25519>,
mut transcript: RecommendedTranscript,
height: usize,
mut included: Vec<u16>,
@ -166,28 +162,26 @@ impl SignableTransaction {
}
impl PreprocessMachine for TransactionMachine {
type Preprocess = Vec<Preprocess<Ed25519, ClsagAddendum>>;
type Signature = Transaction;
type SignMachine = TransactionSignMachine;
fn preprocess<R: RngCore + CryptoRng>(
mut self,
rng: &mut R,
) -> (TransactionSignMachine, Vec<u8>) {
) -> (TransactionSignMachine, Self::Preprocess) {
// Iterate over each CLSAG calling preprocess
let mut serialized = Vec::with_capacity(
// D_{G, H}, E_{G, H}, DLEqs, key image addendum
self.clsags.len() * ((2 * (32 + 32)) + (2 * (32 + 32)) + ClsagMultisig::serialized_len()),
);
let mut preprocesses = Vec::with_capacity(self.clsags.len());
let clsags = self
.clsags
.drain(..)
.map(|clsag| {
let (clsag, preprocess) = clsag.preprocess(rng);
serialized.extend(&preprocess);
preprocesses.push(preprocess);
clsag
})
.collect();
let our_preprocess = serialized.clone();
let our_preprocess = preprocesses.clone();
// We could add further entropy here, and previous versions of this library did so
// As of right now, the multisig's key, the inputs being spent, and the FROST data itself
@ -212,33 +206,35 @@ impl PreprocessMachine for TransactionMachine {
our_preprocess,
},
serialized,
preprocesses,
)
}
}
impl SignMachine<Transaction> for TransactionSignMachine {
type Preprocess = Vec<Preprocess<Ed25519, ClsagAddendum>>;
type SignatureShare = Vec<SignatureShare<Ed25519>>;
type SignatureMachine = TransactionSignatureMachine;
fn sign<Re: Read>(
fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
self.clsags.iter().map(|clsag| clsag.read_preprocess(reader)).collect()
}
fn sign(
mut self,
mut commitments: HashMap<u16, Re>,
mut commitments: HashMap<u16, Self::Preprocess>,
msg: &[u8],
) -> Result<(TransactionSignatureMachine, Vec<u8>), FrostError> {
) -> Result<(TransactionSignatureMachine, Self::SignatureShare), FrostError> {
if !msg.is_empty() {
Err(FrostError::InternalError(
"message was passed to the TransactionMachine when it generates its own",
))?;
}
// FROST commitments and their DLEqs, and the image and its DLEq
const CLSAG_LEN: usize = (2 * (32 + 32)) + (2 * (32 + 32)) + ClsagMultisig::serialized_len();
// Convert the unified commitments to a Vec of the individual commitments
let mut images = vec![EdwardsPoint::identity(); self.clsags.len()];
let mut commitments = (0 .. self.clsags.len())
.map(|c| {
let mut buf = [0; CLSAG_LEN];
self
.included
.iter()
@ -248,31 +244,27 @@ impl SignMachine<Transaction> for TransactionSignMachine {
// transcripts cloned from this TX's initial premise's transcript. For our TX
// transcript to have the CLSAG data for entropy, it'll have to be added ourselves here
self.transcript.append_message(b"participant", &(*l).to_be_bytes());
if *l == self.i {
buf.copy_from_slice(self.our_preprocess.drain(.. CLSAG_LEN).as_slice());
let preprocess = if *l == self.i {
self.our_preprocess[c].clone()
} else {
commitments
.get_mut(l)
.ok_or(FrostError::MissingParticipant(*l))?
.read_exact(&mut buf)
.map_err(|_| FrostError::InvalidCommitment(*l))?;
commitments.get_mut(l).ok_or(FrostError::MissingParticipant(*l))?[c].clone()
};
{
let mut buf = vec![];
preprocess.write(&mut buf).unwrap();
self.transcript.append_message(b"preprocess", &buf);
}
self.transcript.append_message(b"preprocess", &buf);
// While here, calculate the key image
// Clsag will parse/calculate/validate this as needed, yet doing so here as well
// provides the easiest API overall, as this is where the TX is (which needs the key
// images in its message), along with where the outputs are determined (where our
// outputs may need these in order to guarantee uniqueness)
images[c] += CompressedEdwardsY(
buf[(CLSAG_LEN - 96) .. (CLSAG_LEN - 64)]
.try_into()
.map_err(|_| FrostError::InvalidCommitment(*l))?,
)
.decompress()
.ok_or(FrostError::InvalidCommitment(*l))?;
images[c] += preprocess.addendum.key_image.0;
Ok((*l, Cursor::new(buf)))
Ok((*l, preprocess))
})
.collect::<Result<HashMap<_, _>, _>>()
})
@ -346,37 +338,39 @@ impl SignMachine<Transaction> for TransactionSignMachine {
let msg = tx.signature_hash();
// Iterate over each CLSAG calling sign
let mut serialized = Vec::with_capacity(self.clsags.len() * 32);
let mut shares = Vec::with_capacity(self.clsags.len());
let clsags = self
.clsags
.drain(..)
.map(|clsag| {
let (clsag, share) = clsag.sign(commitments.remove(0), &msg)?;
serialized.extend(&share);
shares.push(share);
Ok(clsag)
})
.collect::<Result<_, _>>()?;
Ok((TransactionSignatureMachine { tx, clsags }, serialized))
Ok((TransactionSignatureMachine { tx, clsags }, shares))
}
}
impl SignatureMachine<Transaction> for TransactionSignatureMachine {
fn complete<Re: Read>(self, mut shares: HashMap<u16, Re>) -> Result<Transaction, FrostError> {
type SignatureShare = Vec<SignatureShare<Ed25519>>;
fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare> {
self.clsags.iter().map(|clsag| clsag.read_share(reader)).collect()
}
fn complete(
mut self,
shares: HashMap<u16, Self::SignatureShare>,
) -> Result<Transaction, FrostError> {
let mut tx = self.tx;
match tx.rct_signatures.prunable {
RctPrunable::Null => panic!("Signing for RctPrunable::Null"),
RctPrunable::Clsag { ref mut clsags, ref mut pseudo_outs, .. } => {
for clsag in self.clsags {
for (c, clsag) in self.clsags.drain(..).enumerate() {
let (clsag, pseudo_out) = clsag.complete(
shares
.iter_mut()
.map(|(l, shares)| {
let mut buf = [0; 32];
shares.read_exact(&mut buf).map_err(|_| FrostError::InvalidShare(*l))?;
Ok((*l, Cursor::new(buf)))
})
.collect::<Result<HashMap<_, _>, _>>()?,
shares.iter().map(|(l, shares)| (*l, shares[c].clone())).collect::<HashMap<_, _>>(),
)?;
clsags.push(clsag);
pseudo_outs.push(pseudo_out);

View file

@ -0,0 +1,49 @@
[package]
name = "ciphersuite"
version = "0.1.1"
description = "Ciphersuites built around ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["ciphersuite", "ff", "group"]
edition = "2021"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
rand_core = "0.6"
zeroize = { version = "1.5", features = ["zeroize_derive"] }
subtle = "2"
digest = "0.10"
sha2 = { version = "0.10", optional = true }
sha3 = { version = "0.10", optional = true }
ff = { version = "0.12", features = ["bits"] }
group = "0.12"
dalek-ff-group = { path = "../dalek-ff-group", version = "^0.1.2", optional = true }
elliptic-curve = { version = "0.12", features = ["hash2curve"], optional = true }
p256 = { version = "0.11", features = ["arithmetic", "bits", "hash2curve"], optional = true }
k256 = { version = "0.11", features = ["arithmetic", "bits", "hash2curve"], optional = true }
minimal-ed448 = { path = "../ed448", version = "0.1", optional = true }
[features]
std = []
dalek = ["sha2", "dalek-ff-group"]
ed25519 = ["dalek"]
ristretto = ["dalek"]
kp256 = ["sha2", "elliptic-curve"]
p256 = ["kp256", "dep:p256"]
secp256k1 = ["kp256", "k256"]
ed448 = ["sha3", "minimal-ed448"]
default = ["std"]

View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021-2022 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -0,0 +1,3 @@
# Ciphersuite
Ciphersuites for elliptic curves premised on ff/group.

View file

@ -0,0 +1,44 @@
use zeroize::Zeroize;
use sha2::{Digest, Sha512};
use group::Group;
use dalek_ff_group::Scalar;
use crate::Ciphersuite;
/// Define a Ciphersuite around a curve implemented by dalek-ff-group.
///
/// Each expansion declares a unit struct implementing Ciphersuite with dalek-ff-group's
/// Scalar as the field, the given point type as the group, and SHA-512 as the hash.
macro_rules! dalek_curve {
  (
    $feature: literal,
    $Ciphersuite: ident,
    $Point: ident,
    $ID: literal
  ) => {
    use dalek_ff_group::$Point;

    #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
    pub struct $Ciphersuite;
    impl Ciphersuite for $Ciphersuite {
      type F = Scalar;
      type G = $Point;
      type H = Sha512;

      const ID: &'static [u8] = $ID;

      fn generator() -> Self::G {
        $Point::generator()
      }

      // Hash to a scalar via SHA-512's wide (64-byte) output, prefixed with the DST
      fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
        Scalar::from_hash(Sha512::new_with_prefix(&[dst, data].concat()))
      }
    }
  };
}

// Ristretto is also built under cfg(test) so it's always available to the test suite
#[cfg(any(test, feature = "ristretto"))]
dalek_curve!("ristretto", Ristretto, RistrettoPoint, b"ristretto");

#[cfg(feature = "ed25519")]
dalek_curve!("ed25519", Ed25519, EdwardsPoint, b"edwards25519");

View file

@ -0,0 +1,67 @@
use zeroize::Zeroize;
use digest::{
typenum::U114, core_api::BlockSizeUser, Update, Output, OutputSizeUser, FixedOutput,
ExtendableOutput, XofReader, HashMarker, Digest,
};
use sha3::Shake256;
use group::Group;
use minimal_ed448::{scalar::Scalar, point::Point};
use crate::Ciphersuite;
// Re-define Shake256 as a traditional Digest to meet API expectations
// (Ciphersuite::H requires a fixed-output Digest; SHAKE256 is an XOF, so this wrapper reads
// exactly 114 bytes from the XOF as its "fixed" output).
#[derive(Clone, Default)]
pub struct Shake256_114(Shake256);
impl BlockSizeUser for Shake256_114 {
  type BlockSize = <Shake256 as BlockSizeUser>::BlockSize;
  fn block_size() -> usize {
    Shake256::block_size()
  }
}
impl OutputSizeUser for Shake256_114 {
  // 114 bytes (2 * 57), matching the width consumed by Scalar::wide_reduce below
  type OutputSize = U114;
  fn output_size() -> usize {
    114
  }
}
impl Update for Shake256_114 {
  fn update(&mut self, data: &[u8]) {
    self.0.update(data);
  }
  fn chain(mut self, data: impl AsRef<[u8]>) -> Self {
    Update::update(&mut self, data.as_ref());
    self
  }
}
impl FixedOutput for Shake256_114 {
  fn finalize_fixed(self) -> Output<Self> {
    let mut res = Default::default();
    FixedOutput::finalize_into(self, &mut res);
    res
  }
  fn finalize_into(self, out: &mut Output<Self>) {
    // Read the first 114 bytes of the XOF stream as the fixed output
    let mut reader = self.0.finalize_xof();
    reader.read(out);
  }
}
impl HashMarker for Shake256_114 {}

/// Ciphersuite for Ed448, using SHAKE256 (truncated to 114 bytes) as its hash.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct Ed448;
impl Ciphersuite for Ed448 {
  type F = Scalar;
  type G = Point;
  type H = Shake256_114;

  const ID: &'static [u8] = b"ed448";

  fn generator() -> Self::G {
    Point::generator()
  }

  // Hash to a scalar by wide reduction of the 114-byte digest
  fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
    Scalar::wide_reduce(Self::H::digest(&[dst, data].concat()).as_ref().try_into().unwrap())
  }
}

View file

@ -0,0 +1,72 @@
use zeroize::Zeroize;
use sha2::{Digest, Sha256};
use group::ff::{Field, PrimeField};
use elliptic_curve::{
generic_array::GenericArray,
bigint::{Encoding, U384},
hash2curve::{Expander, ExpandMsg, ExpandMsgXmd},
};
use crate::Ciphersuite;
/// Define a Ciphersuite around one of the k256/p256 crates.
///
/// Each expansion declares a unit struct implementing Ciphersuite with the crate's Scalar
/// and ProjectivePoint types and SHA-256 as the hash.
macro_rules! kp_curve {
  (
    $feature: literal,
    $lib: ident,
    $Ciphersuite: ident,
    $ID: literal
  ) => {
    #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
    pub struct $Ciphersuite;
    impl Ciphersuite for $Ciphersuite {
      type F = $lib::Scalar;
      type G = $lib::ProjectivePoint;
      type H = Sha256;

      const ID: &'static [u8] = $ID;

      fn generator() -> Self::G {
        $lib::ProjectivePoint::GENERATOR
      }

      fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F {
        // DSTs over 255 bytes are hashed down first (the "H2C-OVERSIZE-DST-" convention
        // from the hash-to-curve specification); expand_message_xmd rejects longer DSTs
        let mut dst = dst;
        let oversize = Sha256::digest([b"H2C-OVERSIZE-DST-".as_ref(), dst].concat());
        if dst.len() > 255 {
          dst = oversize.as_ref();
        }

        // While one of these two libraries does support directly hashing to the Scalar field, the
        // other doesn't. While that's probably an oversight, this is a universally working method

        // Reconstruct the (256-bit) scalar modulus as a 384-bit integer: (0 - 1) + 1
        let mut modulus = [0; 48];
        modulus[16 ..].copy_from_slice(&(Self::F::zero() - Self::F::one()).to_bytes());
        let modulus = U384::from_be_slice(&modulus).wrapping_add(&U384::ONE);

        // Expand the message to 48 bytes and reduce mod the modulus (a wide reduction,
        // keeping the bias negligible)
        let mut unreduced = U384::from_be_bytes({
          let mut bytes = [0; 48];
          ExpandMsgXmd::<Sha256>::expand_message(&[msg], dst, 48).unwrap().fill_bytes(&mut bytes);
          bytes
        })
        .reduce(&modulus)
        .unwrap()
        .to_be_bytes();

        // The value is fully reduced, so the low 32 bytes are a canonical repr
        let mut array = *GenericArray::from_slice(&unreduced[16 ..]);
        let res = $lib::Scalar::from_repr(array).unwrap();
        unreduced.zeroize();
        array.zeroize();
        res
      }
    }
  };
}

#[cfg(feature = "p256")]
kp_curve!("p256", p256, P256, b"P-256");

#[cfg(feature = "secp256k1")]
kp_curve!("secp256k1", k256, Secp256k1, b"secp256k1");

View file

@ -0,0 +1,106 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
use core::fmt::Debug;
#[cfg(feature = "std")]
use std::io::{self, Read};
use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize;
use subtle::ConstantTimeEq;
use digest::{core_api::BlockSizeUser, Digest};
use group::{
ff::{Field, PrimeField, PrimeFieldBits},
Group, GroupOps,
prime::PrimeGroup,
};
#[cfg(feature = "std")]
use group::GroupEncoding;
#[cfg(feature = "dalek")]
mod dalek;
#[cfg(feature = "ristretto")]
pub use dalek::Ristretto;
#[cfg(feature = "ed25519")]
pub use dalek::Ed25519;
#[cfg(feature = "kp256")]
mod kp256;
#[cfg(feature = "secp256k1")]
pub use kp256::Secp256k1;
#[cfg(feature = "p256")]
pub use kp256::P256;
#[cfg(feature = "ed448")]
mod ed448;
#[cfg(feature = "ed448")]
pub use ed448::*;
/// Unified trait defining a ciphersuite around an elliptic curve.
pub trait Ciphersuite: Clone + Copy + PartialEq + Eq + Debug + Zeroize {
  /// Scalar field element type.
  // This is available via G::Scalar yet `C::G::Scalar` is ambiguous, forcing horrific accesses
  type F: PrimeField + PrimeFieldBits + Zeroize;
  /// Group element type.
  type G: Group<Scalar = Self::F> + GroupOps + PrimeGroup + Zeroize + ConstantTimeEq;
  /// Hash algorithm used with this curve.
  // Requires BlockSizeUser so it can be used within Hkdf which requires that.
  type H: Clone + BlockSizeUser + Digest;

  /// ID for this curve.
  const ID: &'static [u8];

  /// Generator for the group.
  // While group does provide this in its API, privacy coins may want to use a custom basepoint
  fn generator() -> Self::G;

  /// Hash the provided dst and message to a scalar.
  #[allow(non_snake_case)]
  fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F;

  /// Generate a random non-zero scalar.
  // Rerolls (in constant-time comparison per sample) until a non-zero scalar is drawn
  #[allow(non_snake_case)]
  fn random_nonzero_F<R: RngCore + CryptoRng>(rng: &mut R) -> Self::F {
    let mut res;
    while {
      res = Self::F::random(&mut *rng);
      res.ct_eq(&Self::F::zero()).into()
    } {}
    res
  }

  /// Read a canonical scalar from something implementing std::io::Read.
  ///
  /// Errors if the read fails or the encoding is non-canonical. The read encoding is
  /// zeroized before returning.
  #[cfg(feature = "std")]
  #[allow(non_snake_case)]
  fn read_F<R: Read>(reader: &mut R) -> io::Result<Self::F> {
    let mut encoding = <Self::F as PrimeField>::Repr::default();
    reader.read_exact(encoding.as_mut())?;

    // ff mandates this is canonical
    let res = Option::<Self::F>::from(Self::F::from_repr(encoding))
      .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "non-canonical scalar"));
    for b in encoding.as_mut() {
      b.zeroize();
    }
    res
  }

  /// Read a canonical point from something implementing std::io::Read.
  ///
  /// Errors if the read fails, the point is invalid, or the encoding wasn't canonical
  /// (checked by re-serializing and comparing against the read bytes).
  #[cfg(feature = "std")]
  #[allow(non_snake_case)]
  fn read_G<R: Read>(reader: &mut R) -> io::Result<Self::G> {
    let mut encoding = <Self::G as GroupEncoding>::Repr::default();
    reader.read_exact(encoding.as_mut())?;

    let point = Option::<Self::G>::from(Self::G::from_bytes(&encoding))
      .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "invalid point"))?;
    if point.to_bytes().as_ref() != encoding.as_ref() {
      Err(io::Error::new(io::ErrorKind::Other, "non-canonical point"))?;
    }
    Ok(point)
  }
}

42
crypto/dkg/Cargo.toml Normal file
View file

@ -0,0 +1,42 @@
[package]
name = "dkg"
version = "0.1.0"
description = "Distributed key generation over ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
thiserror = "1"
rand_core = "0.6"
zeroize = { version = "1.5", features = ["zeroize_derive"] }
subtle = "2"
hex = "0.4"
digest = "0.10"
hkdf = "0.12"
chacha20 = { version = "0.9", features = ["zeroize"] }
group = "0.12"
ciphersuite = { path = "../ciphersuite", version = "0.1", features = ["std"] }
transcript = { package = "flexible-transcript", path = "../transcript", features = ["recommended"], version = "^0.1.3" }
multiexp = { path = "../multiexp", version = "0.2", features = ["batch"] }
schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "0.1.0" }
dleq = { path = "../dleq", version = "^0.1.2", features = ["serialize"] }
[features]
tests = []

21
crypto/dkg/LICENSE Normal file
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021-2022 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

12
crypto/dkg/README.md Normal file
View file

@ -0,0 +1,12 @@
# Distributed Key Generation
A collection of implementations of various distributed key generation protocols.
All included protocols resolve into the provided `Threshold` types, intended to
enable their modularity.
Additional utilities around them, such as promotion from one generator to
another, are also provided.
Currently included is the two-round protocol from the
[FROST paper](https://eprint.iacr.org/2020/852).

458
crypto/dkg/src/frost.rs Normal file
View file

@ -0,0 +1,458 @@
use std::{
marker::PhantomData,
io::{self, Read, Write},
collections::HashMap,
};
use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, ZeroizeOnDrop};
use digest::Digest;
use hkdf::{Hkdf, hmac::SimpleHmac};
use chacha20::{
cipher::{crypto_common::KeyIvInit, StreamCipher},
Key as Cc20Key, Nonce as Cc20Iv, ChaCha20,
};
use group::{
ff::{Field, PrimeField},
GroupEncoding,
};
use ciphersuite::Ciphersuite;
use multiexp::{multiexp_vartime, BatchVerifier};
use schnorr::SchnorrSignature;
use crate::{DkgError, ThresholdParams, ThresholdCore, validate_map};
/// Schnorr PoK challenge, binding the context, participant index, nonce commitment R, and
/// the commitments message Am.
#[allow(non_snake_case)]
fn challenge<C: Ciphersuite>(context: &str, l: u16, R: &[u8], Am: &[u8]) -> C::F {
  const DST: &[u8] = b"FROST Schnorr Proof of Knowledge";
  // Hash the variable-length context to get a fixed-size prefix for the message
  let mut msg = Vec::new();
  msg.extend_from_slice(C::H::digest(context.as_bytes()).as_ref());
  msg.extend_from_slice(&l.to_be_bytes());
  msg.extend_from_slice(R);
  msg.extend_from_slice(Am);
  C::hash_to_F(DST, &msg)
}
/// Commitments message to be broadcast to all other parties.
///
/// Carries the commitments to our polynomial's coefficients, our ephemeral encryption key
/// for the next round's secret shares, and a Schnorr proof of knowledge of the secret
/// (the polynomial's constant term).
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct Commitments<C: Ciphersuite> {
// Commitments to each of the t coefficients of our secret polynomial
commitments: Vec<C::G>,
// Public key used when encrypting the secret shares sent in the next round
enc_key: C::G,
// The exact bytes broadcast, cached so write doesn't need to re-serialize
cached_msg: Vec<u8>,
// Proof of knowledge for the secret (the first coefficient)
sig: SchnorrSignature<C>,
}
impl<C: Ciphersuite> Drop for Commitments<C> {
fn drop(&mut self) {
self.zeroize();
}
}
impl<C: Ciphersuite> ZeroizeOnDrop for Commitments<C> {}
impl<C: Ciphersuite> Commitments<C> {
/// Read a Commitments message, validating every point read is canonical.
pub fn read<R: Read>(reader: &mut R, params: ThresholdParams) -> io::Result<Self> {
let mut commitments = Vec::with_capacity(params.t().into());
let mut cached_msg = vec![];
// Reads a group element while appending its raw bytes to cached_msg, preserving the
// exact serialization used as the signature's message
#[allow(non_snake_case)]
let mut read_G = || -> io::Result<C::G> {
let mut buf = <C::G as GroupEncoding>::Repr::default();
reader.read_exact(buf.as_mut())?;
let point = C::read_G(&mut buf.as_ref())?;
cached_msg.extend(buf.as_ref());
Ok(point)
};
// t coefficient commitments, followed by the encryption key
for _ in 0 .. params.t() {
commitments.push(read_G()?);
}
let enc_key = read_G()?;
Ok(Commitments { commitments, enc_key, cached_msg, sig: SchnorrSignature::read(reader)? })
}
/// Write the Commitments message to something implementing std::io::Write.
pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(&self.cached_msg)?;
self.sig.write(writer)
}
/// Serialize the Commitments message to a byte vector.
pub fn serialize(&self) -> Vec<u8> {
let mut buf = vec![];
self.write(&mut buf).unwrap();
buf
}
}
/// State machine to begin the key generation protocol.
pub struct KeyGenMachine<C: Ciphersuite> {
params: ThresholdParams,
context: String,
_curve: PhantomData<C>,
}
impl<C: Ciphersuite> KeyGenMachine<C> {
/// Creates a new machine to generate a key for the specified curve in the specified multisig.
// The context string should be unique among multisigs.
pub fn new(params: ThresholdParams, context: String) -> KeyGenMachine<C> {
KeyGenMachine { params, context, _curve: PhantomData }
}
/// Start generating a key according to the FROST DKG spec.
/// Returns a commitments message to be sent to all parties over an authenticated channel. If any
/// party submits multiple sets of commitments, they MUST be treated as malicious.
pub fn generate_coefficients<R: RngCore + CryptoRng>(
self,
rng: &mut R,
) -> (SecretShareMachine<C>, Commitments<C>) {
let t = usize::from(self.params.t);
let mut coefficients = Vec::with_capacity(t);
let mut commitments = Vec::with_capacity(t);
let mut cached_msg = vec![];
for i in 0 .. t {
// Step 1: Generate t random values to form a polynomial with
coefficients.push(C::random_nonzero_F(&mut *rng));
// Step 3: Generate public commitments
commitments.push(C::generator() * coefficients[i]);
cached_msg.extend(commitments[i].to_bytes().as_ref());
}
// Generate an encryption key for transmitting the secret shares
// It would probably be perfectly fine to use one of our polynomial elements, yet doing so
// puts the integrity of FROST at risk. While there's almost no way it could, as it's used in
// an ECDH with validated group elements, better to avoid any questions on it
let enc_key = C::random_nonzero_F(&mut *rng);
let pub_enc_key = C::generator() * enc_key;
cached_msg.extend(pub_enc_key.to_bytes().as_ref());
// Step 2: Provide a proof of knowledge
let mut r = C::random_nonzero_F(rng);
let sig = SchnorrSignature::<C>::sign(
coefficients[0],
// This could be deterministic as the PoK is a singleton never opened up to cooperative
// discussion
// There's no reason to spend the time and effort to make this deterministic besides a
// general obsession with canonicity and determinism though
r,
challenge::<C>(
&self.context,
self.params.i(),
(C::generator() * r).to_bytes().as_ref(),
&cached_msg,
),
);
// The nonce is no longer needed once the signature exists; clear it promptly
r.zeroize();
// Step 4: Broadcast
(
SecretShareMachine {
params: self.params,
context: self.context,
coefficients,
our_commitments: commitments.clone(),
enc_key,
},
Commitments { commitments, enc_key: pub_enc_key, cached_msg, sig },
)
}
}
/// Evaluate the polynomial defined by `coefficients` at x = l, using Horner's method.
///
/// `coefficients[0]` is the constant term. An empty slice evaluates to zero.
fn polynomial<F: PrimeField>(coefficients: &[F], l: u16) -> F {
  let x = F::from(u64::from(l));
  // Horner's rule: fold from the highest-order coefficient down, multiplying by x
  // between each addition
  coefficients.iter().rev().fold(F::zero(), |acc, coefficient| (acc * x) + *coefficient)
}
/// Secret share to be sent to the party it's intended for over an authenticated channel.
///
/// Holds the share in its encoded byte representation (encrypted while on the wire).
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct SecretShare<F: PrimeField>(F::Repr);
impl<F: PrimeField> Zeroize for SecretShare<F> {
fn zeroize(&mut self) {
// F::Repr doesn't implement Zeroize itself; zeroize its bytes directly
self.0.as_mut().zeroize()
}
}
impl<F: PrimeField> Drop for SecretShare<F> {
fn drop(&mut self) {
self.zeroize();
}
}
impl<F: PrimeField> ZeroizeOnDrop for SecretShare<F> {}
impl<F: PrimeField> SecretShare<F> {
/// Read a SecretShare from something implementing std::io::Read.
// No canonicity check happens here; the bytes are still encrypted, and validation is
// deferred until decryption in KeyMachine::complete
pub fn read<R: Read>(reader: &mut R) -> io::Result<Self> {
let mut repr = F::Repr::default();
reader.read_exact(repr.as_mut())?;
Ok(SecretShare(repr))
}
/// Write the SecretShare to something implementing std::io::Write.
pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(self.0.as_ref())
}
/// Serialize the SecretShare to a byte vector.
pub fn serialize(&self) -> Vec<u8> {
let mut buf = vec![];
self.write(&mut buf).unwrap();
buf
}
}
// Derive a pair of directional ChaCha20 ciphers (send, receive) from an ECDH output.
//
// Each direction's key/IV is derived from that direction's sender public-key bytes
// concatenated with the shared ECDH bytes, so the two parties use distinct keystreams
// per direction. All secret inputs are zeroized before returning.
//
// NOTE(review): this uses HKDF's extract step (not expand) as the KDF and slices the
// first 32/12 bytes of its output — this assumes C::H's output is at least 32 bytes;
// confirm for every ciphersuite used with this library.
fn create_ciphers<C: Ciphersuite>(
mut sender: <C::G as GroupEncoding>::Repr,
receiver: &mut <C::G as GroupEncoding>::Repr,
ecdh: &mut <C::G as GroupEncoding>::Repr,
) -> (ChaCha20, ChaCha20) {
let directional = |sender: &mut <C::G as GroupEncoding>::Repr| {
// Key = HKDF-extract(salt: "key", ikm: sender_bytes || ecdh_bytes)
let mut key = Cc20Key::default();
key.copy_from_slice(
&Hkdf::<C::H, SimpleHmac<C::H>>::extract(
Some(b"key"),
&[sender.as_ref(), ecdh.as_ref()].concat(),
)
.0
.as_ref()[.. 32],
);
// IV = HKDF-extract(salt: "iv", ikm: sender_bytes || ecdh_bytes)
let mut iv = Cc20Iv::default();
iv.copy_from_slice(
&Hkdf::<C::H, SimpleHmac<C::H>>::extract(
Some(b"iv"),
&[sender.as_ref(), ecdh.as_ref()].concat(),
)
.0
.as_ref()[.. 12],
);
// This direction's input bytes are no longer needed
sender.as_mut().zeroize();
let res = ChaCha20::new(&key, &iv);
// Clear the key/IV copies once the cipher has been constructed
<Cc20Key as AsMut<[u8]>>::as_mut(&mut key).zeroize();
<Cc20Iv as AsMut<[u8]>>::as_mut(&mut iv).zeroize();
res
};
let res = (directional(&mut sender), directional(receiver));
// The ECDH output is secret material; clear it before returning
ecdh.as_mut().zeroize();
res
}
/// Advancement of the key generation state machine.
///
/// Holds our secret polynomial coefficients and the ephemeral encryption key until the
/// secret shares are generated.
#[derive(Zeroize)]
pub struct SecretShareMachine<C: Ciphersuite> {
params: ThresholdParams,
context: String,
coefficients: Vec<C::F>,
our_commitments: Vec<C::G>,
enc_key: C::F,
}
impl<C: Ciphersuite> Drop for SecretShareMachine<C> {
fn drop(&mut self) {
self.zeroize()
}
}
impl<C: Ciphersuite> ZeroizeOnDrop for SecretShareMachine<C> {}
impl<C: Ciphersuite> SecretShareMachine<C> {
/// Verify the data from the previous round (canonicity, PoKs, message authenticity)
///
/// Returns every party's coefficient commitments (including our own) and the other
/// parties' encryption keys, both keyed by participant index.
fn verify_r1<R: RngCore + CryptoRng>(
&mut self,
rng: &mut R,
mut commitments: HashMap<u16, Commitments<C>>,
) -> Result<(HashMap<u16, Vec<C::G>>, HashMap<u16, C::G>), DkgError> {
validate_map(&commitments, &(1 ..= self.params.n()).collect::<Vec<_>>(), self.params.i())?;
let mut enc_keys = HashMap::new();
let mut batch = BatchVerifier::<u16, C::G>::new(commitments.len());
let mut commitments = commitments
.drain()
.map(|(l, mut msg)| {
enc_keys.insert(l, msg.enc_key);
msg.enc_key.zeroize();
// Step 5: Validate each proof of knowledge
// This is solely the prep step for the latter batch verification
msg.sig.batch_verify(
rng,
&mut batch,
l,
msg.commitments[0],
challenge::<C>(&self.context, l, msg.sig.R.to_bytes().as_ref(), &msg.cached_msg),
);
(l, msg.commitments.drain(..).collect::<Vec<_>>())
})
.collect::<HashMap<_, _>>();
// Verify every PoK at once, blaming the specific participant on failure
batch.verify_with_vartime_blame().map_err(DkgError::InvalidProofOfKnowledge)?;
// Include our own commitments so downstream code has the full set
commitments.insert(self.params.i, self.our_commitments.drain(..).collect());
Ok((commitments, enc_keys))
}
/// Continue generating a key.
/// Takes in everyone else's commitments. Returns a HashMap of secret shares to be sent over
/// authenticated channels to their relevant counterparties.
pub fn generate_secret_shares<R: RngCore + CryptoRng>(
mut self,
rng: &mut R,
commitments: HashMap<u16, Commitments<C>>,
) -> Result<(KeyMachine<C>, HashMap<u16, SecretShare<C::F>>), DkgError> {
let (commitments, mut enc_keys) = self.verify_r1(&mut *rng, commitments)?;
// Step 1: Generate secret shares for all other parties
let mut sender = (C::generator() * self.enc_key).to_bytes();
let mut ciphers = HashMap::new();
let mut res = HashMap::new();
for l in 1 ..= self.params.n() {
// Don't insert our own shares to the byte buffer which is meant to be sent around
// An app developer could accidentally send it. Best to keep this black boxed
if l == self.params.i() {
continue;
}
// Derive the directional ciphers via an ECDH with this party's encryption key
// NOTE(review): receiver.to_bytes() creates a temporary, so create_ciphers zeroizes
// that temporary rather than any persistent buffer
let (mut cipher_send, cipher_recv) = {
let receiver = enc_keys.get_mut(&l).unwrap();
let mut ecdh = (*receiver * self.enc_key).to_bytes();
create_ciphers::<C>(sender, &mut receiver.to_bytes(), &mut ecdh)
};
// Evaluate our polynomial at l and encrypt the result for transport
let mut share = polynomial(&self.coefficients, l);
let mut share_bytes = share.to_repr();
share.zeroize();
cipher_send.apply_keystream(share_bytes.as_mut());
drop(cipher_send);
ciphers.insert(l, cipher_recv);
res.insert(l, SecretShare::<C::F>(share_bytes));
share_bytes.as_mut().zeroize();
}
// The ephemeral encryption key has served its purpose
self.enc_key.zeroize();
sender.as_mut().zeroize();
// Calculate our own share
let share = polynomial(&self.coefficients, self.params.i());
self.coefficients.zeroize();
Ok((KeyMachine { params: self.params, secret: share, commitments, ciphers }, res))
}
}
/// Final step of the key generation protocol.
pub struct KeyMachine<C: Ciphersuite> {
params: ThresholdParams,
// Our own polynomial evaluated at our index (the start of our secret share)
secret: C::F,
// Per-sender ciphers for decrypting the shares we receive
ciphers: HashMap<u16, ChaCha20>,
// Every party's coefficient commitments, including our own
commitments: HashMap<u16, Vec<C::G>>,
}
impl<C: Ciphersuite> Zeroize for KeyMachine<C> {
fn zeroize(&mut self) {
self.params.zeroize();
self.secret.zeroize();
// cipher implements ZeroizeOnDrop and zeroizes on drop, yet doesn't implement Zeroize
// The following is redundant, as Rust should automatically handle dropping it, yet it shows
// awareness of this quirk and at least attempts to be comprehensive
for (_, cipher) in self.ciphers.drain() {
drop(cipher);
}
for (_, commitments) in self.commitments.iter_mut() {
commitments.zeroize();
}
}
}
impl<C: Ciphersuite> Drop for KeyMachine<C> {
fn drop(&mut self) {
self.zeroize()
}
}
impl<C: Ciphersuite> ZeroizeOnDrop for KeyMachine<C> {}
impl<C: Ciphersuite> KeyMachine<C> {
/// Complete key generation.
/// Takes in everyone elses' shares submitted to us. Returns a ThresholdCore object representing
/// the generated keys. Successful protocol completion MUST be confirmed by all parties before
/// these keys may be safely used.
pub fn complete<R: RngCore + CryptoRng>(
mut self,
rng: &mut R,
mut shares: HashMap<u16, SecretShare<C::F>>,
) -> Result<ThresholdCore<C>, DkgError> {
let mut secret_share = self.secret;
self.secret.zeroize();
validate_map(&shares, &(1 ..= self.params.n()).collect::<Vec<_>>(), self.params.i())?;
// Calculate the exponent for a given participant and apply it to a series of commitments
// Initially used with the actual commitments to verify the secret share, later used with
// stripes to generate the verification shares
let exponential = |i: u16, values: &[_]| {
let i = C::F::from(i.into());
let mut res = Vec::with_capacity(self.params.t().into());
(0 .. usize::from(self.params.t())).into_iter().fold(C::F::one(), |exp, l| {
res.push((exp, values[l]));
exp * i
});
res
};
let mut batch = BatchVerifier::new(shares.len());
for (l, mut share_bytes) in shares.drain() {
// Decrypt the share with the cipher derived for this sender
let mut cipher = self.ciphers.remove(&l).unwrap();
cipher.apply_keystream(share_bytes.0.as_mut());
drop(cipher);
// Reject shares which aren't canonical scalar encodings
let mut share: C::F =
Option::from(C::F::from_repr(share_bytes.0)).ok_or(DkgError::InvalidShare(l))?;
share_bytes.zeroize();
secret_share += share;
// This can be insecurely linearized from n * t to just n using the below sums for a given
// stripe. Doing so uses naive addition which is subject to malleability. The only way to
// ensure that malleability isn't present is to use this n * t algorithm, which runs
// per sender and not as an aggregate of all senders, which also enables blame
let mut values = exponential(self.params.i, &self.commitments[&l]);
values.push((-share, C::generator()));
share.zeroize();
batch.queue(rng, l, values);
}
// Verify every share against its sender's commitments, blaming the sender on failure
batch.verify_with_vartime_blame().map_err(DkgError::InvalidShare)?;
// Stripe commitments per t and sum them in advance. Calculating verification shares relies on
// these sums so preprocessing them is a massive speedup
// If these weren't just sums, yet the tables used in multiexp, this would be further optimized
// As of right now, each multiexp will regenerate them
let mut stripes = Vec::with_capacity(usize::from(self.params.t()));
for t in 0 .. usize::from(self.params.t()) {
stripes.push(self.commitments.values().map(|commitments| commitments[t]).sum());
}
// Calculate each user's verification share
let mut verification_shares = HashMap::new();
for i in 1 ..= self.params.n() {
verification_shares.insert(i, multiexp_vartime(&exponential(i, &stripes)));
}
// Removing this check would enable optimizing the above from t + (n * t) to t + ((n - 1) * t)
debug_assert_eq!(C::generator() * secret_share, verification_shares[&self.params.i()]);
Ok(ThresholdCore {
params: self.params,
secret_share,
group_key: stripes[0],
verification_shares,
})
}
}

399
crypto/dkg/src/lib.rs Normal file
View file

@ -0,0 +1,399 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
//! A collection of implementations of various distributed key generation protocols.
//! They all resolve into the provided Threshold types intended to enable their modularity.
//! Additional utilities around them, such as promotion from one generator to another, are also
//! provided.
use core::fmt::Debug;
use std::{io::Read, sync::Arc, collections::HashMap};
use thiserror::Error;
use zeroize::{Zeroize, ZeroizeOnDrop};
use group::{
ff::{Field, PrimeField},
GroupEncoding,
};
use ciphersuite::Ciphersuite;
/// The distributed key generation protocol described in the
/// [FROST paper](https://eprint.iacr.org/2020/852).
pub mod frost;
/// Promote keys between ciphersuites.
pub mod promote;
/// Tests for application-provided curves and algorithms.
#[cfg(any(test, feature = "tests"))]
pub mod tests;
/// Check that `map` holds a value for every participant in `included` except ourselves,
/// and no value for our own index.
pub(crate) fn validate_map<T>(
  map: &HashMap<u16, T>,
  included: &[u16],
  ours: u16,
) -> Result<(), DkgError> {
  // The map should hold everyone in `included` but us, hence the + 1
  if (map.len() + 1) != included.len() {
    return Err(DkgError::InvalidParticipantQuantity(included.len(), map.len() + 1));
  }
  for participant in included.iter().copied() {
    let present = map.contains_key(&participant);
    if participant == ours {
      // We never have an entry for ourselves
      if present {
        return Err(DkgError::DuplicatedIndex(participant));
      }
    } else if !present {
      return Err(DkgError::MissingParticipant(participant));
    }
  }
  Ok(())
}
/// Parameters for a multisig.
// These fields should not be made public as they should be static
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct ThresholdParams {
/// Participants needed to sign on behalf of the group.
t: u16,
/// Amount of participants.
n: u16,
/// Index of the participant being acted for.
// Indexes are 1-based; 0 is never a valid participant index (enforced by new)
i: u16,
}
impl ThresholdParams {
  /// Construct and validate a set of threshold parameters.
  ///
  /// Requires 0 < t <= n and 0 < i <= n; returns the corresponding DkgError otherwise.
  pub fn new(t: u16, n: u16, i: u16) -> Result<ThresholdParams, DkgError> {
    if (t == 0) || (n == 0) {
      return Err(DkgError::ZeroParameter(t, n));
    }
    // When t == n, this shouldn't be used (MuSig2 and other variants of MuSig exist for a
    // reason), but it's not invalid to do so
    if t > n {
      return Err(DkgError::InvalidRequiredQuantity(t, n));
    }
    if (i == 0) || (i > n) {
      return Err(DkgError::InvalidParticipantIndex(n, i));
    }
    Ok(ThresholdParams { t, n, i })
  }

  /// The threshold needed to sign on behalf of the group.
  pub fn t(&self) -> u16 {
    self.t
  }

  /// The total amount of participants.
  pub fn n(&self) -> u16 {
    self.n
  }

  /// Our participant index (1-based).
  pub fn i(&self) -> u16 {
    self.i
  }
}
/// Various errors possible during key generation/signing.
#[derive(Copy, Clone, Error, Debug)]
pub enum DkgError {
/// A supplied parameter (t or n) was zero.
#[error("a parameter was 0 (required {0}, participants {1})")]
ZeroParameter(u16, u16),
/// The required threshold t exceeded the amount of participants n.
#[error("invalid amount of required participants (max {1}, got {0})")]
InvalidRequiredQuantity(u16, u16),
/// A participant index was 0 or greater than n.
#[error("invalid participant index (0 < index <= {0}, yet index is {1})")]
InvalidParticipantIndex(u16, u16),
/// The signing set was too small or too large for the parameters.
#[error("invalid signing set")]
InvalidSigningSet,
/// A map of per-participant values held the wrong amount of entries.
#[error("invalid participant quantity (expected {0}, got {1})")]
InvalidParticipantQuantity(usize, usize),
/// A map of per-participant values held an entry for ourselves.
#[error("duplicated participant index ({0})")]
DuplicatedIndex(u16),
/// A map of per-participant values was missing a participant's entry.
#[error("missing participant {0}")]
MissingParticipant(u16),
/// A participant's Schnorr proof of knowledge failed to verify.
#[error("invalid proof of knowledge (participant {0})")]
InvalidProofOfKnowledge(u16),
/// A participant's secret share was non-canonical or inconsistent with their commitments.
#[error("invalid share (participant {0})")]
InvalidShare(u16),
/// An error internal to this library, such as a malformed serialization.
#[error("internal error ({0})")]
InternalError(&'static str),
}
/// Calculate the Lagrange coefficient, evaluated at 0, for party `i` within the signing set
/// `included`.
///
/// The result is the product over every other included index l of l / (l - i). Occurrences
/// of `i` itself within `included` are skipped, so the inversion below cannot divide by
/// zero.
pub fn lagrange<F: PrimeField>(i: u16, included: &[u16]) -> F {
  // Hoisted out of the loop as it's invariant across iterations
  let i_f = F::from(u64::from(i));
  let mut num = F::one();
  let mut denom = F::one();
  for l in included {
    if i == *l {
      continue;
    }
    // u16 always fits in a u64, so this conversion is infallible
    let share = F::from(u64::from(*l));
    num *= share;
    denom *= share - i_f;
  }
  // Safe as denom is only 0 if i is present in included, which the above loop skips
  num * denom.invert().unwrap()
}
/// Keys and verification shares generated by a DKG.
/// Called core as they're expected to be wrapped into an Arc before usage in various operations.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct ThresholdCore<C: Ciphersuite> {
/// Threshold Parameters.
params: ThresholdParams,
/// Secret share key.
secret_share: C::F,
/// Group key.
group_key: C::G,
/// Verification shares.
// Keyed by participant index (1 ..= n)
verification_shares: HashMap<u16, C::G>,
}
impl<C: Ciphersuite> Zeroize for ThresholdCore<C> {
fn zeroize(&mut self) {
self.params.zeroize();
self.secret_share.zeroize();
self.group_key.zeroize();
// HashMap doesn't implement Zeroize; clear each share individually
for (_, share) in self.verification_shares.iter_mut() {
share.zeroize();
}
}
}
impl<C: Ciphersuite> Drop for ThresholdCore<C> {
fn drop(&mut self) {
self.zeroize()
}
}
impl<C: Ciphersuite> ZeroizeOnDrop for ThresholdCore<C> {}
impl<C: Ciphersuite> ThresholdCore<C> {
/// Construct a ThresholdCore from its components, re-deriving the group key by
/// interpolating the first t verification shares.
pub(crate) fn new(
params: ThresholdParams,
secret_share: C::F,
verification_shares: HashMap<u16, C::G>,
) -> ThresholdCore<C> {
// included spans 0 ..= n with ours = 0, so the map is checked to hold exactly 1 ..= n
#[cfg(debug_assertions)]
validate_map(&verification_shares, &(0 ..= params.n).collect::<Vec<_>>(), 0).unwrap();
// Any t shares suffice for interpolation; the first t are used here
let t = (1 ..= params.t).collect::<Vec<_>>();
ThresholdCore {
params,
secret_share,
group_key: t.iter().map(|i| verification_shares[i] * lagrange::<C::F>(*i, &t)).sum(),
verification_shares,
}
}
/// The parameters these keys were generated with.
pub fn params(&self) -> ThresholdParams {
self.params
}
/// Our secret share of the group key.
pub fn secret_share(&self) -> C::F {
self.secret_share
}
/// The group's public key.
pub fn group_key(&self) -> C::G {
self.group_key
}
pub(crate) fn verification_shares(&self) -> HashMap<u16, C::G> {
self.verification_shares.clone()
}
/// Serialize these keys.
///
/// Format: length-prefixed curve ID, t, n, i (big-endian u16s), our secret share, then
/// every participant's verification share in index order.
pub fn serialize(&self) -> Vec<u8> {
let mut serialized = vec![];
serialized.extend(u32::try_from(C::ID.len()).unwrap().to_be_bytes());
serialized.extend(C::ID);
serialized.extend(self.params.t.to_be_bytes());
serialized.extend(self.params.n.to_be_bytes());
serialized.extend(self.params.i.to_be_bytes());
serialized.extend(self.secret_share.to_repr().as_ref());
for l in 1 ..= self.params.n {
serialized.extend(self.verification_shares[&l].to_bytes().as_ref());
}
serialized
}
/// Deserialize keys serialized by serialize, validating the embedded curve ID matches C.
pub fn deserialize<R: Read>(reader: &mut R) -> Result<ThresholdCore<C>, DkgError> {
{
let missing = DkgError::InternalError("ThresholdCore serialization is missing its curve");
let different = DkgError::InternalError("deserializing ThresholdCore for another curve");
// Check the length-prefixed curve ID before reading anything else
let mut id_len = [0; 4];
reader.read_exact(&mut id_len).map_err(|_| missing)?;
if u32::try_from(C::ID.len()).unwrap().to_be_bytes() != id_len {
Err(different)?;
}
let mut id = vec![0; C::ID.len()];
reader.read_exact(&mut id).map_err(|_| missing)?;
if id != C::ID {
Err(different)?;
}
}
let (t, n, i) = {
let mut read_u16 = || {
let mut value = [0; 2];
reader
.read_exact(&mut value)
.map_err(|_| DkgError::InternalError("missing participant quantities"))?;
Ok(u16::from_be_bytes(value))
};
(read_u16()?, read_u16()?, read_u16()?)
};
let secret_share =
C::read_F(reader).map_err(|_| DkgError::InternalError("invalid secret share"))?;
let mut verification_shares = HashMap::new();
for l in 1 ..= n {
verification_shares.insert(
l,
<C as Ciphersuite>::read_G(reader)
.map_err(|_| DkgError::InternalError("invalid verification share"))?,
);
}
// new re-derives the group key and (in debug builds) validates the map's coverage
Ok(ThresholdCore::new(
ThresholdParams::new(t, n, i).map_err(|_| DkgError::InternalError("invalid parameters"))?,
secret_share,
verification_shares,
))
}
}
/// Threshold keys usable for signing.
#[derive(Clone, Debug, Zeroize)]
pub struct ThresholdKeys<C: Ciphersuite> {
/// Core keys.
// Skipped during zeroize as the Arc is shared; ThresholdCore zeroizes itself on drop
#[zeroize(skip)]
core: Arc<ThresholdCore<C>>,
/// Offset applied to these keys.
pub(crate) offset: Option<C::F>,
}
// Manually implement Drop due to https://github.com/RustCrypto/utils/issues/786
impl<C: Ciphersuite> Drop for ThresholdKeys<C> {
fn drop(&mut self) {
self.zeroize()
}
}
impl<C: Ciphersuite> ZeroizeOnDrop for ThresholdKeys<C> {}
/// View of keys passed to algorithm implementations.
#[derive(Clone, Zeroize)]
pub struct ThresholdView<C: Ciphersuite> {
/// Group key, with any offset already applied.
group_key: C::G,
/// Participant indexes included in this signing session.
#[zeroize(skip)]
included: Vec<u16>,
/// Our secret share, with the Lagrange coefficient and offset share applied.
secret_share: C::F,
/// Per-participant verification shares, similarly adjusted.
#[zeroize(skip)]
verification_shares: HashMap<u16, C::G>,
}
impl<C: Ciphersuite> Drop for ThresholdView<C> {
fn drop(&mut self) {
self.zeroize()
}
}
impl<C: Ciphersuite> ZeroizeOnDrop for ThresholdView<C> {}
impl<C: Ciphersuite> ThresholdKeys<C> {
/// Wrap a ThresholdCore in an Arc for shared usage, with no offset applied.
pub fn new(core: ThresholdCore<C>) -> ThresholdKeys<C> {
ThresholdKeys { core: Arc::new(core), offset: None }
}
/// Offset the keys by a given scalar to allow for account and privacy schemes.
/// This offset is ephemeral and will not be included when these keys are serialized.
/// Keys offset multiple times will form a new offset of their sum.
pub fn offset(&self, offset: C::F) -> ThresholdKeys<C> {
let mut res = self.clone();
// Carry any existing offset
// Enables schemes like Monero's subaddresses which have a per-subaddress offset and then a
// one-time-key offset
res.offset = Some(offset + res.offset.unwrap_or_else(C::F::zero));
res
}
/// Returns the current offset in-use for these keys.
pub fn current_offset(&self) -> Option<C::F> {
self.offset
}
/// The parameters these keys were generated with.
pub fn params(&self) -> ThresholdParams {
self.core.params
}
/// Our secret share, without any offsetting applied.
pub fn secret_share(&self) -> C::F {
self.core.secret_share
}
/// Returns the group key with any offset applied.
pub fn group_key(&self) -> C::G {
self.core.group_key + (C::generator() * self.offset.unwrap_or_else(C::F::zero))
}
/// Returns all participants' verification shares without any offsetting.
pub(crate) fn verification_shares(&self) -> HashMap<u16, C::G> {
self.core.verification_shares()
}
/// Serialize the underlying keys. The offset, being ephemeral, is not serialized.
pub fn serialize(&self) -> Vec<u8> {
self.core.serialize()
}
/// Obtain a view of these keys for the specified signing set, applying the Lagrange
/// coefficients and splitting any offset equally across the included participants.
pub fn view(&self, included: &[u16]) -> Result<ThresholdView<C>, DkgError> {
if (included.len() < self.params().t.into()) || (usize::from(self.params().n) < included.len())
{
Err(DkgError::InvalidSigningSet)?;
}
// Divide the offset among the included parties so their adjusted shares still sum to it
let offset_share = self.offset.unwrap_or_else(C::F::zero) *
C::F::from(included.len().try_into().unwrap()).invert().unwrap();
let offset_verification_share = C::generator() * offset_share;
Ok(ThresholdView {
group_key: self.group_key(),
secret_share: (self.secret_share() * lagrange::<C::F>(self.params().i, included)) +
offset_share,
verification_shares: self
.verification_shares()
.iter()
.map(|(l, share)| {
(*l, (*share * lagrange::<C::F>(*l, included)) + offset_verification_share)
})
.collect(),
included: included.to_vec(),
})
}
}
impl<C: Ciphersuite> ThresholdView<C> {
/// The group key, with any offset already applied.
pub fn group_key(&self) -> C::G {
self.group_key
}
/// The participant indexes included in this signing session.
pub fn included(&self) -> Vec<u16> {
self.included.clone()
}
/// Our secret share, as adjusted for this signing set.
pub fn secret_share(&self) -> C::F {
self.secret_share
}
/// The adjusted verification share for the specified participant.
// Panics if l isn't a participant present in the view's map
pub fn verification_share(&self, l: u16) -> C::G {
self.verification_shares[&l]
}
}

View file

@ -9,50 +9,24 @@ use rand_core::{RngCore, CryptoRng};
use group::GroupEncoding;
use ciphersuite::Ciphersuite;
use transcript::{Transcript, RecommendedTranscript};
use dleq::DLEqProof;
use crate::{
curve::{CurveError, Curve},
FrostError, FrostCore, FrostKeys, validate_map,
};
use crate::{DkgError, ThresholdCore, ThresholdKeys, validate_map};
/// Promote a set of keys to another Curve definition.
pub trait CurvePromote<C2: Curve> {
/// Promote a set of keys to another Ciphersuite definition.
pub trait CiphersuitePromote<C2: Ciphersuite> {
#[doc(hidden)]
#[allow(non_snake_case)]
fn _bound_C2(_c2: C2) {
panic!()
}
fn promote(self) -> FrostKeys<C2>;
fn promote(self) -> ThresholdKeys<C2>;
}
// Implement promotion to different ciphersuites, panicking if the generators are different
// Commented due to lack of practical benefit. While it'd have interoperability benefits, those
// would have their own DKG process which isn't compatible anyways. This becomes unsafe code
// that'll never be used but we're bound to support
/*
impl<C1: Curve, C2: Curve> CurvePromote<C2> for FrostKeys<C1>
where
C2: Curve<F = C1::F, G = C1::G>,
{
fn promote(self) -> FrostKeys<C2> {
assert_eq!(C::GENERATOR, C2::GENERATOR);
FrostKeys {
core: Arc::new(FrostCore {
params: self.core.params,
secret_share: self.core.secret_share,
group_key: self.core.group_key,
verification_shares: self.core.verification_shares(),
}),
offset: None,
}
}
}
*/
fn transcript<G: GroupEncoding>(key: G, i: u16) -> RecommendedTranscript {
let mut transcript = RecommendedTranscript::new(b"FROST Generator Update");
transcript.append_message(b"group_key", key.to_bytes().as_ref());
@ -62,43 +36,49 @@ fn transcript<G: GroupEncoding>(key: G, i: u16) -> RecommendedTranscript {
/// Proof of valid promotion to another generator.
#[derive(Clone, Copy)]
pub struct GeneratorProof<C: Curve> {
pub struct GeneratorProof<C: Ciphersuite> {
share: C::G,
proof: DLEqProof<C::G>,
}
impl<C: Curve> GeneratorProof<C> {
pub fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
impl<C: Ciphersuite> GeneratorProof<C> {
pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(self.share.to_bytes().as_ref())?;
self.proof.serialize(writer)
}
pub fn deserialize<R: Read>(reader: &mut R) -> Result<GeneratorProof<C>, CurveError> {
pub fn read<R: Read>(reader: &mut R) -> io::Result<GeneratorProof<C>> {
Ok(GeneratorProof {
share: C::read_G(reader)?,
proof: DLEqProof::deserialize(reader).map_err(|_| CurveError::InvalidScalar)?,
share: <C as Ciphersuite>::read_G(reader)?,
proof: DLEqProof::deserialize(reader)?,
})
}
pub fn serialize(&self) -> Vec<u8> {
let mut buf = vec![];
self.write(&mut buf).unwrap();
buf
}
}
/// Promote a set of keys from one curve to another, where the elliptic curve is the same.
/// Since the Curve trait additionally specifies a generator, this provides an O(n) way to update
/// the generator used with keys. The key generation protocol itself is exponential.
pub struct GeneratorPromotion<C1: Curve, C2: Curve> {
base: FrostKeys<C1>,
/// Since the Ciphersuite trait additionally specifies a generator, this provides an O(n) way to
/// update the generator used with keys. The key generation protocol itself is exponential.
pub struct GeneratorPromotion<C1: Ciphersuite, C2: Ciphersuite> {
base: ThresholdKeys<C1>,
proof: GeneratorProof<C1>,
_c2: PhantomData<C2>,
}
impl<C1: Curve, C2: Curve> GeneratorPromotion<C1, C2>
impl<C1: Ciphersuite, C2: Ciphersuite> GeneratorPromotion<C1, C2>
where
C2: Curve<F = C1::F, G = C1::G>,
C2: Ciphersuite<F = C1::F, G = C1::G>,
{
/// Begin promoting keys from one curve to another. Returns a proof this share was properly
/// promoted.
pub fn promote<R: RngCore + CryptoRng>(
rng: &mut R,
base: FrostKeys<C1>,
base: ThresholdKeys<C1>,
) -> (GeneratorPromotion<C1, C2>, GeneratorProof<C1>) {
// Do a DLEqProof for the new generator
let proof = GeneratorProof {
@ -118,7 +98,7 @@ where
pub fn complete(
self,
proofs: &HashMap<u16, GeneratorProof<C1>>,
) -> Result<FrostKeys<C2>, FrostError> {
) -> Result<ThresholdKeys<C2>, DkgError> {
let params = self.base.params();
validate_map(proofs, &(1 ..= params.n).collect::<Vec<_>>(), params.i)?;
@ -135,12 +115,12 @@ where
&[C1::generator(), C2::generator()],
&[original_shares[&i], proof.share],
)
.map_err(|_| FrostError::InvalidProofOfKnowledge(i))?;
.map_err(|_| DkgError::InvalidProofOfKnowledge(i))?;
verification_shares.insert(i, proof.share);
}
Ok(FrostKeys {
core: Arc::new(FrostCore::new(params, self.base.secret_share(), verification_shares)),
Ok(ThresholdKeys {
core: Arc::new(ThresholdCore::new(params, self.base.secret_share(), verification_shares)),
offset: None,
})
}

View file

@ -0,0 +1,81 @@
use std::collections::HashMap;
use rand_core::{RngCore, CryptoRng};
use crate::{
Ciphersuite, ThresholdParams, ThresholdCore,
frost::{SecretShare, Commitments, KeyGenMachine},
tests::{THRESHOLD, PARTICIPANTS, clone_without},
};
/// Fully perform the FROST key generation algorithm.
///
/// Runs every participant's state machine locally, round-tripping each message through its
/// serialization to exercise the read/write paths, and asserts all participants agree on
/// the resulting group key and verification shares.
pub fn frost_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
rng: &mut R,
) -> HashMap<u16, ThresholdCore<C>> {
// Round 1: every participant generates coefficients and broadcasts commitments
let mut machines = HashMap::new();
let mut commitments = HashMap::new();
for i in 1 ..= PARTICIPANTS {
let machine = KeyGenMachine::<C>::new(
ThresholdParams::new(THRESHOLD, PARTICIPANTS, i).unwrap(),
"DKG Test Key Generation".to_string(),
);
let (machine, these_commitments) = machine.generate_coefficients(rng);
machines.insert(i, machine);
commitments.insert(
i,
// Round-trip via serialization; Commitments::read only uses t from the params, so the
// placeholder i of 1 is acceptable here
Commitments::read::<&[u8]>(
&mut these_commitments.serialize().as_ref(),
ThresholdParams { t: THRESHOLD, n: PARTICIPANTS, i: 1 },
)
.unwrap(),
);
}
// Round 2: everyone processes the others' commitments and produces secret shares
let mut secret_shares = HashMap::new();
let mut machines = machines
.drain()
.map(|(l, machine)| {
let (machine, mut shares) =
machine.generate_secret_shares(rng, clone_without(&commitments, &l)).unwrap();
// Round-trip each share via serialization as well
let shares = shares
.drain()
.map(|(l, share)| {
(l, SecretShare::<C::F>::read::<&[u8]>(&mut share.serialize().as_ref()).unwrap())
})
.collect::<HashMap<_, _>>();
secret_shares.insert(l, shares);
(l, machine)
})
.collect::<HashMap<_, _>>();
// Round 3: everyone completes with the shares addressed to them
let mut verification_shares = None;
let mut group_key = None;
machines
.drain()
.map(|(i, machine)| {
// Collect the shares the other participants sent to us
let mut our_secret_shares = HashMap::new();
for (l, shares) in &secret_shares {
if i == *l {
continue;
}
our_secret_shares.insert(*l, shares[&i].clone());
}
let these_keys = machine.complete(rng, our_secret_shares).unwrap();
// Verify the verification_shares are agreed upon
if verification_shares.is_none() {
verification_shares = Some(these_keys.verification_shares());
}
assert_eq!(verification_shares.as_ref().unwrap(), &these_keys.verification_shares());
// Verify the group keys are agreed upon
if group_key.is_none() {
group_key = Some(these_keys.group_key());
}
assert_eq!(group_key.unwrap(), these_keys.group_key());
(i, these_keys)
})
.collect::<HashMap<_, _>>()
}

View file

@ -0,0 +1,69 @@
use std::collections::HashMap;
use rand_core::{RngCore, CryptoRng};
use group::ff::Field;
use ciphersuite::Ciphersuite;
use crate::{ThresholdCore, ThresholdKeys, lagrange};
/// FROST generation test.
pub mod frost;
use frost::frost_gen;
// Promotion test.
mod promote;
use promote::test_generator_promotion;
/// Constant amount of participants to use when testing.
pub const PARTICIPANTS: u16 = 5;
/// Constant threshold of participants to use when signing.
pub const THRESHOLD: u16 = ((PARTICIPANTS / 3) * 2) + 1;
/// Clone a map without a specific value.
pub fn clone_without<K: Clone + std::cmp::Eq + std::hash::Hash, V: Clone>(
map: &HashMap<K, V>,
without: &K,
) -> HashMap<K, V> {
let mut res = map.clone();
res.remove(without).unwrap();
res
}
/// Recover the secret from a collection of keys.
pub fn recover_key<C: Ciphersuite>(keys: &HashMap<u16, ThresholdKeys<C>>) -> C::F {
let first = keys.values().next().expect("no keys provided");
assert!(keys.len() >= first.params().t().into(), "not enough keys provided");
let included = keys.keys().cloned().collect::<Vec<_>>();
let group_private = keys.iter().fold(C::F::zero(), |accum, (i, keys)| {
accum + (keys.secret_share() * lagrange::<C::F>(*i, &included))
});
assert_eq!(C::generator() * group_private, first.group_key(), "failed to recover keys");
group_private
}
/// Generate threshold keys for tests.
pub fn key_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
rng: &mut R,
) -> HashMap<u16, ThresholdKeys<C>> {
let res = frost_gen(rng)
.drain()
.map(|(i, core)| {
assert_eq!(
&ThresholdCore::<C>::deserialize::<&[u8]>(&mut core.serialize().as_ref()).unwrap(),
&core
);
(i, ThresholdKeys::new(core))
})
.collect();
assert_eq!(C::generator() * recover_key(&res), res[&1].group_key());
res
}
/// Run the test suite on a ciphersuite.
pub fn test_ciphersuite<R: RngCore + CryptoRng, C: Ciphersuite>(rng: &mut R) {
key_gen::<_, C>(rng);
test_generator_promotion::<_, C>(rng);
}

View file

@ -0,0 +1,60 @@
use std::{marker::PhantomData, collections::HashMap};
use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize;
use group::Group;
use ciphersuite::Ciphersuite;
use crate::{
promote::{GeneratorPromotion, GeneratorProof},
tests::{clone_without, key_gen, recover_key},
};
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
struct AltGenerator<C: Ciphersuite> {
_curve: PhantomData<C>,
}
impl<C: Ciphersuite> Ciphersuite for AltGenerator<C> {
type F = C::F;
type G = C::G;
type H = C::H;
const ID: &'static [u8] = b"Alternate Ciphersuite";
fn generator() -> Self::G {
C::G::generator() * <C as Ciphersuite>::hash_to_F(b"DKG Promotion Test", b"generator")
}
fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
<C as Ciphersuite>::hash_to_F(dst, data)
}
}
// Test promotion of threshold keys to another generator
pub(crate) fn test_generator_promotion<R: RngCore + CryptoRng, C: Ciphersuite>(rng: &mut R) {
let keys = key_gen::<_, C>(&mut *rng);
let mut promotions = HashMap::new();
let mut proofs = HashMap::new();
for (i, keys) in &keys {
let (promotion, proof) =
GeneratorPromotion::<_, AltGenerator<C>>::promote(&mut *rng, keys.clone());
promotions.insert(*i, promotion);
proofs.insert(*i, GeneratorProof::<C>::read::<&[u8]>(&mut proof.serialize().as_ref()).unwrap());
}
let new_group_key = AltGenerator::<C>::generator() * recover_key(&keys);
for (i, promoting) in promotions.drain() {
let promoted = promoting.complete(&clone_without(&proofs, &i)).unwrap();
assert_eq!(keys[&i].params(), promoted.params());
assert_eq!(keys[&i].secret_share(), promoted.secret_share());
assert_eq!(new_group_key, promoted.group_key());
for (l, verification_share) in promoted.verification_shares() {
assert_eq!(AltGenerator::<C>::generator() * keys[&l].secret_share(), verification_share);
}
}
}

View file

@ -1,6 +1,6 @@
[package]
name = "dleq"
version = "0.1.1"
version = "0.1.2"
description = "Implementation of single and cross-curve Discrete Log Equality proofs"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dleq"

View file

@ -61,7 +61,7 @@ pub enum DLEqError {
InvalidProof,
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct DLEqProof<G: PrimeGroup> {
c: G::Scalar,
s: G::Scalar,

View file

@ -1,6 +1,12 @@
#![no_std]
mod backend;
pub mod scalar;
pub use scalar::Scalar;
pub mod field;
pub use field::FieldElement;
pub mod point;
pub use point::Point;

View file

@ -1,6 +1,6 @@
[package]
name = "modular-frost"
version = "0.2.4"
version = "0.3.0"
description = "Modular implementation of FROST over ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/frost"
@ -22,40 +22,37 @@ subtle = "2"
hex = "0.4"
sha2 = { version = "0.10", optional = true }
sha3 = { version = "0.10", optional = true }
digest = "0.10"
hkdf = "0.12"
chacha20 = { version = "0.9", features = ["zeroize"] }
ff = "0.12"
group = "0.12"
dalek-ff-group = { path = "../dalek-ff-group", version = "^0.1.2", optional = true }
elliptic-curve = { version = "0.12", features = ["hash2curve"], optional = true }
p256 = { version = "0.11", features = ["arithmetic", "bits", "hash2curve"], optional = true }
k256 = { version = "0.11", features = ["arithmetic", "bits", "hash2curve"], optional = true }
minimal-ed448 = { path = "../ed448", version = "0.1", optional = true }
ciphersuite = { path = "../ciphersuite", version = "0.1", features = ["std"] }
transcript = { package = "flexible-transcript", path = "../transcript", features = ["recommended"], version = "^0.1.3" }
multiexp = { path = "../multiexp", version = "0.2", features = ["batch"] }
dleq = { path = "../dleq", version = "0.1", features = ["serialize"] }
schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "0.1.0" }
dleq = { path = "../dleq", version = "^0.1.2", features = ["serialize"] }
dkg = { path = "../dkg", version = "0.1.0" }
[dev-dependencies]
sha2 = "0.10"
dalek-ff-group = { path = "../dalek-ff-group", version = "^0.1.2" }
serde_json = "1"
[features]
dalek = ["sha2", "dalek-ff-group"]
ed25519 = ["dalek"]
ristretto = ["dalek"]
ed25519 = ["dalek-ff-group", "ciphersuite/ed25519"]
ristretto = ["dalek-ff-group", "ciphersuite/ristretto"]
kp256 = ["sha2", "elliptic-curve"]
p256 = ["kp256", "dep:p256"]
secp256k1 = ["kp256", "k256"]
secp256k1 = ["ciphersuite/secp256k1"]
p256 = ["ciphersuite/p256"]
ed448 = ["sha3", "minimal-ed448"]
ed448 = ["minimal-ed448", "ciphersuite/ed448"]
tests = []
tests = ["dkg/tests"]

View file

@ -10,4 +10,4 @@ integrating with existing systems.
This library offers ciphersuites compatible with the
[IETF draft](https://github.com/cfrg/draft-irtf-cfrg-frost). Currently, version
10 is supported.
11 is supported.

View file

@ -1,41 +1,61 @@
use core::{marker::PhantomData, fmt::Debug};
use std::io::Read;
use std::io::{self, Read, Write};
use rand_core::{RngCore, CryptoRng};
use transcript::Transcript;
use crate::{Curve, FrostError, FrostView, schnorr};
use crate::{Curve, FrostError, ThresholdView};
pub use schnorr::SchnorrSignature;
/// Write an addendum to a writer.
pub trait WriteAddendum {
fn write<W: Write>(&self, writer: &mut W) -> io::Result<()>;
}
impl WriteAddendum for () {
fn write<W: Write>(&self, _: &mut W) -> io::Result<()> {
Ok(())
}
}
/// Trait alias for the requirements to be used as an addendum.
pub trait Addendum: Clone + PartialEq + Debug + WriteAddendum {}
impl<A: Clone + PartialEq + Debug + WriteAddendum> Addendum for A {}
/// Algorithm trait usable by the FROST signing machine to produce signatures..
pub trait Algorithm<C: Curve>: Clone {
/// The transcript format this algorithm uses. This likely should NOT be the IETF-compatible
/// transcript included in this crate.
type Transcript: Transcript + Clone + Debug;
type Transcript: Clone + Debug + Transcript;
/// Serializable addendum, used in algorithms requiring more data than just the nonces.
type Addendum: Addendum;
/// The resulting type of the signatures this algorithm will produce.
type Signature: Clone + PartialEq + Debug;
/// Obtain a mutable borrow of the underlying transcript.
fn transcript(&mut self) -> &mut Self::Transcript;
/// Obtain the list of nonces to generate, as specified by the basepoints to create commitments.
/// against per-nonce. These are not committed to by FROST on the underlying transcript.
/// Obtain the list of nonces to generate, as specified by the generators to create commitments
/// against per-nonce
fn nonces(&self) -> Vec<Vec<C::G>>;
/// Generate an addendum to FROST"s preprocessing stage.
fn preprocess_addendum<R: RngCore + CryptoRng>(
&mut self,
rng: &mut R,
params: &FrostView<C>,
) -> Vec<u8>;
params: &ThresholdView<C>,
) -> Self::Addendum;
/// Proccess the addendum for the specified participant. Guaranteed to be ordered.
fn process_addendum<Re: Read>(
/// Read an addendum from a reader.
fn read_addendum<R: Read>(&self, reader: &mut R) -> io::Result<Self::Addendum>;
/// Proccess the addendum for the specified participant. Guaranteed to be called in order.
fn process_addendum(
&mut self,
params: &FrostView<C>,
params: &ThresholdView<C>,
l: u16,
reader: &mut Re,
reader: Self::Addendum,
) -> Result<(), FrostError>;
/// Sign a share with the given secret/nonce.
@ -44,7 +64,7 @@ pub trait Algorithm<C: Curve>: Clone {
/// The nonce will already have been processed into the combined form d + (e * p).
fn sign_share(
&mut self,
params: &FrostView<C>,
params: &ThresholdView<C>,
nonce_sums: &[Vec<C::G>],
nonces: &[C::F],
msg: &[u8],
@ -116,6 +136,7 @@ impl<C: Curve, H: Hram<C>> Schnorr<C, H> {
impl<C: Curve, H: Hram<C>> Algorithm<C> for Schnorr<C, H> {
type Transcript = IetfTranscript;
type Addendum = ();
type Signature = SchnorrSignature<C>;
fn transcript(&mut self) -> &mut Self::Transcript {
@ -126,51 +147,36 @@ impl<C: Curve, H: Hram<C>> Algorithm<C> for Schnorr<C, H> {
vec![vec![C::generator()]]
}
fn preprocess_addendum<R: RngCore + CryptoRng>(
&mut self,
_: &mut R,
_: &FrostView<C>,
) -> Vec<u8> {
vec![]
fn preprocess_addendum<R: RngCore + CryptoRng>(&mut self, _: &mut R, _: &ThresholdView<C>) {}
fn read_addendum<R: Read>(&self, _: &mut R) -> io::Result<Self::Addendum> {
Ok(())
}
fn process_addendum<Re: Read>(
&mut self,
_: &FrostView<C>,
_: u16,
_: &mut Re,
) -> Result<(), FrostError> {
fn process_addendum(&mut self, _: &ThresholdView<C>, _: u16, _: ()) -> Result<(), FrostError> {
Ok(())
}
fn sign_share(
&mut self,
params: &FrostView<C>,
params: &ThresholdView<C>,
nonce_sums: &[Vec<C::G>],
nonces: &[C::F],
msg: &[u8],
) -> C::F {
let c = H::hram(&nonce_sums[0][0], &params.group_key(), msg);
self.c = Some(c);
schnorr::sign::<C>(params.secret_share(), nonces[0], c).s
SchnorrSignature::<C>::sign(params.secret_share(), nonces[0], c).s
}
#[must_use]
fn verify(&self, group_key: C::G, nonces: &[Vec<C::G>], sum: C::F) -> Option<Self::Signature> {
let sig = SchnorrSignature { R: nonces[0][0], s: sum };
if schnorr::verify::<C>(group_key, self.c.unwrap(), &sig) {
Some(sig)
} else {
None
}
Some(sig).filter(|sig| sig.verify(group_key, self.c.unwrap()))
}
#[must_use]
fn verify_share(&self, verification_share: C::G, nonces: &[Vec<C::G>], share: C::F) -> bool {
schnorr::verify::<C>(
verification_share,
self.c.unwrap(),
&SchnorrSignature { R: nonces[0][0], s: share },
)
SchnorrSignature::<C> { R: nonces[0][0], s: share }.verify(verification_share, self.c.unwrap())
}
}

View file

@ -1,10 +1,9 @@
use zeroize::Zeroize;
use digest::Digest;
use sha2::{Digest, Sha512};
use group::Group;
use dalek_ff_group::Scalar;
use ciphersuite::Ciphersuite;
use crate::{curve::Curve, algorithm::Hram};
macro_rules! dalek_curve {
@ -13,49 +12,22 @@ macro_rules! dalek_curve {
$Curve: ident,
$Hram: ident,
$Point: ident,
$ID: literal,
$CONTEXT: literal,
$chal: literal,
$chal: literal
) => {
use dalek_ff_group::$Point;
#[cfg_attr(docsrs, doc(cfg(feature = $feature)))]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct $Curve;
impl $Curve {
fn hash(dst: &[u8], data: &[u8]) -> Sha512 {
Sha512::new().chain_update(&[$CONTEXT.as_ref(), dst, data].concat())
}
}
pub use ciphersuite::$Curve;
impl Curve for $Curve {
type F = Scalar;
type G = $Point;
const ID: &'static [u8] = $ID;
fn generator() -> Self::G {
$Point::generator()
}
fn hash_to_vec(dst: &[u8], data: &[u8]) -> Vec<u8> {
Self::hash(dst, data).finalize().to_vec()
}
fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
Scalar::from_hash(Self::hash(dst, data))
}
const CONTEXT: &'static [u8] = $CONTEXT;
}
#[cfg_attr(docsrs, doc(cfg(feature = $feature)))]
#[derive(Copy, Clone)]
pub struct $Hram;
impl Hram<$Curve> for $Hram {
#[allow(non_snake_case)]
fn hram(R: &$Point, A: &$Point, m: &[u8]) -> Scalar {
let mut hash = Sha512::new();
fn hram(R: &<$Curve as Ciphersuite>::G, A: &<$Curve as Ciphersuite>::G, m: &[u8]) -> Scalar {
let mut hash = <$Curve as Ciphersuite>::H::new();
if $chal.len() != 0 {
hash.update(&[$CONTEXT.as_ref(), $chal].concat());
}
@ -67,24 +39,8 @@ macro_rules! dalek_curve {
};
}
#[cfg(any(test, feature = "ristretto"))]
dalek_curve!(
"ristretto",
Ristretto,
IetfRistrettoHram,
RistrettoPoint,
b"ristretto",
b"FROST-RISTRETTO255-SHA512-v11",
b"chal",
);
#[cfg(feature = "ristretto")]
dalek_curve!("ristretto", Ristretto, IetfRistrettoHram, b"FROST-RISTRETTO255-SHA512-v11", b"chal");
#[cfg(feature = "ed25519")]
dalek_curve!(
"ed25519",
Ed25519,
IetfEd25519Hram,
EdwardsPoint,
b"edwards25519",
b"FROST-ED25519-SHA512-v11",
b"",
);
dalek_curve!("ed25519", Ed25519, IetfEd25519Hram, b"FROST-ED25519-SHA512-v11", b"");

View file

@ -1,41 +1,17 @@
use zeroize::Zeroize;
use digest::Digest;
use sha3::{digest::ExtendableOutput, Shake256};
use group::GroupEncoding;
use group::{Group, GroupEncoding};
use minimal_ed448::{scalar::Scalar, point::Point};
use minimal_ed448::{Scalar, Point};
pub use ciphersuite::{Shake256_114, Ed448};
use crate::{curve::Curve, algorithm::Hram};
const CONTEXT: &[u8] = b"FROST-ED448-SHAKE256-v11";
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct Ed448;
impl Ed448 {
fn hash(prefix: &[u8], context: &[u8], dst: &[u8], data: &[u8]) -> [u8; 114] {
let mut res = [0; 114];
Shake256::digest_xof(&[prefix, context, dst, data].concat(), &mut res);
res
}
}
impl Curve for Ed448 {
type F = Scalar;
type G = Point;
const ID: &'static [u8] = b"ed448";
fn generator() -> Self::G {
Point::generator()
}
fn hash_to_vec(dst: &[u8], data: &[u8]) -> Vec<u8> {
Self::hash(b"", CONTEXT, dst, data).as_ref().to_vec()
}
fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
Scalar::wide_reduce(Self::hash(b"", CONTEXT, dst, data))
}
const CONTEXT: &'static [u8] = CONTEXT;
}
#[derive(Copy, Clone)]
@ -43,12 +19,19 @@ pub struct Ietf8032Ed448Hram;
impl Ietf8032Ed448Hram {
#[allow(non_snake_case)]
pub fn hram(context: &[u8], R: &Point, A: &Point, m: &[u8]) -> Scalar {
Scalar::wide_reduce(Ed448::hash(
&[b"SigEd448".as_ref(), &[0, u8::try_from(context.len()).unwrap()]].concat(),
context,
b"",
&[R.to_bytes().as_ref(), A.to_bytes().as_ref(), m].concat(),
))
Scalar::wide_reduce(
Shake256_114::digest(
&[
&[b"SigEd448".as_ref(), &[0, u8::try_from(context.len()).unwrap()]].concat(),
context,
&[R.to_bytes().as_ref(), A.to_bytes().as_ref(), m].concat(),
]
.concat(),
)
.as_ref()
.try_into()
.unwrap(),
)
}
}

View file

@ -1,17 +1,6 @@
use zeroize::Zeroize;
use group::GroupEncoding;
use sha2::{Digest, Sha256};
use group::{
ff::{Field, PrimeField},
GroupEncoding,
};
use elliptic_curve::{
generic_array::GenericArray,
bigint::{Encoding, U384},
hash2curve::{Expander, ExpandMsg, ExpandMsgXmd},
};
use ciphersuite::Ciphersuite;
use crate::{curve::Curve, algorithm::Hram};
@ -19,87 +8,37 @@ macro_rules! kp_curve {
(
$feature: literal,
$lib: ident,
$Curve: ident,
$Hram: ident,
$ID: literal,
$CONTEXT: literal
) => {
#[cfg_attr(docsrs, doc(cfg(feature = $feature)))]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct $Curve;
impl $Curve {
fn hash(dst: &[u8], data: &[u8]) -> Sha256 {
Sha256::new().chain_update(&[$CONTEXT.as_ref(), dst, data].concat())
}
}
pub use ciphersuite::$Curve;
impl Curve for $Curve {
type F = $lib::Scalar;
type G = $lib::ProjectivePoint;
const ID: &'static [u8] = $ID;
fn generator() -> Self::G {
$lib::ProjectivePoint::GENERATOR
}
fn hash_to_vec(dst: &[u8], data: &[u8]) -> Vec<u8> {
Self::hash(dst, data).finalize().to_vec()
}
fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F {
let mut dst = &[$CONTEXT, dst].concat();
let oversize = Sha256::digest([b"H2C-OVERSIZE-DST-".as_ref(), dst].concat()).to_vec();
if dst.len() > 255 {
dst = &oversize;
}
// While one of these two libraries does support directly hashing to the Scalar field, the
// other doesn't. While that's probably an oversight, this is a universally working method
let mut modulus = vec![0; 16];
modulus.extend((Self::F::zero() - Self::F::one()).to_bytes());
let modulus = U384::from_be_slice(&modulus).wrapping_add(&U384::ONE);
let mut unreduced = U384::from_be_bytes({
let mut bytes = [0; 48];
ExpandMsgXmd::<Sha256>::expand_message(&[msg], dst, 48).unwrap().fill_bytes(&mut bytes);
bytes
})
.reduce(&modulus)
.unwrap()
.to_be_bytes();
let mut array = *GenericArray::from_slice(&unreduced[16 ..]);
let res = $lib::Scalar::from_repr(array).unwrap();
unreduced.zeroize();
array.zeroize();
res
}
const CONTEXT: &'static [u8] = $CONTEXT;
}
#[cfg_attr(docsrs, doc(cfg(feature = $feature)))]
#[derive(Clone)]
pub struct $Hram;
impl Hram<$Curve> for $Hram {
#[allow(non_snake_case)]
fn hram(R: &$lib::ProjectivePoint, A: &$lib::ProjectivePoint, m: &[u8]) -> $lib::Scalar {
$Curve::hash_to_F(b"chal", &[R.to_bytes().as_ref(), A.to_bytes().as_ref(), m].concat())
fn hram(
R: &<$Curve as Ciphersuite>::G,
A: &<$Curve as Ciphersuite>::G,
m: &[u8],
) -> <$Curve as Ciphersuite>::F {
<$Curve as Curve>::hash_to_F(
b"chal",
&[R.to_bytes().as_ref(), A.to_bytes().as_ref(), m].concat(),
)
}
}
};
}
#[cfg(feature = "p256")]
kp_curve!("p256", p256, P256, IetfP256Hram, b"P-256", b"FROST-P256-SHA256-v11");
kp_curve!("p256", P256, IetfP256Hram, b"FROST-P256-SHA256-v11");
#[cfg(feature = "secp256k1")]
kp_curve!(
"secp256k1",
k256,
Secp256k1,
IetfSecp256k1Hram,
b"secp256k1",
b"FROST-secp256k1-SHA256-v11"
);
kp_curve!("secp256k1", Secp256k1, IetfSecp256k1Hram, b"FROST-secp256k1-SHA256-v11");

View file

@ -1,24 +1,27 @@
use core::fmt::Debug;
use std::io::Read;
use thiserror::Error;
use std::io::{self, Read};
use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize;
use subtle::ConstantTimeEq;
use ff::{Field, PrimeField, PrimeFieldBits};
use group::{Group, GroupOps, GroupEncoding, prime::PrimeGroup};
use digest::Digest;
#[cfg(any(test, feature = "dalek"))]
use group::{
ff::{Field, PrimeField},
Group,
};
pub use ciphersuite::Ciphersuite;
#[cfg(any(feature = "ristretto", feature = "ed25519"))]
mod dalek;
#[cfg(any(test, feature = "ristretto"))]
#[cfg(feature = "ristretto")]
pub use dalek::{Ristretto, IetfRistrettoHram};
#[cfg(feature = "ed25519")]
pub use dalek::{Ed25519, IetfEd25519Hram};
#[cfg(feature = "kp256")]
#[cfg(any(feature = "secp256k1", feature = "p256"))]
mod kp256;
#[cfg(feature = "secp256k1")]
pub use kp256::{Secp256k1, IetfSecp256k1Hram};
@ -30,42 +33,23 @@ mod ed448;
#[cfg(feature = "ed448")]
pub use ed448::{Ed448, Ietf8032Ed448Hram, IetfEd448Hram};
/// Set of errors for curve-related operations, namely encoding and decoding.
#[derive(Clone, Error, Debug)]
pub enum CurveError {
#[error("invalid scalar")]
InvalidScalar,
#[error("invalid point")]
InvalidPoint,
}
/// Unified trait to manage an elliptic curve.
// This should be moved into its own crate if the need for generic cryptography over ff/group
// continues, which is the exact reason ff/group exists (to provide a generic interface)
// elliptic-curve exists, yet it doesn't really serve the same role, nor does it use &[u8]/Vec<u8>
// It uses GenericArray which will hopefully be deprecated as Rust evolves and doesn't offer enough
// advantages in the modern day to be worth the hassle -- Kayaba
pub trait Curve: Clone + Copy + PartialEq + Eq + Debug + Zeroize {
/// Scalar field element type.
// This is available via G::Scalar yet `C::G::Scalar` is ambiguous, forcing horrific accesses
type F: PrimeField + PrimeFieldBits + Zeroize;
/// Group element type.
type G: Group<Scalar = Self::F> + GroupOps + PrimeGroup + Zeroize + ConstantTimeEq;
/// ID for this curve.
const ID: &'static [u8];
/// Generator for the group.
// While group does provide this in its API, privacy coins may want to use a custom basepoint
fn generator() -> Self::G;
/// FROST Ciphersuite, except for the signing algorithm specific H2, making this solely the curve,
/// its associated hash function, and the functions derived from it.
pub trait Curve: Ciphersuite {
/// Context string for this curve.
const CONTEXT: &'static [u8];
/// Hash the given dst and data to a byte vector. Used to instantiate H4 and H5.
fn hash_to_vec(dst: &[u8], data: &[u8]) -> Vec<u8>;
fn hash_to_vec(dst: &[u8], data: &[u8]) -> Vec<u8> {
Self::H::digest(&[Self::CONTEXT, dst, data].concat()).as_ref().to_vec()
}
/// Field element from hash. Used during key gen and by other crates under Serai as a general
/// utility. Used to instantiate H1 and H3.
#[allow(non_snake_case)]
fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F;
fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F {
<Self as Ciphersuite>::hash_to_F(&[Self::CONTEXT, dst].concat(), msg)
}
/// Hash the message for the binding factor. H4 from the IETF draft.
fn hash_msg(msg: &[u8]) -> Vec<u8> {
@ -79,17 +63,7 @@ pub trait Curve: Clone + Copy + PartialEq + Eq + Debug + Zeroize {
/// Hash the commitments and message to calculate the binding factor. H1 from the IETF draft.
fn hash_binding_factor(binding: &[u8]) -> Self::F {
Self::hash_to_F(b"rho", binding)
}
#[allow(non_snake_case)]
fn random_F<R: RngCore + CryptoRng>(rng: &mut R) -> Self::F {
let mut res;
while {
res = Self::F::random(&mut *rng);
res.ct_eq(&Self::F::zero()).into()
} {}
res
<Self as Curve>::hash_to_F(b"rho", binding)
}
/// Securely generate a random nonce. H3 from the IETF draft.
@ -103,7 +77,7 @@ pub trait Curve: Clone + Copy + PartialEq + Eq + Debug + Zeroize {
let mut res;
while {
seed.extend(repr.as_ref());
res = Self::hash_to_F(b"nonce", &seed);
res = <Self as Curve>::hash_to_F(b"nonce", &seed);
res.ct_eq(&Self::F::zero()).into()
} {
rng.fill_bytes(&mut seed);
@ -117,40 +91,11 @@ pub trait Curve: Clone + Copy + PartialEq + Eq + Debug + Zeroize {
}
#[allow(non_snake_case)]
fn F_len() -> usize {
<Self::F as PrimeField>::Repr::default().as_ref().len()
}
#[allow(non_snake_case)]
fn G_len() -> usize {
<Self::G as GroupEncoding>::Repr::default().as_ref().len()
}
#[allow(non_snake_case)]
fn read_F<R: Read>(r: &mut R) -> Result<Self::F, CurveError> {
let mut encoding = <Self::F as PrimeField>::Repr::default();
r.read_exact(encoding.as_mut()).map_err(|_| CurveError::InvalidScalar)?;
// ff mandates this is canonical
let res =
Option::<Self::F>::from(Self::F::from_repr(encoding)).ok_or(CurveError::InvalidScalar);
for b in encoding.as_mut() {
b.zeroize();
fn read_G<R: Read>(reader: &mut R) -> io::Result<Self::G> {
let res = <Self as Ciphersuite>::read_G(reader)?;
if res.is_identity().into() {
Err(io::Error::new(io::ErrorKind::Other, "identity point"))?;
}
res
}
#[allow(non_snake_case)]
fn read_G<R: Read>(r: &mut R) -> Result<Self::G, CurveError> {
let mut encoding = <Self::G as GroupEncoding>::Repr::default();
r.read_exact(encoding.as_mut()).map_err(|_| CurveError::InvalidPoint)?;
let point =
Option::<Self::G>::from(Self::G::from_bytes(&encoding)).ok_or(CurveError::InvalidPoint)?;
// Ban the identity, per the FROST spec, and non-canonical points
if (point.is_identity().into()) || (point.to_bytes().as_ref() != encoding.as_ref()) {
Err(CurveError::InvalidPoint)?;
}
Ok(point)
Ok(res)
}
}

View file

@ -1,357 +0,0 @@
use std::{
marker::PhantomData,
io::{Read, Cursor},
collections::HashMap,
};
use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, ZeroizeOnDrop};
use group::{
ff::{Field, PrimeField},
GroupEncoding,
};
use multiexp::{multiexp_vartime, BatchVerifier};
use crate::{
curve::Curve,
FrostError, FrostParams, FrostCore,
schnorr::{self, SchnorrSignature},
validate_map,
};
#[allow(non_snake_case)]
fn challenge<C: Curve>(context: &str, l: u16, R: &[u8], Am: &[u8]) -> C::F {
const DST: &[u8] = b"FROST Schnorr Proof of Knowledge";
// Uses hash_msg to get a fixed size value out of the context string
let mut transcript = C::hash_msg(context.as_bytes());
transcript.extend(l.to_be_bytes());
transcript.extend(R);
transcript.extend(Am);
C::hash_to_F(DST, &transcript)
}
// Implements steps 1 through 3 of round 1 of FROST DKG. Returns the coefficients, commitments, and
// the serialized commitments to be broadcasted over an authenticated channel to all parties
fn generate_key_r1<R: RngCore + CryptoRng, C: Curve>(
rng: &mut R,
params: &FrostParams,
context: &str,
) -> (Vec<C::F>, Vec<C::G>, Vec<u8>) {
let t = usize::from(params.t);
let mut coefficients = Vec::with_capacity(t);
let mut commitments = Vec::with_capacity(t);
let mut serialized = Vec::with_capacity((C::G_len() * t) + C::G_len() + C::F_len());
for i in 0 .. t {
// Step 1: Generate t random values to form a polynomial with
coefficients.push(C::random_F(&mut *rng));
// Step 3: Generate public commitments
commitments.push(C::generator() * coefficients[i]);
// Serialize them for publication
serialized.extend(commitments[i].to_bytes().as_ref());
}
// Step 2: Provide a proof of knowledge
let mut r = C::random_F(rng);
serialized.extend(
schnorr::sign::<C>(
coefficients[0],
// This could be deterministic as the PoK is a singleton never opened up to cooperative
// discussion
// There's no reason to spend the time and effort to make this deterministic besides a
// general obsession with canonicity and determinism though
r,
challenge::<C>(context, params.i(), (C::generator() * r).to_bytes().as_ref(), &serialized),
)
.serialize(),
);
r.zeroize();
// Step 4: Broadcast
(coefficients, commitments, serialized)
}
// Verify the received data from the first round of key generation
fn verify_r1<Re: Read, R: RngCore + CryptoRng, C: Curve>(
rng: &mut R,
params: &FrostParams,
context: &str,
our_commitments: Vec<C::G>,
mut serialized: HashMap<u16, Re>,
) -> Result<HashMap<u16, Vec<C::G>>, FrostError> {
validate_map(&serialized, &(1 ..= params.n()).collect::<Vec<_>>(), params.i())?;
let mut commitments = HashMap::new();
commitments.insert(params.i, our_commitments);
let mut signatures = Vec::with_capacity(usize::from(params.n() - 1));
for l in 1 ..= params.n() {
if l == params.i {
continue;
}
let invalid = FrostError::InvalidCommitment(l);
// Read the entire list of commitments as the key we're providing a PoK for (A) and the message
#[allow(non_snake_case)]
let mut Am = vec![0; usize::from(params.t()) * C::G_len()];
serialized.get_mut(&l).unwrap().read_exact(&mut Am).map_err(|_| invalid)?;
let mut these_commitments = vec![];
let mut cursor = Cursor::new(&Am);
for _ in 0 .. usize::from(params.t()) {
these_commitments.push(C::read_G(&mut cursor).map_err(|_| invalid)?);
}
// Don't bother validating our own proof of knowledge
if l != params.i() {
let cursor = serialized.get_mut(&l).unwrap();
#[allow(non_snake_case)]
let R = C::read_G(cursor).map_err(|_| FrostError::InvalidProofOfKnowledge(l))?;
let s = C::read_F(cursor).map_err(|_| FrostError::InvalidProofOfKnowledge(l))?;
// Step 5: Validate each proof of knowledge
// This is solely the prep step for the latter batch verification
signatures.push((
l,
these_commitments[0],
challenge::<C>(context, l, R.to_bytes().as_ref(), &Am),
SchnorrSignature::<C> { R, s },
));
}
commitments.insert(l, these_commitments);
}
schnorr::batch_verify(rng, &signatures).map_err(FrostError::InvalidProofOfKnowledge)?;
Ok(commitments)
}
fn polynomial<F: PrimeField>(coefficients: &[F], l: u16) -> F {
let l = F::from(u64::from(l));
let mut share = F::zero();
for (idx, coefficient) in coefficients.iter().rev().enumerate() {
share += coefficient;
if idx != (coefficients.len() - 1) {
share *= l;
}
}
share
}
// Implements round 1, step 5 and round 2, step 1 of FROST key generation
// Returns our secret share part, commitments for the next step, and a vector for each
// counterparty to receive
fn generate_key_r2<Re: Read, R: RngCore + CryptoRng, C: Curve>(
rng: &mut R,
params: &FrostParams,
context: &str,
coefficients: &mut Vec<C::F>,
our_commitments: Vec<C::G>,
commitments: HashMap<u16, Re>,
) -> Result<(C::F, HashMap<u16, Vec<C::G>>, HashMap<u16, Vec<u8>>), FrostError> {
let commitments = verify_r1::<_, _, C>(rng, params, context, our_commitments, commitments)?;
// Step 1: Generate secret shares for all other parties
let mut res = HashMap::new();
for l in 1 ..= params.n() {
// Don't insert our own shares to the byte buffer which is meant to be sent around
// An app developer could accidentally send it. Best to keep this black boxed
if l == params.i() {
continue;
}
res.insert(l, polynomial(coefficients, l).to_repr().as_ref().to_vec());
}
// Calculate our own share
let share = polynomial(coefficients, params.i());
coefficients.zeroize();
Ok((share, commitments, res))
}
/// Finishes round 2 and returns both the secret share and the serialized public key.
/// This key MUST NOT be considered usable until all parties confirm they have completed the
/// protocol without issue.
fn complete_r2<Re: Read, R: RngCore + CryptoRng, C: Curve>(
  rng: &mut R,
  params: FrostParams,
  mut secret_share: C::F,
  commitments: &mut HashMap<u16, Vec<C::G>>,
  mut serialized: HashMap<u16, Re>,
) -> Result<FrostCore<C>, FrostError> {
  // Every other participant must have sent us exactly one share
  validate_map(&serialized, &(1 ..= params.n()).collect::<Vec<_>>(), params.i())?;

  // Step 2. Verify each share
  let mut shares = HashMap::new();
  // TODO: Clear serialized
  for (l, share) in serialized.iter_mut() {
    shares.insert(*l, C::read_F(share).map_err(|_| FrostError::InvalidShare(*l))?);
  }

  // Calculate the exponent for a given participant and apply it to a series of commitments
  // Initially used with the actual commitments to verify the secret share, later used with stripes
  // to generate the verification shares
  let exponential = |i: u16, values: &[_]| {
    let i = C::F::from(i.into());
    let mut res = Vec::with_capacity(params.t().into());
    // Pair each value with i^0, i^1, .. i^(t - 1); the fold's final accumulator is discarded
    // (a Range is already an Iterator, so no into_iter is needed)
    (0 .. usize::from(params.t())).fold(C::F::one(), |exp, l| {
      res.push((exp, values[l]));
      exp * i
    });
    res
  };

  let mut batch = BatchVerifier::new(shares.len());
  for (l, share) in shares.iter_mut() {
    if *l == params.i() {
      continue;
    }

    // Sum everyone's share of our key into our final secret share
    secret_share += *share;

    // This can be insecurely linearized from n * t to just n using the below sums for a given
    // stripe. Doing so uses naive addition which is subject to malleability. The only way to
    // ensure that malleability isn't present is to use this n * t algorithm, which runs
    // per sender and not as an aggregate of all senders, which also enables blame
    let mut values = exponential(params.i(), &commitments[l]);
    // Queue the check that share * G equals the commitment evaluation, i.e. the sum is 0
    values.push((-*share, C::generator()));
    share.zeroize();

    batch.queue(rng, *l, values);
  }
  batch.verify_with_vartime_blame().map_err(FrostError::InvalidCommitment)?;

  // Stripe commitments per t and sum them in advance. Calculating verification shares relies on
  // these sums so preprocessing them is a massive speedup
  // If these weren't just sums, yet the tables used in multiexp, this would be further optimized
  // As of right now, each multiexp will regenerate them
  let mut stripes = Vec::with_capacity(usize::from(params.t()));
  for t in 0 .. usize::from(params.t()) {
    stripes.push(commitments.values().map(|commitments| commitments[t]).sum());
  }

  // Calculate each user's verification share
  let mut verification_shares = HashMap::new();
  for i in 1 ..= params.n() {
    verification_shares.insert(i, multiexp_vartime(&exponential(i, &stripes)));
  }
  // Removing this check would enable optimizing the above from t + (n * t) to t + ((n - 1) * t)
  debug_assert_eq!(C::generator() * secret_share, verification_shares[&params.i()]);

  Ok(FrostCore { params, secret_share, group_key: stripes[0], verification_shares })
}
/// State machine to begin the key generation protocol.
pub struct KeyGenMachine<C: Curve> {
  // Multisig parameters (t, n, i) the generated key will be for
  params: FrostParams,
  // Context string; per new(), it should be unique among multisigs
  context: String,
  _curve: PhantomData<C>,
}

/// Advancement of the key generation state machine.
#[derive(Zeroize)]
pub struct SecretShareMachine<C: Curve> {
  #[zeroize(skip)]
  params: FrostParams,
  context: String,
  // Secret polynomial coefficients; zeroized by generate_key_r2 once shares are generated
  coefficients: Vec<C::F>,
  // Public commitments to our coefficients (not secret, hence the skip)
  #[zeroize(skip)]
  our_commitments: Vec<C::G>,
}

// Manual Drop so the derived Zeroize runs when the machine goes out of scope
impl<C: Curve> Drop for SecretShareMachine<C> {
  fn drop(&mut self) {
    self.zeroize()
  }
}
impl<C: Curve> ZeroizeOnDrop for SecretShareMachine<C> {}

/// Final step of the key generation protocol.
#[derive(Zeroize)]
pub struct KeyMachine<C: Curve> {
  #[zeroize(skip)]
  params: FrostParams,
  // Our own share of the key, prior to summing in everyone else's shares (see complete_r2)
  secret: C::F,
  // Everyone's coefficient commitments, used to verify the shares sent to us
  #[zeroize(skip)]
  commitments: HashMap<u16, Vec<C::G>>,
}

// Manual Drop so the derived Zeroize runs when the machine goes out of scope
impl<C: Curve> Drop for KeyMachine<C> {
  fn drop(&mut self) {
    self.zeroize()
  }
}
impl<C: Curve> ZeroizeOnDrop for KeyMachine<C> {}
impl<C: Curve> KeyGenMachine<C> {
  /// Creates a new machine to generate a key for the specified curve in the specified multisig.
  // The context string should be unique among multisigs.
  pub fn new(params: FrostParams, context: String) -> KeyGenMachine<C> {
    Self { params, context, _curve: PhantomData }
  }

  /// Start generating a key according to the FROST DKG spec.
  /// Returns a serialized list of commitments to be sent to all parties over an authenticated
  /// channel. If any party submits multiple sets of commitments, they MUST be treated as
  /// malicious.
  pub fn generate_coefficients<R: RngCore + CryptoRng>(
    self,
    rng: &mut R,
  ) -> (SecretShareMachine<C>, Vec<u8>) {
    // Round 1: sample our polynomial and commit to it
    let (coefficients, our_commitments, commitment_msg) =
      generate_key_r1::<_, C>(rng, &self.params, &self.context);

    // Advance to the next state, carrying the secrets forward
    let machine = SecretShareMachine {
      params: self.params,
      context: self.context,
      coefficients,
      our_commitments,
    };
    (machine, commitment_msg)
  }
}
impl<C: Curve> SecretShareMachine<C> {
  /// Continue generating a key.
  /// Takes in everyone else's commitments. Returns a HashMap of byte vectors representing secret
  /// shares. These MUST be encrypted and only then sent to their respective participants.
  pub fn generate_secret_shares<Re: Read, R: RngCore + CryptoRng>(
    mut self,
    rng: &mut R,
    commitments: HashMap<u16, Re>,
  ) -> Result<(KeyMachine<C>, HashMap<u16, Vec<u8>>), FrostError> {
    // Round 2: verify the received round 1 data and evaluate our polynomial for each participant
    // This zeroizes self.coefficients, which is why self must be taken mutably
    let (secret, commitments, shares) = generate_key_r2::<_, _, C>(
      rng,
      &self.params,
      &self.context,
      &mut self.coefficients,
      self.our_commitments.clone(),
      commitments,
    )?;
    Ok((KeyMachine { params: self.params, secret, commitments }, shares))
  }
}
impl<C: Curve> KeyMachine<C> {
  /// Complete key generation.
  /// Takes in everyone elses' shares submitted to us. Returns a FrostCore object representing the
  /// generated keys. Successful protocol completion MUST be confirmed by all parties before these
  /// keys may be safely used.
  pub fn complete<Re: Read, R: RngCore + CryptoRng>(
    mut self,
    rng: &mut R,
    shares: HashMap<u16, Re>,
  ) -> Result<FrostCore<C>, FrostError> {
    // Sum the received shares into our own and batch-verify them against the prior commitments
    complete_r2(rng, self.params, self.secret, &mut self.commitments, shares)
  }
}

View file

@ -11,33 +11,23 @@
//!
//! This library offers ciphersuites compatible with the
//! [IETF draft](https://github.com/cfrg/draft-irtf-cfrg-frost). Currently, version
//! 10 is supported.
//! 11 is supported.
use core::fmt::{self, Debug};
use std::{io::Read, sync::Arc, collections::HashMap};
use core::fmt::Debug;
use std::collections::HashMap;
use thiserror::Error;
use zeroize::{Zeroize, ZeroizeOnDrop};
use group::{
ff::{Field, PrimeField},
GroupEncoding,
};
mod schnorr;
/// Distributed key generation protocol.
pub use dkg::{self, ThresholdParams, ThresholdCore, ThresholdKeys, ThresholdView};
/// Curve trait and provided curves/HRAMs, forming various ciphersuites.
pub mod curve;
use curve::Curve;
/// Distributed key generation protocol.
pub mod key_gen;
/// Promote keys between curves.
pub mod promote;
/// Algorithm for the signing process.
pub mod algorithm;
mod nonce;
/// Threshold signing protocol.
pub mod sign;
@ -45,7 +35,7 @@ pub mod sign;
#[cfg(any(test, feature = "tests"))]
pub mod tests;
// Validate a map of serialized values to have the expected included participants
// Validate a map of values to have the expected included participants
pub(crate) fn validate_map<T>(
map: &HashMap<u16, T>,
included: &[u16],
@ -71,59 +61,11 @@ pub(crate) fn validate_map<T>(
Ok(())
}
/// Parameters for a multisig.
// These fields can not be made public as they should be static
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct FrostParams {
  /// Participants needed to sign on behalf of the group.
  t: u16,
  /// Amount of participants.
  n: u16,
  /// Index of the participant being acted for.
  // Valid indexes are 1 ..= n; 0 is rejected by FrostParams::new
  i: u16,
}
impl FrostParams {
  /// Construct and validate parameters for a t-of-n multisig, acting as participant i.
  pub fn new(t: u16, n: u16, i: u16) -> Result<FrostParams, FrostError> {
    // Neither the threshold nor the participant count may be 0
    if (t == 0) || (n == 0) {
      return Err(FrostError::ZeroParameter(t, n));
    }

    // When t == n, this shouldn't be used (MuSig2 and other variants of MuSig exist for a reason),
    // but it's not invalid to do so
    if t > n {
      return Err(FrostError::InvalidRequiredQuantity(t, n));
    }

    // Our own index must be within 1 ..= n
    if (i == 0) || (i > n) {
      return Err(FrostError::InvalidParticipantIndex(n, i));
    }

    Ok(FrostParams { t, n, i })
  }

  /// Threshold needed to sign.
  pub fn t(&self) -> u16 {
    self.t
  }
  /// Total amount of participants.
  pub fn n(&self) -> u16 {
    self.n
  }
  /// Index of the participant these parameters act for.
  pub fn i(&self) -> u16 {
    self.i
  }
}
/// Various errors possible during key generation/signing.
/// Various errors possible during signing.
#[derive(Copy, Clone, Error, Debug)]
pub enum FrostError {
#[error("a parameter was 0 (required {0}, participants {1})")]
ZeroParameter(u16, u16),
#[error("too many participants (max {1}, got {0})")]
TooManyParticipants(usize, u16),
#[error("invalid amount of required participants (max {1}, got {0})")]
InvalidRequiredQuantity(u16, u16),
#[error("invalid participant index (0 < index <= {0}, yet index is {1})")]
InvalidParticipantIndex(u16, u16),
#[error("invalid signing set ({0})")]
InvalidSigningSet(&'static str),
#[error("invalid participant quantity (expected {0}, got {1})")]
@ -132,290 +74,12 @@ pub enum FrostError {
DuplicatedIndex(u16),
#[error("missing participant {0}")]
MissingParticipant(u16),
#[error("invalid commitment (participant {0})")]
InvalidCommitment(u16),
#[error("invalid proof of knowledge (participant {0})")]
InvalidProofOfKnowledge(u16),
#[error("invalid preprocess (participant {0})")]
InvalidPreprocess(u16),
#[error("invalid share (participant {0})")]
InvalidShare(u16),
#[error("internal error ({0})")]
InternalError(&'static str),
}
/// Calculate the lagrange coefficient for a signing set.
///
/// Evaluates, at 0, the lagrange basis polynomial for participant `i` over the points in
/// `included`. `included` must not cause a zero denominator (i.e. contain a duplicate of `i`
/// under a distinct entry), and `i` should itself be a member of `included`.
pub fn lagrange<F: PrimeField>(i: u16, included: &[u16]) -> F {
  let i_field = F::from(u64::from(i));

  let mut numerator = F::one();
  let mut denominator = F::one();
  // Skip our own index; its factor would be zero in the denominator
  for l in included.iter().filter(|l| **l != i) {
    let l_field = F::from(u64::from(*l));
    numerator *= l_field;
    denominator *= l_field - i_field;
  }

  // Safe to invert as the denominator is solely a product of non-zero factors
  numerator * denominator.invert().unwrap()
}
/// Core keys generated by performing a FROST keygen protocol.
#[derive(Clone, PartialEq, Eq, Zeroize)]
pub struct FrostCore<C: Curve> {
  /// FROST Parameters.
  #[zeroize(skip)]
  params: FrostParams,
  /// Secret share key.
  secret_share: C::F,
  /// Group key.
  group_key: C::G,
  /// Verification shares.
  #[zeroize(skip)]
  verification_shares: HashMap<u16, C::G>,
}

// Manual Drop so the derived Zeroize runs when the keys go out of scope
impl<C: Curve> Drop for FrostCore<C> {
  fn drop(&mut self) {
    self.zeroize()
  }
}
impl<C: Curve> ZeroizeOnDrop for FrostCore<C> {}

// Manual Debug which deliberately omits secret_share so it can't leak via logs
impl<C: Curve> Debug for FrostCore<C> {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    f.debug_struct("FrostCore")
      .field("params", &self.params)
      .field("group_key", &self.group_key)
      .field("verification_shares", &self.verification_shares)
      .finish()
  }
}
impl<C: Curve> FrostCore<C> {
  // Construct a FrostCore, deriving the group key by interpolating the first t verification
  // shares at 0 via lagrange coefficients
  pub(crate) fn new(
    params: FrostParams,
    secret_share: C::F,
    verification_shares: HashMap<u16, C::G>,
  ) -> FrostCore<C> {
    // Debug-only sanity check that a verification share exists for every participant
    // (ours is index params.i; 0 is passed as the index to skip, which is never a real index)
    #[cfg(debug_assertions)]
    validate_map(&verification_shares, &(0 ..= params.n).collect::<Vec<_>>(), 0).unwrap();

    let t = (1 ..= params.t).collect::<Vec<_>>();
    FrostCore {
      params,
      secret_share,
      group_key: t.iter().map(|i| verification_shares[i] * lagrange::<C::F>(*i, &t)).sum(),
      verification_shares,
    }
  }

  /// Parameters these keys were generated under.
  pub fn params(&self) -> FrostParams {
    self.params
  }

  // Test-only accessor for the secret share
  #[cfg(any(test, feature = "tests"))]
  pub(crate) fn secret_share(&self) -> C::F {
    self.secret_share
  }

  /// The group's public key.
  pub fn group_key(&self) -> C::G {
    self.group_key
  }

  pub(crate) fn verification_shares(&self) -> HashMap<u16, C::G> {
    self.verification_shares.clone()
  }

  // NOTE(review): this appears to over-estimate relative to serialize, which writes
  // 4 (ID length) + ID + (3 * 2) + F_len + (n * G_len) bytes — confirm whether any caller
  // relies on this as an exact length rather than a capacity hint
  pub fn serialized_len(n: u16) -> usize {
    8 + C::ID.len() + (3 * 2) + C::F_len() + C::G_len() + (usize::from(n) * C::G_len())
  }

  // Serialize as: ID length (u32 BE) || ID || t || n || i (u16 BE each) || secret share ||
  // verification shares for participants 1 ..= n, in order
  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = Vec::with_capacity(FrostCore::<C>::serialized_len(self.params.n));
    serialized.extend(u32::try_from(C::ID.len()).unwrap().to_be_bytes());
    serialized.extend(C::ID);
    serialized.extend(self.params.t.to_be_bytes());
    serialized.extend(self.params.n.to_be_bytes());
    serialized.extend(self.params.i.to_be_bytes());
    serialized.extend(self.secret_share.to_repr().as_ref());
    for l in 1 ..= self.params.n {
      serialized.extend(self.verification_shares[&l].to_bytes().as_ref());
    }

    serialized
  }

  // Deserialize the format written by serialize, rejecting data for a different curve
  pub fn deserialize<R: Read>(cursor: &mut R) -> Result<FrostCore<C>, FrostError> {
    {
      let missing = FrostError::InternalError("FrostCore serialization is missing its curve");
      let different = FrostError::InternalError("deserializing FrostCore for another curve");

      // Verify the embedded curve ID matches ours, both in length and content
      let mut id_len = [0; 4];
      cursor.read_exact(&mut id_len).map_err(|_| missing)?;
      if u32::try_from(C::ID.len()).unwrap().to_be_bytes() != id_len {
        Err(different)?;
      }

      let mut id = vec![0; C::ID.len()];
      cursor.read_exact(&mut id).map_err(|_| missing)?;
      if id != C::ID {
        Err(different)?;
      }
    }

    let (t, n, i) = {
      let mut read_u16 = || {
        let mut value = [0; 2];
        cursor
          .read_exact(&mut value)
          .map_err(|_| FrostError::InternalError("missing participant quantities"))?;
        Ok(u16::from_be_bytes(value))
      };
      (read_u16()?, read_u16()?, read_u16()?)
    };

    let secret_share =
      C::read_F(cursor).map_err(|_| FrostError::InternalError("invalid secret share"))?;

    let mut verification_shares = HashMap::new();
    for l in 1 ..= n {
      verification_shares.insert(
        l,
        C::read_G(cursor).map_err(|_| FrostError::InternalError("invalid verification share"))?,
      );
    }

    // Re-validate the parameters and re-derive the group key via FrostCore::new
    Ok(FrostCore::new(
      FrostParams::new(t, n, i).map_err(|_| FrostError::InternalError("invalid parameters"))?,
      secret_share,
      verification_shares,
    ))
  }
}
/// FROST keys usable for signing.
#[derive(Clone, Debug, Zeroize)]
pub struct FrostKeys<C: Curve> {
  /// Core keys.
  // Arc'd so clones share the same core; skipped as FrostCore zeroizes itself on drop
  #[zeroize(skip)]
  core: Arc<FrostCore<C>>,
  /// Offset applied to these keys.
  // Ephemeral; not included when these keys are serialized (see FrostKeys::offset)
  pub(crate) offset: Option<C::F>,
}

// Manually implement Drop due to https://github.com/RustCrypto/utils/issues/786
impl<C: Curve> Drop for FrostKeys<C> {
  fn drop(&mut self) {
    self.zeroize()
  }
}
impl<C: Curve> ZeroizeOnDrop for FrostKeys<C> {}

/// View of keys passed to algorithm implementations.
#[derive(Clone, Zeroize)]
pub struct FrostView<C: Curve> {
  // Group key with any offset already applied
  group_key: C::G,
  // Participants included in this signing session
  #[zeroize(skip)]
  included: Vec<u16>,
  // Our secret share, with the lagrange coefficient and offset share already applied
  secret_share: C::F,
  // Verification shares, similarly adjusted (see FrostKeys::view)
  #[zeroize(skip)]
  verification_shares: HashMap<u16, C::G>,
}

// Manual Drop so the derived Zeroize runs when the view goes out of scope
impl<C: Curve> Drop for FrostView<C> {
  fn drop(&mut self) {
    self.zeroize()
  }
}
impl<C: Curve> ZeroizeOnDrop for FrostView<C> {}
impl<C: Curve> FrostKeys<C> {
  /// Wrap a FrostCore into signing-capable keys, with no offset applied.
  pub fn new(core: FrostCore<C>) -> FrostKeys<C> {
    FrostKeys { core: Arc::new(core), offset: None }
  }

  /// Offset the keys by a given scalar to allow for account and privacy schemes.
  /// This offset is ephemeral and will not be included when these keys are serialized.
  /// Keys offset multiple times will form a new offset of their sum.
  /// Not IETF compliant.
  pub fn offset(&self, offset: C::F) -> FrostKeys<C> {
    let mut res = self.clone();
    // Carry any existing offset
    // Enables schemes like Monero's subaddresses which have a per-subaddress offset and then a
    // one-time-key offset
    res.offset = Some(offset + res.offset.unwrap_or_else(C::F::zero));
    res
  }

  /// Parameters the underlying core keys were generated under.
  pub fn params(&self) -> FrostParams {
    self.core.params
  }

  pub(crate) fn secret_share(&self) -> C::F {
    self.core.secret_share
  }

  /// Returns the group key with any offset applied.
  pub fn group_key(&self) -> C::G {
    self.core.group_key + (C::generator() * self.offset.unwrap_or_else(C::F::zero))
  }

  /// Returns all participants' verification shares without any offsetting.
  pub(crate) fn verification_shares(&self) -> HashMap<u16, C::G> {
    self.core.verification_shares()
  }

  /// Serialized length of the underlying core keys (the offset is not serialized).
  pub fn serialized_len(n: u16) -> usize {
    FrostCore::<C>::serialized_len(n)
  }

  /// Serialize the underlying core keys (the offset is not serialized).
  pub fn serialize(&self) -> Vec<u8> {
    self.core.serialize()
  }

  /// Produce a view of these keys for the specified signing set, applying lagrange
  /// coefficients and distributing any offset across the included participants.
  pub fn view(&self, included: &[u16]) -> Result<FrostView<C>, FrostError> {
    if (included.len() < self.params().t.into()) || (usize::from(self.params().n) < included.len())
    {
      Err(FrostError::InvalidSigningSet("invalid amount of participants included"))?;
    }

    // Split the offset evenly among the included participants, so the shares still sum to
    // (interpolated secret + offset)
    let offset_share = self.offset.unwrap_or_else(C::F::zero) *
      C::F::from(included.len().try_into().unwrap()).invert().unwrap();
    let offset_verification_share = C::generator() * offset_share;

    Ok(FrostView {
      group_key: self.group_key(),
      secret_share: (self.secret_share() * lagrange::<C::F>(self.params().i, included)) +
        offset_share,
      verification_shares: self
        .verification_shares()
        .iter()
        .map(|(l, share)| {
          (*l, (*share * lagrange::<C::F>(*l, included)) + offset_verification_share)
        })
        .collect(),
      included: included.to_vec(),
    })
  }
}
impl<C: Curve> FrostView<C> {
  /// Group key, with any offset already applied.
  pub fn group_key(&self) -> C::G {
    self.group_key
  }

  /// Participants included in this signing session.
  pub fn included(&self) -> Vec<u16> {
    self.included.clone()
  }

  /// Our secret share, with the lagrange coefficient and offset share already applied.
  pub fn secret_share(&self) -> C::F {
    self.secret_share
  }

  /// Verification share for participant l.
  // Panics if l does not have a verification share in this view
  pub fn verification_share(&self, l: u16) -> C::G {
    self.verification_shares[&l]
  }
}

267
crypto/frost/src/nonce.rs Normal file
View file

@ -0,0 +1,267 @@
// FROST defines its nonce as sum(Di, Ei * bi)
// Monero needs not just the nonce over G however, yet also over H
// Then there is a signature (a modified Chaum Pedersen proof) using multiple nonces at once
//
// Accordingly, in order for this library to be robust, it supports generating an arbitrary amount
// of nonces, each against an arbitrary list of basepoints
//
// Each nonce remains of the form (d, e) and made into a proper nonce with d + (e * b)
// When multiple D, E pairs are provided, a DLEq proof is also provided to confirm their integrity
use std::{
io::{self, Read, Write},
collections::HashMap,
};
use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, ZeroizeOnDrop};
use transcript::Transcript;
use group::{ff::PrimeField, Group, GroupEncoding};
use multiexp::multiexp_vartime;
use dleq::DLEqProof;
use crate::curve::Curve;
// A fresh, independent transcript for each DLEq proof; see NonceCommitments::new for why an
// independent transcript is used instead of forking the session transcript
fn dleq_transcript<T: Transcript>() -> T {
  T::new(b"FROST_nonce_dleq")
}

// Each nonce is actually a pair of random scalars, notated as d, e under the FROST paper
// This is considered a single nonce as r = d + be
#[derive(Clone, Zeroize)]
pub(crate) struct Nonce<C: Curve>(pub(crate) [C::F; 2]);

// Manual Drop so the derived Zeroize runs and the scalars are wiped when dropped
impl<C: Curve> Drop for Nonce<C> {
  fn drop(&mut self) {
    self.zeroize();
  }
}
impl<C: Curve> ZeroizeOnDrop for Nonce<C> {}

// Commitments to a specific generator for this nonce
// [0] commits to d (D), [1] commits to e (E)
#[derive(Copy, Clone, PartialEq, Eq)]
pub(crate) struct GeneratorCommitments<C: Curve>(pub(crate) [C::G; 2]);
impl<C: Curve> GeneratorCommitments<C> {
  // Read the commitment pair (D, then E) from the reader
  fn read<R: Read>(reader: &mut R) -> io::Result<GeneratorCommitments<C>> {
    let d = <C as Curve>::read_G(reader)?;
    let e = <C as Curve>::read_G(reader)?;
    Ok(GeneratorCommitments([d, e]))
  }

  // Write the commitment pair in the same order read expects
  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    for commitment in &self.0 {
      writer.write_all(commitment.to_bytes().as_ref())?;
    }
    Ok(())
  }
}
// A single nonce's commitments and relevant proofs
#[derive(Clone, PartialEq, Eq)]
pub(crate) struct NonceCommitments<C: Curve> {
  // Called generators as these commitments are indexed by generator
  pub(crate) generators: Vec<GeneratorCommitments<C>>,
  // DLEq Proofs proving that these commitments are generated using the same scalar pair
  // This could be further optimized with a multi-nonce proof, offering just one proof for all
  // nonces. See https://github.com/serai-dex/serai/issues/38
  // TODO
  // Only Some when there are two or more generators, as a single generator needs no
  // cross-generator consistency proof (see NonceCommitments::new)
  pub(crate) dleqs: Option<[DLEqProof<C::G>; 2]>,
}
impl<C: Curve> NonceCommitments<C> {
  // Sample a fresh nonce (d, e) and commit to it under every provided generator
  // NOTE(review): secret_share is received by value; the zeroize below wipes this function's
  // copy, not the caller's — confirm callers handle their own copies
  pub(crate) fn new<R: RngCore + CryptoRng, T: Transcript>(
    rng: &mut R,
    mut secret_share: C::F,
    generators: &[C::G],
  ) -> (Nonce<C>, NonceCommitments<C>) {
    let nonce =
      Nonce([C::random_nonce(secret_share, &mut *rng), C::random_nonce(secret_share, &mut *rng)]);
    secret_share.zeroize();

    // One (D, E) commitment pair per generator
    let mut commitments = Vec::with_capacity(generators.len());
    for generator in generators {
      commitments.push(GeneratorCommitments([*generator * nonce.0[0], *generator * nonce.0[1]]));
    }

    let mut dleqs = None;
    if generators.len() >= 2 {
      let mut dleq = |nonce| {
        // Uses an independent transcript as each signer must prove this with their commitments,
        // yet they're validated while processing everyone's data sequentially, by the global order
        // This avoids needing to clone and fork the transcript around
        // TODO: At least include a challenge from the existing transcript
        DLEqProof::prove(&mut *rng, &mut dleq_transcript::<T>(), generators, nonce)
      };
      dleqs = Some([dleq(nonce.0[0]), dleq(nonce.0[1])]);
    }

    (nonce, NonceCommitments { generators: commitments, dleqs })
  }

  // Read another signer's commitments, verifying the DLEq proofs when multiple generators
  // are in use; errors with InvalidData-equivalent (Other) on an invalid proof
  fn read<R: Read, T: Transcript>(
    reader: &mut R,
    generators: &[C::G],
  ) -> io::Result<NonceCommitments<C>> {
    let commitments: Vec<GeneratorCommitments<C>> = (0 .. generators.len())
      .map(|_| GeneratorCommitments::read(reader))
      .collect::<Result<_, _>>()?;

    let mut dleqs = None;
    if generators.len() >= 2 {
      // i selects D (0) or E (1) across every generator's commitment pair
      let mut verify = |i| -> io::Result<_> {
        let dleq = DLEqProof::deserialize(reader)?;
        dleq
          .verify(
            &mut dleq_transcript::<T>(),
            generators,
            &commitments.iter().map(|commitments| commitments.0[i]).collect::<Vec<_>>(),
          )
          .map_err(|_| io::Error::new(io::ErrorKind::Other, "invalid DLEq proof"))?;
        Ok(dleq)
      };
      dleqs = Some([verify(0)?, verify(1)?]);
    }

    Ok(NonceCommitments { generators: commitments, dleqs })
  }

  // Write all commitment pairs, followed by the DLEq proofs if present
  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    for generator in &self.generators {
      generator.write(writer)?;
    }

    if let Some(dleqs) = &self.dleqs {
      dleqs[0].serialize(writer)?;
      dleqs[1].serialize(writer)?;
    }
    Ok(())
  }
}
// All of a single signer's nonce commitments for one signing session
#[derive(Clone, PartialEq, Eq)]
pub(crate) struct Commitments<C: Curve> {
  // Called nonces as these commitments are indexed by nonce
  pub(crate) nonces: Vec<NonceCommitments<C>>,
}

impl<C: Curve> Commitments<C> {
  // Generate one nonce, and its commitments, per planned nonce (each with its own generators)
  pub(crate) fn new<R: RngCore + CryptoRng, T: Transcript>(
    rng: &mut R,
    secret_share: C::F,
    planned_nonces: &[Vec<C::G>],
  ) -> (Vec<Nonce<C>>, Commitments<C>) {
    let mut nonces = vec![];
    let mut commitments = vec![];
    for generators in planned_nonces {
      let (nonce, these_commitments) =
        NonceCommitments::new::<_, T>(&mut *rng, secret_share, generators);

      nonces.push(nonce);
      commitments.push(these_commitments);
    }
    (nonces, Commitments { nonces: commitments })
  }

  // Append every commitment (and DLEq proof) to the provided transcript
  pub(crate) fn transcript<T: Transcript>(&self, t: &mut T) {
    for nonce in &self.nonces {
      for commitments in &nonce.generators {
        t.append_message(b"commitment_D", commitments.0[0].to_bytes().as_ref());
        t.append_message(b"commitment_E", commitments.0[1].to_bytes().as_ref());
      }

      // Transcripting the DLEqs implicitly transcripts the exact generators used for this nonce
      // This means it shouldn't be possible for variadic generators to cause conflicts as they're
      // committed to as their entire series per-nonce, not as isolates
      if let Some(dleqs) = &nonce.dleqs {
        let mut transcript_dleq = |label, dleq: &DLEqProof<C::G>| {
          let mut buf = vec![];
          dleq.serialize(&mut buf).unwrap();
          t.append_message(label, &buf);
        };
        transcript_dleq(b"dleq_D", &dleqs[0]);
        transcript_dleq(b"dleq_E", &dleqs[1]);
      }
    }
  }

  // Read a signer's full set of commitments, one NonceCommitments per planned nonce
  pub(crate) fn read<R: Read, T: Transcript>(
    reader: &mut R,
    nonces: &[Vec<C::G>],
  ) -> io::Result<Self> {
    Ok(Commitments {
      nonces: (0 .. nonces.len())
        .map(|i| NonceCommitments::read::<_, T>(reader, &nonces[i]))
        .collect::<Result<_, _>>()?,
    })
  }

  // Write every nonce's commitments in order
  pub(crate) fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    for nonce in &self.nonces {
      nonce.write(writer)?;
    }
    Ok(())
  }
}
// A single participant's commitments and, once calculated, their binding factors
pub(crate) struct IndividualBinding<C: Curve> {
  commitments: Commitments<C>,
  // None until calculate_binding_factors is called
  binding_factors: Option<Vec<C::F>>,
}

// Every participant's commitments/binding factors, keyed by participant index
pub(crate) struct BindingFactor<C: Curve>(pub(crate) HashMap<u16, IndividualBinding<C>>);

impl<C: Curve> BindingFactor<C> {
  pub(crate) fn insert(&mut self, i: u16, commitments: Commitments<C>) {
    self.0.insert(i, IndividualBinding { commitments, binding_factors: None });
  }

  // Derive each participant's binding factors from a fork of the session transcript,
  // keyed by their participant index
  pub(crate) fn calculate_binding_factors<T: Clone + Transcript>(&mut self, transcript: &mut T) {
    for (l, binding) in self.0.iter_mut() {
      let mut transcript = transcript.clone();
      transcript.append_message(b"participant", C::F::from(u64::from(*l)).to_repr().as_ref());
      // It *should* be perfectly fine to reuse a binding factor for multiple nonces
      // This generates a binding factor per nonce just to ensure it never comes up as a question
      binding.binding_factors = Some(
        (0 .. binding.commitments.nonces.len())
          .map(|_| C::hash_binding_factor(transcript.challenge(b"rho").as_ref()))
          .collect(),
      );
    }
  }

  // Panics if calculate_binding_factors hasn't been called for participant i yet
  pub(crate) fn binding_factors(&self, i: u16) -> &[C::F] {
    self.0[&i].binding_factors.as_ref().unwrap()
  }

  // Get the bound nonces for a specific party
  // For each nonce and generator, this is D + (E * rho)
  pub(crate) fn bound(&self, l: u16) -> Vec<Vec<C::G>> {
    let mut res = vec![];
    for (i, (nonce, rho)) in
      self.0[&l].commitments.nonces.iter().zip(self.binding_factors(l).iter()).enumerate()
    {
      res.push(vec![]);
      for generator in &nonce.generators {
        res[i].push(generator.0[0] + (generator.0[1] * rho));
      }
    }
    res
  }

  // Get the nonces for this signing session
  // Sums every participant's bound nonces per (nonce, generator), batching the E * rho
  // terms into a single variable-time multiexp
  pub(crate) fn nonces(&self, planned_nonces: &[Vec<C::G>]) -> Vec<Vec<C::G>> {
    let mut nonces = Vec::with_capacity(planned_nonces.len());
    for n in 0 .. planned_nonces.len() {
      nonces.push(Vec::with_capacity(planned_nonces[n].len()));
      for g in 0 .. planned_nonces[n].len() {
        #[allow(non_snake_case)]
        let mut D = C::G::identity();
        let mut statements = Vec::with_capacity(self.0.len());
        #[allow(non_snake_case)]
        for IndividualBinding { commitments, binding_factors } in self.0.values() {
          D += commitments.nonces[n].generators[g].0[0];
          statements
            .push((binding_factors.as_ref().unwrap()[n], commitments.nonces[n].generators[g].0[1]));
        }
        nonces[n].push(D + multiexp_vartime(&statements));
      }
    }
    nonces
  }
}

View file

@ -1,73 +0,0 @@
use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize;
use group::{
ff::{Field, PrimeField},
GroupEncoding,
};
use multiexp::BatchVerifier;
use crate::Curve;
/// A Schnorr signature of the form (R, s) where s = r + cx.
#[allow(non_snake_case)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct SchnorrSignature<C: Curve> {
  /// The nonce commitment R = rG (see sign).
  pub R: C::G,
  /// The solution s = r + cx.
  pub s: C::F,
}
impl<C: Curve> SchnorrSignature<C> {
  /// Serialize the signature as R || s.
  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = Vec::with_capacity(C::G_len() + C::F_len());
    serialized.extend_from_slice(self.R.to_bytes().as_ref());
    serialized.extend_from_slice(self.s.to_repr().as_ref());
    serialized
  }
}
// Produce a Schnorr signature (R, s) with R = rG, s = r + cx
pub(crate) fn sign<C: Curve>(
  mut private_key: C::F,
  mut nonce: C::F,
  challenge: C::F,
) -> SchnorrSignature<C> {
  #[allow(non_snake_case)]
  let R = C::generator() * nonce;
  let s = nonce + (private_key * challenge);
  // Wipe our copies of the secrets before returning
  private_key.zeroize();
  nonce.zeroize();
  SchnorrSignature { R, s }
}
// Verify a Schnorr signature by checking sG == R + cA
#[must_use]
pub(crate) fn verify<C: Curve>(
  public_key: C::G,
  challenge: C::F,
  signature: &SchnorrSignature<C>,
) -> bool {
  let expected = signature.R + (public_key * challenge);
  (C::generator() * signature.s) == expected
}
// Batch verify a series of Schnorr signatures
// Each triplet is (participant index, public key A, challenge c, signature); on failure, the
// Err contains the index (triple.0) blamed by the batch verifier
pub(crate) fn batch_verify<C: Curve, R: RngCore + CryptoRng>(
  rng: &mut R,
  triplets: &[(u16, C::G, C::F, SchnorrSignature<C>)],
) -> Result<(), u16> {
  // Reused statement buffer; each queued check asserts R + cA - sG == 0
  let mut values = [(C::F::one(), C::generator()); 3];
  let mut batch = BatchVerifier::new(triplets.len());
  for triple in triplets {
    // s = r + ca
    // sG == R + cA
    // R + cA - sG == 0
    // R
    values[0].1 = triple.3.R;
    // cA
    values[1] = (triple.2, triple.1);
    // -sG
    values[2].0 = -triple.3.s;
    batch.queue(rng, triple.0, values);
  }

  batch.verify_vartime_with_vartime_blame()
}

View file

@ -1,41 +1,65 @@
use core::fmt;
use std::{
io::{Read, Cursor},
io::{self, Read, Write},
collections::HashMap,
};
use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, ZeroizeOnDrop};
use subtle::ConstantTimeEq;
use transcript::Transcript;
use group::{
ff::{Field, PrimeField},
Group, GroupEncoding,
};
use multiexp::multiexp_vartime;
use dleq::DLEqProof;
use group::{ff::PrimeField, GroupEncoding};
use crate::{
curve::Curve, FrostError, FrostParams, FrostKeys, FrostView, algorithm::Algorithm, validate_map,
curve::Curve,
FrostError, ThresholdParams, ThresholdKeys, ThresholdView,
algorithm::{WriteAddendum, Addendum, Algorithm},
validate_map,
};
/// Pairing of an Algorithm with a FrostKeys instance and this specific signing set.
#[derive(Clone)]
pub struct Params<C: Curve, A: Algorithm<C>> {
algorithm: A,
keys: FrostKeys<C>,
view: FrostView<C>,
pub(crate) use crate::nonce::*;
/// Trait enabling writing preprocesses and signature shares.
pub trait Writable {
  /// Write this object to the provided writer.
  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()>;

  /// Serialize this object to a byte vector via write.
  // Writing to a Vec is infallible, hence the unwrap
  fn serialize(&self) -> Vec<u8> {
    let mut buf = vec![];
    self.write(&mut buf).unwrap();
    buf
  }
}
// Currently public to enable more complex operations as desired, yet solely used in testing
impl<T: Writable> Writable for Vec<T> {
  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    // Write each element in order, short-circuiting on the first error
    self.iter().try_for_each(|item| item.write(writer))
  }
}
/// Pairing of an Algorithm with a ThresholdKeys instance and this specific signing set.
#[derive(Clone, Zeroize)]
pub struct Params<C: Curve, A: Algorithm<C>> {
  // Skipped by the Zeroize derive as A isn't bound by Zeroize
  #[zeroize(skip)]
  algorithm: A,
  keys: ThresholdKeys<C>,
  view: ThresholdView<C>,
}

// Manual Drop so the derived Zeroize runs when Params goes out of scope
impl<C: Curve, A: Algorithm<C>> Drop for Params<C, A> {
  fn drop(&mut self) {
    self.zeroize()
  }
}
impl<C: Curve, A: Algorithm<C>> ZeroizeOnDrop for Params<C, A> {}
impl<C: Curve, A: Algorithm<C>> Params<C, A> {
pub fn new(
algorithm: A,
keys: FrostKeys<C>,
keys: ThresholdKeys<C>,
included: &[u16],
) -> Result<Params<C, A>, FrostError> {
let params = keys.params();
@ -44,16 +68,16 @@ impl<C: Curve, A: Algorithm<C>> Params<C, A> {
included.sort_unstable();
// Included < threshold
if included.len() < usize::from(params.t) {
if included.len() < usize::from(params.t()) {
Err(FrostError::InvalidSigningSet("not enough signers"))?;
}
// Invalid index
if included[0] == 0 {
Err(FrostError::InvalidParticipantIndex(included[0], params.n))?;
Err(FrostError::InvalidParticipantIndex(included[0], params.n()))?;
}
// OOB index
if included[included.len() - 1] > params.n {
Err(FrostError::InvalidParticipantIndex(included[included.len() - 1], params.n))?;
if included[included.len() - 1] > params.n() {
Err(FrostError::InvalidParticipantIndex(included[included.len() - 1], params.n()))?;
}
// Same signer included multiple times
for i in 0 .. (included.len() - 1) {
@ -62,7 +86,7 @@ impl<C: Curve, A: Algorithm<C>> Params<C, A> {
}
}
// Not included
if !included.contains(&params.i) {
if !included.contains(&params.i()) {
Err(FrostError::InvalidSigningSet("signing despite not being included"))?;
}
@ -70,338 +94,43 @@ impl<C: Curve, A: Algorithm<C>> Params<C, A> {
Ok(Params { algorithm, view: keys.view(&included).unwrap(), keys })
}
pub fn multisig_params(&self) -> FrostParams {
pub fn multisig_params(&self) -> ThresholdParams {
self.keys.params()
}
pub fn view(&self) -> FrostView<C> {
pub fn view(&self) -> ThresholdView<C> {
self.view.clone()
}
}
fn nonce_transcript<T: Transcript>() -> T {
T::new(b"FROST_nonce_dleq")
/// Preprocess for an instance of the FROST signing protocol.
#[derive(Clone, PartialEq, Eq)]
pub struct Preprocess<C: Curve, A: Addendum> {
  // Nonce commitments for this signing session
  pub(crate) commitments: Commitments<C>,
  /// The algorithm-specific addendum accompanying the nonce commitments.
  pub addendum: A,
}
#[derive(Zeroize)]
pub(crate) struct PreprocessPackage<C: Curve> {
pub(crate) nonces: Vec<[C::F; 2]>,
#[zeroize(skip)]
pub(crate) commitments: Vec<Vec<[C::G; 2]>>,
pub(crate) addendum: Vec<u8>,
}
impl<C: Curve> Drop for PreprocessPackage<C> {
fn drop(&mut self) {
self.zeroize()
impl<C: Curve, A: Addendum> Writable for Preprocess<C, A> {
fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
self.commitments.write(writer)?;
self.addendum.write(writer)
}
}
impl<C: Curve> ZeroizeOnDrop for PreprocessPackage<C> {}
// Generate the preprocess package: sample a (d, e) nonce pair per planned nonce, commit to it
// under each of that nonce's generators, and append the algorithm's addendum. Returns both the
// package kept locally and its serialization to broadcast to the other signers.
fn preprocess<R: RngCore + CryptoRng, C: Curve, A: Algorithm<C>>(
  rng: &mut R,
  params: &mut Params<C, A>,
) -> (PreprocessPackage<C>, Vec<u8>) {
  let mut serialized = Vec::with_capacity(2 * C::G_len());
  let (nonces, commitments) = params
    .algorithm
    .nonces()
    .iter()
    .map(|generators| {
      let nonces = [
        C::random_nonce(params.view().secret_share(), &mut *rng),
        C::random_nonce(params.view().secret_share(), &mut *rng),
      ];

      // Commit to both scalars under this generator, appending the commitments to the
      // serialized buffer as we go
      let commit = |generator: C::G, buf: &mut Vec<u8>| {
        let commitments = [generator * nonces[0], generator * nonces[1]];
        buf.extend(commitments[0].to_bytes().as_ref());
        buf.extend(commitments[1].to_bytes().as_ref());
        commitments
      };

      let mut commitments = Vec::with_capacity(generators.len());
      for generator in generators.iter() {
        commitments.push(commit(*generator, &mut serialized));
      }

      // Provide a DLEq proof to verify these commitments are for the same nonce
      if generators.len() >= 2 {
        // Uses an independent transcript as each signer must do this now, yet we validate them
        // sequentially by the global order. Avoids needing to clone and fork the transcript around
        let mut transcript = nonce_transcript::<A::Transcript>();

        // This could be further optimized with a multi-nonce proof.
        // See https://github.com/serai-dex/serai/issues/38
        // nonce here is a copy taken out of the array; the originals are returned in the
        // package, so only this temporary is being wiped
        for mut nonce in nonces {
          DLEqProof::prove(&mut *rng, &mut transcript, generators, nonce)
            .serialize(&mut serialized)
            .unwrap();
          nonce.zeroize();
        }
      }

      (nonces, commitments)
    })
    .unzip();

  // The addendum is both kept locally and appended to the broadcasted bytes
  let addendum = params.algorithm.preprocess_addendum(rng, &params.view);
  serialized.extend(&addendum);

  (PreprocessPackage { nonces, commitments, addendum }, serialized)
}
// Read a participant's (D, E) commitment pair, blaming participant l on a malformed point
#[allow(non_snake_case)]
fn read_D_E<Re: Read, C: Curve>(cursor: &mut Re, l: u16) -> Result<[C::G; 2], FrostError> {
  let mut read_point = || C::read_G(cursor).map_err(|_| FrostError::InvalidCommitment(l));
  Ok([read_point()?, read_point()?])
}
// Per-session signing state carried between the share and completion rounds
#[allow(non_snake_case)]
struct Package<C: Curve> {
  // Each participant's commitments ([D, E] per generator, per nonce) paired with a scalar —
  // presumably their binding factor; confirm against the rho derivation in sign_with_share
  B: HashMap<u16, (Vec<Vec<[C::G; 2]>>, C::F)>,
  // Aggregated nonces per nonce/generator — TODO confirm against where Rs is populated
  Rs: Vec<Vec<C::G>>,
  // Our signature share
  share: C::F,
}
// Has every signer perform the role of the signature aggregator
// Step 1 was already deprecated by performing nonce generation as needed
// Step 2 is simply the broadcast round from step 1
fn sign_with_share<Re: Read, C: Curve, A: Algorithm<C>>(
params: &mut Params<C, A>,
our_preprocess: PreprocessPackage<C>,
mut commitments: HashMap<u16, Re>,
msg: &[u8],
) -> Result<(Package<C>, Vec<u8>), FrostError> {
let multisig_params = params.multisig_params();
validate_map(&commitments, &params.view.included, multisig_params.i)?;
{
// Domain separate FROST
params.algorithm.transcript().domain_separate(b"FROST");
}
let nonces = params.algorithm.nonces();
#[allow(non_snake_case)]
let mut B = HashMap::<u16, _>::with_capacity(params.view.included.len());
{
// Parse the commitments
for l in &params.view.included {
{
params
.algorithm
.transcript()
.append_message(b"participant", C::F::from(u64::from(*l)).to_repr().as_ref());
}
// While this doesn't note which nonce/basepoint this is for, those are expected to be
// static. Beyond that, they're committed to in the DLEq proof transcripts, ensuring
// consistency. While this is suboptimal, it maintains IETF compliance, and Algorithm is
// documented accordingly
let transcript = |t: &mut A::Transcript, commitments: [C::G; 2]| {
if commitments[0].ct_eq(&C::G::identity()).into() ||
commitments[1].ct_eq(&C::G::identity()).into()
{
Err(FrostError::InvalidCommitment(*l))?;
}
t.append_message(b"commitment_D", commitments[0].to_bytes().as_ref());
t.append_message(b"commitment_E", commitments[1].to_bytes().as_ref());
Ok(())
};
if *l == params.keys.params().i {
for nonce_commitments in &our_preprocess.commitments {
for commitments in nonce_commitments {
transcript(params.algorithm.transcript(), *commitments).unwrap();
}
}
B.insert(*l, (our_preprocess.commitments.clone(), C::F::zero()));
params.algorithm.process_addendum(
&params.view,
*l,
&mut Cursor::new(our_preprocess.addendum.clone()),
)?;
} else {
let mut cursor = commitments.remove(l).unwrap();
let mut commitments = Vec::with_capacity(nonces.len());
for (n, nonce_generators) in nonces.clone().iter_mut().enumerate() {
commitments.push(Vec::with_capacity(nonce_generators.len()));
for _ in 0 .. nonce_generators.len() {
commitments[n].push(read_D_E::<_, C>(&mut cursor, *l)?);
transcript(params.algorithm.transcript(), commitments[n][commitments[n].len() - 1])?;
}
if nonce_generators.len() >= 2 {
let mut transcript = nonce_transcript::<A::Transcript>();
for de in 0 .. 2 {
DLEqProof::deserialize(&mut cursor)
.map_err(|_| FrostError::InvalidCommitment(*l))?
.verify(
&mut transcript,
nonce_generators,
&commitments[n].iter().map(|commitments| commitments[de]).collect::<Vec<_>>(),
)
.map_err(|_| FrostError::InvalidCommitment(*l))?;
}
}
}
B.insert(*l, (commitments, C::F::zero()));
params.algorithm.process_addendum(&params.view, *l, &mut cursor)?;
}
}
// Re-format into the FROST-expected rho transcript
let mut rho_transcript = A::Transcript::new(b"FROST_rho");
rho_transcript.append_message(b"message", &C::hash_msg(msg));
// This won't just be the commitments, yet the full existing transcript if used in an extended
// protocol
rho_transcript.append_message(
b"commitments",
&C::hash_commitments(params.algorithm.transcript().challenge(b"commitments").as_ref()),
);
// Include the offset, if one exists
// While this isn't part of the FROST-expected rho transcript, the offset being here coincides
// with another specification (despite the transcript format being distinct)
if let Some(offset) = params.keys.offset {
// Transcript as a point
// Under a coordinated model, the coordinater can be the only party to know the discrete log
// of the offset. This removes the ability for any signer to provide the discrete log,
// proving a key is related to another, slightly increasing security
// While further code edits would still be required for such a model (having the offset
// communicated as a point along with only a single party applying the offset), this means it
// wouldn't require a transcript change as well
rho_transcript.append_message(b"offset", (C::generator() * offset).to_bytes().as_ref());
}
// Generate the per-signer binding factors
for (l, commitments) in B.iter_mut() {
let mut rho_transcript = rho_transcript.clone();
rho_transcript.append_message(b"participant", C::F::from(u64::from(*l)).to_repr().as_ref());
commitments.1 = C::hash_binding_factor(rho_transcript.challenge(b"rho").as_ref());
}
// Merge the rho transcript back into the global one to ensure its advanced while committing to
// everything
params
.algorithm
.transcript()
.append_message(b"rho_transcript", rho_transcript.challenge(b"merge").as_ref());
}
#[allow(non_snake_case)]
let mut Rs = Vec::with_capacity(nonces.len());
for n in 0 .. nonces.len() {
Rs.push(vec![C::G::identity(); nonces[n].len()]);
for g in 0 .. nonces[n].len() {
#[allow(non_snake_case)]
let mut D = C::G::identity();
let mut statements = Vec::with_capacity(B.len());
#[allow(non_snake_case)]
for (B, binding) in B.values() {
D += B[n][g][0];
statements.push((*binding, B[n][g][1]));
}
Rs[n][g] = D + multiexp_vartime(&statements);
}
}
let mut nonces = our_preprocess
.nonces
.iter()
.map(|nonces| nonces[0] + (nonces[1] * B[&params.keys.params().i()].1))
.collect::<Vec<_>>();
let share = params.algorithm.sign_share(&params.view, &Rs, &nonces, msg);
nonces.zeroize();
Ok((Package { B, Rs, share }, share.to_repr().as_ref().to_vec()))
}
fn complete<Re: Read, C: Curve, A: Algorithm<C>>(
sign_params: &Params<C, A>,
sign: Package<C>,
mut shares: HashMap<u16, Re>,
) -> Result<A::Signature, FrostError> {
let params = sign_params.multisig_params();
validate_map(&shares, &sign_params.view.included, params.i)?;
let mut responses = HashMap::new();
let mut sum = C::F::zero();
for l in &sign_params.view.included {
let part = if *l == params.i {
sign.share
} else {
C::read_F(shares.get_mut(l).unwrap()).map_err(|_| FrostError::InvalidShare(*l))?
};
sum += part;
responses.insert(*l, part);
}
// Perform signature validation instead of individual share validation
// For the success route, which should be much more frequent, this should be faster
// It also acts as an integrity check of this library's signing function
let res = sign_params.algorithm.verify(sign_params.view.group_key, &sign.Rs, sum);
if let Some(res) = res {
return Ok(res);
}
// Find out who misbehaved. It may be beneficial to randomly sort this to have detection be
// within n / 2 on average, and not gameable to n, though that should be minor
for l in &sign_params.view.included {
if !sign_params.algorithm.verify_share(
sign_params.view.verification_share(*l),
&sign.B[l]
.0
.iter()
.map(|nonces| {
nonces.iter().map(|commitments| commitments[0] + (commitments[1] * sign.B[l].1)).collect()
})
.collect::<Vec<_>>(),
responses[l],
) {
Err(FrostError::InvalidShare(*l))?;
}
}
// If everyone has a valid share and there were enough participants, this should've worked
Err(FrostError::InternalError("everyone had a valid share yet the signature was still invalid"))
}
/// Trait for the initial state machine of a two-round signing protocol.
pub trait PreprocessMachine {
/// Preprocess message for this machine.
type Preprocess: Clone + PartialEq + Writable;
/// Signature produced by this machine.
type Signature: Clone + PartialEq + fmt::Debug;
type SignMachine: SignMachine<Self::Signature>;
/// SignMachine this PreprocessMachine turns into.
type SignMachine: SignMachine<Self::Signature, Preprocess = Self::Preprocess>;
/// Perform the preprocessing round required in order to sign.
/// Returns a byte vector to be broadcast to all participants, over an authenticated channel.
fn preprocess<R: RngCore + CryptoRng>(self, rng: &mut R) -> (Self::SignMachine, Vec<u8>);
}
/// Trait for the second machine of a two-round signing protocol.
pub trait SignMachine<S> {
type SignatureMachine: SignatureMachine<S>;
/// Sign a message.
/// Takes in the participants' preprocesses. Returns a byte vector representing a signature share
/// to be broadcast to all participants, over an authenticated channel.
fn sign<Re: Read>(
self,
commitments: HashMap<u16, Re>,
msg: &[u8],
) -> Result<(Self::SignatureMachine, Vec<u8>), FrostError>;
}
/// Trait for the final machine of a two-round signing protocol.
pub trait SignatureMachine<S> {
/// Complete signing.
/// Takes in everyone elses' shares. Returns the signature.
fn complete<Re: Read>(self, shares: HashMap<u16, Re>) -> Result<S, FrostError>;
/// Returns a preprocess message to be broadcast to all participants, over an authenticated
/// channel.
fn preprocess<R: RngCore + CryptoRng>(self, rng: &mut R)
-> (Self::SignMachine, Self::Preprocess);
}
/// State machine which manages signing for an arbitrary signature algorithm.
@ -409,23 +138,11 @@ pub struct AlgorithmMachine<C: Curve, A: Algorithm<C>> {
params: Params<C, A>,
}
/// Next step of the state machine for the signing process.
pub struct AlgorithmSignMachine<C: Curve, A: Algorithm<C>> {
params: Params<C, A>,
preprocess: PreprocessPackage<C>,
}
/// Final step of the state machine for the signing process.
pub struct AlgorithmSignatureMachine<C: Curve, A: Algorithm<C>> {
params: Params<C, A>,
sign: Package<C>,
}
impl<C: Curve, A: Algorithm<C>> AlgorithmMachine<C, A> {
/// Creates a new machine to generate a signature with the specified keys.
pub fn new(
algorithm: A,
keys: FrostKeys<C>,
keys: ThresholdKeys<C>,
included: &[u16],
) -> Result<AlgorithmMachine<C, A>, FrostError> {
Ok(AlgorithmMachine { params: Params::new(algorithm, keys, included)? })
@ -434,39 +151,274 @@ impl<C: Curve, A: Algorithm<C>> AlgorithmMachine<C, A> {
#[cfg(any(test, feature = "tests"))]
pub(crate) fn unsafe_override_preprocess(
self,
preprocess: PreprocessPackage<C>,
nonces: Vec<Nonce<C>>,
preprocess: Preprocess<C, A::Addendum>,
) -> AlgorithmSignMachine<C, A> {
AlgorithmSignMachine { params: self.params, preprocess }
AlgorithmSignMachine { params: self.params, nonces, preprocess }
}
}
impl<C: Curve, A: Algorithm<C>> PreprocessMachine for AlgorithmMachine<C, A> {
type Preprocess = Preprocess<C, A::Addendum>;
type Signature = A::Signature;
type SignMachine = AlgorithmSignMachine<C, A>;
fn preprocess<R: RngCore + CryptoRng>(self, rng: &mut R) -> (Self::SignMachine, Vec<u8>) {
fn preprocess<R: RngCore + CryptoRng>(
self,
rng: &mut R,
) -> (Self::SignMachine, Preprocess<C, A::Addendum>) {
let mut params = self.params;
let (preprocess, serialized) = preprocess::<R, C, A>(rng, &mut params);
(AlgorithmSignMachine { params, preprocess }, serialized)
let (nonces, commitments) = Commitments::new::<_, A::Transcript>(
&mut *rng,
params.view().secret_share(),
&params.algorithm.nonces(),
);
let addendum = params.algorithm.preprocess_addendum(rng, &params.view);
let preprocess = Preprocess { commitments, addendum };
(AlgorithmSignMachine { params, nonces, preprocess: preprocess.clone() }, preprocess)
}
}
/// Share of a signature produced via FROST.
#[derive(Clone, PartialEq, Eq)]
pub struct SignatureShare<C: Curve>(C::F);
impl<C: Curve> Writable for SignatureShare<C> {
fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(self.0.to_repr().as_ref())
}
}
/// Trait for the second machine of a two-round signing protocol.
pub trait SignMachine<S> {
/// Preprocess message for this machine.
type Preprocess: Clone + PartialEq + Writable;
/// SignatureShare message for this machine.
type SignatureShare: Clone + PartialEq + Writable;
/// SignatureMachine this SignMachine turns into.
type SignatureMachine: SignatureMachine<S, SignatureShare = Self::SignatureShare>;
/// Read a Preprocess message.
fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess>;
/// Sign a message.
/// Takes in the participants' preprocess messages. Returns the signature share to be broadcast
/// to all participants, over an authenticated channel.
fn sign(
self,
commitments: HashMap<u16, Self::Preprocess>,
msg: &[u8],
) -> Result<(Self::SignatureMachine, Self::SignatureShare), FrostError>;
}
/// Next step of the state machine for the signing process.
pub struct AlgorithmSignMachine<C: Curve, A: Algorithm<C>> {
params: Params<C, A>,
pub(crate) nonces: Vec<Nonce<C>>,
pub(crate) preprocess: Preprocess<C, A::Addendum>,
}
impl<C: Curve, A: Algorithm<C>> Zeroize for AlgorithmSignMachine<C, A> {
fn zeroize(&mut self) {
self.nonces.zeroize()
}
}
impl<C: Curve, A: Algorithm<C>> Drop for AlgorithmSignMachine<C, A> {
fn drop(&mut self) {
self.zeroize()
}
}
impl<C: Curve, A: Algorithm<C>> ZeroizeOnDrop for AlgorithmSignMachine<C, A> {}
impl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachine<C, A> {
type Preprocess = Preprocess<C, A::Addendum>;
type SignatureShare = SignatureShare<C>;
type SignatureMachine = AlgorithmSignatureMachine<C, A>;
fn sign<Re: Read>(
self,
commitments: HashMap<u16, Re>,
msg: &[u8],
) -> Result<(Self::SignatureMachine, Vec<u8>), FrostError> {
let mut params = self.params;
let (sign, serialized) = sign_with_share(&mut params, self.preprocess, commitments, msg)?;
Ok((AlgorithmSignatureMachine { params, sign }, serialized))
fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
Ok(Preprocess {
commitments: Commitments::read::<_, A::Transcript>(reader, &self.params.algorithm.nonces())?,
addendum: self.params.algorithm.read_addendum(reader)?,
})
}
fn sign(
mut self,
mut preprocesses: HashMap<u16, Preprocess<C, A::Addendum>>,
msg: &[u8],
) -> Result<(Self::SignatureMachine, SignatureShare<C>), FrostError> {
let multisig_params = self.params.multisig_params();
validate_map(&preprocesses, &self.params.view.included(), multisig_params.i())?;
{
// Domain separate FROST
self.params.algorithm.transcript().domain_separate(b"FROST");
}
let nonces = self.params.algorithm.nonces();
#[allow(non_snake_case)]
let mut B = BindingFactor(HashMap::<u16, _>::with_capacity(self.params.view.included().len()));
{
// Parse the preprocesses
for l in &self.params.view.included() {
{
self
.params
.algorithm
.transcript()
.append_message(b"participant", C::F::from(u64::from(*l)).to_repr().as_ref());
}
if *l == self.params.keys.params().i() {
let commitments = self.preprocess.commitments.clone();
commitments.transcript(self.params.algorithm.transcript());
let addendum = self.preprocess.addendum.clone();
{
let mut buf = vec![];
addendum.write(&mut buf).unwrap();
self.params.algorithm.transcript().append_message(b"addendum", &buf);
}
B.insert(*l, commitments);
self.params.algorithm.process_addendum(&self.params.view, *l, addendum)?;
} else {
let preprocess = preprocesses.remove(l).unwrap();
preprocess.commitments.transcript(self.params.algorithm.transcript());
{
let mut buf = vec![];
preprocess.addendum.write(&mut buf).unwrap();
self.params.algorithm.transcript().append_message(b"addendum", &buf);
}
B.insert(*l, preprocess.commitments);
self.params.algorithm.process_addendum(&self.params.view, *l, preprocess.addendum)?;
}
}
// Re-format into the FROST-expected rho transcript
let mut rho_transcript = A::Transcript::new(b"FROST_rho");
rho_transcript.append_message(b"message", &C::hash_msg(msg));
rho_transcript.append_message(
b"preprocesses",
&C::hash_commitments(
self.params.algorithm.transcript().challenge(b"preprocesses").as_ref(),
),
);
// Include the offset, if one exists
// While this isn't part of the FROST-expected rho transcript, the offset being here
// coincides with another specification (despite the transcript format still being distinct)
if let Some(offset) = self.params.keys.current_offset() {
// Transcript as a point
// Under a coordinated model, the coordinater can be the only party to know the discrete
// log of the offset. This removes the ability for any signer to provide the discrete log,
// proving a key is related to another, slightly increasing security
// While further code edits would still be required for such a model (having the offset
// communicated as a point along with only a single party applying the offset), this means
// it wouldn't require a transcript change as well
rho_transcript.append_message(b"offset", (C::generator() * offset).to_bytes().as_ref());
}
// Generate the per-signer binding factors
B.calculate_binding_factors(&mut rho_transcript);
// Merge the rho transcript back into the global one to ensure its advanced, while
// simultaneously committing to everything
self
.params
.algorithm
.transcript()
.append_message(b"rho_transcript", rho_transcript.challenge(b"merge").as_ref());
}
#[allow(non_snake_case)]
let Rs = B.nonces(&nonces);
let our_binding_factors = B.binding_factors(multisig_params.i());
let mut nonces = self
.nonces
.iter()
.enumerate()
.map(|(n, nonces)| nonces.0[0] + (nonces.0[1] * our_binding_factors[n]))
.collect::<Vec<_>>();
self.nonces.zeroize();
let share = self.params.algorithm.sign_share(&self.params.view, &Rs, &nonces, msg);
nonces.zeroize();
Ok((
AlgorithmSignatureMachine { params: self.params.clone(), B, Rs, share },
SignatureShare(share),
))
}
}
/// Trait for the final machine of a two-round signing protocol.
pub trait SignatureMachine<S> {
/// SignatureShare message for this machine.
type SignatureShare: Clone + PartialEq + Writable;
/// Read a Signature Share message.
fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare>;
/// Complete signing.
/// Takes in everyone elses' shares. Returns the signature.
fn complete(self, shares: HashMap<u16, Self::SignatureShare>) -> Result<S, FrostError>;
}
/// Final step of the state machine for the signing process.
#[allow(non_snake_case)]
pub struct AlgorithmSignatureMachine<C: Curve, A: Algorithm<C>> {
params: Params<C, A>,
B: BindingFactor<C>,
Rs: Vec<Vec<C::G>>,
share: C::F,
}
impl<C: Curve, A: Algorithm<C>> SignatureMachine<A::Signature> for AlgorithmSignatureMachine<C, A> {
fn complete<Re: Read>(self, shares: HashMap<u16, Re>) -> Result<A::Signature, FrostError> {
complete(&self.params, self.sign, shares)
type SignatureShare = SignatureShare<C>;
fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<SignatureShare<C>> {
Ok(SignatureShare(C::read_F(reader)?))
}
fn complete(
self,
mut shares: HashMap<u16, SignatureShare<C>>,
) -> Result<A::Signature, FrostError> {
let params = self.params.multisig_params();
validate_map(&shares, &self.params.view.included(), params.i())?;
let mut responses = HashMap::new();
responses.insert(params.i(), self.share);
let mut sum = self.share;
for (l, share) in shares.drain() {
responses.insert(l, share.0);
sum += share.0;
}
// Perform signature validation instead of individual share validation
// For the success route, which should be much more frequent, this should be faster
// It also acts as an integrity check of this library's signing function
if let Some(sig) = self.params.algorithm.verify(self.params.view.group_key(), &self.Rs, sum) {
return Ok(sig);
}
// Find out who misbehaved. It may be beneficial to randomly sort this to have detection be
// within n / 2 on average, and not gameable to n, though that should be minor
// TODO
for l in &self.params.view.included() {
if !self.params.algorithm.verify_share(
self.params.view.verification_share(*l),
&self.B.bound(*l),
responses[l],
) {
Err(FrostError::InvalidShare(*l))?;
}
}
// If everyone has a valid share and there were enough participants, this should've worked
Err(FrostError::InternalError("everyone had a valid share yet the signature was still invalid"))
}
}

View file

@ -1,23 +1,8 @@
use std::io::Cursor;
use rand_core::{RngCore, CryptoRng};
use group::Group;
use crate::{Curve, FrostCore, tests::core_gen};
// Test generation of FROST keys
fn key_generation<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
// This alone verifies the verification shares and group key are agreed upon as expected
core_gen::<_, C>(rng);
}
// Test serialization of generated keys
fn keys_serialization<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
for (_, keys) in core_gen::<_, C>(rng) {
assert_eq!(&FrostCore::<C>::deserialize(&mut Cursor::new(keys.serialize())).unwrap(), &keys);
}
}
use crate::Curve;
// Test successful multiexp, with enough pairs to trigger its variety of algorithms
// Multiexp has its own tests, yet only against k256 and Ed25519 (which should be sufficient
@ -27,7 +12,7 @@ pub fn test_multiexp<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
let mut sum = C::G::identity();
for _ in 0 .. 10 {
for _ in 0 .. 100 {
pairs.push((C::random_F(&mut *rng), C::generator() * C::random_F(&mut *rng)));
pairs.push((C::random_nonzero_F(&mut *rng), C::generator() * C::random_nonzero_F(&mut *rng)));
sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;
}
assert_eq!(multiexp::multiexp(&pairs), sum);
@ -39,8 +24,4 @@ pub fn test_curve<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
// TODO: Test the Curve functions themselves
test_multiexp::<_, C>(rng);
// Test FROST key generation and serialization of FrostCore works as expected
key_generation::<_, C>(rng);
keys_serialization::<_, C>(rng);
}

View file

@ -5,7 +5,7 @@ use crate::{
tests::vectors::{Vectors, test_with_vectors},
};
#[cfg(any(test, feature = "ristretto"))]
#[cfg(feature = "ristretto")]
#[test]
fn ristretto_vectors() {
test_with_vectors::<_, curve::Ristretto, curve::IetfRistrettoHram>(

View file

@ -1,10 +1,11 @@
use std::io::Cursor;
use rand_core::OsRng;
use ciphersuite::Ciphersuite;
use schnorr::SchnorrSignature;
use crate::{
curve::{Curve, Ed448, Ietf8032Ed448Hram, IetfEd448Hram},
schnorr::{SchnorrSignature, verify},
curve::{Ed448, Ietf8032Ed448Hram, IetfEd448Hram},
tests::vectors::{Vectors, test_with_vectors},
};
@ -13,38 +14,35 @@ fn ed448_8032_vector() {
let context = hex::decode("666f6f").unwrap();
#[allow(non_snake_case)]
let A = Ed448::read_G(&mut Cursor::new(
hex::decode(
let A = Ed448::read_G::<&[u8]>(
&mut hex::decode(
"43ba28f430cdff456ae531545f7ecd0ac834a55d9358c0372bfa0c6c".to_owned() +
"6798c0866aea01eb00742802b8438ea4cb82169c235160627b4c3a94" +
"80",
)
.unwrap(),
))
.unwrap()
.as_ref(),
)
.unwrap();
let msg = hex::decode("03").unwrap();
let mut sig = Cursor::new(
hex::decode(
"d4f8f6131770dd46f40867d6fd5d5055de43541f8c5e35abbcd001b3".to_owned() +
"2a89f7d2151f7647f11d8ca2ae279fb842d607217fce6e042f6815ea" +
"00" +
"0c85741de5c8da1144a6a1aba7f96de42505d7a7298524fda538fccb" +
"bb754f578c1cad10d54d0d5428407e85dcbc98a49155c13764e66c3c" +
"00",
)
.unwrap(),
);
let sig = hex::decode(
"d4f8f6131770dd46f40867d6fd5d5055de43541f8c5e35abbcd001b3".to_owned() +
"2a89f7d2151f7647f11d8ca2ae279fb842d607217fce6e042f6815ea" +
"00" +
"0c85741de5c8da1144a6a1aba7f96de42505d7a7298524fda538fccb" +
"bb754f578c1cad10d54d0d5428407e85dcbc98a49155c13764e66c3c" +
"00",
)
.unwrap();
#[allow(non_snake_case)]
let R = Ed448::read_G(&mut sig).unwrap();
let s = Ed448::read_F(&mut sig).unwrap();
let R = Ed448::read_G::<&[u8]>(&mut sig.as_ref()).unwrap();
let s = Ed448::read_F::<&[u8]>(&mut &sig[57 ..]).unwrap();
assert!(verify(
A,
Ietf8032Ed448Hram::hram(&context, &R, &A, &msg),
&SchnorrSignature::<Ed448> { R, s }
));
assert!(
SchnorrSignature::<Ed448> { R, s }.verify(A, Ietf8032Ed448Hram::hram(&context, &R, &A, &msg))
);
}
#[test]

View file

@ -1,6 +1,6 @@
#[cfg(any(test, feature = "dalek"))]
#[cfg(any(feature = "ristretto", feature = "ed25519"))]
mod dalek;
#[cfg(feature = "kp256")]
#[cfg(any(feature = "secp256k1", feature = "p256"))]
mod kp256;
#[cfg(feature = "ed448")]
mod ed448;

View file

@ -1,22 +1,17 @@
use std::{io::Cursor, collections::HashMap};
use std::collections::HashMap;
use rand_core::{RngCore, CryptoRng};
use group::ff::Field;
pub use dkg::tests::{key_gen, recover_key};
use crate::{
Curve, FrostParams, FrostCore, FrostKeys, lagrange,
key_gen::KeyGenMachine,
Curve, ThresholdKeys,
algorithm::Algorithm,
sign::{PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine},
sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine},
};
/// Curve tests.
pub mod curve;
/// Schnorr signature tests.
pub mod schnorr;
/// Promotion tests.
pub mod promote;
/// Vectorized test suite to ensure consistency.
pub mod vectors;
@ -39,85 +34,11 @@ pub fn clone_without<K: Clone + std::cmp::Eq + std::hash::Hash, V: Clone>(
res
}
/// Generate FROST keys (as FrostCore objects) for tests.
pub fn core_gen<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) -> HashMap<u16, FrostCore<C>> {
let mut machines = HashMap::new();
let mut commitments = HashMap::new();
for i in 1 ..= PARTICIPANTS {
let machine = KeyGenMachine::<C>::new(
FrostParams::new(THRESHOLD, PARTICIPANTS, i).unwrap(),
"FROST Test key_gen".to_string(),
);
let (machine, these_commitments) = machine.generate_coefficients(rng);
machines.insert(i, machine);
commitments.insert(i, Cursor::new(these_commitments));
}
let mut secret_shares = HashMap::new();
let mut machines = machines
.drain()
.map(|(l, machine)| {
let (machine, shares) =
machine.generate_secret_shares(rng, clone_without(&commitments, &l)).unwrap();
secret_shares.insert(l, shares);
(l, machine)
})
.collect::<HashMap<_, _>>();
let mut verification_shares = None;
let mut group_key = None;
machines
.drain()
.map(|(i, machine)| {
let mut our_secret_shares = HashMap::new();
for (l, shares) in &secret_shares {
if i == *l {
continue;
}
our_secret_shares.insert(*l, Cursor::new(shares[&i].clone()));
}
let these_keys = machine.complete(rng, our_secret_shares).unwrap();
// Verify the verification_shares are agreed upon
if verification_shares.is_none() {
verification_shares = Some(these_keys.verification_shares());
}
assert_eq!(verification_shares.as_ref().unwrap(), &these_keys.verification_shares());
// Verify the group keys are agreed upon
if group_key.is_none() {
group_key = Some(these_keys.group_key());
}
assert_eq!(group_key.unwrap(), these_keys.group_key());
(i, these_keys)
})
.collect::<HashMap<_, _>>()
}
/// Generate FROST keys for tests.
pub fn key_gen<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) -> HashMap<u16, FrostKeys<C>> {
core_gen(rng).drain().map(|(i, core)| (i, FrostKeys::new(core))).collect()
}
/// Recover the secret from a collection of keys.
pub fn recover<C: Curve>(keys: &HashMap<u16, FrostKeys<C>>) -> C::F {
let first = keys.values().next().expect("no keys provided");
assert!(keys.len() >= first.params().t().into(), "not enough keys provided");
let included = keys.keys().cloned().collect::<Vec<_>>();
let group_private = keys.iter().fold(C::F::zero(), |accum, (i, keys)| {
accum + (keys.secret_share() * lagrange::<C::F>(*i, &included))
});
assert_eq!(C::generator() * group_private, first.group_key(), "failed to recover keys");
group_private
}
/// Spawn algorithm machines for a random selection of signers, each executing the given algorithm.
pub fn algorithm_machines<R: RngCore, C: Curve, A: Algorithm<C>>(
rng: &mut R,
algorithm: A,
keys: &HashMap<u16, FrostKeys<C>>,
keys: &HashMap<u16, ThresholdKeys<C>>,
) -> HashMap<u16, AlgorithmMachine<C, A>> {
let mut included = vec![];
while included.len() < usize::from(keys[&1].params().t()) {
@ -154,7 +75,11 @@ pub fn sign<R: RngCore + CryptoRng, M: PreprocessMachine>(
.drain()
.map(|(i, machine)| {
let (machine, preprocess) = machine.preprocess(rng);
commitments.insert(i, Cursor::new(preprocess));
commitments.insert(i, {
let mut buf = vec![];
preprocess.write(&mut buf).unwrap();
machine.read_preprocess::<&[u8]>(&mut buf.as_ref()).unwrap()
});
(i, machine)
})
.collect::<HashMap<_, _>>();
@ -164,7 +89,11 @@ pub fn sign<R: RngCore + CryptoRng, M: PreprocessMachine>(
.drain()
.map(|(i, machine)| {
let (machine, share) = machine.sign(clone_without(&commitments, &i), msg).unwrap();
shares.insert(i, Cursor::new(share));
shares.insert(i, {
let mut buf = vec![];
share.write(&mut buf).unwrap();
machine.read_share::<&[u8]>(&mut buf.as_ref()).unwrap()
});
(i, machine)
})
.collect::<HashMap<_, _>>();

View file

@ -1,121 +0,0 @@
use std::{marker::PhantomData, collections::HashMap};
use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize;
use group::Group;
use crate::{
Curve, // FrostKeys,
promote::{GeneratorPromotion /* CurvePromote */},
tests::{clone_without, key_gen, schnorr::sign_core},
};
/*
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
struct AltFunctions<C: Curve> {
_curve: PhantomData<C>,
}
impl<C: Curve> Curve for AltFunctions<C> {
type F = C::F;
type G = C::G;
const ID: &'static [u8] = b"alt_functions";
fn generator() -> Self::G {
C::generator()
}
fn hash_msg(msg: &[u8]) -> Vec<u8> {
C::hash_msg(&[msg, b"alt"].concat())
}
fn hash_binding_factor(binding: &[u8]) -> Self::F {
C::hash_to_F(b"rho_alt", binding)
}
fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F {
C::hash_to_F(&[dst, b"alt"].concat(), msg)
}
}
// Test promotion of FROST keys to another set of functions for interoperability
fn test_ciphersuite_promotion<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
let keys = key_gen::<_, C>(&mut *rng);
for keys in keys.values() {
let promoted: FrostKeys<AltFunctions<C>> = keys.clone().promote();
// Verify equivalence via their serializations, minus the ID's length and ID itself
assert_eq!(
keys.serialize()[(4 + C::ID.len()) ..],
promoted.serialize()[(4 + AltFunctions::<C>::ID.len()) ..]
);
}
}
*/
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
struct AltGenerator<C: Curve> {
_curve: PhantomData<C>,
}
impl<C: Curve> Curve for AltGenerator<C> {
type F = C::F;
type G = C::G;
const ID: &'static [u8] = b"alt_generator";
fn generator() -> Self::G {
C::G::generator() * C::hash_to_F(b"FROST_tests", b"generator")
}
fn hash_to_vec(dst: &[u8], data: &[u8]) -> Vec<u8> {
C::hash_to_vec(&[b"FROST_tests_alt", dst].concat(), data)
}
fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
C::hash_to_F(&[b"FROST_tests_alt", dst].concat(), data)
}
}
// Test promotion of FROST keys to another generator
fn test_generator_promotion<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
// A seeded RNG can theoretically generate for C1 and C2, verifying promotion that way?
// TODO
let keys = key_gen::<_, C>(&mut *rng);
let mut promotions = HashMap::new();
let mut proofs = HashMap::new();
for (i, keys) in &keys {
let promotion = GeneratorPromotion::<_, AltGenerator<C>>::promote(&mut *rng, keys.clone());
promotions.insert(*i, promotion.0);
proofs.insert(*i, promotion.1);
}
let mut new_keys = HashMap::new();
let mut group_key = None;
let mut verification_shares = None;
for (i, promoting) in promotions.drain() {
let promoted = promoting.complete(&clone_without(&proofs, &i)).unwrap();
assert_eq!(keys[&i].params(), promoted.params());
assert_eq!(keys[&i].secret_share(), promoted.secret_share());
if group_key.is_none() {
group_key = Some(keys[&i].group_key());
verification_shares = Some(keys[&i].verification_shares());
}
assert_eq!(keys[&i].group_key(), group_key.unwrap());
assert_eq!(&keys[&i].verification_shares(), verification_shares.as_ref().unwrap());
new_keys.insert(i, promoted);
}
// Sign with the keys to ensure their integrity
sign_core(rng, &new_keys);
}
pub fn test_promotion<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
// test_ciphersuite_promotion::<_, C>(rng);
test_generator_promotion::<_, C>(rng);
}

View file

@ -1,131 +0,0 @@
use std::{marker::PhantomData, collections::HashMap};
use rand_core::{RngCore, CryptoRng};
use group::{ff::Field, Group, GroupEncoding};
use crate::{
Curve, FrostKeys,
schnorr::{self, SchnorrSignature},
algorithm::{Hram, Schnorr},
tests::{key_gen, algorithm_machines, sign as sign_test},
};
pub(crate) fn core_sign<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
let private_key = C::random_F(&mut *rng);
let nonce = C::random_F(&mut *rng);
let challenge = C::random_F(rng); // Doesn't bother to craft an HRAm
assert!(schnorr::verify::<C>(
C::generator() * private_key,
challenge,
&schnorr::sign(private_key, nonce, challenge)
));
}
// The above sign function verifies signing works
// This verifies invalid signatures don't pass, using zero signatures, which should effectively be
// random
pub(crate) fn core_verify<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
assert!(!schnorr::verify::<C>(
C::generator() * C::random_F(&mut *rng),
C::random_F(rng),
&SchnorrSignature { R: C::G::identity(), s: C::F::zero() }
));
}
// Tests batch verification of Schnorr signatures, including blame assignment when a signature
// in the batch is invalid.
pub(crate) fn core_batch_verify<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
  // Create 5 signatures
  let mut keys = vec![];
  let mut challenges = vec![];
  let mut sigs = vec![];
  for i in 0 .. 5 {
    keys.push(C::random_F(&mut *rng));
    challenges.push(C::random_F(&mut *rng));
    sigs.push(schnorr::sign::<C>(keys[i], C::random_F(&mut *rng), challenges[i]));
  }

  // Batch verify
  // Triplet layout: (signer id (1-indexed), public key, challenge, signature)
  let triplets = (0 .. 5)
    .map(|i| (u16::try_from(i + 1).unwrap(), C::generator() * keys[i], challenges[i], sigs[i]))
    .collect::<Vec<_>>();
  schnorr::batch_verify(rng, &triplets).unwrap();

  // Shift 1 from s from one to another and verify it fails
  // This test will fail if unique factors aren't used per-signature, hence its inclusion
  {
    let mut triplets = triplets.clone();
    triplets[1].3.s += C::F::one();
    triplets[2].3.s -= C::F::one();
    if let Err(blame) = schnorr::batch_verify(rng, &triplets) {
      // Blame is expected to land on the first malleated signature (id 2 == triplets[1])
      assert_eq!(blame, 2);
    } else {
      panic!("batch verification considered a malleated signature valid");
    }
  }

  // Make sure a completely invalid signature fails when included
  for i in 0 .. 5 {
    let mut triplets = triplets.clone();
    // Replace s with a random scalar, invalidating this signature alone
    triplets[i].3.s = C::random_F(&mut *rng);
    if let Err(blame) = schnorr::batch_verify(rng, &triplets) {
      assert_eq!(blame, u16::try_from(i + 1).unwrap());
    } else {
      panic!("batch verification considered an invalid signature valid");
    }
  }
}
// Signs a fixed message with the provided threshold keys under FROST, then verifies the result
// as a plain Schnorr signature against the group key.
pub(crate) fn sign_core<R: RngCore + CryptoRng, C: Curve>(
  rng: &mut R,
  keys: &HashMap<u16, FrostKeys<C>>,
) {
  const MESSAGE: &[u8] = b"Hello, World!";

  let machines = algorithm_machines(rng, Schnorr::<C, TestHram<C>>::new(), keys);
  let sig = sign_test(&mut *rng, machines, MESSAGE);

  // The FROST output should be a valid Schnorr signature for the group key
  let group_key = keys[&1].group_key();
  assert!(schnorr::verify(group_key, TestHram::<C>::hram(&sig.R, &group_key, MESSAGE), &sig));
}
// HRAm (challenge hash) implementation used by these tests; stateless beyond the curve parameter.
#[derive(Clone)]
pub struct TestHram<C: Curve> {
  _curve: PhantomData<C>,
}
impl<C: Curve> Hram<C> for TestHram<C> {
  // Derives the challenge by hashing the nonce commitment R, public key A, and message under a
  // fixed b"challenge" domain separator
  #[allow(non_snake_case)]
  fn hram(R: &C::G, A: &C::G, m: &[u8]) -> C::F {
    C::hash_to_F(b"challenge", &[R.to_bytes().as_ref(), A.to_bytes().as_ref(), m].concat())
  }
}
// Generates a fresh keyset and runs the shared FROST signing test against it.
fn sign<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
  sign_core(rng, &key_gen::<_, C>(&mut *rng));
}
// Tests signing with offset keys, verifying the offset group key matches the expected derivation.
fn sign_with_offset<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
  let mut keys = key_gen::<_, C>(&mut *rng);
  let group_key = keys[&1].group_key();

  // Apply the same offset to every participant's keys
  let offset = C::hash_to_F(b"FROST Test sign_with_offset", b"offset");
  for i in 1 ..= u16::try_from(keys.len()).unwrap() {
    keys.insert(i, keys[&i].offset(offset));
  }
  // The offset group key should be the original group key plus offset * G
  let offset_key = group_key + (C::generator() * offset);
  assert_eq!(keys[&1].group_key(), offset_key);

  sign_core(rng, &keys);
}
// Entry point: runs the plain Schnorr tests, then the FROST-based Schnorr tests, for a curve.
pub fn test_schnorr<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
  // Test Schnorr signatures work as expected
  // This is a bit unnecessary, as they should for any valid curve, yet this establishes sanity
  core_sign::<_, C>(rng);
  core_verify::<_, C>(rng);
  core_batch_verify::<_, C>(rng);

  // Test Schnorr signatures under FROST
  sign::<_, C>(rng);
  sign_with_offset::<_, C>(rng);
}

View file

@ -1,4 +1,4 @@
use std::{io::Cursor, collections::HashMap};
use std::collections::HashMap;
#[cfg(test)]
use std::str::FromStr;
@ -6,14 +6,17 @@ use rand_core::{RngCore, CryptoRng};
use group::{ff::PrimeField, GroupEncoding};
use dkg::tests::{test_ciphersuite as test_dkg};
use crate::{
curve::Curve,
FrostCore, FrostKeys,
ThresholdCore, ThresholdKeys,
algorithm::{Schnorr, Hram},
sign::{PreprocessPackage, SignMachine, SignatureMachine, AlgorithmMachine},
tests::{
clone_without, curve::test_curve, schnorr::test_schnorr, promote::test_promotion, recover,
sign::{
Nonce, GeneratorCommitments, NonceCommitments, Commitments, Writable, Preprocess, SignMachine,
SignatureMachine, AlgorithmMachine,
},
tests::{clone_without, recover_key, curve::test_curve},
};
pub struct Vectors {
@ -73,17 +76,18 @@ impl From<serde_json::Value> for Vectors {
}
}
// Load these vectors into FrostKeys using a custom serialization it'll deserialize
fn vectors_to_multisig_keys<C: Curve>(vectors: &Vectors) -> HashMap<u16, FrostKeys<C>> {
// Load these vectors into ThresholdKeys using a custom serialization it'll deserialize
fn vectors_to_multisig_keys<C: Curve>(vectors: &Vectors) -> HashMap<u16, ThresholdKeys<C>> {
let shares = vectors
.shares
.iter()
.map(|secret| C::read_F(&mut Cursor::new(hex::decode(secret).unwrap())).unwrap())
.map(|secret| C::read_F::<&[u8]>(&mut hex::decode(secret).unwrap().as_ref()).unwrap())
.collect::<Vec<_>>();
let verification_shares = shares.iter().map(|secret| C::generator() * secret).collect::<Vec<_>>();
let mut keys = HashMap::new();
for i in 1 ..= u16::try_from(shares.len()).unwrap() {
// Manually re-implement the serialization for ThresholdCore to import this data
let mut serialized = vec![];
serialized.extend(u32::try_from(C::ID.len()).unwrap().to_be_bytes());
serialized.extend(C::ID);
@ -95,13 +99,13 @@ fn vectors_to_multisig_keys<C: Curve>(vectors: &Vectors) -> HashMap<u16, FrostKe
serialized.extend(share.to_bytes().as_ref());
}
let these_keys = FrostCore::<C>::deserialize(&mut Cursor::new(serialized)).unwrap();
let these_keys = ThresholdCore::<C>::deserialize::<&[u8]>(&mut serialized.as_ref()).unwrap();
assert_eq!(these_keys.params().t(), vectors.threshold);
assert_eq!(usize::from(these_keys.params().n()), shares.len());
assert_eq!(these_keys.params().i(), i);
assert_eq!(these_keys.secret_share(), shares[usize::from(i - 1)]);
assert_eq!(hex::encode(these_keys.group_key().to_bytes().as_ref()), vectors.group_key);
keys.insert(i, FrostKeys::new(these_keys));
keys.insert(i, ThresholdKeys::new(these_keys));
}
keys
@ -113,15 +117,18 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
) {
// Do basic tests before trying the vectors
test_curve::<_, C>(&mut *rng);
test_schnorr::<_, C>(&mut *rng);
test_promotion::<_, C>(rng);
// Test the DKG
test_dkg::<_, C>(&mut *rng);
// Test against the vectors
let keys = vectors_to_multisig_keys::<C>(&vectors);
let group_key = C::read_G(&mut Cursor::new(hex::decode(&vectors.group_key).unwrap())).unwrap();
let secret = C::read_F(&mut Cursor::new(hex::decode(&vectors.group_secret).unwrap())).unwrap();
let group_key =
<C as Curve>::read_G::<&[u8]>(&mut hex::decode(&vectors.group_key).unwrap().as_ref()).unwrap();
let secret =
C::read_F::<&[u8]>(&mut hex::decode(&vectors.group_secret).unwrap().as_ref()).unwrap();
assert_eq!(C::generator() * secret, group_key);
assert_eq!(recover(&keys), secret);
assert_eq!(recover_key(&keys), secret);
let mut machines = vec![];
for i in &vectors.included {
@ -142,27 +149,36 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
.drain(..)
.map(|(i, machine)| {
let nonces = [
C::read_F(&mut Cursor::new(hex::decode(&vectors.nonces[c][0]).unwrap())).unwrap(),
C::read_F(&mut Cursor::new(hex::decode(&vectors.nonces[c][1]).unwrap())).unwrap(),
C::read_F::<&[u8]>(&mut hex::decode(&vectors.nonces[c][0]).unwrap().as_ref()).unwrap(),
C::read_F::<&[u8]>(&mut hex::decode(&vectors.nonces[c][1]).unwrap().as_ref()).unwrap(),
];
c += 1;
let these_commitments = vec![[C::generator() * nonces[0], C::generator() * nonces[1]]];
let machine = machine.unsafe_override_preprocess(PreprocessPackage {
nonces: vec![nonces],
commitments: vec![these_commitments.clone()],
addendum: vec![],
});
let these_commitments = [C::generator() * nonces[0], C::generator() * nonces[1]];
let machine = machine.unsafe_override_preprocess(
vec![Nonce(nonces)],
Preprocess {
commitments: Commitments {
nonces: vec![NonceCommitments {
generators: vec![GeneratorCommitments(these_commitments)],
dleqs: None,
}],
},
addendum: (),
},
);
commitments.insert(
*i,
Cursor::new(
[
these_commitments[0][0].to_bytes().as_ref(),
these_commitments[0][1].to_bytes().as_ref(),
]
.concat()
.to_vec(),
),
machine
.read_preprocess::<&[u8]>(
&mut [
these_commitments[0].to_bytes().as_ref(),
these_commitments[1].to_bytes().as_ref(),
]
.concat()
.as_ref(),
)
.unwrap(),
);
(i, machine)
})
@ -176,10 +192,15 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
let (machine, share) =
machine.sign(clone_without(&commitments, i), &hex::decode(&vectors.msg).unwrap()).unwrap();
let share = {
let mut buf = vec![];
share.write(&mut buf).unwrap();
buf
};
assert_eq!(share, hex::decode(&vectors.sig_shares[c]).unwrap());
c += 1;
shares.insert(*i, Cursor::new(share));
shares.insert(*i, machine.read_share::<&[u8]>(&mut share.as_ref()).unwrap());
(i, machine)
})
.collect::<HashMap<_, _>>();

27
crypto/schnorr/Cargo.toml Normal file
View file

@ -0,0 +1,27 @@
[package]
name = "schnorr-signatures"
version = "0.1.0"
description = "Minimal Schnorr signatures crate hosting common code"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/schnorr"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["schnorr", "ff", "group"]
edition = "2021"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
rand_core = "0.6"
zeroize = { version = "1.5", features = ["zeroize_derive"] }
group = "0.12"
ciphersuite = { path = "../ciphersuite", version = "0.1" }
multiexp = { path = "../multiexp", version = "0.2", features = ["batch"] }
[dev-dependencies]
dalek-ff-group = { path = "../dalek-ff-group", version = "^0.1.2" }
ciphersuite = { path = "../ciphersuite", version = "0.1", features = ["ristretto"] }

21
crypto/schnorr/LICENSE Normal file
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021-2022 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

86
crypto/schnorr/src/lib.rs Normal file
View file

@ -0,0 +1,86 @@
use std::io::{self, Read, Write};
use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize;
use group::{
ff::{Field, PrimeField},
GroupEncoding,
};
use multiexp::BatchVerifier;
use ciphersuite::Ciphersuite;
#[cfg(test)]
mod tests;
/// A Schnorr signature of the form (R, s) where s = r + cx.
#[allow(non_snake_case)]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct SchnorrSignature<C: Ciphersuite> {
  /// The nonce commitment R = rG.
  pub R: C::G,
  /// The scalar response s = r + cx.
  pub s: C::F,
}
impl<C: Ciphersuite> SchnorrSignature<C> {
  /// Read a SchnorrSignature from something implementing Read.
  pub fn read<R: Read>(reader: &mut R) -> io::Result<Self> {
    Ok(SchnorrSignature { R: C::read_G(reader)?, s: C::read_F(reader)? })
  }

  /// Write a SchnorrSignature to something implementing Write.
  pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(self.R.to_bytes().as_ref())?;
    writer.write_all(self.s.to_repr().as_ref())
  }

  /// Serialize a SchnorrSignature, returning a Vec<u8>.
  pub fn serialize(&self) -> Vec<u8> {
    let mut buf = vec![];
    self.write(&mut buf).unwrap();
    buf
  }

  /// Sign a Schnorr signature with the given nonce for the specified challenge.
  ///
  /// This function zeroizes its copies of the private key and nonce before returning.
  pub fn sign(mut private_key: C::F, mut nonce: C::F, challenge: C::F) -> SchnorrSignature<C> {
    let res = SchnorrSignature { R: C::generator() * nonce, s: nonce + (private_key * challenge) };
    private_key.zeroize();
    nonce.zeroize();
    res
  }

  /// Verify a Schnorr signature for the given key with the specified challenge.
  #[must_use]
  pub fn verify(&self, public_key: C::G, challenge: C::F) -> bool {
    (C::generator() * self.s) == (self.R + (public_key * challenge))
  }

  /// Queue a signature for batch verification.
  pub fn batch_verify<R: RngCore + CryptoRng, I: Copy + Zeroize>(
    &self,
    rng: &mut R,
    batch: &mut BatchVerifier<I, C::G>,
    id: I,
    public_key: C::G,
    challenge: C::F,
  ) {
    // The verification equation, rearranged to a multiexp summing to the identity:
    // s = r + ca
    // sG == R + cA
    // R + cA - sG == 0
    batch.queue(
      rng,
      id,
      [
        // R
        (C::F::one(), self.R),
        // cA
        (challenge, public_key),
        // -sG
        (-self.s, C::generator()),
      ],
    );
  }
}

View file

@ -0,0 +1,72 @@
use rand_core::OsRng;
use group::{ff::Field, Group};
use multiexp::BatchVerifier;
use ciphersuite::{Ciphersuite, Ristretto};
use crate::SchnorrSignature;
// Tests that a freshly produced signature verifies against its own key and challenge.
pub(crate) fn core_sign<C: Ciphersuite>() {
  // A random challenge suffices here; no actual HRAm is required
  let key = C::random_nonzero_F(&mut OsRng);
  let nonce = C::random_nonzero_F(&mut OsRng);
  let challenge = C::random_nonzero_F(&mut OsRng);

  let sig = SchnorrSignature::<C>::sign(key, nonce, challenge);
  assert!(sig.verify(C::generator() * key, challenge));
}
// The above sign function verifies signing works
// This verifies invalid signatures don't pass, using zero signatures, which should effectively be
// random
pub(crate) fn core_verify<C: Ciphersuite>() {
  // The (identity, zero) signature should not verify for a random key and challenge
  assert!(!SchnorrSignature::<C> { R: C::G::identity(), s: C::F::zero() }
    .verify(C::generator() * C::random_nonzero_F(&mut OsRng), C::random_nonzero_F(&mut OsRng)));
}
// Tests batch verification, including that blame correctly identifies malleated signatures.
pub(crate) fn core_batch_verify<C: Ciphersuite>() {
  // Create 5 signatures
  let mut keys = vec![];
  let mut challenges = vec![];
  let mut sigs = vec![];
  for i in 0 .. 5 {
    keys.push(C::random_nonzero_F(&mut OsRng));
    challenges.push(C::random_nonzero_F(&mut OsRng));
    sigs.push(SchnorrSignature::<C>::sign(keys[i], C::random_nonzero_F(&mut OsRng), challenges[i]));
  }

  // Batch verify
  {
    let mut batch = BatchVerifier::new(5);
    for (i, sig) in sigs.iter().enumerate() {
      sig.batch_verify(&mut OsRng, &mut batch, i, C::generator() * keys[i], challenges[i]);
    }
    batch.verify_with_vartime_blame().unwrap();
  }

  // Shift 1 from s from one to another and verify it fails
  // This test will fail if unique factors aren't used per-signature, hence its inclusion
  {
    let mut batch = BatchVerifier::new(5);
    // SchnorrSignature is Copy, so iter().copied() yields the owned, mutable values the loop
    // needs without cloning the entire Vec (as the prior clone().drain(..) did)
    for (i, mut sig) in sigs.iter().copied().enumerate() {
      if i == 1 {
        sig.s += C::F::one();
      }
      if i == 2 {
        sig.s -= C::F::one();
      }
      sig.batch_verify(&mut OsRng, &mut batch, i, C::generator() * keys[i], challenges[i]);
    }
    if let Err(blame) = batch.verify_with_vartime_blame() {
      assert!((blame == 1) || (blame == 2));
    } else {
      panic!("Batch verification considered malleated signatures valid");
    }
  }
}
// Runs the sign, verify, and batch-verify tests over the Ristretto ciphersuite.
#[test]
fn test() {
  core_sign::<Ristretto>();
  core_verify::<Ristretto>();
  core_batch_verify::<Ristretto>();
}

View file

@ -38,6 +38,8 @@ services:
args:
TAG: serai
entrypoint: /scripts/entry-dev.sh
volumes:
- "./serai/scripts:/scripts"
serai-base:
<<: *serai_defaults

View file

@ -20,12 +20,17 @@ RUN pip3 install solc-select==0.2.1
RUN solc-select install 0.8.16
RUN solc-select use 0.8.16
# Build it
RUN cargo build --release
# Mount cargo and serai cache for Cache & Build
RUN --mount=type=cache,target=/usr/local/cargo/git \
--mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/serai/target/release/build \
--mount=type=cache,target=/serai/target/release/deps \
--mount=type=cache,target=/serai/target/release/.fingerprint \
--mount=type=cache,target=/serai/target/release/incremental \
--mount=type=cache,target=/serai/target/release/wbuild \
--mount=type=cache,target=/serai/target/release/lib* \
cargo build --release
# Mount for Cache
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/serai/target
# Prepare Image
FROM ubuntu:latest as image
@ -34,9 +39,8 @@ LABEL description="STAGE 2: Copy and Run"
WORKDIR /home/serai
# Copy necessary files to run node
COPY --from=builder /serai/target/release/* /bin/
COPY --from=builder /serai/target/release/ /bin/
COPY --from=builder /serai/AGPL-3.0 .
COPY deploy/serai/scripts /scripts
# Run node
EXPOSE 30333 9615 9933 9944

View file

@ -4,7 +4,11 @@ use async_trait::async_trait;
use thiserror::Error;
use transcript::RecommendedTranscript;
use frost::{curve::Curve, FrostKeys, sign::PreprocessMachine};
use frost::{
curve::{Ciphersuite, Curve},
ThresholdKeys,
sign::PreprocessMachine,
};
pub mod monero;
pub use self::monero::Monero;
@ -45,14 +49,14 @@ pub trait Coin {
const MAX_OUTPUTS: usize; // TODO: Decide if this includes change or not
// Doesn't have to take self, enables some level of caching which is pleasant
fn address(&self, key: <Self::Curve as Curve>::G) -> Self::Address;
fn address(&self, key: <Self::Curve as Ciphersuite>::G) -> Self::Address;
async fn get_latest_block_number(&self) -> Result<usize, CoinError>;
async fn get_block(&self, number: usize) -> Result<Self::Block, CoinError>;
async fn get_outputs(
&self,
block: &Self::Block,
key: <Self::Curve as Curve>::G,
key: <Self::Curve as Ciphersuite>::G,
) -> Result<Vec<Self::Output>, CoinError>;
// TODO: Remove
@ -60,7 +64,7 @@ pub trait Coin {
async fn prepare_send(
&self,
keys: FrostKeys<Self::Curve>,
keys: ThresholdKeys<Self::Curve>,
transcript: RecommendedTranscript,
block_number: usize,
inputs: Vec<Self::Output>,

View file

@ -4,7 +4,7 @@ use curve25519_dalek::scalar::Scalar;
use dalek_ff_group as dfg;
use transcript::RecommendedTranscript;
use frost::{curve::Ed25519, FrostKeys};
use frost::{curve::Ed25519, ThresholdKeys};
use monero_serai::{
transaction::Transaction,
@ -55,7 +55,7 @@ impl OutputTrait for Output {
#[derive(Debug)]
pub struct SignableTransaction {
keys: FrostKeys<Ed25519>,
keys: ThresholdKeys<Ed25519>,
transcript: RecommendedTranscript,
// Monero height, defined as the length of the chain
height: usize,
@ -157,7 +157,7 @@ impl Coin for Monero {
async fn prepare_send(
&self,
keys: FrostKeys<Ed25519>,
keys: ThresholdKeys<Ed25519>,
transcript: RecommendedTranscript,
block_number: usize,
mut inputs: Vec<Output>,

View file

@ -1,9 +1,9 @@
use std::{marker::Send, io::Cursor, collections::HashMap};
use std::{marker::Send, collections::HashMap};
use async_trait::async_trait;
use thiserror::Error;
use frost::{curve::Curve, FrostError};
use frost::{curve::Ciphersuite, FrostError};
mod coin;
use coin::{CoinError, Coin};
@ -18,7 +18,7 @@ pub enum NetworkError {}
#[async_trait]
pub trait Network: Send {
async fn round(&mut self, data: Vec<u8>) -> Result<HashMap<u16, Cursor<Vec<u8>>>, NetworkError>;
async fn round(&mut self, data: Vec<u8>) -> Result<HashMap<u16, Vec<u8>>, NetworkError>;
}
#[derive(Clone, Error, Debug)]
@ -35,6 +35,9 @@ pub enum SignError {
// Doesn't consider the current group key to increase the simplicity of verifying Serai's status
// Takes an index, k, to support protocols which use multiple secondary keys
// Presumably a view key
pub(crate) fn additional_key<C: Coin>(k: u64) -> <C::Curve as Curve>::F {
C::Curve::hash_to_F(b"Serai DEX Additional Key", &[C::ID, &k.to_le_bytes()].concat())
pub(crate) fn additional_key<C: Coin>(k: u64) -> <C::Curve as Ciphersuite>::F {
<C::Curve as Ciphersuite>::hash_to_F(
b"Serai DEX Additional Key",
&[C::ID, &k.to_le_bytes()].concat(),
)
}

View file

@ -1,5 +1,4 @@
use std::{
io::Cursor,
sync::{Arc, RwLock},
collections::HashMap,
};
@ -19,7 +18,7 @@ struct LocalNetwork {
i: u16,
size: u16,
round: usize,
rounds: Arc<RwLock<Vec<HashMap<u16, Cursor<Vec<u8>>>>>>,
rounds: Arc<RwLock<Vec<HashMap<u16, Vec<u8>>>>>,
}
impl LocalNetwork {
@ -35,13 +34,13 @@ impl LocalNetwork {
#[async_trait]
impl Network for LocalNetwork {
async fn round(&mut self, data: Vec<u8>) -> Result<HashMap<u16, Cursor<Vec<u8>>>, NetworkError> {
async fn round(&mut self, data: Vec<u8>) -> Result<HashMap<u16, Vec<u8>>, NetworkError> {
{
let mut rounds = self.rounds.write().unwrap();
if rounds.len() == self.round {
rounds.push(HashMap::new());
}
rounds[self.round].insert(self.i, Cursor::new(data));
rounds[self.round].insert(self.i, data);
}
while {

View file

@ -6,9 +6,9 @@ use group::GroupEncoding;
use transcript::{Transcript, RecommendedTranscript};
use frost::{
curve::Curve,
FrostKeys,
sign::{PreprocessMachine, SignMachine, SignatureMachine},
curve::{Ciphersuite, Curve},
FrostError, ThresholdKeys,
sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine},
};
use crate::{
@ -17,12 +17,12 @@ use crate::{
};
pub struct WalletKeys<C: Curve> {
keys: FrostKeys<C>,
keys: ThresholdKeys<C>,
creation_block: usize,
}
impl<C: Curve> WalletKeys<C> {
pub fn new(keys: FrostKeys<C>, creation_block: usize) -> WalletKeys<C> {
pub fn new(keys: ThresholdKeys<C>, creation_block: usize) -> WalletKeys<C> {
WalletKeys { keys, creation_block }
}
@ -34,13 +34,13 @@ impl<C: Curve> WalletKeys<C> {
// system, there are potentially other benefits to binding this to a specific group key
// It's no longer possible to influence group key gen to key cancel without breaking the hash
// function as well, although that degree of influence means key gen is broken already
fn bind(&self, chain: &[u8]) -> FrostKeys<C> {
fn bind(&self, chain: &[u8]) -> ThresholdKeys<C> {
const DST: &[u8] = b"Serai Processor Wallet Chain Bind";
let mut transcript = RecommendedTranscript::new(DST);
transcript.append_message(b"chain", chain);
transcript.append_message(b"curve", C::ID);
transcript.append_message(b"group_key", self.keys.group_key().to_bytes().as_ref());
self.keys.offset(C::hash_to_F(DST, &transcript.challenge(b"offset")))
self.keys.offset(<C as Ciphersuite>::hash_to_F(DST, &transcript.challenge(b"offset")))
}
}
@ -203,8 +203,8 @@ fn select_inputs_outputs<C: Coin>(
pub struct Wallet<D: CoinDb, C: Coin> {
db: D,
coin: C,
keys: Vec<(FrostKeys<C::Curve>, Vec<C::Output>)>,
pending: Vec<(usize, FrostKeys<C::Curve>)>,
keys: Vec<(ThresholdKeys<C::Curve>, Vec<C::Output>)>,
pending: Vec<(usize, ThresholdKeys<C::Curve>)>,
}
impl<D: CoinDb, C: Coin> Wallet<D, C> {
@ -343,10 +343,36 @@ impl<D: CoinDb, C: Coin> Wallet<D, C> {
self.coin.attempt_send(prepared, &included).await.map_err(SignError::CoinError)?;
let (attempt, commitments) = attempt.preprocess(&mut OsRng);
let commitments = network.round(commitments).await.map_err(SignError::NetworkError)?;
let commitments = network
.round(commitments.serialize())
.await
.map_err(SignError::NetworkError)?
.drain()
.map(|(validator, preprocess)| {
Ok((
validator,
attempt
.read_preprocess::<&[u8]>(&mut preprocess.as_ref())
.map_err(|_| SignError::FrostError(FrostError::InvalidPreprocess(validator)))?,
))
})
.collect::<Result<HashMap<_, _>, _>>()?;
let (attempt, share) = attempt.sign(commitments, b"").map_err(SignError::FrostError)?;
let shares = network.round(share).await.map_err(SignError::NetworkError)?;
let shares = network
.round(share.serialize())
.await
.map_err(SignError::NetworkError)?
.drain()
.map(|(validator, share)| {
Ok((
validator,
attempt
.read_share::<&[u8]>(&mut share.as_ref())
.map_err(|_| SignError::FrostError(FrostError::InvalidShare(validator)))?,
))
})
.collect::<Result<HashMap<_, _>, _>>()?;
let tx = attempt.complete(shares).map_err(SignError::FrostError)?;