Merge branch 'develop' into firo

This commit is contained in:
Luke Parker 2022-07-12 01:29:37 -04:00
commit e3ff4f7af6
86 changed files with 5678 additions and 2076 deletions


@ -2,10 +2,15 @@
members = [
"crypto/transcript",
"crypto/multiexp",
"crypto/frost",
"crypto/dalek-ff-group",
"crypto/multiexp",
"crypto/dleq",
"crypto/frost",
"coins/monero",
"coins/firo",
"processor",
]

README.md Normal file

@ -0,0 +1,22 @@
# Serai
Serai is a new DEX, built from the ground up, initially planning to list
Bitcoin, Ethereum, Monero, DAI, and USDC, offering a liquidity-pool trading
experience. Funds are stored in an economically secured threshold multisig
wallet.
### Layout
- `docs` - Documentation on the Serai protocol.
- `coins` - Various coin libraries intended for use within Serai and by the
wider community. They will always support the functionality Serai needs,
without disadvantaging other use cases where possible.
- `crypto` - A series of composable cryptographic libraries built around the
`ff`/`group` APIs achieving a variety of tasks. These range from generic
infrastructure, to our IETF-compliant FROST implementation, to a DLEq proof as
needed for Bitcoin-Monero atomic swaps.
- `processor` - A generic chain processor which processes data for Serai,
handles events from Serai, and executes transactions as needed.


@ -6,7 +6,11 @@ license = "MIT"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
[build-dependencies]
cc = "1.0"
[dependencies]
hex-literal = "0.3"
lazy_static = "1"
thiserror = "1"
@ -15,29 +19,34 @@ rand_chacha = { version = "0.3", optional = true }
rand = "0.8"
rand_distr = "0.4"
subtle = "2.4"
tiny-keccak = { version = "2", features = ["keccak"] }
blake2 = "0.10"
blake2 = { version = "0.10", optional = true }
curve25519-dalek = { version = "3", features = ["std"] }
ff = { version = "0.11", optional = true }
group = { version = "0.11", optional = true }
group = { version = "0.12" }
dalek-ff-group = { path = "../../crypto/dalek-ff-group" }
dalek-ff-group = { path = "../../crypto/dalek-ff-group", optional = true }
transcript = { path = "../../crypto/transcript", optional = true }
frost = { path = "../../crypto/frost", optional = true }
monero = "0.16"
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", features = ["recommended"], optional = true }
frost = { package = "modular-frost", path = "../../crypto/frost", features = ["ed25519"], optional = true }
dleq = { path = "../../crypto/dleq", features = ["serialize"], optional = true }
hex = "0.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
base58-monero = "1"
monero-epee-bin-serde = "1.0"
monero = "0.16"
reqwest = { version = "0.11", features = ["json"] }
[features]
experimental = []
multisig = ["ff", "group", "rand_chacha", "transcript", "frost", "dalek-ff-group"]
multisig = ["rand_chacha", "blake2", "transcript", "frost", "dleq"]
[dev-dependencies]
sha2 = "0.10"
tokio = { version = "1", features = ["full"] }

coins/monero/README.md Normal file

@ -0,0 +1,7 @@
# monero-serai
A modern Monero transaction library intended for use in wallets. It prides
itself on accuracy, correctness, and removing common pitfalls developers may
face.
Threshold multisignature support is available via the `multisig` feature.
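For a sense of the API, a minimal sketch of deriving an address (the crate and module paths, and the dummy scalars, are assumptions for illustration, not part of this README):

```rust
use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar};
// Paths assumed from this repository's layout
use monero_serai::wallet::{ViewPair, address::{Network, AddressType}};

fn example_address() -> String {
  // Dummy keys purely for illustration; a real wallet derives these from a seed
  let spend = &Scalar::from(1u8) * &ED25519_BASEPOINT_TABLE;
  let view = Scalar::from(2u8);
  ViewPair { spend, view }.address(Network::Mainnet, AddressType::Standard, false).to_string()
}
```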


@ -1,6 +1,4 @@
use std::{env, path::Path, process::Command};
fn main() {
if !Command::new("git").args(&["submodule", "update", "--init", "--recursive"]).status().unwrap().success() {
@ -16,14 +14,8 @@ fn main() {
// Use a file to signal if Monero was already built, as that should never be rebuilt
// If the signaling file was deleted, run this script again to rebuild Monero though
// TODO: Move this signaling file into OUT_DIR once Monero is built statically successfully
println!("cargo:rerun-if-changed=c/.build/monero");
if !Path::new("c/.build/monero").exists() {
if !Command::new("cmake").args(&["cmake", "-DCMAKE_BUILD_TYPE=Release", "-DBUILD_SHARED_LIBS=1", "."])
.current_dir(&Path::new("c/monero")).status().unwrap().success() {
panic!("cmake failed to generate Monero's build scripts");
}
if !Command::new("make").arg(format!("-j{}", &env::var("THREADS").unwrap_or("2".to_string())))
.current_dir(&Path::new("c/monero")).status().unwrap().success() {
panic!("make failed to build Monero. Please check your dependencies");
@ -35,56 +27,46 @@ fn main() {
}
}
println!("cargo:rerun-if-env-changed=OUT_DIR");
if !Path::new(
&format!(
"{}/{}cncrypto.{}",
out_dir,
&env::consts::DLL_PREFIX,
&env::consts::DLL_EXTENSION
)
).exists() {
for (folder, lib) in [
("crypto", "cncrypto"),
("device", "device"),
("ringct", "ringct_basic"),
("ringct", "ringct")
] {
if !Command::new("cp").args(&[
&format!(
"c/monero/src/{}/{}{}.{}",
folder,
&env::consts::DLL_PREFIX,
lib,
&env::consts::DLL_EXTENSION
),
out_dir
]).status().unwrap().success() {
panic!("Failed to cp {}", lib);
}
}
}
println!("cargo:rerun-if-changed=c/wrapper.cpp");
if !Command::new("g++").args(&[
"-O3", "-Wall", "-shared", "-std=c++14", "-fPIC",
"-Imonero/contrib/epee/include", "-Imonero/src",
"wrapper.cpp", "-o", &format!(
"{}/{}wrapper.{}",
out_dir,
&env::consts::DLL_PREFIX,
&env::consts::DLL_EXTENSION
),
&format!("-L{}", out_dir),
"-ldevice", "-lringct_basic", "-lringct"
]).current_dir(&Path::new("c")).status().unwrap().success() {
panic!("g++ failed to build the wrapper");
}
cc::Build::new()
.static_flag(true)
.warnings(false)
.extra_warnings(false)
.flag("-Wno-deprecated-declarations")
.include("c/monero/external/supercop/include")
.include("c/monero/contrib/epee/include")
.include("c/monero/src")
.include("c/monero/build/release/generated_include")
.define("AUTO_INITIALIZE_EASYLOGGINGPP", None)
.include("c/monero/external/easylogging++")
.file("c/monero/external/easylogging++/easylogging++.cc")
.file("c/monero/src/common/aligned.c")
.file("c/monero/src/common/perf_timer.cpp")
.include("c/monero/src/crypto")
.file("c/monero/src/crypto/crypto-ops-data.c")
.file("c/monero/src/crypto/crypto-ops.c")
.file("c/monero/src/crypto/keccak.c")
.file("c/monero/src/crypto/hash.c")
.include("c/monero/src/device")
.file("c/monero/src/device/device_default.cpp")
.include("c/monero/src/ringct")
.file("c/monero/src/ringct/rctCryptoOps.c")
.file("c/monero/src/ringct/rctTypes.cpp")
.file("c/monero/src/ringct/rctOps.cpp")
.file("c/monero/src/ringct/multiexp.cc")
.file("c/monero/src/ringct/bulletproofs.cc")
.file("c/monero/src/ringct/rctSigs.cpp")
.file("c/wrapper.cpp")
.compile("wrapper");
println!("cargo:rustc-link-search={}", out_dir);
println!("cargo:rustc-link-lib=cncrypto");
println!("cargo:rustc-link-lib=device");
println!("cargo:rustc-link-lib=ringct_basic");
println!("cargo:rustc-link-lib=ringct");
println!("cargo:rustc-link-lib=wrapper");
println!("cargo:rustc-link-lib=stdc++");
}

@ -1 +1 @@
Subproject commit ab18fea3500841fc312630d49ed6840b3aedb34d
Subproject commit 424e4de16b98506170db7b0d7d87a79ccf541744


@ -6,36 +6,50 @@
#include "ringct/rctSigs.h"
typedef std::lock_guard<std::mutex> lock;
std::mutex rng_mutex;
uint8_t rng_entropy[64];
extern "C" {
void rng(uint8_t* seed) {
// Set the first half to the seed
memcpy(rng_entropy, seed, 32);
// Set the second half to the hash of a DST to ensure a lack of collisions
crypto::cn_fast_hash("RNG_entropy_seed", 16, (char*) &rng_entropy[32]);
}
}
extern "C" void monero_wide_reduce(uint8_t* value);
namespace crypto {
void generate_random_bytes_not_thread_safe(size_t n, void* value) {
size_t written = 0;
while (written != n) {
uint8_t hash[32];
crypto::cn_fast_hash(rng_entropy, 64, (char*) hash);
// Step the RNG by setting the latter half to the most recent result
// Does not leak the RNG, even if the values are leaked (which they are
// expected to be) due to the first half remaining constant and
// undisclosed
memcpy(&rng_entropy[32], hash, 32);
size_t next = n - written;
if (next > 32) {
next = 32;
}
memcpy(&((uint8_t*) value)[written], hash, next);
written += next;
}
}
void random32_unbiased(unsigned char *bytes) {
uint8_t value[64];
generate_random_bytes_not_thread_safe(64, value);
monero_wide_reduce(value);
memcpy(bytes, value, 32);
}
}
extern "C" {
void c_hash_to_point(uint8_t* point) {
rct::key key_point;
ge_p3 e_p3;
@ -62,16 +76,24 @@ extern "C" {
std::stringstream ss;
binary_archive<true> ba(ss);
::serialization::serialize(ba, bp);
uint8_t* res = (uint8_t*) calloc(ss.str().size(), 1);
memcpy(res, ss.str().data(), ss.str().size());
return res;
}
bool c_verify_bp(
uint8_t* seed,
uint s_len,
uint8_t* s,
uint8_t c_len,
uint8_t* c
) {
// BPs are batch verified which use RNG based weights to ensure individual
// integrity
// That's why this must also have control over RNG, to prevent interrupting
// multisig signing while not using known seeds. Considering this doesn't
// actually define a batch, and it's only verifying a single BP,
// it'd probably be fine, but...
lock guard(rng_mutex);
rng(seed);
@ -94,7 +116,15 @@ extern "C" {
try { return rct::bulletproof_VERIFY(bp); } catch(...) { return false; }
}
bool c_verify_clsag(
uint s_len,
uint8_t* s,
uint8_t k_len,
uint8_t* k,
uint8_t* I,
uint8_t* p,
uint8_t* m
) {
rct::clsag clsag;
std::stringstream ss;
std::string str;
@ -121,6 +151,8 @@ extern "C" {
rct::key msg;
memcpy(msg.bytes, m, 32);
try {
return verRctCLSAGSimple(msg, clsag, keys, pseudo_out);
} catch(...) { return false; }
}
}
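The seeded RNG above is just a hash chain over a 64-byte state. A minimal Rust sketch of the same construction, assuming a hypothetical `hash32` function standing in for cn_fast_hash (Keccak-256):

```rust
// State layout: [32-byte seed (secret, constant) || 32 bytes stepped per output]
fn rng_bytes(entropy: &mut [u8; 64], n: usize, hash32: impl Fn(&[u8]) -> [u8; 32]) -> Vec<u8> {
  let mut out = Vec::with_capacity(n);
  while out.len() != n {
    // Hash the full state; the first half stays undisclosed, so leaked outputs
    // don't let an observer predict future outputs
    let hash = hash32(&entropy[..]);
    // Step the RNG by replacing the latter half with the latest result
    entropy[32 ..].copy_from_slice(&hash);
    let next = (n - out.len()).min(32);
    out.extend_from_slice(&hash[.. next]);
  }
  out
}
```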


@ -1,26 +1,15 @@
use std::{convert::TryInto, io::Cursor};
use thiserror::Error;
use rand_core::{RngCore, CryptoRng};
use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};
use group::{Group, GroupEncoding};
use transcript::{Transcript, RecommendedTranscript};
use dalek_ff_group as dfg;
use dleq::{Generators, DLEqProof};
#[derive(Clone, Error, Debug)]
pub enum MultisigError {
@ -32,202 +21,59 @@ pub enum MultisigError {
InvalidKeyImage(u16)
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Ed25519;
impl Curve for Ed25519 {
type F = dfg::Scalar;
type G = dfg::EdwardsPoint;
type T = &'static dfg::EdwardsBasepointTable;
fn id() -> String {
"Ed25519".to_string()
}
fn id_len() -> u8 {
u8::try_from(Self::id().len()).unwrap()
}
fn generator() -> Self::G {
Self::G::generator()
}
fn generator_table() -> Self::T {
&dfg::ED25519_BASEPOINT_TABLE
}
fn little_endian() -> bool {
true
}
// This will already be a keccak256 hash in the case of CLSAG signing, making it fine to simply
// return as-is, yet this ensures it's fixed size (a security requirement) and unique regardless
// of how it's called/what it's called with
fn hash_msg(msg: &[u8]) -> Vec<u8> {
Blake2b512::digest(msg).to_vec()
}
fn hash_binding_factor(binding: &[u8]) -> Self::F {
Self::hash_to_F(&[b"rho", binding].concat())
}
fn hash_to_F(data: &[u8]) -> Self::F {
dfg::Scalar::from_hash(Blake2b512::new().chain(data))
}
fn F_len() -> usize {
32
}
fn G_len() -> usize {
32
}
fn F_from_slice(slice: &[u8]) -> Result<Self::F, CurveError> {
let scalar = Self::F::from_repr(
slice.try_into().map_err(|_| CurveError::InvalidLength(32, slice.len()))?
);
if scalar.is_some().unwrap_u8() == 0 {
Err(CurveError::InvalidScalar)?;
}
Ok(scalar.unwrap())
}
fn G_from_slice(slice: &[u8]) -> Result<Self::G, CurveError> {
let bytes = slice.try_into().map_err(|_| CurveError::InvalidLength(32, slice.len()))?;
let point = dfg::CompressedEdwardsY::new(bytes).decompress();
if let Some(point) = point {
// Ban torsioned points
if !point.is_torsion_free() {
Err(CurveError::InvalidPoint)?;
}
// Ban points which weren't canonically encoded
if point.compress().to_bytes() != bytes {
Err(CurveError::InvalidPoint)?;
}
Ok(point)
} else {
Err(CurveError::InvalidPoint)
}
}
fn F_to_bytes(f: &Self::F) -> Vec<u8> {
f.to_repr().to_vec()
}
fn G_to_bytes(g: &Self::G) -> Vec<u8> {
g.compress().to_bytes().to_vec()
}
}
fn transcript() -> RecommendedTranscript {
RecommendedTranscript::new(b"monero_key_image_dleq")
}
#[allow(non_snake_case)]
pub(crate) fn write_dleq<R: RngCore + CryptoRng>(
rng: &mut R,
H: EdwardsPoint,
x: Scalar
) -> Vec<u8> {
let mut res = Vec::with_capacity(64);
DLEqProof::prove(
rng,
// Doesn't take in a larger transcript object due to the usage of this
// Every prover would immediately write their own DLEq proof, when they can only do so in
// the proper order if they want to reach consensus
// It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to try to
// merge later in some form, when it should instead just merge xH (as it does)
&mut transcript(),
Generators::new(dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(H)),
dfg::Scalar(x)
).serialize(&mut res).unwrap();
res
}
#[allow(non_snake_case)]
pub(crate) fn read_dleq(
serialized: &[u8],
H: EdwardsPoint,
l: u16,
xG: dfg::EdwardsPoint
) -> Result<dfg::EdwardsPoint, MultisigError> {
if serialized.len() != 96 {
Err(MultisigError::InvalidDLEqProof(l))?;
}
let bytes = (&serialized[.. 32]).try_into().unwrap();
// dfg ensures the point is torsion free
let xH = Option::<dfg::EdwardsPoint>::from(
dfg::EdwardsPoint::from_bytes(&bytes)).ok_or(MultisigError::InvalidDLEqProof(l)
)?;
// Ensure this is a canonical point
if xH.to_bytes() != bytes {
Err(MultisigError::InvalidDLEqProof(l))?;
}
DLEqProof::<dfg::EdwardsPoint>::deserialize(
&mut Cursor::new(&serialized[32 ..])
).map_err(|_| MultisigError::InvalidDLEqProof(l))?.verify(
&mut transcript(),
Generators::new(dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(H)),
(xG, xH)
).map_err(|_| MultisigError::InvalidDLEqProof(l))?;
Ok(xH)
}


@ -1,6 +1,10 @@
use std::slice;
use lazy_static::lazy_static;
use rand_core::{RngCore, CryptoRng};
use subtle::ConstantTimeEq;
use tiny_keccak::{Hasher, Keccak};
use curve25519_dalek::{
@ -32,6 +36,29 @@ lazy_static! {
static ref H_TABLE: EdwardsBasepointTable = EdwardsBasepointTable::create(&*H);
}
// Function from libsodium our subsection of Monero relies on. Implementing it here means we don't
// need to link against libsodium
#[no_mangle]
unsafe extern "C" fn crypto_verify_32(a: *const u8, b: *const u8) -> isize {
isize::from(
slice::from_raw_parts(a, 32).ct_eq(slice::from_raw_parts(b, 32)).unwrap_u8()
) - 1
}
// Offer a wide reduction to C. Our seeded RNG prevented Monero from defining an unbiased scalar
// generation function, and in order to not use Monero code (which would require propagating its
// license), the function was rewritten. It was rewritten with wide reduction, instead of rejection
// sampling however, hence the need for this function
#[no_mangle]
unsafe extern "C" fn monero_wide_reduce(value: *mut u8) {
let res = Scalar::from_bytes_mod_order_wide(
std::slice::from_raw_parts(value, 64).try_into().unwrap()
);
for (i, b) in res.to_bytes().iter().enumerate() {
value.add(i).write(*b);
}
}
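For contrast with the wide reduction above, rejection sampling (roughly what upstream Monero's random32_unbiased does) would look like this sketch; it's unbiased because out-of-range values are simply retried:

```rust
use curve25519_dalek::scalar::Scalar;
use rand_core::{RngCore, CryptoRng};

fn random32_rejection<R: RngCore + CryptoRng>(rng: &mut R) -> Scalar {
  loop {
    let mut bytes = [0; 32];
    rng.fill_bytes(&mut bytes);
    // from_canonical_bytes only returns Some for values below the group order
    if let Some(scalar) = Scalar::from_canonical_bytes(bytes) {
      return scalar;
    }
  }
}
```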
#[allow(non_snake_case)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Commitment {
@ -71,20 +98,3 @@ pub fn hash(data: &[u8]) -> [u8; 32] {
pub fn hash_to_scalar(data: &[u8]) -> Scalar {
Scalar::from_bytes_mod_order(hash(&data))
}
pub fn hash_to_point(point: &EdwardsPoint) -> EdwardsPoint {
let mut bytes = point.compress().to_bytes();
unsafe {
#[link(name = "wrapper")]
extern "C" {
fn c_hash_to_point(point: *const u8);
}
c_hash_to_point(bytes.as_mut_ptr());
}
CompressedEdwardsY::from_slice(&bytes).decompress().unwrap()
}
pub fn generate_key_image(secret: &Scalar) -> EdwardsPoint {
secret * hash_to_point(&(secret * &ED25519_BASEPOINT_TABLE))
}


@ -6,6 +6,8 @@ use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};
use crate::{Commitment, wallet::TransactionError, serialize::*};
pub(crate) const MAX_OUTPUTS: usize = 16;
#[derive(Clone, PartialEq, Debug)]
pub struct Bulletproofs {
pub A: EdwardsPoint,
@ -22,8 +24,22 @@ pub struct Bulletproofs {
}
impl Bulletproofs {
pub(crate) fn fee_weight(outputs: usize) -> usize {
let proofs = 6 + usize::try_from(usize::BITS - (outputs - 1).leading_zeros()).unwrap();
let len = (9 + (2 * proofs)) * 32;
let mut clawback = 0;
let padded = 1 << (proofs - 6);
if padded > 2 {
const BP_BASE: usize = 368;
clawback = ((BP_BASE * padded) - len) * 4 / 5;
}
len + clawback
}
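As a worked example of the clawback (derived from this function's arithmetic, not measured): 3 outputs pad to 4, so proofs = 6 + 2 = 8 and len = (9 + 16) * 32 = 800, giving clawback = ((368 * 4) - 800) * 4 / 5 = 537 and a total fee weight of 1337.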
pub fn new<R: RngCore + CryptoRng>(rng: &mut R, outputs: &[Commitment]) -> Result<Bulletproofs, TransactionError> {
if outputs.len() > MAX_OUTPUTS {
return Err(TransactionError::TooManyOutputs)?;
}
@ -60,6 +76,7 @@ impl Bulletproofs {
Ok(res)
}
#[must_use]
pub fn verify<R: RngCore + CryptoRng>(&self, rng: &mut R, commitments: &[EdwardsPoint]) -> bool {
if commitments.len() > 16 {
return false;


@ -12,9 +12,10 @@ use curve25519_dalek::{
};
use crate::{
Commitment, random_scalar, hash_to_scalar,
transaction::RING_LEN,
wallet::decoys::Decoys,
ringct::hash_to_point,
serialize::*
};
@ -169,7 +170,7 @@ fn core(
let c_c = mu_C * c;
let L = (&s[i] * &ED25519_BASEPOINT_TABLE) + (c_p * P[i]) + (c_c * C[i]);
let PH = hash_to_point(P[i]);
// Shouldn't be an issue as all of the variables in this vartime statement are public
let R = (s[i] * PH) + images_precomp.vartime_multiscalar_mul(&[c_p, c_c]);
@ -207,7 +208,7 @@ impl Clsag {
let pseudo_out = Commitment::new(mask, input.commitment.amount).calculate();
let z = input.commitment.mask - mask;
let H = hash_to_point(input.decoys.ring[r][0]);
let D = H * z;
let mut s = Vec::with_capacity(input.decoys.ring.len());
for _ in 0 .. input.decoys.ring.len() {
@ -253,7 +254,7 @@ impl Clsag {
mask,
&msg,
&nonce * &ED25519_BASEPOINT_TABLE,
nonce * hash_to_point(inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0])
);
clsag.s[usize::from(inputs[i].2.decoys.i)] = nonce - ((p * inputs[i].0) + c);
@ -287,6 +288,10 @@ impl Clsag {
Ok(())
}
pub(crate) fn fee_weight() -> usize {
(RING_LEN * 32) + 32 + 32
}
pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
write_raw_vec(write_scalar, &self.s, w)?;
w.write_all(&self.c1.to_bytes())?;


@ -1,30 +1,29 @@
use core::fmt::Debug;
use std::sync::{Arc, RwLock};
use rand_core::{RngCore, CryptoRng, SeedableRng};
use rand_chacha::ChaCha12Rng;
use curve25519_dalek::{
constants::ED25519_BASEPOINT_TABLE,
traits::{Identity, IsIdentity},
scalar::Scalar,
edwards::EdwardsPoint
};
use group::Group;
use transcript::{Transcript, RecommendedTranscript};
use frost::{curve::Ed25519, FrostError, FrostView, algorithm::Algorithm};
use dalek_ff_group as dfg;
use crate::{
frost::{MultisigError, write_dleq, read_dleq},
ringct::{hash_to_point, clsag::{ClsagInput, Clsag}}
};
impl ClsagInput {
fn transcript<T: Transcript>(&self, transcript: &mut T) {
// Doesn't domain separate as this is considered part of the larger CLSAG proof
// Ring index
@ -47,7 +46,7 @@ impl ClsagInput {
}
}
#[derive(Clone, Debug)]
pub struct ClsagDetails {
input: ClsagInput,
mask: Scalar
@ -70,16 +69,15 @@ struct Interim {
}
#[allow(non_snake_case)]
#[derive(Clone, Debug)]
pub struct ClsagMultisig {
transcript: RecommendedTranscript,
H: EdwardsPoint,
// Merged here as CLSAG needs it, passing it would be a mess, yet having it beforehand requires a round
image: EdwardsPoint,
details: Arc<RwLock<Option<ClsagDetails>>>,
msg: Option<[u8; 32]>,
interim: Option<Interim>
@ -87,16 +85,16 @@ pub struct ClsagMultisig {
impl ClsagMultisig {
pub fn new(
transcript: RecommendedTranscript,
output_key: EdwardsPoint,
details: Arc<RwLock<Option<ClsagDetails>>>
) -> Result<ClsagMultisig, MultisigError> {
Ok(
ClsagMultisig {
transcript,
H: hash_to_point(output_key),
image: EdwardsPoint::identity(),
details,
@ -107,81 +105,62 @@ impl ClsagMultisig {
}
pub fn serialized_len() -> usize {
32 + (2 * 32)
}
fn input(&self) -> ClsagInput {
(*self.details.read().unwrap()).as_ref().unwrap().input.clone()
}
fn mask(&self) -> Scalar {
(*self.details.read().unwrap()).as_ref().unwrap().mask
}
}
impl Algorithm<Ed25519> for ClsagMultisig {
type Transcript = RecommendedTranscript;
type Signature = (Clsag, EdwardsPoint);
fn nonces(&self) -> Vec<Vec<dfg::EdwardsPoint>> {
vec![vec![dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)]]
}
fn preprocess_addendum<R: RngCore + CryptoRng>(
&mut self,
rng: &mut R,
view: &FrostView<Ed25519>
) -> Vec<u8> {
let mut serialized = Vec::with_capacity(Self::serialized_len());
serialized.extend((view.secret_share().0 * self.H).compress().to_bytes());
serialized.extend(write_dleq(rng, self.H, view.secret_share().0));
serialized
}
fn process_addendum(
&mut self,
view: &FrostView<Ed25519>,
l: u16,
serialized: &[u8]
) -> Result<(), FrostError> {
if serialized.len() != Self::serialized_len() {
// Not an optimal error but...
Err(FrostError::InvalidCommitment(l))?;
}
if self.image.is_identity().into() {
self.transcript.domain_separate(b"CLSAG");
self.input().transcript(&mut self.transcript);
self.transcript.append_message(b"mask", &self.mask().to_bytes());
}
self.transcript.append_message(b"participant", &l.to_be_bytes());
self.transcript.append_message(b"key_image_share", &serialized[.. 32]);
self.image += read_dleq(
serialized,
self.H,
l,
view.verification_share(l)
).map_err(|_| FrostError::InvalidCommitment(l))?.0;
Ok(())
}
@ -192,15 +171,11 @@ impl Algorithm<Ed25519> for ClsagMultisig {
fn sign_share(
&mut self,
view: &FrostView<Ed25519>,
nonce_sums: &[Vec<dfg::EdwardsPoint>],
nonces: &[dfg::Scalar],
msg: &[u8]
) -> dfg::Scalar {
// Use the transcript to get a seeded random number generator
// The transcript contains private data, preventing passive adversaries from recreating this
// process even if they have access to commitments (specifically, the ring index being signed
@ -217,21 +192,22 @@ impl Algorithm<Ed25519> for ClsagMultisig {
&self.input(),
self.mask(),
&self.msg.as_ref().unwrap(),
nonce_sums[0][0].0,
nonce_sums[0][1].0
);
self.interim = Some(Interim { p, c, clsag, pseudo_out });
let share = dfg::Scalar(nonces[0].0 - (p * view.secret_share().0));
share
}
#[must_use]
fn verify(
&self,
_: u16,
_: &[Vec<dfg::EdwardsPoint>],
sum: dfg::Scalar
) -> Option<Self::Signature> {
let interim = self.interim.as_ref().unwrap();
@ -248,15 +224,16 @@ impl Algorithm<Ed25519> for ClsagMultisig {
return None;
}
#[must_use]
fn verify_share(
&self,
verification_share: dfg::EdwardsPoint,
nonces: &[Vec<dfg::EdwardsPoint>],
share: dfg::Scalar,
) -> bool {
let interim = self.interim.as_ref().unwrap();
return (&share.0 * &ED25519_BASEPOINT_TABLE) == (
nonces[0][0].0 - (interim.p * verification_share.0)
);
}
}


@ -0,0 +1,67 @@
use subtle::ConditionallySelectable;
use curve25519_dalek::edwards::{CompressedEdwardsY, EdwardsPoint};
use group::ff::{Field, PrimeField};
use dalek_ff_group::field::FieldElement;
use crate::hash;
pub fn hash_to_point(point: EdwardsPoint) -> EdwardsPoint {
let mut bytes = point.compress().to_bytes();
unsafe {
#[link(name = "wrapper")]
extern "C" {
fn c_hash_to_point(point: *const u8);
}
c_hash_to_point(bytes.as_mut_ptr());
}
CompressedEdwardsY::from_slice(&bytes).decompress().unwrap()
}
// This works without issue. It's also 140 times slower (@ 3.5ms), and despite checking it passes
// for all branches, there still could be *some* discrepancy somewhere. There's no reason to use it
// unless we're trying to purge that section of the C static library, which we aren't right now
#[allow(dead_code)]
pub(crate) fn rust_hash_to_point(key: EdwardsPoint) -> EdwardsPoint {
#[allow(non_snake_case)]
let A = FieldElement::from(486662u64);
let v = FieldElement::from_square(hash(&key.compress().to_bytes())).double();
let w = v + FieldElement::one();
let x = w.square() + (-A.square() * v);
// This isn't the complete X, yet its initial value
// We don't calculate the full X, and instead solely calculate Y, letting dalek reconstruct X
// While inefficient, it solves API boundaries and reduces the amount of work done here
#[allow(non_snake_case)]
let X = {
let u = w;
let v = x;
let v3 = v * v * v;
let uv3 = u * v3;
let v7 = v3 * v3 * v;
let uv7 = u * v7;
uv3 * uv7.pow((-FieldElement::from(5u8)) * FieldElement::from(8u8).invert().unwrap())
};
let x = X.square() * x;
let y = w - x;
let non_zero_0 = !y.is_zero();
let y_if_non_zero_0 = w + x;
let sign = non_zero_0 & (!y_if_non_zero_0.is_zero());
let mut z = -A;
z *= FieldElement::conditional_select(&v, &FieldElement::from(1u8), sign);
#[allow(non_snake_case)]
let Z = z + w;
#[allow(non_snake_case)]
let mut Y = z - w;
Y = Y * Z.invert().unwrap();
let mut bytes = Y.to_repr();
bytes[31] |= sign.unwrap_u8() << 7;
CompressedEdwardsY(bytes).decompress().unwrap().mul_by_cofactor()
}


@ -1,13 +1,20 @@
use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};
pub(crate) mod hash_to_point;
pub use hash_to_point::hash_to_point;
pub mod clsag;
pub mod bulletproofs;
use crate::{
serialize::*,
ringct::{clsag::Clsag, bulletproofs::Bulletproofs}
};
pub fn generate_key_image(secret: Scalar) -> EdwardsPoint {
secret * hash_to_point(&secret * &ED25519_BASEPOINT_TABLE)
}
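For reference, this is the standard Monero key image construction: for a private key `x`, `I = x * hash_to_point(x * G)` with `G` the Ed25519 basepoint, which makes `I` deterministic per key and thus usable for double-spend detection.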
#[derive(Clone, PartialEq, Debug)]
pub struct RctBase {
pub fee: u64,
@ -16,6 +23,10 @@ pub struct RctBase {
}
impl RctBase {
pub(crate) fn fee_weight(outputs: usize) -> usize {
1 + 8 + (outputs * (8 + 32))
}
pub fn serialize<W: std::io::Write>(&self, w: &mut W, rct_type: u8) -> std::io::Result<()> {
w.write_all(&[rct_type])?;
match rct_type {
@ -69,6 +80,10 @@ impl RctPrunable {
}
}
pub(crate) fn fee_weight(inputs: usize, outputs: usize) -> usize {
1 + Bulletproofs::fee_weight(outputs) + (inputs * (Clsag::fee_weight() + 32))
}
pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
match self {
RctPrunable::Null => Ok(()),
@ -114,6 +129,10 @@ pub struct RctSignatures {
}
impl RctSignatures {
pub(crate) fn fee_weight(inputs: usize, outputs: usize) -> usize {
RctBase::fee_weight(outputs) + RctPrunable::fee_weight(inputs, outputs)
}
pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
self.base.serialize(w, self.prunable.rct_type())?;
self.prunable.serialize(w)


@ -9,7 +9,7 @@ use serde_json::json;
use reqwest;
use crate::{transaction::{Input, Timelock, Transaction}, block::Block, wallet::Fee};
#[derive(Deserialize, Debug)]
pub struct EmptyResponse {}
@ -34,8 +34,6 @@ pub enum RpcError {
InvalidTransaction([u8; 32])
}
fn rpc_hex(value: &str) -> Result<Vec<u8>, RpcError> {
hex::decode(value).map_err(|_| RpcError::InternalError("Monero returned invalid hex".to_string()))
}
@ -46,6 +44,9 @@ fn rpc_point(point: &str) -> Result<EdwardsPoint, RpcError> {
).decompress().ok_or(RpcError::InvalidPoint(point.to_string()))
}
#[derive(Clone, Debug)]
pub struct Rpc(String);
impl Rpc {
pub fn new(daemon: String) -> Rpc {
Rpc(daemon)
@ -232,6 +233,34 @@ impl Rpc {
Ok(indexes.o_indexes)
}
// from and to are inclusive
pub async fn get_output_distribution(&self, from: usize, to: usize) -> Result<Vec<u64>, RpcError> {
#[allow(dead_code)]
#[derive(Deserialize, Debug)]
pub struct Distribution {
distribution: Vec<u64>
}
#[allow(dead_code)]
#[derive(Deserialize, Debug)]
struct Distributions {
distributions: Vec<Distribution>
}
let mut distributions: JsonRpcResponse<Distributions> = self.rpc_call("json_rpc", Some(json!({
"method": "get_output_distribution",
"params": {
"binary": false,
"amounts": [0],
"cumulative": true,
"from_height": from,
"to_height": to
}
}))).await?;
Ok(distributions.result.distributions.swap_remove(0).distribution)
}
pub async fn get_outputs(
&self,
indexes: &[u64],
@ -267,37 +296,29 @@ impl Rpc {
// get the median time for the given height, yet we do need to in order to be complete
outs.outs.iter().enumerate().map(
|(i, out)| Ok(
Some([rpc_point(&out.key)?, rpc_point(&out.mask)?]).filter(|_| {
match txs[i].prefix.timelock {
Timelock::Block(t_height) => (t_height <= height),
_ => false
}
})
)
).collect()
}
pub async fn get_fee(&self) -> Result<Fee, RpcError> {
#[allow(dead_code)]
#[derive(Deserialize, Debug)]
struct FeeResponse {
fee: u64,
quantization_mask: u64
}
let res: JsonRpcResponse<FeeResponse> = self.rpc_call("json_rpc", Some(json!({
"method": "get_fee_estimate"
}))).await?;
Ok(Fee { per_weight: res.result.fee, mask: res.result.quantization_mask })
}
pub async fn publish_transaction(&self, tx: &Transaction) -> Result<(), RpcError> {


@ -4,6 +4,10 @@ use curve25519_dalek::{scalar::Scalar, edwards::{EdwardsPoint, CompressedEdwards
pub const VARINT_CONTINUATION_MASK: u8 = 0b1000_0000;
pub fn varint_len(varint: usize) -> usize {
((usize::try_from(usize::BITS - varint.leading_zeros()).unwrap().saturating_sub(1)) / 7) + 1
}
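Since each encoded byte carries 7 bits of the value, some worked examples for the function above (illustrative assertions, not from the source):

```rust
assert_eq!(varint_len(0), 1);
assert_eq!(varint_len(127), 1); // 7 bits still fit in one byte
assert_eq!(varint_len(128), 2); // 8 bits spill into a second byte
```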
pub fn write_varint<W: io::Write>(varint: &u64, w: &mut W) -> io::Result<()> {
let mut varint = *varint;
while {


@ -0,0 +1,45 @@
use hex_literal::hex;
use crate::wallet::address::{Network, AddressType, Address};
const SPEND: [u8; 32] = hex!("f8631661f6ab4e6fda310c797330d86e23a682f20d5bc8cc27b18051191f16d7");
const VIEW: [u8; 32] = hex!("4a1535063ad1fee2dabbf909d4fd9a873e29541b401f0944754e17c9a41820ce");
const STANDARD: &'static str = "4B33mFPMq6mKi7Eiyd5XuyKRVMGVZz1Rqb9ZTyGApXW5d1aT7UBDZ89ewmnWFkzJ5wPd2SFbn313vCT8a4E2Qf4KQH4pNey";
const PAYMENT_ID: [u8; 8] = hex!("b8963a57855cf73f");
const INTEGRATED: &'static str = "4Ljin4CrSNHKi7Eiyd5XuyKRVMGVZz1Rqb9ZTyGApXW5d1aT7UBDZ89ewmnWFkzJ5wPd2SFbn313vCT8a4E2Qf4KbaTH6MnpXSn88oBX35";
const SUB_SPEND: [u8; 32] = hex!("fe358188b528335ad1cfdc24a22a23988d742c882b6f19a602892eaab3c1b62b");
const SUB_VIEW: [u8; 32] = hex!("9bc2b464de90d058468522098d5610c5019c45fd1711a9517db1eea7794f5470");
const SUBADDRESS: &'static str = "8C5zHM5ud8nGC4hC2ULiBLSWx9infi8JUUmWEat4fcTf8J4H38iWYVdFmPCA9UmfLTZxD43RsyKnGEdZkoGij6csDeUnbEB";
#[test]
fn standard_address() {
let addr = Address::from_str(STANDARD, Network::Mainnet).unwrap();
assert_eq!(addr.meta.network, Network::Mainnet);
assert_eq!(addr.meta.kind, AddressType::Standard);
assert_eq!(addr.meta.guaranteed, false);
assert_eq!(addr.spend.compress().to_bytes(), SPEND);
assert_eq!(addr.view.compress().to_bytes(), VIEW);
}
#[test]
fn integrated_address() {
let addr = Address::from_str(INTEGRATED, Network::Mainnet).unwrap();
assert_eq!(addr.meta.network, Network::Mainnet);
assert_eq!(addr.meta.kind, AddressType::Integrated(PAYMENT_ID));
assert_eq!(addr.meta.guaranteed, false);
assert_eq!(addr.spend.compress().to_bytes(), SPEND);
assert_eq!(addr.view.compress().to_bytes(), VIEW);
}
#[test]
fn subaddress() {
let addr = Address::from_str(SUBADDRESS, Network::Mainnet).unwrap();
assert_eq!(addr.meta.network, Network::Mainnet);
assert_eq!(addr.meta.kind, AddressType::Subaddress);
assert_eq!(addr.meta.guaranteed, false);
assert_eq!(addr.spend.compress().to_bytes(), SUB_SPEND);
assert_eq!(addr.view.compress().to_bytes(), SUB_VIEW);
}


@ -1,18 +1,23 @@
#[cfg(feature = "multisig")]
use std::sync::{Arc, RwLock};
use rand::{RngCore, rngs::OsRng};
use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar};
#[cfg(feature = "multisig")]
use transcript::{Transcript, RecommendedTranscript};
#[cfg(feature = "multisig")]
use frost::curve::Ed25519;
use crate::{
Commitment,
random_scalar,
wallet::Decoys,
ringct::{generate_key_image, clsag::{ClsagInput, Clsag}}
};
#[cfg(feature = "multisig")]
use crate::{frost::MultisigError, ringct::clsag::{ClsagDetails, ClsagMultisig}};
#[cfg(feature = "multisig")]
use frost::tests::{key_gen, algorithm_machines, sign};
@ -43,7 +48,7 @@ fn clsag() {
ring.push([&dest * &ED25519_BASEPOINT_TABLE, Commitment::new(mask, amount).calculate()]);
}
let image = generate_key_image(secrets[0]);
let (clsag, pseudo_out) = Clsag::sign(
&mut OsRng,
&vec![(
@ -96,8 +101,9 @@ fn clsag_multisig() -> Result<(), MultisigError> {
algorithm_machines(
&mut OsRng,
ClsagMultisig::new(
Transcript::new(b"Monero Serai CLSAG Test".to_vec()),
Rc::new(RefCell::new(Some(
RecommendedTranscript::new(b"Monero Serai CLSAG Test"),
keys[&1].group_key().0,
Arc::new(RwLock::new(Some(
ClsagDetails::new(
ClsagInput::new(
Commitment::new(randomness, AMOUNT),


@ -1,11 +0,0 @@
use rand::rngs::OsRng;
use frost::tests::{curve::test_curve, key_gen};
use crate::frost::Ed25519;
#[test]
fn frost_ed25519() {
test_curve::<_, Ed25519>(&mut OsRng);
key_gen::<_, Ed25519>(&mut OsRng);
}


@ -0,0 +1,13 @@
use rand::rngs::OsRng;
use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE;
use crate::{random_scalar, ringct::hash_to_point::{hash_to_point, rust_hash_to_point}};
#[test]
fn test_hash_to_point() {
for _ in 0 .. 200 {
let point = &random_scalar(&mut OsRng) * &ED25519_BASEPOINT_TABLE;
assert_eq!(rust_hash_to_point(point), hash_to_point(point));
}
}


@ -1,4 +1,3 @@
#[cfg(feature = "multisig")]
mod frost;
mod hash_to_point;
mod clsag;
mod address;


@ -1,7 +1,11 @@
use core::cmp::Ordering;
use curve25519_dalek::edwards::EdwardsPoint;
use crate::{hash, serialize::*, ringct::{RctPrunable, RctSignatures}};
pub const RING_LEN: usize = 11;
#[derive(Clone, PartialEq, Debug)]
pub enum Input {
Gen(u64),
@ -14,6 +18,13 @@ pub enum Input {
}
impl Input {
// Worst-case predictive len
pub(crate) fn fee_weight() -> usize {
// Uses 1 byte for the VarInt amount due to amount being 0
// Uses 1 byte for the VarInt encoding of the length of the ring as well
1 + 1 + 1 + (8 * RING_LEN) + 32
}
pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
match self {
Input::Gen(height) => {
@ -56,6 +67,10 @@ pub struct Output {
}
impl Output {
pub(crate) fn fee_weight() -> usize {
1 + 1 + 32 + 1
}
pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
write_varint(&self.amount, w)?;
w.write_all(&[2 + (if self.tag.is_some() { 1 } else { 0 })])?;
@ -84,19 +99,73 @@ impl Output {
}
}
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum Timelock {
None,
Block(usize),
Time(u64)
}
impl Timelock {
fn from_raw(raw: u64) -> Timelock {
if raw == 0 {
Timelock::None
} else if raw < 500_000_000 {
Timelock::Block(usize::try_from(raw).unwrap())
} else {
Timelock::Time(raw)
}
}
pub(crate) fn fee_weight() -> usize {
8
}
fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
write_varint(
&match self {
Timelock::None => 0,
Timelock::Block(block) => (*block).try_into().unwrap(),
Timelock::Time(time) => *time
},
w
)
}
}
impl PartialOrd for Timelock {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
match (self, other) {
(Timelock::None, _) => Some(Ordering::Less),
(Timelock::Block(a), Timelock::Block(b)) => a.partial_cmp(b),
(Timelock::Time(a), Timelock::Time(b)) => a.partial_cmp(b),
_ => None
}
}
}
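A brief illustration of this partial ordering (hypothetical values; `Block` heights and `Time` stamps deliberately don't compare):

```rust
use core::cmp::Ordering;

assert_eq!(Timelock::Block(5).partial_cmp(&Timelock::Block(10)), Some(Ordering::Less));
assert_eq!(Timelock::Block(5).partial_cmp(&Timelock::Time(1_600_000_000)), None);
```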
#[derive(Clone, PartialEq, Debug)]
pub struct TransactionPrefix {
pub version: u64,
pub timelock: Timelock,
pub inputs: Vec<Input>,
pub outputs: Vec<Output>,
pub extra: Vec<u8>
}
impl TransactionPrefix {
pub(crate) fn fee_weight(inputs: usize, outputs: usize, extra: usize) -> usize {
// Assumes Timelock::None since this library won't let you create a TX with a timelock
1 + 1 +
varint_len(inputs) + (inputs * Input::fee_weight()) +
// Only 16 outputs are possible under transactions by this lib
1 + (outputs * Output::fee_weight()) +
varint_len(extra) + extra
}
pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
write_varint(&self.version, w)?;
self.timelock.serialize(w)?;
write_vec(Input::serialize, &self.inputs, w)?;
write_vec(Output::serialize, &self.outputs, w)?;
write_varint(&self.extra.len().try_into().unwrap(), w)?;
@ -106,7 +175,7 @@ impl TransactionPrefix {
pub fn deserialize<R: std::io::Read>(r: &mut R) -> std::io::Result<TransactionPrefix> {
let mut prefix = TransactionPrefix {
version: read_varint(r)?,
timelock: Timelock::from_raw(read_varint(r)?),
inputs: read_vec(Input::deserialize, r)?,
outputs: read_vec(Output::deserialize, r)?,
extra: vec![]
@ -127,6 +196,10 @@ pub struct Transaction {
}
impl Transaction {
pub(crate) fn fee_weight(inputs: usize, outputs: usize, extra: usize) -> usize {
TransactionPrefix::fee_weight(inputs, outputs, extra) + RctSignatures::fee_weight(inputs, outputs)
}
pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
self.prefix.serialize(w)?;
self.rct_signatures.serialize(w)


@ -0,0 +1,152 @@
use std::string::ToString;
use thiserror::Error;
use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, edwards::{EdwardsPoint, CompressedEdwardsY}};
use base58_monero::base58::{encode_check, decode_check};
use crate::wallet::ViewPair;
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum Network {
Mainnet,
Testnet,
Stagenet
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum AddressType {
Standard,
Integrated([u8; 8]),
Subaddress
}
impl AddressType {
fn network_bytes(network: Network) -> (u8, u8, u8) {
match network {
Network::Mainnet => (18, 19, 42),
Network::Testnet => (53, 54, 63),
Network::Stagenet => (24, 25, 36)
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct AddressMeta {
pub network: Network,
pub kind: AddressType,
pub guaranteed: bool
}
#[derive(Clone, Error, Debug)]
pub enum AddressError {
#[error("invalid address byte")]
InvalidByte,
#[error("invalid address encoding")]
InvalidEncoding,
#[error("invalid length")]
InvalidLength,
#[error("different network than expected")]
DifferentNetwork,
#[error("invalid key")]
InvalidKey
}
impl AddressMeta {
fn to_byte(&self) -> u8 {
let bytes = AddressType::network_bytes(self.network);
let byte = match self.kind {
AddressType::Standard => bytes.0,
AddressType::Integrated(_) => bytes.1,
AddressType::Subaddress => bytes.2
};
byte | (if self.guaranteed { 1 << 7 } else { 0 })
}
// Returns an incomplete type in the case of Integrated addresses
fn from_byte(byte: u8) -> Result<AddressMeta, AddressError> {
let actual = byte & 0b01111111;
let guaranteed = (byte >> 7) == 1;
let mut meta = None;
for network in [Network::Mainnet, Network::Testnet, Network::Stagenet] {
let (standard, integrated, subaddress) = AddressType::network_bytes(network);
if let Some(kind) = match actual {
_ if actual == standard => Some(AddressType::Standard),
_ if actual == integrated => Some(AddressType::Integrated([0; 8])),
_ if actual == subaddress => Some(AddressType::Subaddress),
_ => None
} {
meta = Some(AddressMeta { network, kind, guaranteed });
break;
}
}
meta.ok_or(AddressError::InvalidByte)
}
}
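For example, per the tables above, a mainnet standard address uses byte 18; setting the guaranteed bit yields `18 | (1 << 7) == 146`, and `from_byte(146)` recovers `Network::Mainnet`, `AddressType::Standard`, and `guaranteed = true`.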
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Address {
pub meta: AddressMeta,
pub spend: EdwardsPoint,
pub view: EdwardsPoint
}
impl ViewPair {
pub fn address(&self, network: Network, kind: AddressType, guaranteed: bool) -> Address {
Address {
meta: AddressMeta {
network,
kind,
guaranteed
},
spend: self.spend,
view: &self.view * &ED25519_BASEPOINT_TABLE
}
}
}
impl ToString for Address {
fn to_string(&self) -> String {
let mut data = vec![self.meta.to_byte()];
data.extend(self.spend.compress().to_bytes());
data.extend(self.view.compress().to_bytes());
if let AddressType::Integrated(id) = self.meta.kind {
data.extend(id);
}
encode_check(&data).unwrap()
}
}
impl Address {
pub fn from_str(s: &str, network: Network) -> Result<Self, AddressError> {
let raw = decode_check(s).map_err(|_| AddressError::InvalidEncoding)?;
if raw.len() == 1 {
Err(AddressError::InvalidLength)?;
}
let mut meta = AddressMeta::from_byte(raw[0])?;
if meta.network != network {
Err(AddressError::DifferentNetwork)?;
}
let len = match meta.kind {
AddressType::Standard | AddressType::Subaddress => 65,
AddressType::Integrated(_) => 73
};
if raw.len() != len {
Err(AddressError::InvalidLength)?;
}
let spend = CompressedEdwardsY(raw[1 .. 33].try_into().unwrap()).decompress().ok_or(AddressError::InvalidKey)?;
let view = CompressedEdwardsY(raw[33 .. 65].try_into().unwrap()).decompress().ok_or(AddressError::InvalidKey)?;
if let AddressType::Integrated(ref mut payment_id) = meta.kind {
payment_id.copy_from_slice(&raw[65 .. 73]);
}
Ok(Address { meta, spend, view })
}
}


@ -1,4 +1,4 @@
use std::{sync::Mutex, collections::HashSet};
use lazy_static::lazy_static;
@ -7,7 +7,7 @@ use rand_distr::{Distribution, Gamma};
use curve25519_dalek::edwards::EdwardsPoint;
use crate::{transaction::RING_LEN, wallet::SpendableOutput, rpc::{RpcError, Rpc}};
const LOCK_WINDOW: usize = 10;
const MATURITY: u64 = 60;
@ -16,28 +16,34 @@ const BLOCK_TIME: usize = 120;
const BLOCKS_PER_YEAR: usize = 365 * 24 * 60 * 60 / BLOCK_TIME;
const TIP_APPLICATION: f64 = (LOCK_WINDOW * BLOCK_TIME) as f64;
const DECOYS: usize = RING_LEN - 1;
lazy_static! {
static ref GAMMA: Gamma<f64> = Gamma::new(19.28, 1.0 / 1.61).unwrap();
static ref DISTRIBUTION: Mutex<Vec<u64>> = Mutex::new(Vec::with_capacity(3000000));
}
async fn select_n<R: RngCore + CryptoRng>(
rng: &mut R,
rpc: &Rpc,
height: usize,
high: u64,
per_second: f64,
used: &mut HashSet<u64>,
count: usize
) -> Result<Vec<(u64, [EdwardsPoint; 2])>, RpcError> {
let mut iters = 0;
let mut confirmed = Vec::with_capacity(count);
while confirmed.len() != count {
let remaining = count - confirmed.len();
let mut candidates = Vec::with_capacity(remaining);
while candidates.len() != remaining {
iters += 1;
// This is cheap and on fresh chains, thousands of rounds may be needed
if iters == 10000 {
Err(RpcError::InternalError("not enough decoy candidates".to_string()))?;
}
// Use a gamma distribution
let mut age = GAMMA.sample(rng).exp();
if age > TIP_APPLICATION {
@ -49,6 +55,7 @@ async fn select_n<R: RngCore + CryptoRng>(
let o = (age * per_second) as u64;
if o < high {
let distribution = DISTRIBUTION.lock().unwrap();
let i = distribution.partition_point(|s| *s < (high - 1 - o));
let prev = i.saturating_sub(1);
let n = distribution[i] - distribution[prev];
@ -110,12 +117,29 @@ impl Decoys {
));
}
let distribution_len = {
let distribution = DISTRIBUTION.lock().unwrap();
distribution.len()
};
if distribution_len <= height {
let extension = rpc.get_output_distribution(distribution_len, height).await?;
DISTRIBUTION.lock().unwrap().extend(extension);
}
let high;
let per_second;
{
let mut distribution = DISTRIBUTION.lock().unwrap();
// If asked to use an older height than previously asked, truncate to ensure accuracy
// Should never happen, yet risks desyncing if it did
distribution.truncate(height + 1); // height is inclusive, and 0 is a valid height
high = distribution[distribution.len() - 1];
per_second = {
let blocks = distribution.len().min(BLOCKS_PER_YEAR);
let outputs = high - distribution[distribution.len().saturating_sub(blocks + 1)];
(outputs as f64) / ((blocks * BLOCK_TIME) as f64)
};
};
let mut used = HashSet::<u64>::new();
@ -123,10 +147,9 @@ impl Decoys {
used.insert(o.0);
}
// TODO: Simply create a TX with less than the target amount
if (high - MATURITY) < u64::try_from(inputs.len() * RING_LEN).unwrap() {
Err(RpcError::InternalError("not enough decoy candidates".to_string()))?;
}
// Select all decoys for this transaction, assuming we generate a sane transaction
@ -136,7 +159,6 @@ impl Decoys {
rng,
rpc,
height,
high,
per_second,
&mut used,
@ -160,10 +182,7 @@ impl Decoys {
// small chains
if high > 500 {
// Make sure the TX passes the sanity check that the median output is within the last 40%
let target_median = high * 3 / 5;
while ring[RING_LEN / 2].0 < target_median {
// If it's not, update the bottom half with new values to ensure the median only moves up
for removed in ring.drain(0 .. (RING_LEN / 2)).collect::<Vec<_>>() {
@ -180,7 +199,7 @@ impl Decoys {
// Select new outputs until we have a full sized ring again
ring.extend(
select_n(rng, rpc, height, high, per_second, &mut used, RING_LEN - ring.len()).await?
);
ring.sort_by(|a, b| a.0.cmp(&b.0));
}


@ -6,6 +6,8 @@ use crate::{
transaction::Input
};
pub mod address;
mod scan;
pub use scan::SpendableOutput;
@ -13,15 +15,17 @@ pub(crate) mod decoys;
pub(crate) use decoys::Decoys;
mod send;
pub use send::{Fee, TransactionError, SignableTransaction};
#[cfg(feature = "multisig")]
pub use send::TransactionMachine;
fn key_image_sort(x: &EdwardsPoint, y: &EdwardsPoint) -> std::cmp::Ordering {
x.compress().to_bytes().cmp(&y.compress().to_bytes()).reverse()
}
// https://github.com/monero-project/research-lab/issues/103
// https://gist.github.com/kayabaNerve/8066c13f1fe1573286ba7a2fd79f6100
pub(crate) fn uniqueness(inputs: &[Input]) -> [u8; 32] {
let mut u = b"domain_separator".to_vec();
let mut u = b"uniqueness".to_vec();
for input in inputs {
match input {
// If Gen, this should be the only input, making this loop somewhat pointless
@ -61,3 +65,9 @@ pub(crate) fn commitment_mask(shared_key: Scalar) -> Scalar {
mask.extend(shared_key.to_bytes());
hash_to_scalar(&mask)
}
#[derive(Clone, Copy)]
pub struct ViewPair {
pub spend: EdwardsPoint,
pub view: Scalar
}


@ -11,8 +11,8 @@ use monero::{consensus::deserialize, blockdata::transaction::ExtraField};
use crate::{
Commitment,
serialize::{write_varint, read_32, read_scalar, read_point},
transaction::{Timelock, Transaction},
wallet::{ViewPair, uniqueness, shared_key, amount_decryption, commitment_mask}
};
#[derive(Clone, PartialEq, Debug)]
@ -24,6 +24,30 @@ pub struct SpendableOutput {
pub commitment: Commitment
}
pub struct Timelocked(Timelock, Vec<SpendableOutput>);
impl Timelocked {
pub fn timelock(&self) -> Timelock {
self.0
}
pub fn not_locked(&self) -> Vec<SpendableOutput> {
if self.0 == Timelock::None {
return self.1.clone();
}
vec![]
}
/// Returns None if the Timelocks aren't comparable. Returns Some(vec![]) if none are unlocked
pub fn unlocked(&self, timelock: Timelock) -> Option<Vec<SpendableOutput>> {
// If the Timelocks are comparable, return the outputs if they're now unlocked
self.0.partial_cmp(&timelock).filter(|_| self.0 <= timelock).map(|_| self.1.clone())
}
pub fn ignore_timelock(&self) -> Vec<SpendableOutput> {
self.1.clone()
}
}
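A hedged usage sketch of the scanning API above (assumes a `Transaction` already fetched via the RPC; names are taken from this diff):

```rust
// Collect outputs owned by `view` which are spendable immediately
fn owned_outputs(tx: &Transaction, view: ViewPair) -> Vec<SpendableOutput> {
  tx.scan(view, false).not_locked()
}
```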
impl SpendableOutput {
pub fn serialize(&self) -> Vec<u8> {
let mut res = Vec::with_capacity(32 + 1 + 32 + 32 + 40);
@ -55,9 +79,9 @@ impl SpendableOutput {
impl Transaction {
pub fn scan(
&self,
view: ViewPair,
guaranteed: bool
) -> Timelocked {
let mut extra = vec![];
write_varint(&u64::try_from(self.prefix.extra.len()).unwrap(), &mut extra).unwrap();
extra.extend(&self.prefix.extra);
@ -75,61 +99,63 @@ impl Transaction {
pubkeys = m_pubkeys.iter().map(|key| key.point.decompress()).filter_map(|key| key).collect();
} else {
return Timelocked(self.prefix.timelock, vec![]);
};
let mut res = vec![];
for (o, output) in self.prefix.outputs.iter().enumerate() {
// TODO: This may be replaceable by pubkeys[o]
for pubkey in &pubkeys {
let key_offset = shared_key(
Some(uniqueness(&self.prefix.inputs)).filter(|_| guaranteed),
view.view,
pubkey,
o
);
// P - shared == spend
if (output.key - (&key_offset * &ED25519_BASEPOINT_TABLE)) != view.spend {
continue;
}
// Since we've found an output to us, get its amount
let mut commitment = Commitment::zero();
// P - shared == spend
let matches = |shared_key| (output.key - (&shared_key * &ED25519_BASEPOINT_TABLE)) == spend;
let test = |shared_key| Some(shared_key).filter(|shared_key| matches(*shared_key));
// Miner transaction
if output.amount != 0 {
commitment.amount = output.amount;
// Regular transaction
} else {
let amount = match self.rct_signatures.base.ecdh_info.get(o) {
Some(amount) => amount_decryption(*amount, key_offset),
// This should never happen, yet it may be possible with miner transactions?
// Using get just decreases the possibility of a panic and lets us move on in that case
None => break
};
// Rebuild the commitment to verify it
commitment = Commitment::new(commitment_mask(key_offset), amount);
// If this is a malicious commitment, move to the next output
// Any other R value will calculate to a different spend key and are therefore ignorable
if Some(&commitment.calculate()) != self.rct_signatures.base.commitments.get(o) {
break;
}
if commitment.amount != 0 {
res.push(SpendableOutput {
tx: self.hash(),
o: o.try_into().unwrap(),
key: output.key,
key_offset,
commitment
});
}
// Break to prevent public keys from being included multiple times, triggering multiple
// inclusions of the same output
break;
}
}
Timelocked(self.prefix.timelock, res)
}
}

View file

@ -9,11 +9,7 @@ use curve25519_dalek::{
edwards::EdwardsPoint
};
use monero::{consensus::Encodable, PublicKey, blockdata::transaction::SubField};
#[cfg(feature = "multisig")]
use frost::FrostError;
@ -21,73 +17,81 @@ use frost::FrostError;
use crate::{
Commitment,
random_scalar,
ringct::{
generate_key_image,
clsag::{ClsagError, ClsagInput, Clsag},
bulletproofs::{MAX_OUTPUTS, Bulletproofs},
RctBase, RctPrunable, RctSignatures
},
transaction::{Input, Output, Timelock, TransactionPrefix, Transaction},
rpc::{Rpc, RpcError},
wallet::{
address::{AddressType, Address}, SpendableOutput, Decoys,
key_image_sort, uniqueness, shared_key, commitment_mask, amount_encryption
}
};
#[cfg(feature = "multisig")]
use crate::frost::MultisigError;
#[cfg(feature = "multisig")]
mod multisig;
#[cfg(feature = "multisig")]
pub use multisig::TransactionMachine;
#[allow(non_snake_case)]
#[derive(Clone, PartialEq, Debug)]
struct SendOutput {
R: EdwardsPoint,
dest: EdwardsPoint,
commitment: Commitment,
amount: [u8; 8]
}
impl SendOutput {
fn new<R: RngCore + CryptoRng>(
rng: &mut R,
unique: [u8; 32],
output: (Address, u64),
o: usize
) -> SendOutput {
let r = random_scalar(rng);
let shared_key = shared_key(
Some(unique).filter(|_| output.0.meta.guaranteed),
r,
&output.0.view,
o
);
let spend = output.0.spend;
SendOutput {
R: match output.0.meta.kind {
AddressType::Standard => &r * &ED25519_BASEPOINT_TABLE,
AddressType::Integrated(_) => unimplemented!("SendOutput::new doesn't support Integrated addresses"),
AddressType::Subaddress => &r * spend
},
dest: ((&shared_key * &ED25519_BASEPOINT_TABLE) + spend),
commitment: Commitment::new(commitment_mask(shared_key), output.1),
amount: amount_encryption(output.1, shared_key)
}
}
}
#[derive(Clone, Error, Debug)]
pub enum TransactionError {
#[error("invalid address")]
InvalidAddress,
#[error("no inputs")]
NoInputs,
#[error("no outputs")]
NoOutputs,
#[error("only one output and no change address")]
NoChange,
#[error("too many outputs")]
TooManyOutputs,
#[error("not enough funds (in {0}, out {1})")]
NotEnoughFunds(u64, u64),
#[error("invalid address")]
InvalidAddress,
#[error("wrong spend private key")]
WrongPrivateKey,
#[error("rpc error ({0})")]
RpcError(RpcError),
#[error("clsag error ({0})")]
@ -122,7 +126,7 @@ async fn prepare_inputs<R: RngCore + CryptoRng>(
for (i, input) in inputs.iter().enumerate() {
signable.push((
spend + input.key_offset,
generate_key_image(spend + input.key_offset),
ClsagInput::new(
input.commitment,
decoys[i].clone()
@ -149,24 +153,49 @@ async fn prepare_inputs<R: RngCore + CryptoRng>(
Ok(signable)
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Fee {
pub per_weight: u64,
pub mask: u64
}
impl Fee {
pub fn calculate(&self, weight: usize) -> u64 {
((((self.per_weight * u64::try_from(weight).unwrap()) - 1) / self.mask) + 1) * self.mask
}
}
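calculate rounds the weight-based fee up to the next multiple of mask; a worked instance (illustrative values, not real network rates):

  let rate = Fee { per_weight: 3, mask: 10 };
  // (((3 * 14) - 1) / 10 + 1) * 10 = 50
  assert_eq!(rate.calculate(14), 50);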
#[derive(Clone, PartialEq, Debug)]
pub struct SignableTransaction {
inputs: Vec<SpendableOutput>,
payments: Vec<(Address, u64)>,
outputs: Vec<SendOutput>,
fee: u64
}
impl SignableTransaction {
pub fn new(
inputs: Vec<SpendableOutput>,
mut payments: Vec<(Address, u64)>,
change_address: Option<Address>,
fee_rate: Fee
) -> Result<SignableTransaction, TransactionError> {
// Make sure all addresses are valid
let test = |addr: Address| {
match addr.meta.kind {
AddressType::Standard => Ok(()),
AddressType::Integrated(..) => Err(TransactionError::InvalidAddress),
AddressType::Subaddress => Ok(())
}
};
for payment in &payments {
test(payment.0)?;
}
if let Some(change) = change_address {
test(change)?;
}
if inputs.len() == 0 {
Err(TransactionError::NoInputs)?;
}
@ -174,15 +203,55 @@ impl SignableTransaction {
Err(TransactionError::NoOutputs)?;
}
// TODO TX MAX SIZE
// If we don't have two outputs, as required by Monero, add a second
let mut change = payments.len() == 1;
if change && change_address.is_none() {
Err(TransactionError::NoChange)?;
}
let mut outputs = payments.len() + (if change { 1 } else { 0 });
// Calculate the extra length.
// Type, length, value, with 1 field for the first key and 1 field for the rest
let extra = (outputs * (2 + 32)) - (outputs.saturating_sub(2) * 2);
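    // Worked instance of the estimate: 2 outputs => (2 * 34) - 0 = 68 bytes of extra,
    // while 3 outputs => (3 * 34) - (1 * 2) = 100 bytes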
// Calculate the fee.
let mut fee = fee_rate.calculate(Transaction::fee_weight(inputs.len(), outputs, extra));
// Make sure we have enough funds
let in_amount = inputs.iter().map(|input| input.commitment.amount).sum::<u64>();
let mut out_amount = payments.iter().map(|payment| payment.1).sum::<u64>() + fee;
if in_amount < out_amount {
Err(TransactionError::NotEnoughFunds(in_amount, out_amount))?;
}
// If we have yet to add a change output, do so if it's economically viable
if (!change) && change_address.is_some() && (in_amount != out_amount) {
// Check even with the new fee, there's remaining funds
let change_fee = fee_rate.calculate(Transaction::fee_weight(inputs.len(), outputs + 1, extra)) - fee;
if (out_amount + change_fee) < in_amount {
change = true;
outputs += 1;
out_amount += change_fee;
fee += change_fee;
}
}
if outputs > MAX_OUTPUTS {
Err(TransactionError::TooManyOutputs)?;
}
if change {
payments.push((change_address.unwrap(), in_amount - out_amount));
}
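    // Worked instance: with in_amount 1000, out_amount 980, and change_fee 15,
    // 980 + 15 < 1000, so a change output for 1000 - 995 = 5 is added and the fee grows by 15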
Ok(
SignableTransaction {
inputs,
payments,
outputs: vec![],
fee
}
)
}
@ -191,39 +260,19 @@ impl SignableTransaction {
&mut self,
rng: &mut R,
uniqueness: [u8; 32]
) -> (Vec<Commitment>, Scalar) {
// Shuffle the payments
self.payments.shuffle(rng);
// Actually create the outputs
self.outputs = Vec::with_capacity(self.payments.len() + 1);
for (o, output) in self.payments.iter().enumerate() {
self.outputs.push(SendOutput::new(rng, uniqueness, *output, o));
}
let commitments = self.outputs.iter().map(|output| output.commitment).collect::<Vec<_>>();
let sum = commitments.iter().map(|commitment| commitment.mask).sum();
(commitments, sum)
}
fn prepare_transaction(
@ -232,6 +281,7 @@ impl SignableTransaction {
bp: Bulletproofs
) -> Transaction {
// Create the TX extra
// TODO: Review this for canonicity with Monero
let mut extra = vec![];
SubField::TxPublicKey(
PublicKey { point: self.outputs[0].R.compress() }
@ -240,7 +290,6 @@ impl SignableTransaction {
self.outputs[1 ..].iter().map(|output| PublicKey { point: output.R.compress() }).collect()
).consensus_encode(&mut extra).unwrap();
// Format it for monero-rs
let mut tx_outputs = Vec::with_capacity(self.outputs.len());
let mut ecdh_info = Vec::with_capacity(self.outputs.len());
for o in 0 .. self.outputs.len() {
@ -255,7 +304,7 @@ impl SignableTransaction {
Transaction {
prefix: TransactionPrefix {
version: 2,
timelock: Timelock::None,
inputs: vec![],
outputs: tx_outputs,
extra
@ -283,7 +332,12 @@ impl SignableTransaction {
) -> Result<Transaction, TransactionError> {
let mut images = Vec::with_capacity(self.inputs.len());
for input in &self.inputs {
let offset = spend + input.key_offset;
if (&offset * &ED25519_BASEPOINT_TABLE) != input.key {
Err(TransactionError::WrongPrivateKey)?;
}
images.push(generate_key_image(offset));
}
images.sort_by(key_image_sort);
@ -296,7 +350,7 @@ impl SignableTransaction {
key_image: *image
}).collect::<Vec<_>>()
)
);
let mut tx = self.prepare_transaction(&commitments, Bulletproofs::new(rng, &commitments)?);

View file

@ -1,15 +1,21 @@
use std::{sync::{Arc, RwLock}, collections::HashMap};
use rand_core::{RngCore, CryptoRng, SeedableRng};
use rand_chacha::ChaCha12Rng;
use curve25519_dalek::{traits::Identity, scalar::Scalar, edwards::{EdwardsPoint, CompressedEdwardsY}};
use transcript::{Transcript, RecommendedTranscript};
use frost::{
curve::Ed25519,
FrostError, FrostKeys,
sign::{
PreprocessMachine, SignMachine, SignatureMachine,
AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine
}
};
use crate::{
random_scalar, ringct::{clsag::{ClsagInput, ClsagDetails, ClsagMultisig}, bulletproofs::Bulletproofs, RctPrunable},
transaction::{Input, Transaction},
rpc::Rpc,
@ -20,44 +26,55 @@ pub struct TransactionMachine {
signable: SignableTransaction,
i: u16,
included: Vec<u16>,
transcript: RecommendedTranscript,
decoys: Vec<Decoys>,
inputs: Vec<Arc<RwLock<Option<ClsagDetails>>>>,
clsags: Vec<AlgorithmMachine<Ed25519, ClsagMultisig>>
}
pub struct TransactionSignMachine {
signable: SignableTransaction,
i: u16,
included: Vec<u16>,
transcript: RecommendedTranscript,
decoys: Vec<Decoys>,
inputs: Vec<Arc<RwLock<Option<ClsagDetails>>>>,
clsags: Vec<AlgorithmSignMachine<Ed25519, ClsagMultisig>>,
our_preprocess: Vec<u8>
}
pub struct TransactionSignatureMachine {
tx: Transaction,
clsags: Vec<AlgorithmSignatureMachine<Ed25519, ClsagMultisig>>
}
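These three types form FROST's typed state machine; a sketch of one signer's flow (machine comes from SignableTransaction::multisig below, preprocesses/shares are exchanged out of band, and rand_core's OsRng is assumed):

  let (sign_machine, our_preprocess) = machine.preprocess(&mut OsRng);
  // The message is calculated internally, so none is passed in
  let (signature_machine, our_share) = sign_machine.sign(preprocesses, &[])?;
  let tx = signature_machine.complete(shares)?;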
impl SignableTransaction {
pub async fn multisig(
self,
rpc: &Rpc,
keys: FrostKeys<Ed25519>,
mut transcript: RecommendedTranscript,
height: usize,
mut included: Vec<u16>
) -> Result<TransactionMachine, TransactionError> {
let mut inputs = vec![];
for _ in 0 .. self.inputs.len() {
// Doesn't resize as that will use a single Rc for the entire Vec
inputs.push(Arc::new(RwLock::new(None)));
}
let mut clsags = vec![];
// Create a RNG out of the input shared keys, which either requires the view key or being every
// sender, and the payments (address and amount), which a passive adversary may be able to know
// depending on how these transactions are coordinated
// Being every sender would already let you note rings which happen to use your transactions
// multiple times, already breaking privacy there
transcript.domain_separate(b"monero_transaction");
// Include the height we're using for our data
// The data itself will be included, making this unnecessary, yet a lot of this is technically
@ -77,16 +94,38 @@ impl SignableTransaction {
transcript.append_message(b"input_shared_key", &input.key_offset.to_bytes());
}
for payment in &self.payments {
transcript.append_message(b"payment_address", &payment.0.as_bytes());
transcript.append_message(b"payment_address", &payment.0.to_string().as_bytes());
transcript.append_message(b"payment_amount", &payment.1.to_le_bytes());
}
transcript.append_message(b"change", &self.change.as_bytes());
// Sort included before cloning it around
included.sort_unstable();
for (i, input) in self.inputs.iter().enumerate() {
// Check this the right set of keys
let offset = keys.offset(dalek_ff_group::Scalar(input.key_offset));
if offset.group_key().0 != input.key {
Err(TransactionError::WrongPrivateKey)?;
}
clsags.push(
AlgorithmMachine::new(
ClsagMultisig::new(
transcript.clone(),
input.key,
inputs[i].clone()
).map_err(|e| TransactionError::MultisigError(e))?,
Arc::new(offset),
&included
).map_err(|e| TransactionError::FrostError(e))?
);
}
// Select decoys
// Ideally, this would be done post entropy, instead of now, yet doing so would require sign
// to be async which isn't preferable. This should be suitably competent though
// While this inability means we can immediately create the input, moving it out of the
// Arc RwLock, keeping it within an Arc RwLock keeps our options flexible
let decoys = Decoys::select(
// Using a seeded RNG with a specific height, committed to above, should make these decoys
// committed to. They'll also be committed to later via the TX message as a whole
@ -96,62 +135,41 @@ impl SignableTransaction {
&self.inputs
).await.map_err(|e| TransactionError::RpcError(e))?;
Ok(TransactionMachine {
signable: self,
i: keys.params().i(),
included,
transcript,
decoys,
inputs,
clsags,
})
}
}
impl PreprocessMachine for TransactionMachine {
type Signature = Transaction;
type SignMachine = TransactionSignMachine;
fn preprocess<R: RngCore + CryptoRng>(
mut self,
rng: &mut R
) -> (TransactionSignMachine, Vec<u8>) {
// Iterate over each CLSAG calling preprocess
let mut serialized = Vec::with_capacity(
// D_{G, H}, E_{G, H}, DLEqs, key image addendum
self.clsags.len() * ((2 * (32 + 32)) + (2 * (32 + 32)) + ClsagMultisig::serialized_len())
);
let clsags = self.clsags.drain(..).map(|clsag| {
let (clsag, preprocess) = clsag.preprocess(rng);
serialized.extend(&preprocess);
clsag
}).collect();
let our_preprocess = serialized.clone();
// We could add further entropy here, and previous versions of this library did so
// As of right now, the multisig's key, the inputs being spent, and the FROST data itself
@ -162,24 +180,46 @@ impl StateMachine for TransactionMachine {
// increase privacy. If they're not sent in plain text, or are otherwise inaccessible, they
// already offer sufficient entropy. That's why further entropy is not included
(
TransactionSignMachine {
signable: self.signable,
i: self.i,
included: self.included,
transcript: self.transcript,
decoys: self.decoys,
inputs: self.inputs,
clsags,
our_preprocess,
},
serialized
)
}
}
impl SignMachine<Transaction> for TransactionSignMachine {
type SignatureMachine = TransactionSignatureMachine;
fn sign(
mut self,
mut commitments: HashMap<u16, Vec<u8>>,
msg: &[u8]
) -> Result<(TransactionSignatureMachine, Vec<u8>), FrostError> {
if msg.len() != 0 {
Err(
FrostError::InternalError(
"message was passed to the TransactionMachine when it generates its own".to_string()
)
)?;
}
// Add all commitments to the transcript for their entropy
// While each CLSAG will do this as they need to for security, they have their own transcripts
// cloned from this TX's initial premise's transcript. For our TX transcript to have the CLSAG
// data for entropy, it'll have to be added ourselves
commitments.insert(self.i, self.our_preprocess);
for l in &self.included {
self.transcript.append_message(b"participant", &(*l).to_be_bytes());
// FROST itself will error if this is None, so let it
@ -188,34 +228,41 @@ impl StateMachine for TransactionMachine {
}
}
// FROST commitments and their DLEqs, and the image and its DLEq
let clsag_len = (2 * (32 + 32)) + (2 * (32 + 32)) + ClsagMultisig::serialized_len();
for (l, commitments) in &commitments {
if commitments.len() != (self.clsags.len() * clsag_len) {
Err(FrostError::InvalidCommitment(*l))?;
}
}
// Convert the unified commitments to a Vec of the individual commitments
let mut commitments = (0 .. self.clsags.len()).map(|_| commitments.iter_mut().map(
|(l, commitments)| (*l, commitments.drain(.. clsag_len).collect::<Vec<_>>())
).collect::<HashMap<_, _>>()).collect::<Vec<_>>();
// Calculate the key images
// Clsag will parse/calculate/validate this as needed, yet doing so here as well provides
// the easiest API overall, as this is where the TX is (which needs the key images in its
// message), along with where the outputs are determined (where our change output needs these
// to be unique)
let mut images = vec![EdwardsPoint::identity(); self.clsags.len()];
for c in 0 .. self.clsags.len() {
for (l, preprocess) in &commitments[c] {
images[c] += CompressedEdwardsY(
preprocess[(clsag_len - 96) .. (clsag_len - 64)].try_into().map_err(|_| FrostError::InvalidCommitment(*l))?
).decompress().ok_or(FrostError::InvalidCommitment(*l))?;
}
}
// Create the actual transaction
let output_masks;
let mut tx = {
// Calculate uniqueness
let mut sorted_images = images.clone();
sorted_images.sort_by(key_image_sort);
let commitments;
(commitments, output_masks) = self.signable.prepare_outputs(
&mut ChaCha12Rng::from_seed(self.transcript.rng_seed(b"tx_keys")),
uniqueness(
&images.iter().map(|image| Input::ToKey {
@ -224,8 +271,7 @@ impl StateMachine for TransactionMachine {
key_image: *image
}).collect::<Vec<_>>()
)
).expect("Couldn't prepare outputs despite already doing a dummy prep");
self.output_masks = Some(output_masks);
);
self.signable.prepare_transaction(
&commitments,
@ -236,18 +282,19 @@ impl StateMachine for TransactionMachine {
)
};
// Sort the inputs, as expected
let mut sorted = Vec::with_capacity(self.clsags.len());
while self.clsags.len() != 0 {
sorted.push((
images.swap_remove(0),
self.signable.inputs.swap_remove(0),
self.decoys.swap_remove(0),
self.inputs.swap_remove(0),
self.clsags.swap_remove(0),
commitments.swap_remove(0)
));
}
sorted.sort_by(|x, y| key_image_sort(&x.0, &y.0));
let mut rng = ChaCha12Rng::from_seed(self.transcript.rng_seed(b"pseudo_out_masks"));
let mut sum_pseudo_outs = Scalar::zero();
@ -256,7 +303,7 @@ impl StateMachine for TransactionMachine {
let mut mask = random_scalar(&mut rng);
if sorted.len() == 0 {
mask = output_masks - sum_pseudo_outs;
} else {
sum_pseudo_outs += mask;
}
@ -264,20 +311,18 @@ impl StateMachine for TransactionMachine {
tx.prefix.inputs.push(
Input::ToKey {
amount: 0,
key_offsets: value.2.offsets.clone(),
key_image: value.0
}
);
*value.3.write().unwrap() = Some(
ClsagDetails::new(
ClsagInput::new(
value.1.commitment,
value.2
).map_err(|_| panic!("Signing an input which isn't present in the ring we created for it"))?,
mask
)
);
@ -286,30 +331,31 @@ impl StateMachine for TransactionMachine {
}
let msg = tx.signature_hash();
// Iterate over each CLSAG calling sign
let mut serialized = Vec::with_capacity(self.clsags.len() * 32);
let clsags = self.clsags.drain(..).map(|clsag| {
let (clsag, share) = clsag.sign(commitments.remove(0), &msg)?;
serialized.extend(&share);
Ok(clsag)
}).collect::<Result<_, _>>()?;
Ok((TransactionSignatureMachine { tx, clsags }, serialized))
}
}
impl SignatureMachine<Transaction> for TransactionSignatureMachine {
fn complete(self, mut shares: HashMap<u16, Vec<u8>>) -> Result<Transaction, FrostError> {
let mut tx = self.tx;
match tx.rct_signatures.prunable {
RctPrunable::Null => panic!("Signing for RctPrunable::Null"),
RctPrunable::Clsag { ref mut clsags, ref mut pseudo_outs, .. } => {
for clsag in self.clsags {
let (clsag, pseudo_out) = clsag.complete(
shares.iter_mut().map(
|(l, shares)| (*l, shares.drain(.. 32).collect())
).collect::<HashMap<_, _>>()
)?;
clsags.push(clsag);
pseudo_outs.push(pseudo_out);
}
@ -317,12 +363,4 @@ impl StateMachine for TransactionMachine {
}
Ok(tx)
}
}

View file

@ -1,4 +1,6 @@
use std::sync::Mutex;
#[cfg(feature = "multisig")]
use std::collections::HashMap;
use lazy_static::lazy_static;
@ -12,21 +14,15 @@ use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE;
#[cfg(feature = "multisig")]
use dalek_ff_group::Scalar;
#[cfg(feature = "multisig")]
use transcript::{Transcript, RecommendedTranscript};
#[cfg(feature = "multisig")]
use frost::{curve::Ed25519, tests::{THRESHOLD, key_gen, sign}};
use monero_serai::{random_scalar, wallet::{ViewPair, address::{Network, AddressType}, SignableTransaction}};
mod rpc;
use crate::rpc::{rpc, mine_block};
#[cfg(feature = "multisig")]
use monero_serai::frost::Ed25519;
lazy_static! {
static ref SEQUENTIAL: Mutex<()> = Mutex::new(());
}
@ -72,15 +68,10 @@ async fn send_core(test: usize, multisig: bool) {
}
}
let view_pair = ViewPair { view, spend: spend_pub };
let addr = view_pair.address(Network::Mainnet, AddressType::Standard, false);
// TODO
let fee = rpc.get_fee().await.unwrap();
let start = rpc.get_height().await.unwrap();
for _ in 0 .. 7 {
@ -100,7 +91,7 @@ async fn send_core(test: usize, multisig: bool) {
// Grab the largest output available
let output = {
let mut outputs = tx.as_ref().unwrap().scan(view_pair, false).ignore_timelock();
outputs.sort_by(|x, y| x.commitment.amount.cmp(&y.commitment.amount).reverse());
outputs.swap_remove(0)
};
@ -125,14 +116,14 @@ async fn send_core(test: usize, multisig: bool) {
for i in (start + 1) .. (start + 9) {
let tx = rpc.get_block_transactions(i).await.unwrap().swap_remove(0);
let output = tx.scan(view_pair, false).ignore_timelock().swap_remove(0);
amount += output.commitment.amount;
outputs.push(output);
}
}
let mut signable = SignableTransaction::new(
outputs, vec![(addr, amount - 10000000000)], Some(addr), fee
).unwrap();
if !multisig {
@ -145,11 +136,10 @@ async fn send_core(test: usize, multisig: bool) {
machines.insert(
i,
signable.clone().multisig(
b"Monero Serai Test Transaction".to_vec(),
&mut OsRng,
&rpc,
(*keys[&i]).clone(),
RecommendedTranscript::new(b"Monero Serai Test Transaction"),
rpc.get_height().await.unwrap() - 10,
(1 ..= THRESHOLD).collect::<Vec<_>>()
).await.unwrap()
);

View file

@ -1,9 +1,11 @@
[package]
name = "dalek-ff-group"
version = "0.1.0"
version = "0.1.1"
description = "ff/group bindings around curve25519-dalek"
license = "MIT"
repository = "https://github.com/serai-dex/serai"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"]
edition = "2021"
[dependencies]
@ -12,7 +14,8 @@ digest = "0.10"
subtle = "2.4"
ff = "0.11"
group = "0.11"
ff = "0.12"
group = "0.12"
crypto-bigint = "0.4"
curve25519-dalek = "3.2"

View file

@ -1,3 +1,6 @@
# Dalek FF/Group
ff/group bindings around curve25519-dalek with a from_hash/random function based
around modern dependencies.
Some functions currently remain unimplemented.
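A minimal usage sketch (assuming the blake2 crate, whose Blake2b512 provides the 64-byte digest from_hash requires):

  use blake2::{Digest, Blake2b512};
  use dalek_ff_group::Scalar;

  let scalar = Scalar::from_hash(Blake2b512::new().chain_update(b"example"));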

View file

@ -0,0 +1,142 @@
use core::ops::{Add, AddAssign, Sub, SubAssign, Neg, Mul, MulAssign};
use rand_core::RngCore;
use subtle::{Choice, CtOption, ConstantTimeEq, ConditionallySelectable};
use crypto_bigint::{Encoding, U256, U512};
use ff::{Field, PrimeField, FieldBits, PrimeFieldBits};
use crate::{choice, constant_time, math_op, math, from_wrapper, from_uint};
const FIELD_MODULUS: U256 = U256::from_be_hex(
"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed"
);
#[derive(Clone, Copy, PartialEq, Eq, Debug, Default)]
pub struct FieldElement(U256);
pub const SQRT_M1: FieldElement = FieldElement(
U256::from_be_hex("2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0")
);
constant_time!(FieldElement, U256);
math!(
FieldElement,
FieldElement,
|x, y| U256::add_mod(&x, &y, &FIELD_MODULUS),
|x, y| U256::sub_mod(&x, &y, &FIELD_MODULUS),
|x, y| {
#[allow(non_snake_case)]
let WIDE_MODULUS: U512 = U512::from((U256::ZERO, FIELD_MODULUS));
debug_assert_eq!(FIELD_MODULUS.to_le_bytes()[..], WIDE_MODULUS.to_le_bytes()[.. 32]);
let wide = U256::mul_wide(&x, &y);
U256::from_le_slice(
&U512::from((wide.1, wide.0)).reduce(&WIDE_MODULUS).unwrap().to_le_bytes()[.. 32]
)
}
);
from_uint!(FieldElement, U256);
impl Neg for FieldElement {
type Output = Self;
fn neg(self) -> Self::Output { Self(self.0.neg_mod(&FIELD_MODULUS)) }
}
impl Field for FieldElement {
fn random(mut rng: impl RngCore) -> Self {
let mut bytes = [0; 64];
rng.fill_bytes(&mut bytes);
#[allow(non_snake_case)]
let WIDE_MODULUS: U512 = U512::from((U256::ZERO, FIELD_MODULUS));
debug_assert_eq!(FIELD_MODULUS.to_le_bytes()[..], WIDE_MODULUS.to_le_bytes()[.. 32]);
FieldElement(
U256::from_le_slice(
&U512::from_be_bytes(bytes).reduce(&WIDE_MODULUS).unwrap().to_le_bytes()[.. 32]
)
)
}
fn zero() -> Self { Self(U256::ZERO) }
fn one() -> Self { Self(U256::ONE) }
fn square(&self) -> Self { *self * self }
fn double(&self) -> Self { *self + self }
fn invert(&self) -> CtOption<Self> {
CtOption::new(self.pow(-FieldElement(U256::from(2u64))), !self.is_zero())
}
fn sqrt(&self) -> CtOption<Self> {
let c1 = SQRT_M1;
let c2 = FIELD_MODULUS.saturating_add(&U256::from(3u8)).checked_div(&U256::from(8u8)).unwrap();
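    // p == 5 mod 8, so self^((p + 3) / 8) is a candidate square root; if its square is
    // -self, multiplying by sqrt(-1) (c1) yields the actual root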
let tv1 = self.pow(FieldElement(c2));
let tv2 = tv1 * c1;
    let res = Self::conditional_select(&tv2, &tv1, tv1.square().ct_eq(self));
    debug_assert_eq!(res * res, *self);
    CtOption::new(res, 1.into())
}
fn is_zero(&self) -> Choice { self.0.ct_eq(&U256::ZERO) }
fn cube(&self) -> Self { *self * self * self }
fn pow_vartime<S: AsRef<[u64]>>(&self, _exp: S) -> Self { unimplemented!() }
}
impl PrimeField for FieldElement {
type Repr = [u8; 32];
const NUM_BITS: u32 = 255;
const CAPACITY: u32 = 254;
fn from_repr(bytes: [u8; 32]) -> CtOption<Self> {
let res = Self(U256::from_le_bytes(bytes));
CtOption::new(res, res.0.add_mod(&U256::ZERO, &FIELD_MODULUS).ct_eq(&res.0))
}
fn to_repr(&self) -> [u8; 32] { self.0.to_le_bytes() }
const S: u32 = 2;
fn is_odd(&self) -> Choice { unimplemented!() }
fn multiplicative_generator() -> Self { 2u64.into() }
fn root_of_unity() -> Self {
FieldElement(
U256::from_be_hex("2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0")
)
}
}
impl PrimeFieldBits for FieldElement {
type ReprBits = [u8; 32];
fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {
self.to_repr().into()
}
fn char_le_bits() -> FieldBits<Self::ReprBits> {
FIELD_MODULUS.to_le_bytes().into()
}
}
impl FieldElement {
pub fn from_square(value: [u8; 32]) -> FieldElement {
let value = U256::from_le_bytes(value);
FieldElement(value) * FieldElement(value)
}
pub fn pow(&self, other: FieldElement) -> FieldElement {
let mut res = FieldElement(U256::ONE);
let mut m = *self;
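    // LSB-first square-and-multiply: conditionally multiply in the current power of self
    // for each bit, using conditional_select to remain constant time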
for bit in other.to_le_bits() {
res *= FieldElement::conditional_select(&FieldElement(U256::ONE), &m, choice(bit));
m *= m;
}
res
}
}
#[test]
fn test_mul() {
assert_eq!(FieldElement(FIELD_MODULUS) * FieldElement::one(), FieldElement::zero());
assert_eq!(FieldElement(FIELD_MODULUS) * FieldElement::one().double(), FieldElement::zero());
assert_eq!(SQRT_M1.square(), -FieldElement::one());
}

View file

@ -1,114 +1,189 @@
#![no_std]
use core::{
ops::{Deref, Add, AddAssign, Sub, SubAssign, Neg, Mul, MulAssign},
borrow::Borrow,
iter::{Iterator, Sum}
};
use subtle::{ConstantTimeEq, ConditionallySelectable};
use rand_core::RngCore;
use digest::{consts::U64, Digest};
use subtle::{Choice, CtOption};
pub use curve25519_dalek as dalek;
use dalek::{
constants,
traits::Identity,
scalar::Scalar as DScalar,
edwards::{
EdwardsPoint as DEdwardsPoint,
EdwardsBasepointTable as DEdwardsBasepointTable,
CompressedEdwardsY as DCompressedEdwards
},
ristretto::{
RistrettoPoint as DRistrettoPoint,
RistrettoBasepointTable as DRistrettoBasepointTable,
CompressedRistretto as DCompressedRistretto
}
};
use ff::{Field, PrimeField, FieldBits, PrimeFieldBits};
use group::{Group, GroupEncoding, prime::PrimeGroup};
pub mod field;
// Convert a boolean to a Choice in a *presumably* constant time manner
fn choice(value: bool) -> Choice {
let bit = value as u8;
debug_assert_eq!(bit | 1, 1);
Choice::from(bit)
}
macro_rules! deref_borrow {
($Source: ident, $Target: ident) => {
impl Deref for $Source {
type Target = $Target;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl Borrow<$Target> for $Source {
fn borrow(&self) -> &$Target {
&self.0
}
}
impl Borrow<$Target> for &$Source {
fn borrow(&self) -> &$Target {
&self.0
}
}
}
}
#[doc(hidden)]
#[macro_export]
macro_rules! constant_time {
($Value: ident, $Inner: ident) => {
impl ConstantTimeEq for $Value {
fn ct_eq(&self, other: &Self) -> Choice { self.0.ct_eq(&other.0) }
}
impl ConditionallySelectable for $Value {
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
$Value($Inner::conditional_select(&a.0, &b.0, choice))
}
}
}
}
#[doc(hidden)]
#[macro_export]
macro_rules! math_op {
(
$Value: ident,
$Other: ident,
$Op: ident,
$op_fn: ident,
$Assign: ident,
$assign_fn: ident,
$function: expr
) => {
impl $Op<$Other> for $Value {
type Output = $Value;
fn $op_fn(self, other: $Other) -> Self::Output {
Self($function(self.0, other.0))
}
}
impl $Assign<$Other> for $Value {
fn $assign_fn(&mut self, other: $Other) {
self.0 = $function(self.0, other.0);
}
}
impl<'a> $Op<&'a $Other> for $Value {
type Output = $Value;
fn $op_fn(self, other: &'a $Other) -> Self::Output {
Self($function(self.0, other.0))
}
}
impl<'a> $Assign<&'a $Other> for $Value {
fn $assign_fn(&mut self, other: &'a $Other) {
self.0 = $function(self.0, other.0);
}
}
}
}
#[doc(hidden)]
#[macro_export]
macro_rules! math {
($Value: ident, $Factor: ident, $add: expr, $sub: expr, $mul: expr) => {
math_op!($Value, $Value, Add, add, AddAssign, add_assign, $add);
math_op!($Value, $Value, Sub, sub, SubAssign, sub_assign, $sub);
math_op!($Value, $Factor, Mul, mul, MulAssign, mul_assign, $mul);
}
}
macro_rules! math_neg {
($Value: ident, $Factor: ident, $add: expr, $sub: expr, $mul: expr) => {
math!($Value, $Factor, $add, $sub, $mul);
impl Neg for $Value {
type Output = Self;
fn neg(self) -> Self::Output { Self(-self.0) }
}
}
}
#[doc(hidden)]
#[macro_export]
macro_rules! from_wrapper {
($wrapper: ident, $inner: ident, $uint: ident) => {
impl From<$uint> for $wrapper {
fn from(a: $uint) -> $wrapper { Self($inner::from(a)) }
}
}
}
#[doc(hidden)]
#[macro_export]
macro_rules! from_uint {
($wrapper: ident, $inner: ident) => {
from_wrapper!($wrapper, $inner, u8);
from_wrapper!($wrapper, $inner, u16);
from_wrapper!($wrapper, $inner, u32);
from_wrapper!($wrapper, $inner, u64);
}
}
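// For example, from_uint!(Scalar, DScalar) below expands to From<u8>/From<u16>/From<u32>/
// From<u64> impls for Scalar, each wrapping DScalar::from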
/// Wrapper around the dalek Scalar type
#[derive(Clone, Copy, PartialEq, Eq, Debug, Default)]
pub struct Scalar(pub DScalar);
deref_borrow!(Scalar, DScalar);
constant_time!(Scalar, DScalar);
math_neg!(Scalar, Scalar, DScalar::add, DScalar::sub, DScalar::mul);
from_uint!(Scalar, DScalar);
impl Scalar {
/// Perform wide reduction on a 64-byte array to create a Scalar without bias
pub fn from_bytes_mod_order_wide(bytes: &[u8; 64]) -> Scalar {
Self(DScalar::from_bytes_mod_order_wide(bytes))
}
/// Derive a Scalar without bias from a digest via wide reduction
pub fn from_hash<D: Digest<OutputSize = U64>>(hash: D) -> Scalar {
let mut output = [0u8; 64];
output.copy_from_slice(&hash.finalize());
Scalar(DScalar::from_bytes_mod_order_wide(&output))
}
}
impl Field for Scalar {
fn random(mut rng: impl RngCore) -> Self {
let mut r = [0; 64];
@ -121,194 +196,145 @@ impl Field for Scalar {
fn square(&self) -> Self { *self * self }
fn double(&self) -> Self { *self + self }
fn invert(&self) -> CtOption<Self> {
CtOption::new(Self(self.0.invert()), !self.is_zero())
}
fn sqrt(&self) -> CtOption<Self> { unimplemented!() }
fn is_zero(&self) -> Choice { self.0.ct_eq(&DScalar::zero()) }
fn cube(&self) -> Self { *self * self * self }
fn pow_vartime<S: AsRef<[u64]>>(&self, _exp: S) -> Self { unimplemented!() }
}
impl PrimeField for Scalar {
type Repr = [u8; 32];
const NUM_BITS: u32 = 253;
const CAPACITY: u32 = 252;
fn from_repr(bytes: [u8; 32]) -> CtOption<Self> {
let scalar = DScalar::from_canonical_bytes(bytes);
// TODO: This unwrap_or isn't constant time, yet do we have an alternative?
CtOption::new(Scalar(scalar.unwrap_or(DScalar::zero())), choice(scalar.is_some()))
}
fn to_repr(&self) -> [u8; 32] { self.0.to_bytes() }
const S: u32 = 2;
fn is_odd(&self) -> Choice { unimplemented!() }
fn multiplicative_generator() -> Self { 2u64.into() }
fn root_of_unity() -> Self { unimplemented!() }
}
impl PrimeFieldBits for Scalar {
type ReprBits = [u8; 32];
fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {
self.to_repr().into()
}
fn char_le_bits() -> FieldBits<Self::ReprBits> {
let mut bytes = (Scalar::zero() - Scalar::one()).to_repr();
bytes[0] += 1;
debug_assert_eq!(DScalar::from_bytes_mod_order(bytes), DScalar::zero());
bytes.into()
}
}
macro_rules! dalek_group {
(
$Point: ident,
$DPoint: ident,
$torsion_free: expr,
$Table: ident,
$DTable: ident,
$DCompressed: ident,
$BASEPOINT_POINT: ident,
$BASEPOINT_TABLE: ident
) => {
/// Wrapper around the dalek Point type. For Ed25519, this is restricted to the prime subgroup
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct $Point(pub $DPoint);
deref_borrow!($Point, $DPoint);
constant_time!($Point, $DPoint);
math_neg!($Point, Scalar, $DPoint::add, $DPoint::sub, $DPoint::mul);
pub const $BASEPOINT_POINT: $Point = $Point(constants::$BASEPOINT_POINT);
impl Sum<$Point> for $Point {
fn sum<I: Iterator<Item = $Point>>(iter: I) -> $Point { Self($DPoint::sum(iter)) }
}
impl<'a> Sum<&'a $Point> for $Point {
fn sum<I: Iterator<Item = &'a $Point>>(iter: I) -> $Point { Self($DPoint::sum(iter)) }
}
impl Group for $Point {
type Scalar = Scalar;
// Ideally, this would be cryptographically secure, yet that's not a bound on the trait
// k256 also does this
fn random(rng: impl RngCore) -> Self { &$BASEPOINT_TABLE * Scalar::random(rng) }
fn identity() -> Self { Self($DPoint::identity()) }
fn generator() -> Self { $BASEPOINT_POINT }
fn is_identity(&self) -> Choice { self.0.ct_eq(&$DPoint::identity()) }
fn double(&self) -> Self { *self + self }
}
impl GroupEncoding for $Point {
type Repr = [u8; 32];
fn from_bytes(bytes: &Self::Repr) -> CtOption<Self> {
let decompressed = $DCompressed(*bytes).decompress();
// TODO: Same note on unwrap_or as above
let point = decompressed.unwrap_or($DPoint::identity());
CtOption::new($Point(point), choice(decompressed.is_some()) & choice($torsion_free(point)))
}
fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption<Self> {
$Point::from_bytes(bytes)
}
fn to_bytes(&self) -> Self::Repr {
self.0.compress().to_bytes()
}
}
impl PrimeGroup for $Point {}
/// Wrapper around the dalek Table type, offering efficient multiplication against the
/// basepoint
pub struct $Table(pub $DTable);
deref_borrow!($Table, $DTable);
pub const $BASEPOINT_TABLE: $Table = $Table(constants::$BASEPOINT_TABLE);
impl Mul<Scalar> for &$Table {
type Output = $Point;
fn mul(self, b: Scalar) -> $Point { $Point(&b.0 * &self.0) }
}
};
}
dalek_group!(
EdwardsPoint,
DEdwardsPoint,
|point: DEdwardsPoint| point.is_torsion_free(),
EdwardsBasepointTable,
DEdwardsBasepointTable,
DCompressedEdwards,
ED25519_BASEPOINT_POINT,
ED25519_BASEPOINT_TABLE
);
dalek_group!(
RistrettoPoint,
DRistrettoPoint,
|_| true,
RistrettoBasepointTable,
DRistrettoBasepointTable,
DCompressedRistretto,
RISTRETTO_BASEPOINT_POINT,
RISTRETTO_BASEPOINT_TABLE
);
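A sketch of the generated API in use (assuming rand_core's OsRng):

  use rand_core::OsRng;
  use group::{ff::Field, GroupEncoding};
  use dalek_ff_group::{Scalar, EdwardsPoint, ED25519_BASEPOINT_TABLE};

  let scalar = Scalar::random(&mut OsRng);
  let point = &ED25519_BASEPOINT_TABLE * scalar;
  assert_eq!(EdwardsPoint::from_bytes(&point.to_bytes()).unwrap(), point);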

38
crypto/dleq/Cargo.toml Normal file
View file

@ -0,0 +1,38 @@
[package]
name = "dleq"
version = "0.1.0"
description = "Implementation of single and cross-curve Discrete Log Equality proofs"
license = "MIT"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
[dependencies]
thiserror = "1"
rand_core = "0.6"
digest = "0.10"
transcript = { package = "flexible-transcript", path = "../transcript", version = "0.1" }
ff = "0.12"
group = "0.12"
multiexp = { path = "../multiexp", features = ["batch"], optional = true }
[dev-dependencies]
hex-literal = "0.3"
blake2 = "0.10"
k256 = { version = "0.11", features = ["arithmetic", "bits"] }
dalek-ff-group = { path = "../dalek-ff-group" }
transcript = { package = "flexible-transcript", path = "../transcript", features = ["recommended"] }
[features]
serialize = []
experimental = ["multiexp"]
secure_capacity_difference = []
# Only applies to cross_group, yet is default to ensure security
default = ["secure_capacity_difference"]

21
crypto/dleq/LICENSE Normal file
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2020-2022 Luke Parker, Lee Bousfield
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

63
crypto/dleq/README.md Normal file
View file

@ -0,0 +1,63 @@
# Discrete Log Equality
Implementation of discrete log equality proofs for curves implementing
`ff`/`group`. There is also a highly experimental cross-group DLEq proof, under
the `experimental` feature, which has no formal security proof available yet is
included here regardless. This library has NOT undergone auditing.
### Cross-Group DLEq
The present cross-group DLEq is based off
[MRL-0010](https://web.getmonero.org/resources/research-lab/pubs/MRL-0010.pdf),
which isn't computationally correct: while it proves both keys have the same
discrete logarithm for their `G'`/`H'` component, it doesn't prove the lack of a
`G`/`H` component. Accordingly, it was augmented with a pair of Schnorr Proofs
of Knowledge, proving a known `G'`/`H'` component and guaranteeing a lack of a
`G`/`H` component (assuming an unknown relation between `G`/`H` and `G'`/`H'`).
The challenges for the ring signatures were also merged, removing one element
from each bit's proof with only a slight reduction to challenge security (as
instead of being uniform over each scalar field, they're uniform over the
mutual bit capacity of each scalar field). This reduction is identical to the
one applied to the proved-for scalar, and accordingly should not reduce overall
security. It does create a lack of domain separation, yet that shouldn't be an
issue.
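For intuition, each bit's ring signature closes a hash chain of the form
(sketch; `H` is the transcript challenge, `G'` the proving generator, and `A_i`
the ring members):

    e_{i+1} = H(s_i G' - e_i A_i)

The `(e, s)` form transmits the initial challenge and checks the chain closes
on itself, while the `((R_G, R_H), s)` forms described below transmit the
initial nonce commitments instead, so the final equation `R = s G' - e A` can
be batch verified.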
The following variants are available:
- `ClassicLinear`. This is only for reference purposes, being the above
described proof, with no further optimizations.
- `ConciseLinear`. This proves for 2 bits at a time, not increasing the
signature size for both bits yet decreasing the amount of
commitments/challenges in total.
- `EfficientLinear`. This provides ring signatures in the form
`((R_G, R_H), s)`, instead of `(e, s)`, and accordingly enables a batch
verification of their final step. It is the most performant, and also the
largest, option.
- `CompromiseLinear`. This provides signatures in the form `((R_G, R_H), s)` AND
proves for 2-bits at a time. While this increases the amount of steps in
verifying the ring signatures, which aren't batch verified, and decreases the
amount of items batched (an operation which grows in efficiency with
quantity), it strikes a balance between speed and size.
The following numbers are from benchmarks performed with k256/curve25519_dalek
on an Intel i7-1185G7:
| Algorithm | Size | Verification Time |
|--------------------|-------------------------|-------------------|
| `ClassicLinear` | 56829 bytes (+27%) | 157ms (0%) |
| `ConciseLinear` | 44607 bytes (Reference) | 156ms (Reference) |
| `EfficientLinear` | 65145 bytes (+46%) | 122ms (-22%) |
| `CompromiseLinear` | 48765 bytes (+9%) | 137ms (-12%) |
`CompromiseLinear` is the best general choice, being only marginally sub-optimal
regarding size yet still achieving most of the desired performance
improvements. That said, neither the original postulation (which had flaws) nor
any construction here has been proven or audited. Accordingly, they are solely
experimental, and none are recommended.
All proofs are suffixed "Linear" in the hope a logarithmic proof makes itself
available, which would likely immediately become the most efficient option.

View file

@ -0,0 +1,230 @@
use rand_core::{RngCore, CryptoRng};
use transcript::Transcript;
use group::{ff::{Field, PrimeFieldBits}, prime::PrimeGroup};
use multiexp::BatchVerifier;
use crate::{
Generators,
cross_group::{DLEqError, scalar::{scalar_convert, mutual_scalar_from_bytes}}
};
#[cfg(feature = "serialize")]
use std::io::{Read, Write};
#[cfg(feature = "serialize")]
use ff::PrimeField;
#[cfg(feature = "serialize")]
use crate::{read_scalar, cross_group::read_point};
#[allow(non_camel_case_types)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) enum Re<G0: PrimeGroup, G1: PrimeGroup> {
R(G0, G1),
// Merged challenges have a slight security reduction, yet one already applied to the scalar
// being proven for, and this saves ~8kb. Alternatively, challenges could be redefined as a seed,
// present here, which is then hashed for each of the two challenges, remaining unbiased/unique
// while maintaining the bandwidth savings, yet also while adding 252 hashes for
// Secp256k1/Ed25519
e(G0::Scalar)
}
impl<G0: PrimeGroup, G1: PrimeGroup> Re<G0, G1> {
#[allow(non_snake_case)]
pub(crate) fn R_default() -> Re<G0, G1> {
Re::R(G0::identity(), G1::identity())
}
pub(crate) fn e_default() -> Re<G0, G1> {
Re::e(G0::Scalar::zero())
}
}
#[allow(non_snake_case)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) struct Aos<G0: PrimeGroup, G1: PrimeGroup, const RING_LEN: usize> {
Re_0: Re<G0, G1>,
s: [(G0::Scalar, G1::Scalar); RING_LEN]
}
impl<
G0: PrimeGroup,
G1: PrimeGroup,
const RING_LEN: usize
> Aos<G0, G1, RING_LEN> where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits {
#[allow(non_snake_case)]
fn nonces<T: Transcript>(mut transcript: T, nonces: (G0, G1)) -> (G0::Scalar, G1::Scalar) {
transcript.domain_separate(b"aos_membership_proof");
transcript.append_message(b"ring_len", &u8::try_from(RING_LEN).unwrap().to_le_bytes());
transcript.append_message(b"nonce_0", nonces.0.to_bytes().as_ref());
transcript.append_message(b"nonce_1", nonces.1.to_bytes().as_ref());
mutual_scalar_from_bytes(transcript.challenge(b"challenge").as_ref())
}
#[allow(non_snake_case)]
fn R(
generators: (Generators<G0>, Generators<G1>),
s: (G0::Scalar, G1::Scalar),
A: (G0, G1),
e: (G0::Scalar, G1::Scalar)
) -> (G0, G1) {
(((generators.0.alt * s.0) - (A.0 * e.0)), ((generators.1.alt * s.1) - (A.1 * e.1)))
}
#[allow(non_snake_case)]
fn R_batch(
generators: (Generators<G0>, Generators<G1>),
s: (G0::Scalar, G1::Scalar),
A: (G0, G1),
e: (G0::Scalar, G1::Scalar)
) -> (Vec<(G0::Scalar, G0)>, Vec<(G1::Scalar, G1)>) {
(vec![(-s.0, generators.0.alt), (e.0, A.0)], vec![(-s.1, generators.1.alt), (e.1, A.1)])
}
#[allow(non_snake_case)]
fn R_nonces<T: Transcript>(
transcript: T,
generators: (Generators<G0>, Generators<G1>),
s: (G0::Scalar, G1::Scalar),
A: (G0, G1),
e: (G0::Scalar, G1::Scalar)
) -> (G0::Scalar, G1::Scalar) {
Self::nonces(transcript, Self::R(generators, s, A, e))
}
#[allow(non_snake_case)]
pub(crate) fn prove<R: RngCore + CryptoRng, T: Clone + Transcript>(
rng: &mut R,
transcript: T,
generators: (Generators<G0>, Generators<G1>),
ring: &[(G0, G1)],
actual: usize,
blinding_key: (G0::Scalar, G1::Scalar),
mut Re_0: Re<G0, G1>
) -> Self {
// While it is possible to use larger values, it's not efficient to do so
// Covering 2 bits with one 4-member ring costs as many responses as two 2-member rings
// (2 + 2 == 2^2), yet covering 3 bits with an 8-member ring costs more than three 2-member
// rings (2 + 2 + 2 < 2^3)
debug_assert!((RING_LEN == 2) || (RING_LEN == 4));
debug_assert_eq!(RING_LEN, ring.len());
let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); RING_LEN];
let r = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng));
#[allow(non_snake_case)]
let original_R = (generators.0.alt * r.0, generators.1.alt * r.1);
#[allow(non_snake_case)]
let mut R = original_R;
for i in ((actual + 1) .. (actual + RING_LEN + 1)).map(|i| i % RING_LEN) {
let e = Self::nonces(transcript.clone(), R);
if i == 0 {
match Re_0 {
Re::R(ref mut R0_0, ref mut R1_0) => { *R0_0 = R.0; *R1_0 = R.1 },
Re::e(ref mut e_0) => *e_0 = e.0
}
}
// Solve for the real index
if i == actual {
s[i] = (r.0 + (e.0 * blinding_key.0), r.1 + (e.1 * blinding_key.1));
debug_assert_eq!(Self::R(generators, s[i], ring[actual], e), original_R);
break;
// Generate a decoy response
} else {
s[i] = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng));
}
R = Self::R(generators, s[i], ring[i], e);
}
Aos { Re_0, s }
}
// Assumes the ring has already been transcripted in some form. Critically insecure if it hasn't
pub(crate) fn verify<R: RngCore + CryptoRng, T: Clone + Transcript>(
&self,
rng: &mut R,
transcript: T,
generators: (Generators<G0>, Generators<G1>),
batch: &mut (BatchVerifier<(), G0>, BatchVerifier<(), G1>),
ring: &[(G0, G1)]
) -> Result<(), DLEqError> {
debug_assert!((RING_LEN == 2) || (RING_LEN == 4));
debug_assert_eq!(RING_LEN, ring.len());
#[allow(non_snake_case)]
match self.Re_0 {
Re::R(R0_0, R1_0) => {
let mut e = Self::nonces(transcript.clone(), (R0_0, R1_0));
for i in 0 .. (RING_LEN - 1) {
e = Self::R_nonces(transcript.clone(), generators, self.s[i], ring[i], e);
}
let mut statements = Self::R_batch(
generators,
*self.s.last().unwrap(),
*ring.last().unwrap(),
e
);
statements.0.push((G0::Scalar::one(), R0_0));
statements.1.push((G1::Scalar::one(), R1_0));
batch.0.queue(&mut *rng, (), statements.0);
batch.1.queue(&mut *rng, (), statements.1);
},
Re::e(e_0) => {
let e_0 = (e_0, scalar_convert(e_0).ok_or(DLEqError::InvalidChallenge)?);
let mut e = None;
for i in 0 .. RING_LEN {
e = Some(
Self::R_nonces(transcript.clone(), generators, self.s[i], ring[i], e.unwrap_or(e_0))
);
}
// Will panic if the above loop is never run somehow
// If e wasn't an Option, and instead initially set to e_0, it'd always pass
if e_0 != e.unwrap() {
Err(DLEqError::InvalidProof)?;
}
}
}
Ok(())
}
#[cfg(feature = "serialize")]
pub(crate) fn serialize<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
#[allow(non_snake_case)]
match self.Re_0 {
Re::R(R0, R1) => {
w.write_all(R0.to_bytes().as_ref())?;
w.write_all(R1.to_bytes().as_ref())?;
},
Re::e(e) => w.write_all(e.to_repr().as_ref())?
}
for i in 0 .. RING_LEN {
w.write_all(self.s[i].0.to_repr().as_ref())?;
w.write_all(self.s[i].1.to_repr().as_ref())?;
}
Ok(())
}
#[allow(non_snake_case)]
#[cfg(feature = "serialize")]
pub(crate) fn deserialize<R: Read>(r: &mut R, mut Re_0: Re<G0, G1>) -> std::io::Result<Self> {
match Re_0 {
Re::R(ref mut R0, ref mut R1) => { *R0 = read_point(r)?; *R1 = read_point(r)? },
Re::e(ref mut e) => *e = read_scalar(r)?
}
let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); RING_LEN];
for i in 0 .. RING_LEN {
s[i] = (read_scalar(r)?, read_scalar(r)?);
}
Ok(Aos { Re_0, s })
}
}

View file

@ -0,0 +1,175 @@
use rand_core::{RngCore, CryptoRng};
use transcript::Transcript;
use group::{ff::PrimeFieldBits, prime::PrimeGroup};
use multiexp::BatchVerifier;
use crate::{Generators, cross_group::{DLEqError, aos::{Re, Aos}}};
#[cfg(feature = "serialize")]
use std::io::{Read, Write};
#[cfg(feature = "serialize")]
use crate::cross_group::read_point;
pub(crate) enum BitSignature {
ClassicLinear,
ConciseLinear,
EfficientLinear,
CompromiseLinear
}
impl BitSignature {
pub(crate) const fn to_u8(&self) -> u8 {
match self {
BitSignature::ClassicLinear => 0,
BitSignature::ConciseLinear => 1,
BitSignature::EfficientLinear => 2,
BitSignature::CompromiseLinear => 3
}
}
pub(crate) const fn from(algorithm: u8) -> BitSignature {
match algorithm {
0 => BitSignature::ClassicLinear,
1 => BitSignature::ConciseLinear,
2 => BitSignature::EfficientLinear,
3 => BitSignature::CompromiseLinear,
_ => panic!("Unknown algorithm")
}
}
pub(crate) const fn bits(&self) -> usize {
match self {
BitSignature::ClassicLinear => 1,
BitSignature::ConciseLinear => 2,
BitSignature::EfficientLinear => 1,
BitSignature::CompromiseLinear => 2
}
}
pub(crate) const fn ring_len(&self) -> usize {
2_usize.pow(self.bits() as u32)
}
fn aos_form<G0: PrimeGroup, G1: PrimeGroup>(&self) -> Re<G0, G1> {
match self {
BitSignature::ClassicLinear => Re::e_default(),
BitSignature::ConciseLinear => Re::e_default(),
BitSignature::EfficientLinear => Re::R_default(),
BitSignature::CompromiseLinear => Re::R_default()
}
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) struct Bits<
G0: PrimeGroup,
G1: PrimeGroup,
const SIGNATURE: u8,
const RING_LEN: usize
> {
pub(crate) commitments: (G0, G1),
signature: Aos<G0, G1, RING_LEN>
}
impl<
G0: PrimeGroup,
G1: PrimeGroup,
const SIGNATURE: u8,
const RING_LEN: usize
> Bits<G0, G1, SIGNATURE, RING_LEN> where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits {
fn transcript<T: Transcript>(transcript: &mut T, i: usize, commitments: (G0, G1)) {
transcript.domain_separate(b"bits");
transcript.append_message(b"group", &u16::try_from(i).unwrap().to_le_bytes());
transcript.append_message(b"commitment_0", commitments.0.to_bytes().as_ref());
transcript.append_message(b"commitment_1", commitments.1.to_bytes().as_ref());
}
fn ring(pow_2: (G0, G1), commitments: (G0, G1)) -> Vec<(G0, G1)> {
let mut res = vec![commitments; RING_LEN];
for i in 1 .. RING_LEN {
res[i] = (res[i - 1].0 - pow_2.0, res[i - 1].1 - pow_2.1);
}
res
}
fn shift(pow_2: &mut (G0, G1)) {
for _ in 0 .. BitSignature::from(SIGNATURE).bits() {
pow_2.0 = pow_2.0.double();
pow_2.1 = pow_2.1.double();
}
}
pub(crate) fn prove<R: RngCore + CryptoRng, T: Clone + Transcript>(
rng: &mut R,
transcript: &mut T,
generators: (Generators<G0>, Generators<G1>),
i: usize,
pow_2: &mut (G0, G1),
bits: u8,
blinding_key: (G0::Scalar, G1::Scalar)
) -> Self {
let mut commitments = (
(generators.0.alt * blinding_key.0),
(generators.1.alt * blinding_key.1)
);
commitments.0 += pow_2.0 * G0::Scalar::from(bits.into());
commitments.1 += pow_2.1 * G1::Scalar::from(bits.into());
Self::transcript(transcript, i, commitments);
let signature = Aos::prove(
rng,
transcript.clone(),
generators,
&Self::ring(*pow_2, commitments),
usize::from(bits),
blinding_key,
BitSignature::from(SIGNATURE).aos_form()
);
Self::shift(pow_2);
Bits { commitments, signature }
}
pub(crate) fn verify<R: RngCore + CryptoRng, T: Clone + Transcript>(
&self,
rng: &mut R,
transcript: &mut T,
generators: (Generators<G0>, Generators<G1>),
batch: &mut (BatchVerifier<(), G0>, BatchVerifier<(), G1>),
i: usize,
pow_2: &mut (G0, G1)
) -> Result<(), DLEqError> {
Self::transcript(transcript, i, self.commitments);
self.signature.verify(
rng,
transcript.clone(),
generators,
batch,
&Self::ring(*pow_2, self.commitments)
)?;
Self::shift(pow_2);
Ok(())
}
#[cfg(feature = "serialize")]
pub(crate) fn serialize<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
w.write_all(self.commitments.0.to_bytes().as_ref())?;
w.write_all(self.commitments.1.to_bytes().as_ref())?;
self.signature.serialize(w)
}
#[cfg(feature = "serialize")]
pub(crate) fn deserialize<R: Read>(r: &mut R) -> std::io::Result<Self> {
Ok(
Bits {
commitments: (read_point(r)?, read_point(r)?),
signature: Aos::deserialize(r, BitSignature::from(SIGNATURE).aos_form())?
}
)
}
}
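For intuition, each `Bits` instance forms a Pedersen-style commitment to its bit group on both curves; a sketch of the relation being signed for, in the notation of the code above:

```rust
// For bit group i, with value b in [0, RING_LEN) and current power-of-two point pow_2:
//   commitment = (alt * blinding_key) + (pow_2 * b)
// The ring is { commitment - (pow_2 * j) : j in 0 .. RING_LEN }, so the entry at j == b
// equals alt * blinding_key alone. The AOS signature proves knowledge of that entry's
// discrete logarithm, without revealing which entry (and therefore which bits) it is.
```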

View file

@ -0,0 +1,366 @@
use thiserror::Error;
use rand_core::{RngCore, CryptoRng};
use digest::Digest;
use transcript::Transcript;
use group::{ff::{Field, PrimeField, PrimeFieldBits}, prime::PrimeGroup};
use multiexp::BatchVerifier;
use crate::Generators;
pub mod scalar;
use scalar::{scalar_convert, mutual_scalar_from_bytes};
pub(crate) mod schnorr;
use schnorr::SchnorrPoK;
pub(crate) mod aos;
mod bits;
use bits::{BitSignature, Bits};
#[cfg(feature = "serialize")]
use std::io::{Read, Write};
#[cfg(feature = "serialize")]
pub(crate) fn read_point<R: Read, G: PrimeGroup>(r: &mut R) -> std::io::Result<G> {
let mut repr = G::Repr::default();
r.read_exact(repr.as_mut())?;
let point = G::from_bytes(&repr);
if point.is_none().into() {
Err(std::io::Error::new(std::io::ErrorKind::Other, "invalid point"))?;
}
Ok(point.unwrap())
}
#[derive(Error, PartialEq, Eq, Debug)]
pub enum DLEqError {
#[error("invalid proof of knowledge")]
InvalidProofOfKnowledge,
#[error("invalid proof length")]
InvalidProofLength,
#[error("invalid challenge")]
InvalidChallenge,
#[error("invalid proof")]
InvalidProof
}
// This should never be directly instantiated and uses a u8 to represent internal values
// Any external usage is likely invalid
#[doc(hidden)]
// Debug would be such a dump of data it likely isn't helpful, but at least it's available to
// anyone who wants it
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct __DLEqProof<
G0: PrimeGroup,
G1: PrimeGroup,
const SIGNATURE: u8,
const RING_LEN: usize,
const REMAINDER_RING_LEN: usize
> where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits {
bits: Vec<Bits<G0, G1, SIGNATURE, RING_LEN>>,
remainder: Option<Bits<G0, G1, SIGNATURE, REMAINDER_RING_LEN>>,
poks: (SchnorrPoK<G0>, SchnorrPoK<G1>)
}
macro_rules! dleq {
($name: ident, $signature: expr, $remainder: literal) => {
pub type $name<G0, G1> = __DLEqProof<
G0,
G1,
{ $signature.to_u8() },
{ $signature.ring_len() },
// There may not be a remainder, yet if there is one, it'll be just one bit
// A ring for one bit has a RING_LEN of 2
{ if $remainder { 2 } else { 0 } }
>;
}
}
// Proves for 1-bit at a time with the signature form (e, s), as originally described in MRL-0010.
// Uses a merged challenge, unlike MRL-0010, for the ring signature, saving an element from each
// bit and removing a hash while slightly reducing challenge security. This security reduction is
// already applied to the scalar being proven for, a result of the requirement it's mutually valid
// over both scalar fields, hence its application here as well. This is mainly here as a point of
reference for the following DLEq proofs, all of which use merged challenges, and isn't performant
// in comparison to the others
dleq!(ClassicLinearDLEq, BitSignature::ClassicLinear, false);
// Proves for 2 bits at a time to save 3/7 elements of every other bit
// <9% smaller than CompromiseLinear, yet ~12% slower
dleq!(ConciseLinearDLEq, BitSignature::ConciseLinear, true);
// Uses AOS signatures of the form R, s, to enable the final step of the ring signature to be
// batch verified, at the cost of adding an additional element per bit
dleq!(EfficientLinearDLEq, BitSignature::EfficientLinear, false);
// Proves for 2 bits at a time while using the R, s form. This saves 3/7 elements of every other
// bit, while adding 1 element to every bit, and is more efficient than ConciseLinear yet less
// efficient than EfficientLinear due to having more ring signature steps which aren't batched
// >25% smaller than EfficientLinear and just 11% slower, making it the recommended option
dleq!(CompromiseLinearDLEq, BitSignature::CompromiseLinear, true);
impl<
G0: PrimeGroup,
G1: PrimeGroup,
const SIGNATURE: u8,
const RING_LEN: usize,
const REMAINDER_RING_LEN: usize
> __DLEqProof<G0, G1, SIGNATURE, RING_LEN, REMAINDER_RING_LEN> where
G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits {
pub(crate) fn transcript<T: Transcript>(
transcript: &mut T,
generators: (Generators<G0>, Generators<G1>),
keys: (G0, G1)
) {
transcript.domain_separate(b"cross_group_dleq");
generators.0.transcript(transcript);
generators.1.transcript(transcript);
transcript.domain_separate(b"points");
transcript.append_message(b"point_0", keys.0.to_bytes().as_ref());
transcript.append_message(b"point_1", keys.1.to_bytes().as_ref());
}
pub(crate) fn blinding_key<R: RngCore + CryptoRng, F: PrimeField>(
rng: &mut R,
total: &mut F,
last: bool
) -> F {
let blinding_key = if last {
-*total
} else {
F::random(&mut *rng)
};
*total += blinding_key;
blinding_key
}
fn reconstruct_keys(&self) -> (G0, G1) {
let mut res = (
self.bits.iter().map(|bit| bit.commitments.0).sum::<G0>(),
self.bits.iter().map(|bit| bit.commitments.1).sum::<G1>()
);
if let Some(bit) = &self.remainder {
res.0 += bit.commitments.0;
res.1 += bit.commitments.1;
}
res
}
fn prove_internal<R: RngCore + CryptoRng, T: Clone + Transcript>(
rng: &mut R,
transcript: &mut T,
generators: (Generators<G0>, Generators<G1>),
f: (G0::Scalar, G1::Scalar)
) -> (Self, (G0::Scalar, G1::Scalar)) {
Self::transcript(
transcript,
generators,
((generators.0.primary * f.0), (generators.1.primary * f.1))
);
let poks = (
SchnorrPoK::<G0>::prove(rng, transcript, generators.0.primary, f.0),
SchnorrPoK::<G1>::prove(rng, transcript, generators.1.primary, f.1)
);
let mut blinding_key_total = (G0::Scalar::zero(), G1::Scalar::zero());
let mut blinding_key = |rng: &mut R, last| {
let blinding_key = (
Self::blinding_key(&mut *rng, &mut blinding_key_total.0, last),
Self::blinding_key(&mut *rng, &mut blinding_key_total.1, last)
);
if last {
debug_assert_eq!(blinding_key_total.0, G0::Scalar::zero());
debug_assert_eq!(blinding_key_total.1, G1::Scalar::zero());
}
blinding_key
};
let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap();
let bits_per_group = BitSignature::from(SIGNATURE).bits();
let mut pow_2 = (generators.0.primary, generators.1.primary);
let raw_bits = f.0.to_le_bits();
let mut bits = Vec::with_capacity(capacity);
let mut these_bits: u8 = 0;
for (i, bit) in raw_bits.iter().enumerate() {
if i == capacity {
break;
}
let bit = *bit as u8;
debug_assert_eq!(bit | 1, 1);
// Accumulate this bit
these_bits |= bit << (i % bits_per_group);
if (i % bits_per_group) == (bits_per_group - 1) {
let last = i == (capacity - 1);
let blinding_key = blinding_key(&mut *rng, last);
bits.push(
Bits::prove(
&mut *rng,
transcript,
generators,
i / bits_per_group,
&mut pow_2,
these_bits,
blinding_key
)
);
these_bits = 0;
}
}
debug_assert_eq!(bits.len(), capacity / bits_per_group);
let mut remainder = None;
if capacity != ((capacity / bits_per_group) * bits_per_group) {
let blinding_key = blinding_key(&mut *rng, true);
remainder = Some(
Bits::prove(
&mut *rng,
transcript,
generators,
capacity / bits_per_group,
&mut pow_2,
these_bits,
blinding_key
)
);
}
let proof = __DLEqProof { bits, remainder, poks };
debug_assert_eq!(
proof.reconstruct_keys(),
(generators.0.primary * f.0, generators.1.primary * f.1)
);
(proof, f)
}
/// Prove the cross-Group Discrete Log Equality for the points derived from the scalar created as
/// the output of the passed in Digest. Given the non-standard requirements to achieve
/// uniformity, needing to be < 2^x instead of less than a prime modulus, this is the simplest way
/// to safely and securely generate a Scalar, without risk of failure or bias
/// It also ensures a lack of determinable relation between keys, guaranteeing security in the
/// currently expected use case for this, atomic swaps, where each swap leaks the key. Knowing
/// the relationship between keys would allow breaking all swaps after just one
pub fn prove<R: RngCore + CryptoRng, T: Clone + Transcript, D: Digest>(
rng: &mut R,
transcript: &mut T,
generators: (Generators<G0>, Generators<G1>),
digest: D
) -> (Self, (G0::Scalar, G1::Scalar)) {
Self::prove_internal(
rng,
transcript,
generators,
mutual_scalar_from_bytes(digest.finalize().as_ref())
)
}
/// Prove the cross-Group Discrete Log Equality for the points derived from the scalar passed in,
/// failing if it's not mutually valid. This allows for rejection sampling externally derived
/// scalars until they're safely usable, as needed
pub fn prove_without_bias<R: RngCore + CryptoRng, T: Clone + Transcript>(
rng: &mut R,
transcript: &mut T,
generators: (Generators<G0>, Generators<G1>),
f0: G0::Scalar
) -> Option<(Self, (G0::Scalar, G1::Scalar))> {
scalar_convert(f0).map(|f1| Self::prove_internal(rng, transcript, generators, (f0, f1)))
}
/// Verify a cross-Group Discrete Log Equality statement, returning the points proven for
pub fn verify<R: RngCore + CryptoRng, T: Clone + Transcript>(
&self,
rng: &mut R,
transcript: &mut T,
generators: (Generators<G0>, Generators<G1>)
) -> Result<(G0, G1), DLEqError> {
let capacity = usize::try_from(
G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)
).unwrap();
let bits_per_group = BitSignature::from(SIGNATURE).bits();
let has_remainder = (capacity % bits_per_group) != 0;
// These shouldn't be possible, as locally created and deserialized proofs should be properly
// formed in these regards, yet it doesn't hurt to check and would be problematic if true
if (self.bits.len() != (capacity / bits_per_group)) || (
(self.remainder.is_none() && has_remainder) || (self.remainder.is_some() && !has_remainder)
) {
return Err(DLEqError::InvalidProofLength);
}
let keys = self.reconstruct_keys();
Self::transcript(transcript, generators, keys);
let batch_capacity = match BitSignature::from(SIGNATURE) {
BitSignature::ClassicLinear => 3,
BitSignature::ConciseLinear => 3,
BitSignature::EfficientLinear => (self.bits.len() + 1) * 3,
BitSignature::CompromiseLinear => (self.bits.len() + 1) * 3
};
let mut batch = (BatchVerifier::new(batch_capacity), BatchVerifier::new(batch_capacity));
self.poks.0.verify(&mut *rng, transcript, generators.0.primary, keys.0, &mut batch.0);
self.poks.1.verify(&mut *rng, transcript, generators.1.primary, keys.1, &mut batch.1);
let mut pow_2 = (generators.0.primary, generators.1.primary);
for (i, bits) in self.bits.iter().enumerate() {
bits.verify(&mut *rng, transcript, generators, &mut batch, i, &mut pow_2)?;
}
if let Some(bit) = &self.remainder {
bit.verify(&mut *rng, transcript, generators, &mut batch, self.bits.len(), &mut pow_2)?;
}
if (!batch.0.verify_vartime()) || (!batch.1.verify_vartime()) {
Err(DLEqError::InvalidProof)?;
}
Ok(keys)
}
#[cfg(feature = "serialize")]
pub fn serialize<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
for bit in &self.bits {
bit.serialize(w)?;
}
if let Some(bit) = &self.remainder {
bit.serialize(w)?;
}
self.poks.0.serialize(w)?;
self.poks.1.serialize(w)
}
#[cfg(feature = "serialize")]
pub fn deserialize<R: Read>(r: &mut R) -> std::io::Result<Self> {
let capacity = usize::try_from(
G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)
).unwrap();
let bits_per_group = BitSignature::from(SIGNATURE).bits();
let mut bits = Vec::with_capacity(capacity / bits_per_group);
for _ in 0 .. (capacity / bits_per_group) {
bits.push(Bits::deserialize(r)?);
}
let mut remainder = None;
if (capacity % bits_per_group) != 0 {
remainder = Some(Bits::deserialize(r)?);
}
Ok(
__DLEqProof {
bits,
remainder,
poks: (SchnorrPoK::deserialize(r)?, SchnorrPoK::deserialize(r)?)
}
)
}
}

View file

@ -0,0 +1,49 @@
use ff::PrimeFieldBits;
/// Convert a uniform scalar into one usable on both fields, clearing the top bits as needed
pub fn scalar_normalize<F0: PrimeFieldBits, F1: PrimeFieldBits>(scalar: F0) -> (F0, F1) {
let mutual_capacity = F0::CAPACITY.min(F1::CAPACITY);
// The security of a mutual key is the security of the lower field. Accordingly, this bans a
// difference of 4 or more bits
#[cfg(feature = "secure_capacity_difference")]
assert!((F0::CAPACITY.max(F1::CAPACITY) - mutual_capacity) < 4);
let mut res1 = F0::zero();
let mut res2 = F1::zero();
// Uses the bit view API to ensure a consistent endianness
let mut bits = scalar.to_le_bits();
// Convert it to big endian
bits.reverse();
for bit in bits.iter().skip(bits.len() - usize::try_from(mutual_capacity).unwrap()) {
res1 = res1.double();
res2 = res2.double();
let bit = *bit as u8;
debug_assert_eq!(bit | 1, 1);
res1 += F0::from(bit.into());
res2 += F1::from(bit.into());
}
(res1, res2)
}
/// Helper to convert a scalar between fields. Returns None if the scalar isn't mutually valid
pub fn scalar_convert<F0: PrimeFieldBits, F1: PrimeFieldBits>(scalar: F0) -> Option<F1> {
let (valid, converted) = scalar_normalize(scalar);
Some(converted).filter(|_| scalar == valid)
}
/// Create a mutually valid scalar from bytes via bit truncation to not introduce bias
pub fn mutual_scalar_from_bytes<F0: PrimeFieldBits, F1: PrimeFieldBits>(bytes: &[u8]) -> (F0, F1) {
let capacity = usize::try_from(F0::CAPACITY.min(F1::CAPACITY)).unwrap();
debug_assert!((bytes.len() * 8) >= capacity);
let mut accum = F0::zero();
for b in 0 .. capacity {
accum = accum.double();
accum += F0::from(((bytes[b / 8] >> (b % 8)) & 1).into());
}
(accum, scalar_convert(accum).unwrap())
}
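A brief sketch of how these helpers compose (the imports assume usage from outside this crate, with the `experimental` feature enabled):

```rust
use k256::Scalar as K256Scalar;
use dalek_ff_group::Scalar as DalekScalar;
use dleq::cross_group::scalar::{scalar_normalize, scalar_convert, mutual_scalar_from_bytes};

// Normalize a secp256k1 scalar, clearing its top bits as needed, to one valid in both fields
let (k, ed) = scalar_normalize::<K256Scalar, DalekScalar>(K256Scalar::from(u64::MAX));
// Conversion only succeeds for scalars which are already mutually valid
assert_eq!(scalar_convert::<K256Scalar, DalekScalar>(k), Some(ed));
// Derive a mutually valid pair directly from uniform bytes, such as a wide hash output
let (f0, f1) = mutual_scalar_from_bytes::<K256Scalar, DalekScalar>(&[0xFF; 32]);
```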

View file

@ -0,0 +1,79 @@
use rand_core::{RngCore, CryptoRng};
use transcript::Transcript;
use group::{ff::{Field, PrimeFieldBits}, prime::PrimeGroup};
use multiexp::BatchVerifier;
use crate::challenge;
#[cfg(feature = "serialize")]
use std::io::{Read, Write};
#[cfg(feature = "serialize")]
use ff::PrimeField;
#[cfg(feature = "serialize")]
use crate::{read_scalar, cross_group::read_point};
#[allow(non_snake_case)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) struct SchnorrPoK<G: PrimeGroup> {
R: G,
s: G::Scalar
}
impl<G: PrimeGroup> SchnorrPoK<G> where G::Scalar: PrimeFieldBits {
// Not hram due to the lack of m
#[allow(non_snake_case)]
fn hra<T: Transcript>(transcript: &mut T, generator: G, R: G, A: G) -> G::Scalar {
transcript.domain_separate(b"schnorr_proof_of_knowledge");
transcript.append_message(b"generator", generator.to_bytes().as_ref());
transcript.append_message(b"nonce", R.to_bytes().as_ref());
transcript.append_message(b"public_key", A.to_bytes().as_ref());
challenge(transcript)
}
pub(crate) fn prove<R: RngCore + CryptoRng, T: Transcript>(
rng: &mut R,
transcript: &mut T,
generator: G,
private_key: G::Scalar
) -> SchnorrPoK<G> {
let nonce = G::Scalar::random(rng);
#[allow(non_snake_case)]
let R = generator * nonce;
SchnorrPoK {
R,
s: nonce + (private_key * SchnorrPoK::hra(transcript, generator, R, generator * private_key))
}
}
pub(crate) fn verify<R: RngCore + CryptoRng, T: Transcript>(
&self,
rng: &mut R,
transcript: &mut T,
generator: G,
public_key: G,
batch: &mut BatchVerifier<(), G>
) {
batch.queue(
rng,
(),
[
(-self.s, generator),
(G::Scalar::one(), self.R),
(Self::hra(transcript, generator, self.R, public_key), public_key)
]
);
}
#[cfg(feature = "serialize")]
pub fn serialize<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
w.write_all(self.R.to_bytes().as_ref())?;
w.write_all(self.s.to_repr().as_ref())
}
#[cfg(feature = "serialize")]
pub fn deserialize<R: Read>(r: &mut R) -> std::io::Result<SchnorrPoK<G>> {
Ok(SchnorrPoK { R: read_point(r)?, s: read_scalar(r)? })
}
}

151
crypto/dleq/src/lib.rs Normal file
View file

@ -0,0 +1,151 @@
use thiserror::Error;
use rand_core::{RngCore, CryptoRng};
use transcript::Transcript;
use ff::{Field, PrimeField};
use group::prime::PrimeGroup;
#[cfg(feature = "serialize")]
use std::io::{self, ErrorKind, Error, Read, Write};
#[cfg(feature = "experimental")]
pub mod cross_group;
#[cfg(test)]
mod tests;
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Generators<G: PrimeGroup> {
primary: G,
alt: G
}
impl<G: PrimeGroup> Generators<G> {
pub fn new(primary: G, alt: G) -> Generators<G> {
Generators { primary, alt }
}
fn transcript<T: Transcript>(&self, transcript: &mut T) {
transcript.domain_separate(b"generators");
transcript.append_message(b"primary", self.primary.to_bytes().as_ref());
transcript.append_message(b"alternate", self.alt.to_bytes().as_ref());
}
}
pub(crate) fn challenge<T: Transcript, F: PrimeField>(transcript: &mut T) -> F {
// From here, there are three ways to get a scalar under the ff/group API
// 1: Scalar::random(ChaCha12Rng::from_seed(self.transcript.rng_seed(b"challenge")))
// 2: Grabbing a UInt library to perform reduction by the modulus, then determining endianness
// and loading it in
// 3: Iterating over each byte and manually doubling/adding. This is simplest
// Get a wide amount of bytes to safely reduce without bias
let target = ((usize::try_from(F::NUM_BITS).unwrap() + 7) / 8) * 2;
let mut challenge_bytes = transcript.challenge(b"challenge").as_ref().to_vec();
while challenge_bytes.len() < target {
// Secure given a transcript which updates its state upon producing a challenge
challenge_bytes.extend(transcript.challenge(b"challenge_extension").as_ref());
}
challenge_bytes.truncate(target);
let mut challenge = F::zero();
for b in challenge_bytes {
for _ in 0 .. 8 {
challenge = challenge.double();
}
challenge += F::from(u64::from(b));
}
challenge
}
#[cfg(feature = "serialize")]
fn read_scalar<R: Read, F: PrimeField>(r: &mut R) -> io::Result<F> {
let mut repr = F::Repr::default();
r.read_exact(repr.as_mut())?;
let scalar = F::from_repr(repr);
if scalar.is_none().into() {
Err(Error::new(ErrorKind::Other, "invalid scalar"))?;
}
Ok(scalar.unwrap())
}
#[derive(Error, Debug)]
pub enum DLEqError {
#[error("invalid proof")]
InvalidProof
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct DLEqProof<G: PrimeGroup> {
c: G::Scalar,
s: G::Scalar
}
#[allow(non_snake_case)]
impl<G: PrimeGroup> DLEqProof<G> {
fn challenge<T: Transcript>(
transcript: &mut T,
generators: Generators<G>,
nonces: (G, G),
points: (G, G)
) -> G::Scalar {
generators.transcript(transcript);
transcript.domain_separate(b"dleq");
transcript.append_message(b"nonce_primary", nonces.0.to_bytes().as_ref());
transcript.append_message(b"nonce_alternate", nonces.1.to_bytes().as_ref());
transcript.append_message(b"point_primary", points.0.to_bytes().as_ref());
transcript.append_message(b"point_alternate", points.1.to_bytes().as_ref());
challenge(transcript)
}
pub fn prove<R: RngCore + CryptoRng, T: Transcript>(
rng: &mut R,
transcript: &mut T,
generators: Generators<G>,
scalar: G::Scalar
) -> DLEqProof<G> {
let r = G::Scalar::random(rng);
let c = Self::challenge(
transcript,
generators,
(generators.primary * r, generators.alt * r),
(generators.primary * scalar, generators.alt * scalar)
);
let s = r + (c * scalar);
DLEqProof { c, s }
}
pub fn verify<T: Transcript>(
&self,
transcript: &mut T,
generators: Generators<G>,
points: (G, G)
) -> Result<(), DLEqError> {
if self.c != Self::challenge(
transcript,
generators,
(
(generators.primary * self.s) - (points.0 * self.c),
(generators.alt * self.s) - (points.1 * self.c)
),
points
) {
Err(DLEqError::InvalidProof)?;
}
Ok(())
}
#[cfg(feature = "serialize")]
pub fn serialize<W: Write>(&self, w: &mut W) -> io::Result<()> {
w.write_all(self.c.to_repr().as_ref())?;
w.write_all(self.s.to_repr().as_ref())
}
#[cfg(feature = "serialize")]
pub fn deserialize<R: Read>(r: &mut R) -> io::Result<DLEqProof<G>> {
Ok(DLEqProof { c: read_scalar(r)?, s: read_scalar(r)? })
}
}
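For reference, a usage sketch of this proof, with dependencies mirroring the test below (the alternate generator is the fixed point the tests use; any independently generated point works):

```rust
use hex_literal::hex;
use rand_core::OsRng;
use ff::Field;
use k256::{Scalar, ProjectivePoint};
use transcript::RecommendedTranscript;
use dleq::{Generators, DLEqProof};

let transcript = || RecommendedTranscript::new(b"DLEq Example");
let alt = ProjectivePoint::from_bytes(
  &(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0").into())
).unwrap();
let generators = Generators::new(ProjectivePoint::GENERATOR, alt);

// Prove knowledge of a scalar shared by both public keys
let key = Scalar::random(&mut OsRng);
let proof = DLEqProof::prove(&mut OsRng, &mut transcript(), generators, key);

// The verifier already knows the claimed public keys
proof
  .verify(&mut transcript(), generators, (ProjectivePoint::GENERATOR * key, alt * key))
  .unwrap();
```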

View file

@ -0,0 +1,69 @@
use rand_core::OsRng;
use group::{ff::Field, Group};
use multiexp::BatchVerifier;
use crate::{
cross_group::aos::{Re, Aos},
tests::cross_group::{G0, G1, transcript, generators}
};
#[allow(non_snake_case)]
#[cfg(feature = "serialize")]
fn test_aos_serialization<const RING_LEN: usize>(proof: Aos<G0, G1, RING_LEN>, Re_0: Re<G0, G1>) {
let mut buf = vec![];
proof.serialize(&mut buf).unwrap();
let deserialized = Aos::deserialize(&mut std::io::Cursor::new(buf), Re_0).unwrap();
assert_eq!(proof, deserialized);
}
fn test_aos<const RING_LEN: usize>(default: Re<G0, G1>) {
let generators = generators();
let mut ring_keys = [(<G0 as Group>::Scalar::zero(), <G1 as Group>::Scalar::zero()); RING_LEN];
// Side-effect of G0 being a type-alias with identity() deprecated
#[allow(deprecated)]
let mut ring = [(G0::identity(), G1::identity()); RING_LEN];
for i in 0 .. RING_LEN {
ring_keys[i] = (
<G0 as Group>::Scalar::random(&mut OsRng),
<G1 as Group>::Scalar::random(&mut OsRng)
);
ring[i] = (generators.0.alt * ring_keys[i].0, generators.1.alt * ring_keys[i].1);
}
for actual in 0 .. RING_LEN {
let proof = Aos::<_, _, RING_LEN>::prove(
&mut OsRng,
transcript(),
generators,
&ring,
actual,
ring_keys[actual],
default.clone()
);
let mut batch = (BatchVerifier::new(0), BatchVerifier::new(0));
proof.verify(&mut OsRng, transcript(), generators, &mut batch, &ring).unwrap();
// For e, these should have nothing. For R, these should have 6 elements each which sum to 0
assert!(batch.0.verify_vartime());
assert!(batch.1.verify_vartime());
#[cfg(feature = "serialize")]
test_aos_serialization(proof, default.clone());
}
}
#[test]
fn test_aos_e() {
test_aos::<2>(Re::e_default());
test_aos::<4>(Re::e_default());
}
#[allow(non_snake_case)]
#[test]
fn test_aos_R() {
// Batch verification appreciates the longer vectors, which means not batching bits
test_aos::<2>(Re::R_default());
}

View file

@ -0,0 +1,192 @@
use hex_literal::hex;
use rand_core::{RngCore, OsRng};
use ff::{Field, PrimeField};
use group::{Group, GroupEncoding};
use blake2::{Digest, Blake2b512};
use k256::{Scalar, ProjectivePoint};
use dalek_ff_group::{self as dfg, EdwardsPoint, CompressedEdwardsY};
use transcript::RecommendedTranscript;
use crate::{
Generators,
cross_group::{
scalar::mutual_scalar_from_bytes,
ClassicLinearDLEq, EfficientLinearDLEq, ConciseLinearDLEq, CompromiseLinearDLEq
}
};
mod scalar;
mod schnorr;
mod aos;
type G0 = ProjectivePoint;
type G1 = EdwardsPoint;
pub(crate) fn transcript() -> RecommendedTranscript {
RecommendedTranscript::new(b"Cross-Group DLEq Proof Test")
}
pub(crate) fn generators() -> (Generators<G0>, Generators<G1>) {
(
Generators::new(
ProjectivePoint::GENERATOR,
ProjectivePoint::from_bytes(
&(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0").into())
).unwrap()
),
Generators::new(
EdwardsPoint::generator(),
CompressedEdwardsY::new(
hex!("8b655970153799af2aeadc9ff1add0ea6c7251d54154cfa92c173a0dd39c1f94")
).decompress().unwrap()
)
)
}
macro_rules! verify_and_deserialize {
($type: ty, $proof: ident, $generators: ident, $keys: ident) => {
let public_keys = $proof.verify(&mut OsRng, &mut transcript(), $generators).unwrap();
assert_eq!($generators.0.primary * $keys.0, public_keys.0);
assert_eq!($generators.1.primary * $keys.1, public_keys.1);
#[cfg(feature = "serialize")]
{
let mut buf = vec![];
$proof.serialize(&mut buf).unwrap();
let deserialized = <$type>::deserialize(&mut std::io::Cursor::new(&buf)).unwrap();
assert_eq!($proof, deserialized);
}
}
}
macro_rules! test_dleq {
($str: literal, $benchmark: ident, $name: ident, $type: ident) => {
#[ignore]
#[test]
fn $benchmark() {
println!("Benchmarking with Secp256k1/Ed25519");
let generators = generators();
let mut seed = [0; 32];
OsRng.fill_bytes(&mut seed);
let key = Blake2b512::new().chain_update(seed);
let runs = 200;
let mut proofs = Vec::with_capacity(usize::try_from(runs).unwrap());
let time = std::time::Instant::now();
for _ in 0 .. runs {
proofs.push($type::prove(&mut OsRng, &mut transcript(), generators, key.clone()).0);
}
println!("{} had a average prove time of {}ms", $str, time.elapsed().as_millis() / runs);
let time = std::time::Instant::now();
for proof in &proofs {
proof.verify(&mut OsRng, &mut transcript(), generators).unwrap();
}
println!("{} had a average verify time of {}ms", $str, time.elapsed().as_millis() / runs);
#[cfg(feature = "serialize")]
{
let mut buf = vec![];
proofs[0].serialize(&mut buf).unwrap();
println!("{} had a proof size of {} bytes", $str, buf.len());
}
}
#[test]
fn $name() {
let generators = generators();
for i in 0 .. 1 {
let (proof, keys) = if i == 0 {
let mut seed = [0; 32];
OsRng.fill_bytes(&mut seed);
$type::prove(
&mut OsRng,
&mut transcript(),
generators,
Blake2b512::new().chain_update(seed)
)
} else {
let mut key;
let mut res;
while {
key = Scalar::random(&mut OsRng);
res = $type::prove_without_bias(&mut OsRng, &mut transcript(), generators, key);
res.is_none()
} {}
let res = res.unwrap();
assert_eq!(key, res.1.0);
res
};
verify_and_deserialize!($type::<G0, G1>, proof, generators, keys);
}
}
}
}
test_dleq!("ClassicLinear", benchmark_classic_linear, test_classic_linear, ClassicLinearDLEq);
test_dleq!("ConciseLinear", benchmark_concise_linear, test_concise_linear, ConciseLinearDLEq);
test_dleq!(
"EfficientLinear",
benchmark_efficient_linear,
test_efficient_linear,
EfficientLinearDLEq
);
test_dleq!(
"CompromiseLinear",
benchmark_compromise_linear,
test_compromise_linear,
CompromiseLinearDLEq
);
#[test]
fn test_rejection_sampling() {
let mut pow_2 = Scalar::one();
for _ in 0 .. dfg::Scalar::CAPACITY {
pow_2 = pow_2.double();
}
assert!(
// Either would work
EfficientLinearDLEq::prove_without_bias(
&mut OsRng,
&mut transcript(),
generators(),
pow_2
).is_none()
);
}
#[test]
fn test_remainder() {
// Uses Secp256k1 for both to achieve an odd capacity of 255
assert_eq!(Scalar::CAPACITY, 255);
let generators = (generators().0, generators().0);
// This will ignore any unused bits, ensuring every remaining one is set
let keys = mutual_scalar_from_bytes(&[0xFF; 32]);
assert_eq!(keys.0 + Scalar::one(), Scalar::from(2u64).pow_vartime(&[255]));
assert_eq!(keys.0, keys.1);
let (proof, res) = ConciseLinearDLEq::prove_without_bias(
&mut OsRng,
&mut transcript(),
generators,
keys.0
).unwrap();
assert_eq!(keys, res);
verify_and_deserialize!(
ConciseLinearDLEq::<ProjectivePoint, ProjectivePoint>,
proof,
generators,
keys
);
}

View file

@ -0,0 +1,47 @@
use rand_core::OsRng;
use ff::{Field, PrimeField};
use k256::Scalar as K256Scalar;
use dalek_ff_group::Scalar as DalekScalar;
use crate::cross_group::scalar::{scalar_normalize, scalar_convert};
#[test]
fn test_scalar() {
assert_eq!(
scalar_normalize::<_, DalekScalar>(K256Scalar::zero()),
(K256Scalar::zero(), DalekScalar::zero())
);
assert_eq!(
scalar_normalize::<_, DalekScalar>(K256Scalar::one()),
(K256Scalar::one(), DalekScalar::one())
);
let mut initial;
while {
initial = K256Scalar::random(&mut OsRng);
let (k, ed) = scalar_normalize::<_, DalekScalar>(initial);
// The initial scalar should equal the new scalar with Ed25519's capacity
let mut initial_bytes = (&initial.to_repr()).to_vec();
// Drop the first 4 bits to hit 252
initial_bytes[0] = initial_bytes[0] & 0b00001111;
let k_bytes = (&k.to_repr()).to_vec();
assert_eq!(initial_bytes, k_bytes);
let mut ed_bytes = ed.to_repr().as_ref().to_vec();
// Reverse to big endian
ed_bytes.reverse();
assert_eq!(k_bytes, ed_bytes);
// Verify conversion works as expected
assert_eq!(scalar_convert::<_, DalekScalar>(k), Some(ed));
// Run this test again if this secp256k1 scalar didn't have any bits cleared
initial == k
} {}
// Verify conversion returns None when the scalar isn't mutually valid
assert!(scalar_convert::<_, DalekScalar>(initial).is_none());
}

View file

@ -0,0 +1,38 @@
use rand_core::OsRng;
use group::{ff::{Field, PrimeFieldBits}, prime::PrimeGroup};
use multiexp::BatchVerifier;
use transcript::RecommendedTranscript;
use crate::cross_group::schnorr::SchnorrPoK;
fn test_schnorr<G: PrimeGroup>() where G::Scalar: PrimeFieldBits {
let private = G::Scalar::random(&mut OsRng);
let transcript = RecommendedTranscript::new(b"Schnorr Test");
let mut batch = BatchVerifier::new(3);
SchnorrPoK::prove(
&mut OsRng,
&mut transcript.clone(),
G::generator(),
private
).verify(
&mut OsRng,
&mut transcript.clone(),
G::generator(),
G::generator() * private,
&mut batch
);
assert!(batch.verify_vartime());
}
#[test]
fn test_secp256k1() {
test_schnorr::<k256::ProjectivePoint>();
}
#[test]
fn test_ed25519() {
test_schnorr::<dalek_ff_group::EdwardsPoint>();
}

View file

@ -0,0 +1,43 @@
#[cfg(feature = "experimental")]
mod cross_group;
use hex_literal::hex;
use rand_core::OsRng;
use ff::Field;
use group::GroupEncoding;
use k256::{Scalar, ProjectivePoint};
use transcript::RecommendedTranscript;
use crate::{Generators, DLEqProof};
#[test]
fn test_dleq() {
let transcript = || RecommendedTranscript::new(b"DLEq Proof Test");
let generators = Generators::new(
ProjectivePoint::GENERATOR,
ProjectivePoint::from_bytes(
&(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0").into())
).unwrap()
);
let key = Scalar::random(&mut OsRng);
let proof = DLEqProof::prove(&mut OsRng, &mut transcript(), generators, key);
let keys = (generators.primary * key, generators.alt * key);
proof.verify(&mut transcript(), generators, keys).unwrap();
#[cfg(feature = "serialize")]
{
let mut buf = vec![];
proof.serialize(&mut buf).unwrap();
let deserialized = DLEqProof::<ProjectivePoint>::deserialize(
&mut std::io::Cursor::new(&buf)
).unwrap();
assert_eq!(proof, deserialized);
deserialized.verify(&mut transcript(), generators, keys).unwrap();
}
}

View file

@ -1,24 +1,46 @@
[package]
name = "frost"
name = "modular-frost"
version = "0.1.0"
description = "Implementation of FROST over ff/group"
description = "Modular implementation of FROST over ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["frost", "multisig", "threshold"]
edition = "2021"
[dependencies]
thiserror = "1"
rand_core = "0.6"
hex = "0.4"
ff = "0.11"
group = "0.11"
sha2 = { version = "0.10", optional = true }
transcript = { path = "../transcript" }
ff = "0.12"
group = "0.12"
multiexp = { path = "../multiexp", features = ["batch"] }
elliptic-curve = { version = "0.12", features = ["hash2curve"], optional = true }
p256 = { version = "0.11", features = ["arithmetic", "bits", "hash2curve"], optional = true }
k256 = { version = "0.11", features = ["arithmetic", "bits", "hash2curve"], optional = true }
dalek-ff-group = { path = "../dalek-ff-group", version = "0.1", optional = true }
transcript = { package = "flexible-transcript", path = "../transcript", version = "0.1" }
multiexp = { path = "../multiexp", version = "0.1", features = ["batch"] }
dleq = { package = "dleq", path = "../dleq", version = "0.1", features = ["serialize"] }
[dev-dependencies]
rand = "0.8"
sha2 = "0.10"
k256 = { version = "0.10", features = ["arithmetic"] }
dalek-ff-group = { path = "../dalek-ff-group" }
[features]
curves = ["sha2"] # All officially denoted curves use the SHA2 family of hashes
kp256 = ["elliptic-curve", "curves"]
p256 = ["kp256", "dep:p256"]
secp256k1 = ["kp256", "k256"]
dalek = ["curves", "dalek-ff-group"]
ed25519 = ["dalek"]
ristretto = ["dalek"]

View file

@ -1,3 +1,6 @@
# FROST
# Modular FROST
Implementation of FROST for any curve with a ff/group API.
A modular implementation of FROST for any curve with a ff/group API. Notably,
beyond curve modularity, custom algorithms may be specified, providing support
for privacy coins. The provided Schnorr algorithm also has a modular HRAM due
to the variety in existence, enabling integration with existing systems.
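As a sketch of that modularity, integration with an existing system is a matter of implementing the `Hram` trait with whatever challenge derivation that system uses. The domain-separation string below is purely illustrative, and the paths assume the crate is imported as `frost`:

```rust
use sha2::{Digest, Sha512};
use dalek_ff_group::{Scalar, RistrettoPoint};
use frost::{curve::Ristretto, algorithm::Hram};

#[derive(Copy, Clone)]
struct ExistingSystemHram;
impl Hram<Ristretto> for ExistingSystemHram {
  #[allow(non_snake_case)]
  fn hram(R: &RistrettoPoint, A: &RistrettoPoint, m: &[u8]) -> Scalar {
    // Hash the exact bytes the existing verifier expects
    Scalar::from_hash(
      Sha512::new()
        .chain_update(b"existing-protocol-challenge")
        .chain_update(R.compress().to_bytes())
        .chain_update(A.compress().to_bytes())
        .chain_update(m)
    )
  }
}
```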

View file

@ -4,7 +4,7 @@ use rand_core::{RngCore, CryptoRng};
use transcript::Transcript;
use crate::{Curve, FrostError, MultisigView, schnorr};
use crate::{Curve, FrostError, FrostView, schnorr};
pub use schnorr::SchnorrSignature;
/// Algorithm to use FROST with
@ -13,22 +13,25 @@ pub trait Algorithm<C: Curve>: Clone {
/// The resulting type of the signatures this algorithm will produce
type Signature: Clone + PartialEq + Debug;
/// Obtain a mutable borrow of the underlying transcript
fn transcript(&mut self) -> &mut Self::Transcript;
/// Obtain the list of nonces to generate, as specified by the basepoints to create commitments
/// against per-nonce. These are not committed to by FROST on the underlying transcript
fn nonces(&self) -> Vec<Vec<C::G>>;
/// Generate an addendum to FROST's preprocessing stage
fn preprocess_addendum<R: RngCore + CryptoRng>(
&mut self,
rng: &mut R,
params: &MultisigView<C>,
nonces: &[C::F; 2],
params: &FrostView<C>,
) -> Vec<u8>;
/// Process the addendum for the specified participant. Guaranteed to be ordered
fn process_addendum(
&mut self,
params: &MultisigView<C>,
params: &FrostView<C>,
l: u16,
commitments: &[C::G; 2],
serialized: &[u8],
) -> Result<(), FrostError>;
@ -38,23 +41,24 @@ pub trait Algorithm<C: Curve>: Clone {
/// The nonce will already have been processed into the combined form d + (e * p)
fn sign_share(
&mut self,
params: &MultisigView<C>,
nonce_sum: C::G,
binding: C::F,
nonce: C::F,
params: &FrostView<C>,
nonce_sums: &[Vec<C::G>],
nonces: &[C::F],
msg: &[u8],
) -> C::F;
/// Verify a signature
fn verify(&self, group_key: C::G, nonce: C::G, sum: C::F) -> Option<Self::Signature>;
#[must_use]
fn verify(&self, group_key: C::G, nonces: &[Vec<C::G>], sum: C::F) -> Option<Self::Signature>;
/// Verify a specific share given as a response. Used to determine blame if signature
/// verification fails
#[must_use]
fn verify_share(
&self,
l: u16,
verification_share: C::G,
nonce: C::G,
nonces: &[Vec<C::G>],
share: C::F,
) -> bool;
}
@ -63,6 +67,12 @@ pub trait Algorithm<C: Curve>: Clone {
#[derive(Clone, Debug)]
pub struct IetfTranscript(Vec<u8>);
impl Transcript for IetfTranscript {
type Challenge = Vec<u8>;
fn new(_: &'static [u8]) -> IetfTranscript {
unimplemented!("IetfTranscript should not be used with multiple nonce protocols");
}
fn domain_separate(&mut self, _: &[u8]) {}
fn append_message(&mut self, _: &'static [u8], message: &[u8]) {
@ -112,20 +122,22 @@ impl<C: Curve, H: Hram<C>> Algorithm<C> for Schnorr<C, H> {
&mut self.transcript
}
fn nonces(&self) -> Vec<Vec<C::G>> {
vec![vec![C::GENERATOR]]
}
fn preprocess_addendum<R: RngCore + CryptoRng>(
&mut self,
_: &mut R,
_: &MultisigView<C>,
_: &[C::F; 2],
_: &FrostView<C>,
) -> Vec<u8> {
vec![]
}
fn process_addendum(
&mut self,
_: &MultisigView<C>,
_: &FrostView<C>,
_: u16,
_: &[C::G; 2],
_: &[u8],
) -> Result<(), FrostError> {
Ok(())
@ -133,19 +145,19 @@ impl<C: Curve, H: Hram<C>> Algorithm<C> for Schnorr<C, H> {
fn sign_share(
&mut self,
params: &MultisigView<C>,
nonce_sum: C::G,
_: C::F,
nonce: C::F,
params: &FrostView<C>,
nonce_sums: &[Vec<C::G>],
nonces: &[C::F],
msg: &[u8],
) -> C::F {
let c = H::hram(&nonce_sum, &params.group_key(), msg);
let c = H::hram(&nonce_sums[0][0], &params.group_key(), msg);
self.c = Some(c);
schnorr::sign::<C>(params.secret_share(), nonce, c).s
schnorr::sign::<C>(params.secret_share(), nonces[0], c).s
}
fn verify(&self, group_key: C::G, nonce: C::G, sum: C::F) -> Option<Self::Signature> {
let sig = SchnorrSignature { R: nonce, s: sum };
#[must_use]
fn verify(&self, group_key: C::G, nonces: &[Vec<C::G>], sum: C::F) -> Option<Self::Signature> {
let sig = SchnorrSignature { R: nonces[0][0], s: sum };
if schnorr::verify::<C>(group_key, self.c.unwrap(), &sig) {
Some(sig)
} else {
@ -153,17 +165,18 @@ impl<C: Curve, H: Hram<C>> Algorithm<C> for Schnorr<C, H> {
}
}
#[must_use]
fn verify_share(
&self,
_: u16,
verification_share: C::G,
nonce: C::G,
nonces: &[Vec<C::G>],
share: C::F,
) -> bool {
schnorr::verify::<C>(
verification_share,
self.c.unwrap(),
&SchnorrSignature { R: nonce, s: share}
&SchnorrSignature { R: nonces[0][0], s: share}
)
}
}

View file

@ -0,0 +1,91 @@
use rand_core::{RngCore, CryptoRng};
use sha2::{Digest, Sha512};
use dalek_ff_group::Scalar;
use crate::{curve::Curve, algorithm::Hram};
macro_rules! dalek_curve {
(
$Curve: ident,
$Hram: ident,
$Point: ident,
$POINT: ident,
$ID: literal,
$CONTEXT: literal,
$chal: literal,
$digest: literal,
) => {
use dalek_ff_group::{$Point, $POINT};
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct $Curve;
impl Curve for $Curve {
type F = Scalar;
type G = $Point;
const ID: &'static [u8] = $ID;
const GENERATOR: Self::G = $POINT;
fn random_nonce<R: RngCore + CryptoRng>(secret: Self::F, rng: &mut R) -> Self::F {
let mut seed = vec![0; 32];
rng.fill_bytes(&mut seed);
seed.extend(&secret.to_bytes());
Self::hash_to_F(b"nonce", &seed)
}
fn hash_msg(msg: &[u8]) -> Vec<u8> {
Sha512::new()
.chain_update($CONTEXT)
.chain_update($digest)
.chain_update(msg)
.finalize()
.to_vec()
}
fn hash_binding_factor(binding: &[u8]) -> Self::F {
Self::hash_to_F(b"rho", binding)
}
fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F {
Scalar::from_hash(Sha512::new().chain_update($CONTEXT).chain_update(dst).chain_update(msg))
}
}
#[derive(Copy, Clone)]
pub struct $Hram;
impl Hram<$Curve> for $Hram {
#[allow(non_snake_case)]
fn hram(R: &$Point, A: &$Point, m: &[u8]) -> Scalar {
$Curve::hash_to_F($chal, &[&R.compress().to_bytes(), &A.compress().to_bytes(), m].concat())
}
}
}
}
#[cfg(any(test, feature = "ristretto"))]
dalek_curve!(
Ristretto,
IetfRistrettoHram,
RistrettoPoint,
RISTRETTO_BASEPOINT_POINT,
b"ristretto",
b"FROST-RISTRETTO255-SHA512-v5",
b"chal",
b"digest",
);
#[cfg(feature = "ed25519")]
dalek_curve!(
Ed25519,
IetfEd25519Hram,
EdwardsPoint,
ED25519_BASEPOINT_POINT,
b"edwards25519",
b"",
b"",
b"",
);

View file

@ -0,0 +1,105 @@
use rand_core::{RngCore, CryptoRng};
use sha2::{digest::Update, Digest, Sha256};
use group::{ff::Field, GroupEncoding};
use elliptic_curve::{bigint::{Encoding, U384}, hash2curve::{Expander, ExpandMsg, ExpandMsgXmd}};
use crate::{curve::{Curve, F_from_slice}, algorithm::Hram};
macro_rules! kp_curve {
(
$lib: ident,
$Curve: ident,
$Hram: ident,
$ID: literal,
$CONTEXT: literal
) => {
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct $Curve;
impl Curve for $Curve {
type F = $lib::Scalar;
type G = $lib::ProjectivePoint;
const ID: &'static [u8] = $ID;
const GENERATOR: Self::G = $lib::ProjectivePoint::GENERATOR;
fn random_nonce<R: RngCore + CryptoRng>(secret: Self::F, rng: &mut R) -> Self::F {
let mut seed = vec![0; 32];
rng.fill_bytes(&mut seed);
seed.extend(secret.to_bytes());
Self::hash_to_F(&[$CONTEXT as &[u8], b"nonce"].concat(), &seed)
}
fn hash_msg(msg: &[u8]) -> Vec<u8> {
(&Sha256::new()
.chain($CONTEXT)
.chain(b"digest")
.chain(msg)
.finalize()
).to_vec()
}
fn hash_binding_factor(binding: &[u8]) -> Self::F {
Self::hash_to_F(&[$CONTEXT as &[u8], b"rho"].concat(), binding)
}
fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F {
let mut dst = dst;
let oversize = Sha256::digest([b"H2C-OVERSIZE-DST-", dst].concat());
if dst.len() > 255 {
dst = &oversize;
}
// While one of these two libraries does support directly hashing to the Scalar field, the
// other doesn't. While that's probably an oversight, this is a universally working method
let mut modulus = vec![0; 16];
modulus.extend((Self::F::zero() - Self::F::one()).to_bytes());
let modulus = U384::from_be_slice(&modulus).wrapping_add(&U384::ONE);
F_from_slice::<Self::F>(
&U384::from_be_slice(&{
let mut bytes = [0; 48];
ExpandMsgXmd::<Sha256>::expand_message(
&[msg],
dst,
48
).unwrap().fill_bytes(&mut bytes);
bytes
}).reduce(&modulus).unwrap().to_be_bytes()[16 ..]
).unwrap()
}
}
#[derive(Clone)]
pub struct $Hram;
impl Hram<$Curve> for $Hram {
#[allow(non_snake_case)]
fn hram(R: &$lib::ProjectivePoint, A: &$lib::ProjectivePoint, m: &[u8]) -> $lib::Scalar {
$Curve::hash_to_F(
&[$CONTEXT as &[u8], b"chal"].concat(),
&[R.to_bytes().as_ref(), A.to_bytes().as_ref(), m].concat()
)
}
}
}
}
#[cfg(feature = "p256")]
kp_curve!(
p256,
P256,
IetfP256Hram,
b"P-256",
b"FROST-P256-SHA256-v5"
);
#[cfg(feature = "secp256k1")]
kp_curve!(
k256,
Secp256k1,
NonIetfSecp256k1Hram,
b"secp256k1",
b"FROST-secp256k1-SHA256-v5"
);

View file

@ -0,0 +1,117 @@
use core::fmt::Debug;
use thiserror::Error;
use rand_core::{RngCore, CryptoRng};
use ff::{PrimeField, PrimeFieldBits};
use group::{Group, GroupOps, GroupEncoding, prime::PrimeGroup};
#[cfg(any(test, feature = "dalek"))]
mod dalek;
#[cfg(any(test, feature = "ristretto"))]
pub use dalek::{Ristretto, IetfRistrettoHram};
#[cfg(feature = "ed25519")]
pub use dalek::{Ed25519, IetfEd25519Hram};
#[cfg(feature = "kp256")]
mod kp256;
#[cfg(feature = "secp256k1")]
pub use kp256::{Secp256k1, NonIetfSecp256k1Hram};
#[cfg(feature = "p256")]
pub use kp256::{P256, IetfP256Hram};
/// Set of errors for curve-related operations, namely encoding and decoding
#[derive(Clone, Error, Debug)]
pub enum CurveError {
#[error("invalid length for data (expected {0}, got {0})")]
InvalidLength(usize, usize),
#[error("invalid scalar")]
InvalidScalar,
#[error("invalid point")]
InvalidPoint,
}
/// Unified trait to manage a field/group
// This should be moved into its own crate if the need for generic cryptography over ff/group
// continues, which is the exact reason ff/group exists (to provide a generic interface)
// elliptic-curve exists, yet it doesn't really serve the same role, nor does it use &[u8]/Vec<u8>
// It uses GenericArray which will hopefully be deprecated as Rust evolves and doesn't offer enough
// advantages in the modern day to be worth the hassle -- Kayaba
pub trait Curve: Clone + Copy + PartialEq + Eq + Debug {
/// Scalar field element type
// This is available via G::Scalar yet `C::G::Scalar` is ambiguous, forcing horrific accesses
type F: PrimeField + PrimeFieldBits;
/// Group element type
type G: Group<Scalar = Self::F> + GroupOps + PrimeGroup;
/// ID for this curve
const ID: &'static [u8];
/// Generator for the group
// While group does provide this in its API, privacy coins may want to use a custom basepoint
const GENERATOR: Self::G;
/// Securely generate a random nonce. H4 from the IETF draft
fn random_nonce<R: RngCore + CryptoRng>(secret: Self::F, rng: &mut R) -> Self::F;
/// Hash the message for the binding factor. H3 from the IETF draft
// This doesn't actually need to be part of Curve as it does nothing with the curve
// This also solely relates to FROST and with a proper Algorithm/HRAM, all projects using
// aggregatable signatures over this curve will work without issue
// It is kept here as Curve + H{1, 2, 3} is effectively a ciphersuite according to the IETF draft
// and moving it to Schnorr would force all of them into being ciphersuite-specific
// H2 is left to the Schnorr Algorithm as H2 is the H used in HRAM, which Schnorr further
// modularizes
fn hash_msg(msg: &[u8]) -> Vec<u8>;
/// Hash the commitments and message to calculate the binding factor. H1 from the IETF draft
fn hash_binding_factor(binding: &[u8]) -> Self::F;
// The following methods would optimally be F:: and G:: yet developers can't control F/G
// They can control a trait they pass into this library
/// Field element from hash. Used during key gen and by other crates under Serai as a general
/// utility
// Not parameterized by Digest as it's fine for it to use its own hash function as relevant to
// hash_msg and hash_binding_factor
#[allow(non_snake_case)]
fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F;
}
#[allow(non_snake_case)]
pub(crate) fn F_len<C: Curve>() -> usize {
<C::F as PrimeField>::Repr::default().as_ref().len()
}
#[allow(non_snake_case)]
pub(crate) fn G_len<C: Curve>() -> usize {
<C::G as GroupEncoding>::Repr::default().as_ref().len()
}
/// Field element from slice
#[allow(non_snake_case)]
pub(crate) fn F_from_slice<F: PrimeField>(slice: &[u8]) -> Result<F, CurveError> {
let mut encoding = F::Repr::default();
encoding.as_mut().copy_from_slice(slice);
let scalar = Option::<F>::from(F::from_repr(encoding)).ok_or(CurveError::InvalidScalar)?;
// Ban non-canonical scalar encodings
if scalar.to_repr().as_ref() != slice {
Err(CurveError::InvalidScalar)?;
}
Ok(scalar)
}
/// Group element from slice
#[allow(non_snake_case)]
pub(crate) fn G_from_slice<G: PrimeGroup>(slice: &[u8]) -> Result<G, CurveError> {
let mut encoding = G::Repr::default();
encoding.as_mut().copy_from_slice(slice);
let point = Option::<G>::from(G::from_bytes(&encoding)).ok_or(CurveError::InvalidPoint)?;
// Ban the identity, per the FROST spec, and non-canonical points
if (point.is_identity().into()) || (point.to_bytes().as_ref() != slice) {
Err(CurveError::InvalidPoint)?;
}
Ok(point)
}
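Since `Curve` plus these hashes effectively defines a ciphersuite, downstream code can stay generic over it; a minimal, hypothetical helper (assuming the `curve` module is public, as its re-exports suggest):

```rust
use frost::curve::Curve;

// Derive a scalar for any supported curve via its ciphersuite hash (illustrative only)
fn challenge_scalar<C: Curve>(dst: &[u8], msg: &[u8]) -> C::F {
  C::hash_to_F(dst, msg)
}
```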

View file

@ -1,14 +1,14 @@
use core::fmt;
use std::collections::HashMap;
use std::{marker::PhantomData, collections::HashMap};
use rand_core::{RngCore, CryptoRng};
use ff::{Field, PrimeField};
use group::{ff::{Field, PrimeField}, GroupEncoding};
use multiexp::{multiexp_vartime, BatchVerifier};
use crate::{
Curve, MultisigParams, MultisigKeys, FrostError,
curve::{Curve, F_len, G_len, F_from_slice, G_from_slice},
FrostError, FrostParams, FrostKeys,
schnorr::{self, SchnorrSignature},
validate_map
};
@ -16,29 +16,34 @@ use crate::{
#[allow(non_snake_case)]
fn challenge<C: Curve>(context: &str, l: u16, R: &[u8], Am: &[u8]) -> C::F {
const DST: &'static [u8] = b"FROST Schnorr Proof of Knowledge";
// Uses hash_msg to get a fixed size value out of the context string
C::hash_to_F(&[DST, &C::hash_msg(context.as_bytes()), &l.to_be_bytes(), R, Am].concat())
let mut transcript = C::hash_msg(context.as_bytes());
transcript.extend(l.to_be_bytes());
transcript.extend(R);
transcript.extend(Am);
C::hash_to_F(DST, &transcript)
}
// Implements steps 1 through 3 of round 1 of FROST DKG. Returns the coefficients, commitments, and
// the serialized commitments to be broadcasted over an authenticated channel to all parties
fn generate_key_r1<R: RngCore + CryptoRng, C: Curve>(
rng: &mut R,
params: &MultisigParams,
params: &FrostParams,
context: &str,
) -> (Vec<C::F>, Vec<u8>) {
let t = usize::from(params.t);
let mut coefficients = Vec::with_capacity(t);
let mut commitments = Vec::with_capacity(t);
let mut serialized = Vec::with_capacity((C::G_len() * t) + C::G_len() + C::F_len());
let mut serialized = Vec::with_capacity((G_len::<C>() * t) + G_len::<C>() + F_len::<C>());
for i in 0 .. t {
// Step 1: Generate t random values to form a polynomial with
coefficients.push(C::F::random(&mut *rng));
// Step 3: Generate public commitments
commitments.push(C::generator_table() * coefficients[i]);
commitments.push(C::GENERATOR * coefficients[i]);
// Serialize them for publication
serialized.extend(&C::G_to_bytes(&commitments[i]));
serialized.extend(commitments[i].to_bytes().as_ref());
}
// Step 2: Provide a proof of knowledge
@ -54,7 +59,7 @@ fn generate_key_r1<R: RngCore + CryptoRng, C: Curve>(
challenge::<C>(
context,
params.i(),
&C::G_to_bytes(&(C::generator_table() * r)),
(C::GENERATOR * r).to_bytes().as_ref(),
&serialized
)
).serialize()
@ -67,7 +72,7 @@ fn generate_key_r1<R: RngCore + CryptoRng, C: Curve>(
// Verify the received data from the first round of key generation
fn verify_r1<R: RngCore + CryptoRng, C: Curve>(
rng: &mut R,
params: &MultisigParams,
params: &FrostParams,
context: &str,
our_commitments: Vec<u8>,
mut serialized: HashMap<u16, Vec<u8>>,
@ -78,19 +83,19 @@ fn verify_r1<R: RngCore + CryptoRng, C: Curve>(
(params.i(), our_commitments)
)?;
let commitments_len = usize::from(params.t()) * C::G_len();
let commitments_len = usize::from(params.t()) * G_len::<C>();
let mut commitments = HashMap::new();
#[allow(non_snake_case)]
let R_bytes = |l| &serialized[&l][commitments_len .. commitments_len + C::G_len()];
let R_bytes = |l| &serialized[&l][commitments_len .. commitments_len + G_len::<C>()];
#[allow(non_snake_case)]
let R = |l| C::G_from_slice(R_bytes(l)).map_err(|_| FrostError::InvalidProofOfKnowledge(l));
let R = |l| G_from_slice::<C::G>(R_bytes(l)).map_err(|_| FrostError::InvalidProofOfKnowledge(l));
#[allow(non_snake_case)]
let Am = |l| &serialized[&l][0 .. commitments_len];
let s = |l| C::F_from_slice(
&serialized[&l][commitments_len + C::G_len() ..]
let s = |l| F_from_slice::<C::F>(
&serialized[&l][commitments_len + G_len::<C>() ..]
).map_err(|_| FrostError::InvalidProofOfKnowledge(l));
let mut signatures = Vec::with_capacity(usize::from(params.n() - 1));
@ -98,8 +103,8 @@ fn verify_r1<R: RngCore + CryptoRng, C: Curve>(
let mut these_commitments = vec![];
for c in 0 .. usize::from(params.t()) {
these_commitments.push(
C::G_from_slice(
&serialized[&l][(c * C::G_len()) .. ((c + 1) * C::G_len())]
G_from_slice::<C::G>(
&serialized[&l][(c * G_len::<C>()) .. ((c + 1) * G_len::<C>())]
).map_err(|_| FrostError::InvalidCommitment(l.try_into().unwrap()))?
);
}
@ -144,7 +149,7 @@ fn polynomial<F: PrimeField>(
// counterparty to receive
fn generate_key_r2<R: RngCore + CryptoRng, C: Curve>(
rng: &mut R,
params: &MultisigParams,
params: &FrostParams,
context: &str,
coefficients: Vec<C::F>,
our_commitments: Vec<u8>,
@ -161,7 +166,7 @@ fn generate_key_r2<R: RngCore + CryptoRng, C: Curve>(
continue;
}
res.insert(l, C::F_to_bytes(&polynomial(&coefficients, l)));
res.insert(l, polynomial(&coefficients, l).to_repr().as_ref().to_vec());
}
// Calculate our own share
@ -185,22 +190,22 @@ fn generate_key_r2<R: RngCore + CryptoRng, C: Curve>(
/// broadcasted initially
fn complete_r2<R: RngCore + CryptoRng, C: Curve>(
rng: &mut R,
params: MultisigParams,
params: FrostParams,
mut secret_share: C::F,
commitments: HashMap<u16, Vec<C::G>>,
// Vec to preserve ownership
mut serialized: HashMap<u16, Vec<u8>>,
) -> Result<MultisigKeys<C>, FrostError> {
) -> Result<FrostKeys<C>, FrostError> {
validate_map(
&mut serialized,
&(1 ..= params.n()).into_iter().collect::<Vec<_>>(),
(params.i(), C::F_to_bytes(&secret_share))
(params.i(), secret_share.to_repr().as_ref().to_vec())
)?;
// Step 2. Verify each share
let mut shares = HashMap::new();
for (l, share) in serialized {
shares.insert(l, C::F_from_slice(&share).map_err(|_| FrostError::InvalidShare(l))?);
shares.insert(l, F_from_slice::<C::F>(&share).map_err(|_| FrostError::InvalidShare(l))?);
}
// Calculate the exponent for a given participant and apply it to a series of commitments
@ -219,7 +224,7 @@ fn complete_r2<R: RngCore + CryptoRng, C: Curve>(
res
};
let mut batch = BatchVerifier::new(shares.len(), C::little_endian());
let mut batch = BatchVerifier::new(shares.len());
for (l, share) in &shares {
if *l == params.i() {
continue;
@ -232,7 +237,7 @@ fn complete_r2<R: RngCore + CryptoRng, C: Curve>(
// ensure that malleability isn't present is to use this n * t algorithm, which runs
// per sender and not as an aggregate of all senders, which also enables blame
let mut values = exponential(params.i, &commitments[l]);
values.push((-*share, C::generator()));
values.push((-*share, C::GENERATOR));
batch.queue(rng, *l, values);
}
batch.verify_with_vartime_blame().map_err(|l| FrostError::InvalidCommitment(l))?;
@ -249,14 +254,15 @@ fn complete_r2<R: RngCore + CryptoRng, C: Curve>(
// Calculate each user's verification share
let mut verification_shares = HashMap::new();
for i in 1 ..= params.n() {
verification_shares.insert(i, multiexp_vartime(exponential(i, &stripes), C::little_endian()));
verification_shares.insert(i, multiexp_vartime(&exponential(i, &stripes)));
}
debug_assert_eq!(C::generator_table() * secret_share, verification_shares[&params.i()]);
// Removing this check would enable optimizing the above from t + (n * t) to t + ((n - 1) * t)
debug_assert_eq!(C::GENERATOR * secret_share, verification_shares[&params.i()]);
// TODO: Clear serialized and shares
Ok(
MultisigKeys {
FrostKeys {
params,
secret_share,
group_key: stripes[0],
@ -266,100 +272,76 @@ fn complete_r2<R: RngCore + CryptoRng, C: Curve>(
)
}
/// State of a Key Generation machine
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum State {
Fresh,
GeneratedCoefficients,
GeneratedSecretShares,
Complete,
}
impl fmt::Display for State {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
}
}
/// State machine which manages key generation
#[allow(non_snake_case)]
pub struct StateMachine<C: Curve> {
params: MultisigParams,
pub struct KeyGenMachine<C: Curve> {
params: FrostParams,
context: String,
state: State,
coefficients: Option<Vec<C::F>>,
our_commitments: Option<Vec<u8>>,
secret: Option<C::F>,
commitments: Option<HashMap<u16, Vec<C::G>>>
_curve: PhantomData<C>,
}
impl<C: Curve> StateMachine<C> {
pub struct SecretShareMachine<C: Curve> {
params: FrostParams,
context: String,
coefficients: Vec<C::F>,
our_commitments: Vec<u8>,
}
pub struct KeyMachine<C: Curve> {
params: FrostParams,
secret: C::F,
commitments: HashMap<u16, Vec<C::G>>,
}
impl<C: Curve> KeyGenMachine<C> {
/// Creates a new machine to generate a key for the specified curve in the specified multisig
// The context string must be unique among multisigs
pub fn new(params: MultisigParams, context: String) -> StateMachine<C> {
StateMachine {
params,
context,
state: State::Fresh,
coefficients: None,
our_commitments: None,
secret: None,
commitments: None
}
pub fn new(params: FrostParams, context: String) -> KeyGenMachine<C> {
KeyGenMachine { params, context, _curve: PhantomData }
}
/// Start generating a key according to the FROST DKG spec
/// Returns a serialized list of commitments to be sent to all parties over an authenticated
/// channel. If any party submits multiple sets of commitments, they MUST be treated as malicious
pub fn generate_coefficients<R: RngCore + CryptoRng>(
&mut self,
self,
rng: &mut R
) -> Result<Vec<u8>, FrostError> {
if self.state != State::Fresh {
Err(FrostError::InvalidKeyGenTransition(State::Fresh, self.state))?;
}
let (coefficients, serialized) = generate_key_r1::<R, C>(
rng,
&self.params,
&self.context,
);
self.coefficients = Some(coefficients);
self.our_commitments = Some(serialized.clone());
self.state = State::GeneratedCoefficients;
Ok(serialized)
) -> (SecretShareMachine<C>, Vec<u8>) {
let (coefficients, serialized) = generate_key_r1::<R, C>(rng, &self.params, &self.context);
(
SecretShareMachine {
params: self.params,
context: self.context,
coefficients,
our_commitments: serialized.clone()
},
serialized,
)
}
}
impl<C: Curve> SecretShareMachine<C> {
/// Continue generating a key
/// Takes in everyone else's commitments, keyed by participant index; the entry for our own
/// index i is handled locally. Returns a map of byte vectors, one secret share per other
/// participant, each of which should be encrypted before sending
pub fn generate_secret_shares<R: RngCore + CryptoRng>(
&mut self,
self,
rng: &mut R,
commitments: HashMap<u16, Vec<u8>>,
) -> Result<HashMap<u16, Vec<u8>>, FrostError> {
if self.state != State::GeneratedCoefficients {
Err(FrostError::InvalidKeyGenTransition(State::GeneratedCoefficients, self.state))?;
}
) -> Result<(KeyMachine<C>, HashMap<u16, Vec<u8>>), FrostError> {
let (secret, commitments, shares) = generate_key_r2::<R, C>(
rng,
&self.params,
&self.context,
self.coefficients.take().unwrap(),
self.our_commitments.take().unwrap(),
self.coefficients,
self.our_commitments,
commitments,
)?;
self.secret = Some(secret);
self.commitments = Some(commitments);
self.state = State::GeneratedSecretShares;
Ok(shares)
Ok((KeyMachine { params: self.params, secret, commitments }, shares))
}
}
impl<C: Curve> KeyMachine<C> {
/// Complete key generation
/// Takes in everyone else's shares submitted to us, keyed by participant index; our own entry
/// is handled locally. Returns a byte vector representing the
@ -367,31 +349,10 @@ impl<C: Curve> StateMachine<C> {
/// must report completion without issue before this key can be considered usable, yet you should
/// wait for all participants to report as such
pub fn complete<R: RngCore + CryptoRng>(
&mut self,
self,
rng: &mut R,
shares: HashMap<u16, Vec<u8>>,
) -> Result<MultisigKeys<C>, FrostError> {
if self.state != State::GeneratedSecretShares {
Err(FrostError::InvalidKeyGenTransition(State::GeneratedSecretShares, self.state))?;
}
let keys = complete_r2(
rng,
self.params,
self.secret.take().unwrap(),
self.commitments.take().unwrap(),
shares,
)?;
self.state = State::Complete;
Ok(keys)
}
pub fn params(&self) -> MultisigParams {
self.params.clone()
}
pub fn state(&self) -> State {
self.state
) -> Result<FrostKeys<C>, FrostError> {
complete_r2(rng, self.params, self.secret, self.commitments, shares)
}
}
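A minimal sketch of how one party chains these machines, assuming an out-of-band authenticated
broadcast channel and hypothetical receive_commitments/receive_shares helpers (neither is part
of this library):

  let machine = KeyGenMachine::<C>::new(params, "unique context".to_string());
  let (machine, commitments) = machine.generate_coefficients(&mut rng);
  // Broadcast our commitments, then collect everyone else's, keyed by participant index
  let (machine, shares) = machine.generate_secret_shares(&mut rng, receive_commitments())?;
  // Encrypt and send each share to its recipient, then collect the shares sent to us
  let keys = machine.complete(&mut rng, receive_shares())?;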

View file

@ -1,124 +1,24 @@
use core::{ops::Mul, fmt::Debug};
use core::fmt::Debug;
use std::collections::HashMap;
use thiserror::Error;
use ff::{Field, PrimeField};
use group::{Group, GroupOps};
use group::{ff::{Field, PrimeField}, GroupEncoding};
mod schnorr;
pub mod curve;
use curve::{Curve, F_len, G_len, F_from_slice, G_from_slice};
pub mod key_gen;
pub mod algorithm;
pub mod sign;
pub mod tests;
/// Set of errors for curve-related operations, namely encoding and decoding
#[derive(Clone, Error, Debug)]
pub enum CurveError {
#[error("invalid length for data (expected {0}, got {0})")]
InvalidLength(usize, usize),
#[error("invalid scalar")]
InvalidScalar,
#[error("invalid point")]
InvalidPoint,
}
/// Unified trait to manage a field/group
// This should be moved into its own crate if the need for generic cryptography over ff/group
// continues, which is the exact reason ff/group exists (to provide a generic interface)
// elliptic-curve exists, yet it doesn't really serve the same role, nor does it use &[u8]/Vec<u8>
// It uses GenericArray which will hopefully be deprecated as Rust evolves and doesn't offer enough
// advantages in the modern day to be worth the hassle -- Kayaba
pub trait Curve: Clone + Copy + PartialEq + Eq + Debug {
/// Field element type
// This is available via G::Scalar yet `C::G::Scalar` is ambiguous, forcing horrific accesses
type F: PrimeField;
/// Group element type
type G: Group<Scalar = Self::F> + GroupOps;
/// Precomputed table type
type T: Mul<Self::F, Output = Self::G>;
/// ID for this curve
fn id() -> String;
/// Byte length of the curve ID
// While curve.id().len() is trivial, this bounds it to u8 and lets us ignore the possibility it
// contains Unicode, therefore having a String length which is different from its byte length
fn id_len() -> u8;
/// Generator for the group
// While group does provide this in its API, Jubjub users will want to use a custom basepoint
fn generator() -> Self::G;
/// Table for the generator for the group
/// If there isn't a precomputed table available, the generator itself should be used
fn generator_table() -> Self::T;
/// If little endian is used for the scalar field's Repr
fn little_endian() -> bool;
/// Hash the message for the binding factor. H3 from the IETF draft
// This doesn't actually need to be part of Curve as it does nothing with the curve
// This also solely relates to FROST and with a proper Algorithm/HRAM, all projects using
// aggregatable signatures over this curve will work without issue
// It is kept here as Curve + H{1, 2, 3} is effectively a ciphersuite according to the IETF draft
// and moving it to Schnorr would force all of them into being ciphersuite-specific
// H2 is left to the Schnorr Algorithm as H2 is the H used in HRAM, which Schnorr further
// modularizes
fn hash_msg(msg: &[u8]) -> Vec<u8>;
/// Hash the commitments and message to calculate the binding factor. H1 from the IETF draft
fn hash_binding_factor(binding: &[u8]) -> Self::F;
// The following methods would optimally be F:: and G:: yet developers can't control F/G
// They can control a trait they pass into this library
/// Field element from hash. Used during key gen and by other crates under Serai as a general
/// utility
// Not parameterized by Digest as it's fine for it to use its own hash function as relevant to
// hash_msg and hash_binding_factor
#[allow(non_snake_case)]
fn hash_to_F(data: &[u8]) -> Self::F;
/// Constant size of a serialized field element
// The alternative way to grab this would be either serializing a junk element and getting its
// length or doing a naive division of its BITS property by 8 and assuming a lack of padding
#[allow(non_snake_case)]
fn F_len() -> usize;
/// Constant size of a serialized group element
// We could grab the serialization as described above yet a naive developer may use a
// non-constant size encoding, proving yet another reason to force this to be a provided constant
// A naive developer could still provide a constant for a variable length encoding, yet at least
// that is on them
#[allow(non_snake_case)]
fn G_len() -> usize;
/// Field element from slice. Preferred to be canonical yet does not have to be
// Required due to the lack of standardized encoding functions provided by ff/group
// While they do technically exist, their usage of Self::Repr breaks all potential library usage
// without helper functions like this
#[allow(non_snake_case)]
fn F_from_slice(slice: &[u8]) -> Result<Self::F, CurveError>;
/// Group element from slice. Must require canonicity or risks differing binding factors
#[allow(non_snake_case)]
fn G_from_slice(slice: &[u8]) -> Result<Self::G, CurveError>;
/// Obtain a vector of the byte encoding of F
#[allow(non_snake_case)]
fn F_to_bytes(f: &Self::F) -> Vec<u8>;
/// Obtain a vector of the byte encoding of G
#[allow(non_snake_case)]
fn G_to_bytes(g: &Self::G) -> Vec<u8>;
}
/// Parameters for a multisig
// These fields can not be made public as they should be static
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct MultisigParams {
pub struct FrostParams {
/// Participants needed to sign on behalf of the group
t: u16,
/// Amount of participants
@ -127,12 +27,12 @@ pub struct MultisigParams {
i: u16,
}
impl MultisigParams {
impl FrostParams {
pub fn new(
t: u16,
n: u16,
i: u16
) -> Result<MultisigParams, FrostError> {
) -> Result<FrostParams, FrostError> {
if (t == 0) || (n == 0) {
Err(FrostError::ZeroParameter(t, n))?;
}
@ -146,7 +46,7 @@ impl MultisigParams {
Err(FrostError::InvalidParticipantIndex(n, i))?;
}
Ok(MultisigParams{ t, n, i })
Ok(FrostParams{ t, n, i })
}
pub fn t(&self) -> u16 { self.t }
@ -179,11 +79,6 @@ pub enum FrostError {
InvalidProofOfKnowledge(u16),
#[error("invalid share (participant {0})")]
InvalidShare(u16),
#[error("invalid key generation state machine transition (expected {0}, was {1})")]
InvalidKeyGenTransition(key_gen::State, key_gen::State),
#[error("invalid sign state machine transition (expected {0}, was {1})")]
InvalidSignTransition(sign::State, sign::State),
#[error("internal error ({0})")]
InternalError(String),
@ -191,14 +86,14 @@ pub enum FrostError {
// View of keys passable to algorithm implementations
#[derive(Clone)]
pub struct MultisigView<C: Curve> {
pub struct FrostView<C: Curve> {
group_key: C::G,
included: Vec<u16>,
secret_share: C::F,
verification_shares: HashMap<u16, C::G>,
}
impl<C: Curve> MultisigView<C> {
impl<C: Curve> FrostView<C> {
pub fn group_key(&self) -> C::G {
self.group_key
}
@ -239,9 +134,9 @@ pub fn lagrange<F: PrimeField>(
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct MultisigKeys<C: Curve> {
/// Multisig Parameters
params: MultisigParams,
pub struct FrostKeys<C: Curve> {
/// FROST Parameters
params: FrostParams,
/// Secret share key
secret_share: C::F,
@ -254,21 +149,26 @@ pub struct MultisigKeys<C: Curve> {
offset: Option<C::F>,
}
impl<C: Curve> MultisigKeys<C> {
pub fn offset(&self, offset: C::F) -> MultisigKeys<C> {
impl<C: Curve> FrostKeys<C> {
/// Offset the keys by a given scalar to allow for account and privacy schemes
/// This offset is ephemeral and will not be included when these keys are serialized
/// Keys offset multiple times will form a new offset of their sum
/// Not IETF compliant
pub fn offset(&self, offset: C::F) -> FrostKeys<C> {
let mut res = self.clone();
// Carry any existing offset
// Enables schemes like Monero's subaddresses which have a per-subaddress offset and then a
// one-time-key offset
res.offset = Some(offset + res.offset.unwrap_or(C::F::zero()));
res.group_key += C::GENERATOR * offset;
res
}
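  // For example, with hypothetical scalars a and b (not from this diff), keys.offset(a).offset(b)
  // carries an offset of a + b and a group key of keys.group_key() + (C::GENERATOR * (a + b))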
pub fn params(&self) -> MultisigParams {
pub fn params(&self) -> FrostParams {
self.params
}
pub fn secret_share(&self) -> C::F {
fn secret_share(&self) -> C::F {
self.secret_share
}
@ -276,11 +176,11 @@ impl<C: Curve> MultisigKeys<C> {
self.group_key
}
pub fn verification_shares(&self) -> HashMap<u16, C::G> {
fn verification_shares(&self) -> HashMap<u16, C::G> {
self.verification_shares.clone()
}
pub fn view(&self, included: &[u16]) -> Result<MultisigView<C>, FrostError> {
pub fn view(&self, included: &[u16]) -> Result<FrostView<C>, FrostError> {
if (included.len() < self.params.t.into()) || (usize::from(self.params.n) < included.len()) {
Err(FrostError::InvalidSigningSet("invalid amount of participants included".to_string()))?;
}
@ -289,13 +189,13 @@ impl<C: Curve> MultisigKeys<C> {
let offset = self.offset.unwrap_or(C::F::zero());
let offset_share = offset * C::F::from(included.len().try_into().unwrap()).invert().unwrap();
Ok(MultisigView {
group_key: self.group_key + (C::generator_table() * offset),
Ok(FrostView {
group_key: self.group_key,
secret_share: secret_share + offset_share,
verification_shares: self.verification_shares.iter().map(
|(l, share)| (
*l,
(*share * lagrange::<C::F>(*l, &included)) + (C::generator_table() * offset_share)
(*share * lagrange::<C::F>(*l, &included)) + (C::GENERATOR * offset_share)
)
).collect(),
included: included.to_vec(),
@ -303,84 +203,76 @@ impl<C: Curve> MultisigKeys<C> {
}
pub fn serialized_len(n: u16) -> usize {
1 + usize::from(C::id_len()) + (3 * 2) + C::F_len() + C::G_len() + (usize::from(n) * C::G_len())
8 + C::ID.len() + (3 * 2) + F_len::<C>() + G_len::<C>() + (usize::from(n) * G_len::<C>())
}
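  // That length corresponds to: an 8-byte big-endian curve ID length, the curve ID, t, n, and i
  // (two bytes each), the secret share, the group key, and n verification shares, as written by
  // serialize below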
pub fn serialize(&self) -> Vec<u8> {
let mut serialized = Vec::with_capacity(
1 + usize::from(C::id_len()) + MultisigKeys::<C>::serialized_len(self.params.n)
);
serialized.push(C::id_len());
serialized.extend(C::id().as_bytes());
serialized.extend(&self.params.n.to_le_bytes());
serialized.extend(&self.params.t.to_le_bytes());
serialized.extend(&self.params.i.to_le_bytes());
serialized.extend(&C::F_to_bytes(&self.secret_share));
serialized.extend(&C::G_to_bytes(&self.group_key));
let mut serialized = Vec::with_capacity(FrostKeys::<C>::serialized_len(self.params.n));
serialized.extend(u64::try_from(C::ID.len()).unwrap().to_be_bytes());
serialized.extend(C::ID);
serialized.extend(&self.params.t.to_be_bytes());
serialized.extend(&self.params.n.to_be_bytes());
serialized.extend(&self.params.i.to_be_bytes());
serialized.extend(self.secret_share.to_repr().as_ref());
serialized.extend(self.group_key.to_bytes().as_ref());
for l in 1 ..= self.params.n.into() {
serialized.extend(&C::G_to_bytes(&self.verification_shares[&l]));
serialized.extend(self.verification_shares[&l].to_bytes().as_ref());
}
serialized
}
pub fn deserialize(serialized: &[u8]) -> Result<MultisigKeys<C>, FrostError> {
if serialized.len() < 1 {
Err(FrostError::InternalError("MultisigKeys serialization is empty".to_string()))?;
pub fn deserialize(serialized: &[u8]) -> Result<FrostKeys<C>, FrostError> {
let mut start = u64::try_from(C::ID.len()).unwrap().to_be_bytes().to_vec();
start.extend(C::ID);
let mut cursor = start.len();
if serialized.len() < (cursor + 4) {
Err(
FrostError::InternalError(
"FrostKeys serialization is missing its curve/participant quantities".to_string()
)
)?;
}
let id_len: usize = serialized[0].into();
let mut cursor = 1;
if serialized.len() < (cursor + id_len) {
Err(FrostError::InternalError("ID wasn't included".to_string()))?;
}
let id = &serialized[cursor .. (cursor + id_len)];
if C::id().as_bytes() != id {
if &start != &serialized[.. cursor] {
Err(
FrostError::InternalError(
"curve is distinct between serialization and deserialization".to_string()
)
)?;
}
cursor += id_len;
if serialized.len() < (cursor + 8) {
Err(FrostError::InternalError("participant quantity wasn't included".to_string()))?;
}
let n = u16::from_le_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap());
let t = u16::from_be_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap());
cursor += 2;
if serialized.len() != MultisigKeys::<C>::serialized_len(n) {
let n = u16::from_be_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap());
cursor += 2;
if serialized.len() != FrostKeys::<C>::serialized_len(n) {
Err(FrostError::InternalError("incorrect serialization length".to_string()))?;
}
let t = u16::from_le_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap());
cursor += 2;
let i = u16::from_le_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap());
let i = u16::from_be_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap());
cursor += 2;
let secret_share = C::F_from_slice(&serialized[cursor .. (cursor + C::F_len())])
let secret_share = F_from_slice::<C::F>(&serialized[cursor .. (cursor + F_len::<C>())])
.map_err(|_| FrostError::InternalError("invalid secret share".to_string()))?;
cursor += C::F_len();
let group_key = C::G_from_slice(&serialized[cursor .. (cursor + C::G_len())])
cursor += F_len::<C>();
let group_key = G_from_slice::<C::G>(&serialized[cursor .. (cursor + G_len::<C>())])
.map_err(|_| FrostError::InternalError("invalid group key".to_string()))?;
cursor += C::G_len();
cursor += G_len::<C>();
let mut verification_shares = HashMap::new();
for l in 1 ..= n {
verification_shares.insert(
l,
C::G_from_slice(&serialized[cursor .. (cursor + C::G_len())])
G_from_slice::<C::G>(&serialized[cursor .. (cursor + G_len::<C>())])
.map_err(|_| FrostError::InternalError("invalid verification share".to_string()))?
);
cursor += C::G_len();
cursor += G_len::<C>();
}
Ok(
MultisigKeys {
params: MultisigParams::new(t, n, i)
FrostKeys {
params: FrostParams::new(t, n, i)
.map_err(|_| FrostError::InternalError("invalid parameters".to_string()))?,
secret_share,
group_key,

View file

@ -1,10 +1,10 @@
use rand_core::{RngCore, CryptoRng};
use ff::Field;
use group::{ff::{Field, PrimeField}, GroupEncoding};
use multiexp::BatchVerifier;
use crate::Curve;
use crate::{Curve, F_len, G_len};
#[allow(non_snake_case)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
@ -15,9 +15,9 @@ pub struct SchnorrSignature<C: Curve> {
impl<C: Curve> SchnorrSignature<C> {
pub fn serialize(&self) -> Vec<u8> {
let mut res = Vec::with_capacity(C::G_len() + C::F_len());
res.extend(C::G_to_bytes(&self.R));
res.extend(C::F_to_bytes(&self.s));
let mut res = Vec::with_capacity(G_len::<C>() + F_len::<C>());
res.extend(self.R.to_bytes().as_ref());
res.extend(self.s.to_repr().as_ref());
res
}
}
@ -28,25 +28,26 @@ pub(crate) fn sign<C: Curve>(
challenge: C::F
) -> SchnorrSignature<C> {
SchnorrSignature {
R: C::generator_table() * nonce,
R: C::GENERATOR * nonce,
s: nonce + (private_key * challenge)
}
}
#[must_use]
pub(crate) fn verify<C: Curve>(
public_key: C::G,
challenge: C::F,
signature: &SchnorrSignature<C>
) -> bool {
(C::generator_table() * signature.s) == (signature.R + (public_key * challenge))
(C::GENERATOR * signature.s) == (signature.R + (public_key * challenge))
}
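// Correctness of the identity checked above: since s = r + (c * a), sG = rG + (c * aG) = R + cA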
pub(crate) fn batch_verify<C: Curve, R: RngCore + CryptoRng>(
rng: &mut R,
triplets: &[(u16, C::G, C::F, SchnorrSignature<C>)]
) -> Result<(), u16> {
let mut values = [(C::F::one(), C::generator()); 3];
let mut batch = BatchVerifier::new(triplets.len(), C::little_endian());
let mut values = [(C::F::one(), C::GENERATOR); 3];
let mut batch = BatchVerifier::new(triplets.len());
for triple in triplets {
// s = r + ca
// sG == R + cA

View file

@ -1,33 +1,35 @@
use core::fmt;
use std::{rc::Rc, collections::HashMap};
use std::{sync::Arc, collections::HashMap};
use rand_core::{RngCore, CryptoRng};
use ff::Field;
use group::{ff::{Field, PrimeField}, Group, GroupEncoding};
use transcript::Transcript;
use dleq::{Generators, DLEqProof};
use crate::{
Curve,
curve::{Curve, F_len, G_len, F_from_slice, G_from_slice},
FrostError,
MultisigParams, MultisigKeys, MultisigView,
FrostParams, FrostKeys, FrostView,
algorithm::Algorithm,
validate_map
};
/// Pairing of an Algorithm with a MultisigKeys instance and this specific signing set
/// Pairing of an Algorithm with a FrostKeys instance and this specific signing set
#[derive(Clone)]
pub struct Params<C: Curve, A: Algorithm<C>> {
algorithm: A,
keys: Rc<MultisigKeys<C>>,
view: MultisigView<C>,
keys: Arc<FrostKeys<C>>,
view: FrostView<C>,
}
// Currently public to enable more complex operations as desired, yet solely used in testing
impl<C: Curve, A: Algorithm<C>> Params<C, A> {
pub fn new(
algorithm: A,
keys: Rc<MultisigKeys<C>>,
keys: Arc<FrostKeys<C>>,
included: &[u16],
) -> Result<Params<C, A>, FrostError> {
let mut included = included.to_vec();
@ -60,18 +62,22 @@ impl<C: Curve, A: Algorithm<C>> Params<C, A> {
Ok(Params { algorithm, view: keys.view(&included).unwrap(), keys })
}
pub fn multisig_params(&self) -> MultisigParams {
pub fn multisig_params(&self) -> FrostParams {
self.keys.params
}
pub fn view(&self) -> MultisigView<C> {
pub fn view(&self) -> FrostView<C> {
self.view.clone()
}
}
struct PreprocessPackage<C: Curve> {
nonces: [C::F; 2],
serialized: Vec<u8>,
fn nonce_transcript<T: Transcript>() -> T {
T::new(b"FROST_nonce_dleq")
}
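// Holds one (d, e) nonce pair per nonce set requested by the algorithm, plus the serialized
// commitments (and DLEq proofs, when multiple generators are in use) to publish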
pub(crate) struct PreprocessPackage<C: Curve> {
pub(crate) nonces: Vec<[C::F; 2]>,
pub(crate) serialized: Vec<u8>,
}
// This library unifies the preprocessing step with signing due to security concerns and to provide
@ -80,27 +86,53 @@ fn preprocess<R: RngCore + CryptoRng, C: Curve, A: Algorithm<C>>(
rng: &mut R,
params: &mut Params<C, A>,
) -> PreprocessPackage<C> {
let nonces = [C::F::random(&mut *rng), C::F::random(&mut *rng)];
let commitments = [C::generator_table() * nonces[0], C::generator_table() * nonces[1]];
let mut serialized = C::G_to_bytes(&commitments[0]);
serialized.extend(&C::G_to_bytes(&commitments[1]));
let mut serialized = Vec::with_capacity(2 * G_len::<C>());
let nonces = params.algorithm.nonces().iter().cloned().map(
|mut generators| {
let nonces = [
C::random_nonce(params.view().secret_share(), &mut *rng),
C::random_nonce(params.view().secret_share(), &mut *rng)
];
serialized.extend(
&params.algorithm.preprocess_addendum(
rng,
&params.view,
&nonces
)
);
let commit = |generator: C::G| {
let commitments = [generator * nonces[0], generator * nonces[1]];
[commitments[0].to_bytes().as_ref(), commitments[1].to_bytes().as_ref()].concat().to_vec()
};
let first = generators.remove(0);
serialized.extend(commit(first));
// Iterate over the rest
for generator in generators.iter() {
serialized.extend(commit(*generator));
// Provide a DLEq to verify these commitments are for the same nonce
// TODO: Provide a single DLEq. See https://github.com/serai-dex/serai/issues/34
for nonce in nonces {
DLEqProof::prove(
&mut *rng,
// Uses an independent transcript as each signer must do this now, yet we validate them
// sequentially by the global order. Avoids needing to clone the transcript around
&mut nonce_transcript::<A::Transcript>(),
Generators::new(first, *generator),
nonce
).serialize(&mut serialized).unwrap();
}
}
nonces
}
).collect::<Vec<_>>();
serialized.extend(&params.algorithm.preprocess_addendum(rng, &params.view));
PreprocessPackage { nonces, serialized }
}
#[allow(non_snake_case)]
struct Package<C: Curve> {
B: HashMap<u16, [C::G; 2]>,
B: HashMap<u16, Vec<Vec<[C::G; 2]>>>,
binding: C::F,
R: C::G,
Rs: Vec<Vec<C::G>>,
share: Vec<u8>
}
@ -126,7 +158,7 @@ fn sign_with_share<C: Curve, A: Algorithm<C>>(
transcript.domain_separate(b"FROST");
// Include the offset, if one exists
if let Some(offset) = params.keys.offset {
transcript.append_message(b"offset", &C::F_to_bytes(&offset));
transcript.append_message(b"offset", offset.to_repr().as_ref());
}
}
@ -134,61 +166,98 @@ fn sign_with_share<C: Curve, A: Algorithm<C>>(
let mut B = HashMap::<u16, _>::with_capacity(params.view.included.len());
// Get the binding factor
let nonces = params.algorithm.nonces();
let mut addendums = HashMap::new();
let binding = {
let transcript = params.algorithm.transcript();
// Parse the commitments
for l in &params.view.included {
transcript.append_message(b"participant", &l.to_be_bytes());
let serialized = commitments.remove(l).unwrap();
let commitments = commitments.remove(l).unwrap();
let mut read_commitment = |c, label| {
let commitment = &commitments[c .. (c + C::G_len())];
let commitment = &serialized[c .. (c + G_len::<C>())];
transcript.append_message(label, commitment);
C::G_from_slice(commitment).map_err(|_| FrostError::InvalidCommitment(*l))
G_from_slice::<C::G>(commitment).map_err(|_| FrostError::InvalidCommitment(*l))
};
// While this doesn't note which nonce/basepoint this is for, those are expected to be
// static. Beyond that, they're committed to in the DLEq proof transcripts, ensuring
// consistency. While this is suboptimal, it maintains IETF compliance, and Algorithm is
// documented accordingly
#[allow(non_snake_case)]
let mut read_D_E = || Ok(
[read_commitment(0, b"commitment_D")?, read_commitment(C::G_len(), b"commitment_E")?]
);
let mut read_D_E = |c| Ok([
read_commitment(c, b"commitment_D")?,
read_commitment(c + G_len::<C>(), b"commitment_E")?
]);
B.insert(*l, read_D_E()?);
addendums.insert(*l, commitments[(C::G_len() * 2) ..].to_vec());
let mut c = 0;
let mut commitments = Vec::with_capacity(nonces.len());
for (n, nonce_generators) in nonces.clone().iter_mut().enumerate() {
commitments.push(Vec::with_capacity(nonce_generators.len()));
let first = nonce_generators.remove(0);
commitments[n].push(read_D_E(c)?);
c += 2 * G_len::<C>();
for generator in nonce_generators {
commitments[n].push(read_D_E(c)?);
c += 2 * G_len::<C>();
for de in 0 .. 2 {
DLEqProof::deserialize(
&mut std::io::Cursor::new(&serialized[c .. (c + (2 * F_len::<C>()))])
).map_err(|_| FrostError::InvalidCommitment(*l))?.verify(
&mut nonce_transcript::<A::Transcript>(),
Generators::new(first, *generator),
(commitments[n][0][de], commitments[n][commitments[n].len() - 1][de])
).map_err(|_| FrostError::InvalidCommitment(*l))?;
c += 2 * F_len::<C>();
}
}
addendums.insert(*l, serialized[c ..].to_vec());
}
B.insert(*l, commitments);
}
// Append the message to the transcript
transcript.append_message(b"message", &C::hash_msg(&msg));
// Calculate the binding factor
C::hash_binding_factor(&transcript.challenge(b"binding"))
C::hash_binding_factor(transcript.challenge(b"binding").as_ref())
};
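  // The binding factor thus commits to the signing set, all commitments, and the message,
  // binding each signature share to this exact signing session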
// Process the addendums
for l in &params.view.included {
params.algorithm.process_addendum(&params.view, *l, &B[l], &addendums[l])?;
params.algorithm.process_addendum(&params.view, *l, &addendums[l])?;
}
#[allow(non_snake_case)]
let R = {
B.values().map(|B| B[0]).sum::<C::G>() + (B.values().map(|B| B[1]).sum::<C::G>() * binding)
};
let share = C::F_to_bytes(
&params.algorithm.sign_share(
&params.view,
R,
binding,
our_preprocess.nonces[0] + (our_preprocess.nonces[1] * binding),
msg
)
);
let mut Rs = Vec::with_capacity(nonces.len());
for n in 0 .. nonces.len() {
Rs.push(vec![C::G::identity(); nonces[n].len()]);
#[allow(non_snake_case)]
for g in 0 .. nonces[n].len() {
Rs[n][g] = {
B.values().map(|B| B[n][g][0]).sum::<C::G>() +
(B.values().map(|B| B[n][g][1]).sum::<C::G>() * binding)
};
}
}
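  // Each aggregate nonce follows FROST's R = sum(D_l) + (binding * sum(E_l)) across all signers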
Ok((Package { B, binding, R, share: share.clone() }, share))
let share = params.algorithm.sign_share(
&params.view,
&Rs,
&our_preprocess.nonces.iter().map(
|nonces| nonces[0] + (nonces[1] * binding)
).collect::<Vec<_>>(),
msg
).to_repr().as_ref().to_vec();
Ok((Package { B, binding, Rs, share: share.clone() }, share))
}
// This doesn't check the signing set is as expected, so unexpected changes can cause false
// blame of legitimate participants still using the original, expected signing set. This
// library could be made more robust in that regard
fn complete<C: Curve, A: Algorithm<C>>(
sign_params: &Params<C, A>,
sign: Package<C>,
@ -200,7 +269,7 @@ fn complete<C: Curve, A: Algorithm<C>>(
let mut responses = HashMap::new();
let mut sum = C::F::zero();
for l in &sign_params.view.included {
let part = C::F_from_slice(&shares[l]).map_err(|_| FrostError::InvalidShare(*l))?;
let part = F_from_slice::<C::F>(&shares[l]).map_err(|_| FrostError::InvalidShare(*l))?;
sum += part;
responses.insert(*l, part);
}
@ -208,7 +277,7 @@ fn complete<C: Curve, A: Algorithm<C>>(
// Perform signature validation instead of individual share validation
// For the success route, which should be much more frequent, this should be faster
// It also acts as an integrity check of this library's signing function
let res = sign_params.algorithm.verify(sign_params.view.group_key, sign.R, sum);
let res = sign_params.algorithm.verify(sign_params.view.group_key, &sign.Rs, sum);
if let Some(res) = res {
return Ok(res);
}
@ -219,7 +288,11 @@ fn complete<C: Curve, A: Algorithm<C>>(
if !sign_params.algorithm.verify_share(
*l,
sign_params.view.verification_share(*l),
sign.B[l][0] + (sign.B[l][1] * sign.binding),
&sign.B[l].iter().map(
|nonces| nonces.iter().map(
|commitments| commitments[0] + (commitments[1] * sign.binding)
).collect()
).collect::<Vec<_>>(),
responses[l]
) {
Err(FrostError::InvalidShare(*l))?;
@ -234,31 +307,21 @@ fn complete<C: Curve, A: Algorithm<C>>(
)
}
/// State of a Sign machine
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum State {
Fresh,
Preprocessed,
Signed,
Complete,
}
impl fmt::Display for State {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
}
}
pub trait StateMachine {
pub trait PreprocessMachine {
type Signature: Clone + PartialEq + fmt::Debug;
type SignMachine: SignMachine<Self::Signature>;
/// Perform the preprocessing round required in order to sign
/// Returns a byte vector which must be transmitted to all parties selected for this signing
/// process, over an authenticated channel
fn preprocess<R: RngCore + CryptoRng>(
&mut self,
self,
rng: &mut R
) -> Result<Vec<u8>, FrostError>;
) -> (Self::SignMachine, Vec<u8>);
}
pub trait SignMachine<S> {
type SignatureMachine: SignatureMachine<S>;
/// Sign a message
/// Takes in the other participants' commitments, keyed by participant index, except for our own
@ -266,107 +329,88 @@ pub trait StateMachine {
/// index i which is locally handled. Returns a byte vector representing a share of the signature
/// for every other participant to receive, over an authenticated channel
fn sign(
&mut self,
self,
commitments: HashMap<u16, Vec<u8>>,
msg: &[u8],
) -> Result<Vec<u8>, FrostError>;
) -> Result<(Self::SignatureMachine, Vec<u8>), FrostError>;
}
pub trait SignatureMachine<S> {
/// Complete signing
/// Takes in everyone else's shares submitted to us, keyed by participant index; our own entry
/// is handled locally. Returns the completed signature
fn complete(&mut self, shares: HashMap<u16, Vec<u8>>) -> Result<Self::Signature, FrostError>;
fn multisig_params(&self) -> MultisigParams;
fn state(&self) -> State;
fn complete(self, shares: HashMap<u16, Vec<u8>>) -> Result<S, FrostError>;
}
/// State machine which manages signing for an arbitrary signature algorithm
#[allow(non_snake_case)]
pub struct AlgorithmMachine<C: Curve, A: Algorithm<C>> {
params: Params<C, A>
}
pub struct AlgorithmSignMachine<C: Curve, A: Algorithm<C>> {
params: Params<C, A>,
state: State,
preprocess: Option<PreprocessPackage<C>>,
sign: Option<Package<C>>,
preprocess: PreprocessPackage<C>,
}
pub struct AlgorithmSignatureMachine<C: Curve, A: Algorithm<C>> {
params: Params<C, A>,
sign: Package<C>,
}
impl<C: Curve, A: Algorithm<C>> AlgorithmMachine<C, A> {
/// Creates a new machine to generate a key for the specified curve in the specified multisig
pub fn new(
algorithm: A,
keys: Rc<MultisigKeys<C>>,
keys: Arc<FrostKeys<C>>,
included: &[u16],
) -> Result<AlgorithmMachine<C, A>, FrostError> {
Ok(
AlgorithmMachine {
params: Params::new(algorithm, keys, included)?,
state: State::Fresh,
preprocess: None,
sign: None,
}
)
Ok(AlgorithmMachine { params: Params::new(algorithm, keys, included)? })
}
pub(crate) fn unsafe_override_preprocess(
self,
preprocess: PreprocessPackage<C>
) -> (AlgorithmSignMachine<C, A>, Vec<u8>) {
let serialized = preprocess.serialized.clone();
(AlgorithmSignMachine { params: self.params, preprocess }, serialized)
}
}
impl<C: Curve, A: Algorithm<C>> StateMachine for AlgorithmMachine<C, A> {
impl<C: Curve, A: Algorithm<C>> PreprocessMachine for AlgorithmMachine<C, A> {
type Signature = A::Signature;
type SignMachine = AlgorithmSignMachine<C, A>;
fn preprocess<R: RngCore + CryptoRng>(
&mut self,
self,
rng: &mut R
) -> Result<Vec<u8>, FrostError> {
if self.state != State::Fresh {
Err(FrostError::InvalidSignTransition(State::Fresh, self.state))?;
}
let preprocess = preprocess::<R, C, A>(rng, &mut self.params);
) -> (Self::SignMachine, Vec<u8>) {
let mut params = self.params;
let preprocess = preprocess::<R, C, A>(rng, &mut params);
let serialized = preprocess.serialized.clone();
self.preprocess = Some(preprocess);
self.state = State::Preprocessed;
Ok(serialized)
}
fn sign(
&mut self,
commitments: HashMap<u16, Vec<u8>>,
msg: &[u8],
) -> Result<Vec<u8>, FrostError> {
if self.state != State::Preprocessed {
Err(FrostError::InvalidSignTransition(State::Preprocessed, self.state))?;
}
let (sign, serialized) = sign_with_share(
&mut self.params,
self.preprocess.take().unwrap(),
commitments,
msg,
)?;
self.sign = Some(sign);
self.state = State::Signed;
Ok(serialized)
}
fn complete(&mut self, shares: HashMap<u16, Vec<u8>>) -> Result<A::Signature, FrostError> {
if self.state != State::Signed {
Err(FrostError::InvalidSignTransition(State::Signed, self.state))?;
}
let signature = complete(
&self.params,
self.sign.take().unwrap(),
shares,
)?;
self.state = State::Complete;
Ok(signature)
}
fn multisig_params(&self) -> MultisigParams {
self.params.multisig_params().clone()
}
fn state(&self) -> State {
self.state
(AlgorithmSignMachine { params, preprocess }, serialized)
}
}
impl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachine<C, A> {
type SignatureMachine = AlgorithmSignatureMachine<C, A>;
fn sign(
self,
commitments: HashMap<u16, Vec<u8>>,
msg: &[u8]
) -> Result<(Self::SignatureMachine, Vec<u8>), FrostError> {
let mut params = self.params;
let (sign, serialized) = sign_with_share(&mut params, self.preprocess, commitments, msg)?;
Ok((AlgorithmSignatureMachine { params, sign }, serialized))
}
}
impl<
C: Curve,
A: Algorithm<C>
> SignatureMachine<A::Signature> for AlgorithmSignatureMachine<C, A> {
fn complete(self, shares: HashMap<u16, Vec<u8>>) -> Result<A::Signature, FrostError> {
complete(&self.params, self.sign, shares)
}
}
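A minimal sketch of the full signing flow for one party, assuming a hypothetical
broadcast_and_receive helper for the authenticated channel (not part of this library) which
returns the other participants' messages keyed by their index:

  let machine = AlgorithmMachine::new(algorithm, keys, &included)?;
  let (machine, preprocess) = machine.preprocess(&mut rng);
  let commitments = broadcast_and_receive(preprocess);
  let (machine, share) = machine.sign(commitments, msg)?;
  let shares = broadcast_and_receive(share);
  let signature = machine.complete(shares)?;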

View file

@ -1,9 +1,8 @@
use rand_core::{RngCore, CryptoRng};
use crate::{
Curve, MultisigKeys,
tests::{schnorr::{sign, verify, batch_verify}, key_gen}
};
use group::{ff::Field, Group};
use crate::{Curve, FrostKeys, tests::key_gen};
// Test generation of FROST keys
fn key_generation<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
@ -14,21 +13,30 @@ fn key_generation<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
// Test serialization of generated keys
fn keys_serialization<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
for (_, keys) in key_gen::<_, C>(rng) {
assert_eq!(&MultisigKeys::<C>::deserialize(&keys.serialize()).unwrap(), &*keys);
assert_eq!(&FrostKeys::<C>::deserialize(&keys.serialize()).unwrap(), &*keys);
}
}
pub fn test_curve<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
// TODO: Test the Curve functions themselves
// Test Schnorr signatures work as expected
// This is a bit unnecessary, as they should for any valid curve, yet this provides tests with
// meaning, which the above tests won't have
sign::<_, C>(rng);
verify::<_, C>(rng);
batch_verify::<_, C>(rng);
// Test successful multiexp, with enough pairs to trigger its variety of algorithms
// Multiexp has its own tests, yet only against k256 and Ed25519 (which should be sufficient
// as-is to prove multiexp), and this doesn't hurt
{
let mut pairs = Vec::with_capacity(1000);
let mut sum = C::G::identity();
for _ in 0 .. 10 {
for _ in 0 .. 100 {
pairs.push((C::F::random(&mut *rng), C::GENERATOR * C::F::random(&mut *rng)));
sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;
}
assert_eq!(multiexp::multiexp(&pairs), sum);
assert_eq!(multiexp::multiexp_vartime(&pairs), sum);
}
}
// Test FROST key generation and serialization of MultisigKeys works as expected
// Test FROST key generation and serialization of FrostKeys works as expected
key_generation::<_, C>(rng);
keys_serialization::<_, C>(rng);
}

View file

@ -0,0 +1,77 @@
use rand::rngs::OsRng;
use crate::{curve, tests::vectors::{Vectors, test_with_vectors}};
#[cfg(any(test, feature = "ristretto"))]
#[test]
fn ristretto_vectors() {
test_with_vectors::<_, curve::Ristretto, curve::IetfRistrettoHram>(
&mut OsRng,
Vectors {
threshold: 2,
shares: &[
"5c3430d391552f6e60ecdc093ff9f6f4488756aa6cebdbad75a768010b8f830e",
"b06fc5eac20b4f6e1b271d9df2343d843e1e1fb03c4cbb673f2872d459ce6f01",
"f17e505f0e2581c6acfe54d3846a622834b5e7b50cad9a2109a97ba7a80d5c04"
],
group_secret: "1b25a55e463cfd15cf14a5d3acc3d15053f08da49c8afcf3ab265f2ebc4f970b",
group_key: "e2a62f39eede11269e3bd5a7d97554f5ca384f9f6d3dd9c3c0d05083c7254f57",
msg: "74657374",
included: &[1, 3],
nonces: &[
[
"b358743151e33d84bf00c12f71808f4103957c3e2cabab7b895c436b5e70f90c",
"7bd112153b9ae1ab9b31f5e78f61f5c4ca9ee67b7ea6d1181799c409d14c350c"
],
[
"22acad88478e0d0373a991092a322ebd1b9a2dad90451a976d0db3215426af0e",
"9155e3d7bcf7cd468b980c7e20b2c77cbdfbe33a1dcae031fd8bc6b1403f4b04"
]
],
sig_shares: &[
"ff801b4e0839faa67f16dee4127b9f7fbcf5fd007900257b0e2bbc02cbe5e709",
"afdf5481023c855bf3411a5c8a5fafa92357296a078c3b80dc168f294cb4f504"
],
sig: "deae61af10e8ee48ba492573592fba547f5debeff6bd6e2024e8673584746f5e".to_owned() +
"ae6070cf0a757f027358f8409dda4e29e04c276b808c60fbea414b2c179add0e"
}
);
}
#[cfg(feature = "ed25519")]
#[test]
fn ed25519_vectors() {
test_with_vectors::<_, curve::Ed25519, curve::IetfEd25519Hram>(
&mut OsRng,
Vectors {
threshold: 2,
shares: &[
"929dcc590407aae7d388761cddb0c0db6f5627aea8e217f4a033f2ec83d93509",
"a91e66e012e4364ac9aaa405fcafd370402d9859f7b6685c07eed76bf409e80d",
"d3cb090a075eb154e82fdb4b3cb507f110040905468bb9c46da8bdea643a9a02"
],
group_secret: "7b1c33d3f5291d85de664833beb1ad469f7fb6025a0ec78b3a790c6e13a98304",
group_key: "15d21ccd7ee42959562fc8aa63224c8851fb3ec85a3faf66040d380fb9738673",
msg: "74657374",
included: &[1, 3],
nonces: &[
[
"8c76af04340e83bb5fc427c117d38347fc8ef86d5397feea9aa6412d96c05b0a",
"14a37ddbeae8d9e9687369e5eb3c6d54f03dc19d76bb54fb5425131bc37a600b"
],
[
"5ca39ebab6874f5e7b5089f3521819a2aa1e2cf738bae6974ee80555de2ef70e",
"0afe3650c4815ff37becd3c6948066e906e929ea9b8f546c74e10002dbcc150c"
]
],
sig_shares: &[
"4369474a398aa10357b60d683da91ea6a767dcf53fd541a8ed6b4d780827ea0a",
"32fcc690d926075e45d2dfb746bab71447943cddbefe80d122c39174aa2e1004"
],
sig: "2b8d9c6995333c5990e3a3dd6568785539d3322f7f0376452487ea35cfda587b".to_owned() +
"75650edb12b1a8619c88ed1f8463d6baeefb18d3fed3c279102fdfecb255fa0e"
}
);
}

View file

@ -0,0 +1,55 @@
use rand::rngs::OsRng;
#[cfg(feature = "secp256k1")]
use crate::tests::{curve::test_curve, schnorr::test_schnorr};
#[cfg(feature = "secp256k1")]
use crate::curve::Secp256k1;
#[cfg(feature = "p256")]
use crate::tests::vectors::{Vectors, test_with_vectors};
#[cfg(feature = "p256")]
use crate::curve::{P256, IetfP256Hram};
#[cfg(feature = "secp256k1")]
#[test]
fn secp256k1_non_ietf() {
test_curve::<_, Secp256k1>(&mut OsRng);
test_schnorr::<_, Secp256k1>(&mut OsRng);
}
#[cfg(feature = "p256")]
#[test]
fn p256_vectors() {
test_with_vectors::<_, P256, IetfP256Hram>(
&mut OsRng,
Vectors {
threshold: 2,
shares: &[
"0c9c1a0fe806c184add50bbdcac913dda73e482daf95dcb9f35dbb0d8a9f7731",
"8d8e787bef0ff6c2f494ca45f4dad198c6bee01212d6c84067159c52e1863ad5",
"0e80d6e8f6192c003b5488ce1eec8f5429587d48cf001541e713b2d53c09d928"
],
group_secret: "8ba9bba2e0fd8c4767154d35a0b7562244a4aaf6f36c8fb8735fa48b301bd8de",
group_key: "023a309ad94e9fe8a7ba45dfc58f38bf091959d3c99cfbd02b4dc00585ec45ab70",
msg: "74657374",
included: &[1, 3],
nonces: &[
[
"081617b24375e069b39f649d4c4ce2fba6e38b73e7c16759de0b6079a22c4c7e",
"4de5fb77d99f03a2491a83a6a4cb91ca3c82a3f34ce94cec939174f47c9f95dd"
],
[
"d186ea92593f83ea83181b184d41aa93493301ac2bc5b4b1767e94d2db943e38",
"486e2ee25a3fbc8e6399d748b077a2755fde99fa85cc24fa647ea4ebf5811a15"
]
],
sig_shares: &[
"9e4d8865faf8c7b3193a3b35eda3d9e12118447114b1e7d5b4809ea28067f8a9",
"b7d094eab6305ae74daeed1acd31abba9ab81f638d38b72c132cb25a5dfae1fc"
],
sig: "0342c14c77f9d4ef9b8bd64fb0d7bbfdb9f8216a44e5f7bbe6ac0f3ed5e1a57367".to_owned() +
"561e1d51b129229966e92850bad5859bfee96926fad3007cd3f38639e1ffb554"
}
);
}

View file

@ -1,2 +1,4 @@
mod secp256k1;
mod schnorr;
#[cfg(any(test, feature = "dalek"))]
mod dalek;
#[cfg(feature = "kp256")]
mod kp256;

View file

@ -1,42 +0,0 @@
use std::rc::Rc;
use rand::rngs::OsRng;
use crate::{
Curve, schnorr, algorithm::{Hram, Schnorr},
tests::{key_gen, algorithm_machines, sign as sign_test, literal::secp256k1::{Secp256k1, TestHram}}
};
const MESSAGE: &[u8] = b"Hello World";
#[test]
fn sign() {
sign_test(
&mut OsRng,
algorithm_machines(
&mut OsRng,
Schnorr::<Secp256k1, TestHram>::new(),
&key_gen::<_, Secp256k1>(&mut OsRng)
),
MESSAGE
);
}
#[test]
fn sign_with_offset() {
let mut keys = key_gen::<_, Secp256k1>(&mut OsRng);
let group_key = keys[&1].group_key();
let offset = Secp256k1::hash_to_F(b"offset");
for i in 1 ..= u16::try_from(keys.len()).unwrap() {
keys.insert(i, Rc::new(keys[&i].offset(offset)));
}
let offset_key = group_key + (Secp256k1::generator_table() * offset);
let sig = sign_test(
&mut OsRng,
algorithm_machines(&mut OsRng, Schnorr::<Secp256k1, TestHram>::new(), &keys),
MESSAGE
);
assert!(schnorr::verify(offset_key, TestHram::hram(&sig.R, &offset_key, MESSAGE), &sig));
}

View file

@ -1,120 +0,0 @@
use core::convert::TryInto;
use rand::rngs::OsRng;
use ff::PrimeField;
use group::GroupEncoding;
use sha2::{Digest, Sha256, Sha512};
use k256::{
elliptic_curve::{generic_array::GenericArray, bigint::{ArrayEncoding, U512}, ops::Reduce},
Scalar,
ProjectivePoint
};
use crate::{CurveError, Curve, algorithm::Hram, tests::curve::test_curve};
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Secp256k1;
impl Curve for Secp256k1 {
type F = Scalar;
type G = ProjectivePoint;
type T = ProjectivePoint;
fn id() -> String {
"secp256k1".to_string()
}
fn id_len() -> u8 {
u8::try_from(Self::id().len()).unwrap()
}
fn generator() -> Self::G {
Self::G::GENERATOR
}
fn generator_table() -> Self::T {
Self::G::GENERATOR
}
fn little_endian() -> bool {
false
}
// The IETF draft doesn't specify a secp256k1 ciphersuite
// This test just uses the simplest ciphersuite which would still be viable to deploy
// The comparable P-256 curve uses hash_to_field from the Hash To Curve IETF draft with a context
// string and further DST for H1 ("rho") and H3 ("digest"). It's not currently worth it to add
// that weight, yet if secp256k1 is ever officially acknowledged (not just a testing curve), it
// must be properly implemented.
fn hash_msg(msg: &[u8]) -> Vec<u8> {
(&Sha256::digest(msg)).to_vec()
}
fn hash_binding_factor(binding: &[u8]) -> Self::F {
Self::hash_to_F(&[b"rho", binding].concat())
}
// Use wide reduction for security
fn hash_to_F(data: &[u8]) -> Self::F {
Scalar::from_uint_reduced(U512::from_be_byte_array(Sha512::digest(data)))
}
fn F_len() -> usize {
32
}
fn G_len() -> usize {
33
}
fn F_from_slice(slice: &[u8]) -> Result<Self::F, CurveError> {
let bytes: [u8; 32] = slice.try_into()
.map_err(|_| CurveError::InvalidLength(32, slice.len()))?;
let scalar = Scalar::from_repr(bytes.into());
if scalar.is_none().unwrap_u8() == 1 {
Err(CurveError::InvalidScalar)?;
}
Ok(scalar.unwrap())
}
fn G_from_slice(slice: &[u8]) -> Result<Self::G, CurveError> {
let point = ProjectivePoint::from_bytes(GenericArray::from_slice(slice));
if point.is_none().unwrap_u8() == 1 {
Err(CurveError::InvalidScalar)?;
}
Ok(point.unwrap())
}
fn F_to_bytes(f: &Self::F) -> Vec<u8> {
(&f.to_bytes()).to_vec()
}
fn G_to_bytes(g: &Self::G) -> Vec<u8> {
(&g.to_bytes()).to_vec()
}
}
#[allow(non_snake_case)]
#[derive(Clone)]
pub struct TestHram {}
impl Hram<Secp256k1> for TestHram {
#[allow(non_snake_case)]
fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
Scalar::from_uint_reduced(
U512::from_be_byte_array(
Sha512::new()
.chain_update(Secp256k1::G_to_bytes(R))
.chain_update(Secp256k1::G_to_bytes(A))
.chain_update(m)
.finalize()
)
)
}
}
#[test]
fn secp256k1_curve() {
test_curve::<_, Secp256k1>(&mut OsRng);
}

View file

@ -1,23 +1,22 @@
use std::{rc::Rc, collections::HashMap};
use std::{sync::Arc, collections::HashMap};
use rand_core::{RngCore, CryptoRng};
use ff::Field;
use group::ff::Field;
use crate::{
Curve,
MultisigParams, MultisigKeys,
FrostParams, FrostKeys,
lagrange,
key_gen,
key_gen::KeyGenMachine,
algorithm::Algorithm,
sign::{StateMachine, AlgorithmMachine}
sign::{PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine}
};
// Internal tests
mod schnorr;
// Test suites for public usage
pub mod curve;
pub mod schnorr;
pub mod vectors;
// Literal test definitions to run during `cargo test`
#[cfg(test)]
@ -37,50 +36,37 @@ pub fn clone_without<K: Clone + std::cmp::Eq + std::hash::Hash, V: Clone>(
pub fn key_gen<R: RngCore + CryptoRng, C: Curve>(
rng: &mut R
) -> HashMap<u16, Rc<MultisigKeys<C>>> {
let mut params = HashMap::new();
) -> HashMap<u16, Arc<FrostKeys<C>>> {
let mut machines = HashMap::new();
let mut commitments = HashMap::new();
for i in 1 ..= PARTICIPANTS {
params.insert(
i,
MultisigParams::new(
THRESHOLD,
PARTICIPANTS,
i
).unwrap()
);
machines.insert(
i,
key_gen::StateMachine::<C>::new(
params[&i],
"FROST test key_gen".to_string()
)
);
commitments.insert(
i,
machines.get_mut(&i).unwrap().generate_coefficients(rng).unwrap()
let machine = KeyGenMachine::<C>::new(
FrostParams::new(THRESHOLD, PARTICIPANTS, i).unwrap(),
"FROST Test key_gen".to_string()
);
let (machine, these_commitments) = machine.generate_coefficients(rng);
machines.insert(i, machine);
commitments.insert(i, these_commitments);
}
let mut secret_shares = HashMap::new();
for (l, machine) in machines.iter_mut() {
secret_shares.insert(
*l,
let mut machines = machines.drain().map(|(l, machine)| {
let (machine, shares) = machine.generate_secret_shares(
rng,
// clone_without isn't necessary, as this machine's own data will be inserted without
// conflict, yet using it ensures the machine's own data is actually inserted as expected
machine.generate_secret_shares(rng, clone_without(&commitments, l)).unwrap()
);
}
clone_without(&commitments, &l)
).unwrap();
secret_shares.insert(l, shares);
(l, machine)
}).collect::<HashMap<_, _>>();
let mut verification_shares = None;
let mut group_key = None;
let mut keys = HashMap::new();
for (i, machine) in machines.iter_mut() {
machines.drain().map(|(i, machine)| {
let mut our_secret_shares = HashMap::new();
for (l, shares) in &secret_shares {
if i == l {
if i == *l {
continue;
}
our_secret_shares.insert(*l, shares[&i].clone());
@ -99,13 +85,11 @@ pub fn key_gen<R: RngCore + CryptoRng, C: Curve>(
}
assert_eq!(group_key.unwrap(), these_keys.group_key());
keys.insert(*i, Rc::new(these_keys));
}
keys
(i, Arc::new(these_keys))
}).collect::<HashMap<_, _>>()
}
pub fn recover<C: Curve>(keys: &HashMap<u16, MultisigKeys<C>>) -> C::F {
pub fn recover<C: Curve>(keys: &HashMap<u16, FrostKeys<C>>) -> C::F {
let first = keys.values().next().expect("no keys provided");
assert!(keys.len() >= first.params().t().into(), "not enough keys provided");
let included = keys.keys().cloned().collect::<Vec<_>>();
@ -114,14 +98,14 @@ pub fn recover<C: Curve>(keys: &HashMap<u16, MultisigKeys<C>>) -> C::F {
C::F::zero(),
|accum, (i, keys)| accum + (keys.secret_share() * lagrange::<C::F>(*i, &included))
);
assert_eq!(C::generator_table() * group_private, first.group_key(), "failed to recover keys");
assert_eq!(C::GENERATOR * group_private, first.group_key(), "failed to recover keys");
group_private
}
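// i.e. the group secret is interpolated as sum(lagrange(i) * share_i) over the included set,
// evaluating the shared polynomial at zero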
pub fn algorithm_machines<R: RngCore, C: Curve, A: Algorithm<C>>(
rng: &mut R,
algorithm: A,
keys: &HashMap<u16, Rc<MultisigKeys<C>>>,
keys: &HashMap<u16, Arc<FrostKeys<C>>>,
) -> HashMap<u16, AlgorithmMachine<C, A>> {
let mut included = vec![];
while included.len() < usize::from(keys[&1].params().t()) {
@ -148,27 +132,28 @@ pub fn algorithm_machines<R: RngCore, C: Curve, A: Algorithm<C>>(
).collect()
}
pub fn sign<R: RngCore + CryptoRng, M: StateMachine>(
pub fn sign<R: RngCore + CryptoRng, M: PreprocessMachine>(
rng: &mut R,
mut machines: HashMap<u16, M>,
msg: &[u8]
) -> M::Signature {
let mut commitments = HashMap::new();
for (i, machine) in machines.iter_mut() {
commitments.insert(*i, machine.preprocess(rng).unwrap());
}
let mut machines = machines.drain().map(|(i, machine)| {
let (machine, preprocess) = machine.preprocess(rng);
commitments.insert(i, preprocess);
(i, machine)
}).collect::<HashMap<_, _>>();
let mut shares = HashMap::new();
for (i, machine) in machines.iter_mut() {
shares.insert(
*i,
machine.sign(clone_without(&commitments, i), msg).unwrap()
);
}
let mut machines = machines.drain().map(|(i, machine)| {
let (machine, share) = machine.sign(clone_without(&commitments, &i), msg).unwrap();
shares.insert(i, share);
(i, machine)
}).collect::<HashMap<_, _>>();
let mut signature = None;
for (i, machine) in machines.iter_mut() {
let sig = machine.complete(clone_without(&shares, i)).unwrap();
for (i, machine) in machines.drain() {
let sig = machine.complete(clone_without(&shares, &i)).unwrap();
if signature.is_none() {
signature = Some(sig.clone());
}

View file

@ -1,16 +1,21 @@
use std::{marker::PhantomData, sync::Arc, collections::HashMap};
use rand_core::{RngCore, CryptoRng};
use ff::Field;
use group::{ff::Field, GroupEncoding};
use crate::{Curve, schnorr, algorithm::SchnorrSignature};
use crate::{
Curve, FrostKeys, schnorr::{self, SchnorrSignature}, algorithm::{Hram, Schnorr},
tests::{key_gen, algorithm_machines, sign as sign_test}
};
pub(crate) fn sign<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
pub(crate) fn core_sign<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
let private_key = C::F::random(&mut *rng);
let nonce = C::F::random(&mut *rng);
let challenge = C::F::random(rng); // Doesn't bother to craft an HRAM
assert!(
schnorr::verify::<C>(
C::generator_table() * private_key,
C::GENERATOR * private_key,
challenge,
&schnorr::sign(private_key, nonce, challenge)
)
@ -20,17 +25,17 @@ pub(crate) fn sign<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
// The above sign function verifies signing works
// This verifies invalid signatures don't pass, using zero signatures, which should effectively be
// random
pub(crate) fn verify<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
pub(crate) fn core_verify<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
assert!(
!schnorr::verify::<C>(
C::generator_table() * C::F::random(&mut *rng),
C::GENERATOR * C::F::random(&mut *rng),
C::F::random(rng),
&SchnorrSignature { R: C::generator_table() * C::F::zero(), s: C::F::zero() }
&SchnorrSignature { R: C::GENERATOR * C::F::zero(), s: C::F::zero() }
)
);
}
pub(crate) fn batch_verify<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
pub(crate) fn core_batch_verify<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
// Create 5 signatures
let mut keys = vec![];
let mut challenges = vec![];
@ -43,7 +48,7 @@ pub(crate) fn batch_verify<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
// Batch verify
let triplets = (0 .. 5).map(
|i| (u16::try_from(i + 1).unwrap(), C::generator_table() * keys[i], challenges[i], sigs[i])
|i| (u16::try_from(i + 1).unwrap(), C::GENERATOR * keys[i], challenges[i], sigs[i])
).collect::<Vec<_>>();
schnorr::batch_verify(rng, &triplets).unwrap();
@ -71,3 +76,56 @@ pub(crate) fn batch_verify<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
}
}
}
fn sign_core<R: RngCore + CryptoRng, C: Curve>(
rng: &mut R,
group_key: C::G,
keys: &HashMap<u16, Arc<FrostKeys<C>>>
) {
const MESSAGE: &'static [u8] = b"Hello, World!";
let machines = algorithm_machines(rng, Schnorr::<C, TestHram<C>>::new(), keys);
let sig = sign_test(&mut *rng, machines, MESSAGE);
assert!(schnorr::verify(group_key, TestHram::<C>::hram(&sig.R, &group_key, MESSAGE), &sig));
}
#[derive(Clone)]
pub struct TestHram<C: Curve> {
_curve: PhantomData<C>
}
impl<C: Curve> Hram<C> for TestHram<C> {
#[allow(non_snake_case)]
fn hram(R: &C::G, A: &C::G, m: &[u8]) -> C::F {
C::hash_to_F(b"challenge", &[R.to_bytes().as_ref(), A.to_bytes().as_ref(), m].concat())
}
}
fn sign<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
let keys = key_gen::<_, C>(&mut *rng);
sign_core(rng, keys[&1].group_key(), &keys);
}
fn sign_with_offset<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
let mut keys = key_gen::<_, C>(&mut *rng);
let group_key = keys[&1].group_key();
let offset = C::hash_to_F(b"FROST Test sign_with_offset", b"offset");
for i in 1 ..= u16::try_from(keys.len()).unwrap() {
keys.insert(i, Arc::new(keys[&i].offset(offset)));
}
let offset_key = group_key + (C::GENERATOR * offset);
sign_core(rng, offset_key, &keys);
}
pub fn test_schnorr<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
// Test Schnorr signatures work as expected
// This is a bit unnecessary, as they should for any valid curve, yet this establishes sanity
core_sign::<_, C>(rng);
core_verify::<_, C>(rng);
core_batch_verify::<_, C>(rng);
// Test Schnorr signatures under FROST
sign::<_, C>(rng);
sign_with_offset::<_, C>(rng);
}

View file

@ -0,0 +1,136 @@
use std::{sync::Arc, collections::HashMap};
use rand_core::{RngCore, CryptoRng};
use group::{ff::PrimeField, GroupEncoding};
use crate::{
curve::{Curve, F_from_slice, G_from_slice}, FrostKeys,
algorithm::{Schnorr, Hram},
sign::{PreprocessPackage, SignMachine, SignatureMachine, AlgorithmMachine},
tests::{curve::test_curve, schnorr::test_schnorr, recover}
};
pub struct Vectors {
pub threshold: u16,
pub shares: &'static [&'static str],
pub group_secret: &'static str,
pub group_key: &'static str,
pub msg: &'static str,
pub included: &'static [u16],
pub nonces: &'static [[&'static str; 2]],
pub sig_shares: &'static [&'static str],
pub sig: String
}
// Load these vectors into FrostKeys by constructing the serialization deserialize expects
fn vectors_to_multisig_keys<C: Curve>(vectors: &Vectors) -> HashMap<u16, FrostKeys<C>> {
let shares = vectors.shares.iter().map(
|secret| F_from_slice::<C::F>(&hex::decode(secret).unwrap()).unwrap()
).collect::<Vec<_>>();
let verification_shares = shares.iter().map(
|secret| C::GENERATOR * secret
).collect::<Vec<_>>();
let mut keys = HashMap::new();
for i in 1 ..= u16::try_from(shares.len()).unwrap() {
let mut serialized = vec![];
serialized.extend(u64::try_from(C::ID.len()).unwrap().to_be_bytes());
serialized.extend(C::ID);
serialized.extend(vectors.threshold.to_be_bytes());
serialized.extend(u16::try_from(shares.len()).unwrap().to_be_bytes());
serialized.extend(i.to_be_bytes());
serialized.extend(shares[usize::from(i) - 1].to_repr().as_ref());
serialized.extend(&hex::decode(vectors.group_key).unwrap());
for share in &verification_shares {
serialized.extend(share.to_bytes().as_ref());
}
let these_keys = FrostKeys::<C>::deserialize(&serialized).unwrap();
assert_eq!(these_keys.params().t(), vectors.threshold);
assert_eq!(usize::from(these_keys.params().n()), shares.len());
assert_eq!(these_keys.params().i(), i);
assert_eq!(these_keys.secret_share(), shares[usize::from(i - 1)]);
assert_eq!(&hex::encode(these_keys.group_key().to_bytes().as_ref()), vectors.group_key);
keys.insert(i, these_keys);
}
keys
}
pub fn test_with_vectors<
R: RngCore + CryptoRng,
C: Curve,
H: Hram<C>
>(rng: &mut R, vectors: Vectors) {
// Do basic tests before trying the vectors
test_curve::<_, C>(&mut *rng);
test_schnorr::<_, C>(rng);
// Test against the vectors
let keys = vectors_to_multisig_keys::<C>(&vectors);
let group_key = G_from_slice::<C::G>(&hex::decode(vectors.group_key).unwrap()).unwrap();
assert_eq!(
C::GENERATOR * F_from_slice::<C::F>(&hex::decode(vectors.group_secret).unwrap()).unwrap(),
group_key
);
assert_eq!(
recover(&keys),
F_from_slice::<C::F>(&hex::decode(vectors.group_secret).unwrap()).unwrap()
);
let mut machines = vec![];
for i in vectors.included {
machines.push((
*i,
AlgorithmMachine::new(
Schnorr::<C, H>::new(),
Arc::new(keys[i].clone()),
vectors.included.clone()
).unwrap()
));
}
let mut commitments = HashMap::new();
let mut c = 0;
let mut machines = machines.drain(..).map(|(i, machine)| {
let nonces = [
F_from_slice::<C::F>(&hex::decode(vectors.nonces[c][0]).unwrap()).unwrap(),
F_from_slice::<C::F>(&hex::decode(vectors.nonces[c][1]).unwrap()).unwrap()
];
c += 1;
let mut serialized = (C::GENERATOR * nonces[0]).to_bytes().as_ref().to_vec();
serialized.extend((C::GENERATOR * nonces[1]).to_bytes().as_ref());
let (machine, serialized) = machine.unsafe_override_preprocess(
PreprocessPackage { nonces: vec![nonces], serialized: serialized.clone() }
);
commitments.insert(i, serialized);
(i, machine)
}).collect::<Vec<_>>();
let mut shares = HashMap::new();
c = 0;
let mut machines = machines.drain(..).map(|(i, machine)| {
let (machine, share) = machine.sign(
commitments.clone(),
&hex::decode(vectors.msg).unwrap()
).unwrap();
assert_eq!(share, hex::decode(vectors.sig_shares[c]).unwrap());
c += 1;
shares.insert(i, share);
(i, machine)
}).collect::<HashMap<_, _>>();
for (_, machine) in machines.drain() {
let sig = machine.complete(shares.clone()).unwrap();
let mut serialized = sig.R.to_bytes().as_ref().to_vec();
serialized.extend(sig.s.to_repr().as_ref());
assert_eq!(hex::encode(serialized), vectors.sig);
}
}

View file

@ -3,13 +3,22 @@ name = "multiexp"
version = "0.1.0"
description = "Multiexponentation algorithms for ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["multiexp", "ff", "group"]
edition = "2021"
[dependencies]
group = "0.11"
ff = "0.12"
group = "0.12"
rand_core = { version = "0.6", optional = true }
[dev-dependencies]
rand_core = "0.6"
k256 = { version = "0.11", features = ["bits"] }
dalek-ff-group = { path = "../dalek-ff-group" }
[features]
batch = ["rand_core"]

View file

@ -0,0 +1,6 @@
# Multiexp
A multiexp implementation for ff/group implementing Straus and Pippenger. A
batch verification API is also available via the "batch" feature, enabling
secure multiexponentiation batch verification over a series of statements which
should each sum to zero, identifying a failing statement via binary search if
they don't.
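
A minimal usage sketch of the batch API (assuming the `batch` feature, plus the
`k256` crate with its `bits` feature, as this crate's own tests use; `new`,
`queue`, and `verify_with_vartime_blame` are the functions this commit defines):

```rust
use rand_core::OsRng;
use ff::Field;
use group::Group;
use k256::{Scalar, ProjectivePoint};
use multiexp::BatchVerifier;

fn main() {
  // Each queued statement is a set of (scalar, point) pairs which should sum to the identity
  let mut verifier = BatchVerifier::<usize, ProjectivePoint>::new(2);
  for id in 0 .. 2 {
    let scalar = Scalar::random(&mut OsRng);
    let point = ProjectivePoint::generator() * scalar;
    // scalar * G + (-1 * (scalar * G)) == identity
    verifier.queue(&mut OsRng, id, [(scalar, ProjectivePoint::generator()), (-Scalar::one(), point)]);
  }
  // Ok(()) if every queued statement holds, else Err(id) blaming a failing statement
  assert_eq!(verifier.verify_with_vartime_blame(), Ok(()));
}
```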

View file

@ -0,0 +1,84 @@
use rand_core::{RngCore, CryptoRng};
use ff::{Field, PrimeFieldBits};
use group::Group;
use crate::{multiexp, multiexp_vartime};
#[cfg(feature = "batch")]
pub struct BatchVerifier<Id: Copy, G: Group>(Vec<(Id, Vec<(G::Scalar, G)>)>);
#[cfg(feature = "batch")]
impl<Id: Copy, G: Group> BatchVerifier<Id, G> where <G as Group>::Scalar: PrimeFieldBits {
pub fn new(capacity: usize) -> BatchVerifier<Id, G> {
BatchVerifier(Vec::with_capacity(capacity))
}
pub fn queue<
R: RngCore + CryptoRng,
I: IntoIterator<Item = (G::Scalar, G)>
>(&mut self, rng: &mut R, id: Id, pairs: I) {
// Define a unique scalar factor for this set of variables so individual items can't overlap
let u = if self.0.len() == 0 {
G::Scalar::one()
} else {
let mut weight;
// Ensure it's non-zero, as a zero scalar would cause this item to pass no matter what
while {
weight = G::Scalar::random(&mut *rng);
weight.is_zero().into()
} {}
weight
};
self.0.push((id, pairs.into_iter().map(|(scalar, point)| (scalar * u, point)).collect()));
}
#[must_use]
pub fn verify(&self) -> bool {
multiexp(
&self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::<Vec<_>>()
).is_identity().into()
}
#[must_use]
pub fn verify_vartime(&self) -> bool {
multiexp_vartime(
&self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::<Vec<_>>()
).is_identity().into()
}
// A constant time variant may be beneficial for robust protocols
pub fn blame_vartime(&self) -> Option<Id> {
let mut slice = self.0.as_slice();
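// Binary search down to a single item, following whichever half fails to sum to the identity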
while slice.len() > 1 {
let split = slice.len() / 2;
if multiexp_vartime(
&slice[.. split].iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::<Vec<_>>()
).is_identity().into() {
slice = &slice[split ..];
} else {
slice = &slice[.. split];
}
}
slice.get(0).filter(
|(_, value)| !bool::from(multiexp_vartime(value).is_identity())
).map(|(id, _)| *id)
}
pub fn verify_with_vartime_blame(&self) -> Result<(), Id> {
if self.verify() {
Ok(())
} else {
Err(self.blame_vartime().unwrap())
}
}
pub fn verify_vartime_with_vartime_blame(&self) -> Result<(), Id> {
if self.verify_vartime() {
Ok(())
} else {
Err(self.blame_vartime().unwrap())
}
}
}

View file

@ -1,156 +1,176 @@
use group::{ff::PrimeField, Group};
use ff::PrimeFieldBits;
use group::Group;
mod straus;
use straus::*;
mod pippenger;
use pippenger::*;
#[cfg(feature = "batch")]
use group::ff::Field;
mod batch;
#[cfg(feature = "batch")]
use rand_core::{RngCore, CryptoRng};
pub use batch::BatchVerifier;
fn prep<
G: Group,
I: IntoIterator<Item = (G::Scalar, G)>
>(pairs: I, little: bool) -> (Vec<Vec<u8>>, Vec<[G; 16]>) {
let mut nibbles = vec![];
let mut tables = vec![];
for pair in pairs.into_iter() {
let p = nibbles.len();
nibbles.push(vec![]);
{
let mut repr = pair.0.to_repr();
let bytes = repr.as_mut();
if !little {
bytes.reverse();
}
#[cfg(test)]
mod tests;
nibbles[p].resize(bytes.len() * 2, 0);
for i in 0 .. bytes.len() {
nibbles[p][i * 2] = bytes[i] & 0b1111;
nibbles[p][(i * 2) + 1] = (bytes[i] >> 4) & 0b1111;
}
pub(crate) fn prep_bits<G: Group>(
pairs: &[(G::Scalar, G)],
window: u8
) -> Vec<Vec<u8>> where G::Scalar: PrimeFieldBits {
let w_usize = usize::from(window);
let mut groupings = vec![];
for pair in pairs {
let p = groupings.len();
let bits = pair.0.to_le_bits();
groupings.push(vec![0; (bits.len() + (w_usize - 1)) / w_usize]);
for (i, bit) in bits.into_iter().enumerate() {
let bit = bit as u8;
debug_assert_eq!(bit | 1, 1);
groupings[p][i / w_usize] |= bit << (i % w_usize);
}
}
tables.push([G::identity(); 16]);
groupings
}
pub(crate) fn prep_tables<G: Group>(
pairs: &[(G::Scalar, G)],
window: u8
) -> Vec<Vec<G>> {
let mut tables = Vec::with_capacity(pairs.len());
for pair in pairs {
let p = tables.len();
tables.push(vec![G::identity(); 2_usize.pow(window.into())]);
let mut accum = G::identity();
for i in 1 .. 16 {
for i in 1 .. tables[p].len() {
accum += pair.1;
tables[p][i] = accum;
}
}
(nibbles, tables)
tables
}
// An implementation of Straus, with an extremely minimal API that lets us add other algorithms in
// the future. Takes in an iterator of scalars and points with a boolean for whether the scalars are
// little endian encoded in their Reprs or not
pub fn multiexp<
G: Group,
I: IntoIterator<Item = (G::Scalar, G)>
>(pairs: I, little: bool) -> G {
let (nibbles, tables) = prep(pairs, little);
let mut res = G::identity();
for b in (0 .. nibbles[0].len()).rev() {
for _ in 0 .. 4 {
res = res.double();
}
for s in 0 .. tables.len() {
res += tables[s][usize::from(nibbles[s][b])];
}
}
res
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Algorithm {
Null,
Single,
Straus(u8),
Pippenger(u8)
}
pub fn multiexp_vartime<
G: Group,
I: IntoIterator<Item = (G::Scalar, G)>
>(pairs: I, little: bool) -> G {
let (nibbles, tables) = prep(pairs, little);
/*
Release (with runs 20, so all of these are off by 20x):
let mut res = G::identity();
for b in (0 .. nibbles[0].len()).rev() {
for _ in 0 .. 4 {
res = res.double();
}
k256
Straus 3 is more efficient at 5 with 678µs per
Straus 4 is more efficient at 10 with 530µs per
Straus 5 is more efficient at 35 with 467µs per
for s in 0 .. tables.len() {
if nibbles[s][b] != 0 {
res += tables[s][usize::from(nibbles[s][b])];
}
}
}
res
}
Pippenger 5 is more efficient at 125 with 431µs per
Pippenger 6 is more efficient at 275 with 349µs per
Pippenger 7 is more efficient at 375 with 360µs per
#[cfg(feature = "batch")]
pub struct BatchVerifier<Id: Copy, G: Group>(Vec<(Id, Vec<(G::Scalar, G)>)>, bool);
dalek
Straus 3 is more efficient at 5 with 519µs per
Straus 4 is more efficient at 10 with 376µs per
Straus 5 is more efficient at 170 with 330µs per
#[cfg(feature = "batch")]
impl<Id: Copy, G: Group> BatchVerifier<Id, G> {
pub fn new(capacity: usize, endian: bool) -> BatchVerifier<Id, G> {
BatchVerifier(Vec::with_capacity(capacity), endian)
Pippenger 5 is more efficient at 125 with 305µs per
Pippenger 6 is more efficient at 275 with 250µs per
Pippenger 7 is more efficient at 450 with 205µs per
Pippenger 8 is more efficient at 800 with 213µs per
Debug (with runs 5, so...):
k256
Straus 3 is more efficient at 5 with 2532µs per
Straus 4 is more efficient at 10 with 1930µs per
Straus 5 is more efficient at 80 with 1632µs per
Pippenger 5 is more efficient at 150 with 1441µs per
Pippenger 6 is more efficient at 300 with 1235µs per
Pippenger 7 is more efficient at 475 with 1182µs per
Pippenger 8 is more efficient at 625 with 1170µs per
dalek:
Straus 3 is more efficient at 5 with 971µs per
Straus 4 is more efficient at 10 with 782µs per
Straus 5 is more efficient at 75 with 778µs per
Straus 6 is more efficient at 165 with 867µs per
Pippenger 5 is more efficient at 125 with 677µs per
Pippenger 6 is more efficient at 250 with 655µs per
Pippenger 7 is more efficient at 475 with 500µs per
Pippenger 8 is more efficient at 875 with 499µs per
*/
fn algorithm(len: usize) -> Algorithm {
#[cfg(not(debug_assertions))]
if len == 0 {
Algorithm::Null
} else if len == 1 {
Algorithm::Single
} else if len < 10 {
// Straus 2 never showed a performance benefit, even with just 2 elements
Algorithm::Straus(3)
} else if len < 20 {
Algorithm::Straus(4)
} else if len < 50 {
Algorithm::Straus(5)
} else if len < 100 {
Algorithm::Pippenger(4)
} else if len < 125 {
Algorithm::Pippenger(5)
} else if len < 275 {
Algorithm::Pippenger(6)
} else if len < 400 {
Algorithm::Pippenger(7)
} else {
Algorithm::Pippenger(8)
}
pub fn queue<
R: RngCore + CryptoRng,
I: IntoIterator<Item = (G::Scalar, G)>
>(&mut self, rng: &mut R, id: Id, pairs: I) {
// Define a unique scalar factor for this set of variables so individual items can't overlap
let u = if self.0.len() == 0 {
G::Scalar::one()
} else {
G::Scalar::random(rng)
};
self.0.push((id, pairs.into_iter().map(|(scalar, point)| (scalar * u, point)).collect()));
}
pub fn verify(&self) -> bool {
multiexp(
self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned(),
self.1
).is_identity().into()
}
pub fn verify_vartime(&self) -> bool {
multiexp_vartime(
self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned(),
self.1
).is_identity().into()
}
// A constant time variant may be beneficial for robust protocols
pub fn blame_vartime(&self) -> Option<Id> {
let mut slice = self.0.as_slice();
while slice.len() > 1 {
let split = slice.len() / 2;
if multiexp_vartime(
slice[.. split].iter().flat_map(|pairs| pairs.1.iter()).cloned(),
self.1
).is_identity().into() {
slice = &slice[split ..];
} else {
slice = &slice[.. split];
}
}
slice.get(0).filter(
|(_, value)| !bool::from(multiexp_vartime(value.clone(), self.1).is_identity())
).map(|(id, _)| *id)
}
pub fn verify_with_vartime_blame(&self) -> Result<(), Id> {
if self.verify() {
Ok(())
} else {
Err(self.blame_vartime().unwrap())
}
}
pub fn verify_vartime_with_vartime_blame(&self) -> Result<(), Id> {
if self.verify_vartime() {
Ok(())
} else {
Err(self.blame_vartime().unwrap())
}
#[cfg(debug_assertions)]
if len == 0 {
Algorithm::Null
} else if len == 1 {
Algorithm::Single
} else if len < 10 {
Algorithm::Straus(3)
} else if len < 80 {
Algorithm::Straus(4)
} else if len < 100 {
Algorithm::Straus(5)
} else if len < 125 {
Algorithm::Pippenger(4)
} else if len < 275 {
Algorithm::Pippenger(5)
} else if len < 475 {
Algorithm::Pippenger(6)
} else if len < 750 {
Algorithm::Pippenger(7)
} else {
Algorithm::Pippenger(8)
}
}
// Performs a multiexp, automatically selecting the optimal algorithm based on the number of pairs
pub fn multiexp<G: Group>(pairs: &[(G::Scalar, G)]) -> G where G::Scalar: PrimeFieldBits {
match algorithm(pairs.len()) {
Algorithm::Null => Group::identity(),
Algorithm::Single => pairs[0].1 * pairs[0].0,
Algorithm::Straus(window) => straus(pairs, window),
Algorithm::Pippenger(window) => pippenger(pairs, window)
}
}
pub fn multiexp_vartime<G: Group>(pairs: &[(G::Scalar, G)]) -> G where G::Scalar: PrimeFieldBits {
match algorithm(pairs.len()) {
Algorithm::Null => Group::identity(),
Algorithm::Single => pairs[0].1 * pairs[0].0,
Algorithm::Straus(window) => straus_vartime(pairs, window),
Algorithm::Pippenger(window) => pippenger_vartime(pairs, window)
}
}

View file

@ -0,0 +1,63 @@
use ff::PrimeFieldBits;
use group::Group;
use crate::prep_bits;
pub(crate) fn pippenger<G: Group>(
pairs: &[(G::Scalar, G)],
window: u8
) -> G where G::Scalar: PrimeFieldBits {
let bits = prep_bits(pairs, window);
let mut res = G::identity();
for n in (0 .. bits[0].len()).rev() {
for _ in 0 .. window {
res = res.double();
}
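// Place each point into the bucket selected by its scalar's digit for this window position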
let mut buckets = vec![G::identity(); 2_usize.pow(window.into())];
for p in 0 .. bits.len() {
buckets[usize::from(bits[p][n])] += pairs[p].1;
}
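// Sum the buckets from the highest down; under this running sum, bucket b is added b times,
// scaling its points by their digit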
let mut intermediate_sum = G::identity();
for b in (1 .. buckets.len()).rev() {
intermediate_sum += buckets[b];
res += intermediate_sum;
}
}
res
}
pub(crate) fn pippenger_vartime<G: Group>(
pairs: &[(G::Scalar, G)],
window: u8
) -> G where G::Scalar: PrimeFieldBits {
let bits = prep_bits(pairs, window);
let mut res = G::identity();
for n in (0 .. bits[0].len()).rev() {
if n != (bits[0].len() - 1) {
for _ in 0 .. window {
res = res.double();
}
}
let mut buckets = vec![G::identity(); 2_usize.pow(window.into())];
for p in 0 .. bits.len() {
let nibble = usize::from(bits[p][n]);
if nibble != 0 {
buckets[nibble] += pairs[p].1;
}
}
let mut intermediate_sum = G::identity();
for b in (1 .. buckets.len()).rev() {
intermediate_sum += buckets[b];
res += intermediate_sum;
}
}
res
}

View file

@ -0,0 +1,49 @@
use ff::PrimeFieldBits;
use group::Group;
use crate::{prep_bits, prep_tables};
pub(crate) fn straus<G: Group>(
pairs: &[(G::Scalar, G)],
window: u8
) -> G where G::Scalar: PrimeFieldBits {
let groupings = prep_bits(pairs, window);
let tables = prep_tables(pairs, window);
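// Each table holds the multiples 0 .. 2^window of its point, so every window's digit is
// resolved with a single table lookup and addition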
let mut res = G::identity();
for b in (0 .. groupings[0].len()).rev() {
for _ in 0 .. window {
res = res.double();
}
for s in 0 .. tables.len() {
res += tables[s][usize::from(groupings[s][b])];
}
}
res
}
pub(crate) fn straus_vartime<G: Group>(
pairs: &[(G::Scalar, G)],
window: u8
) -> G where G::Scalar: PrimeFieldBits {
let groupings = prep_bits(pairs, window);
let tables = prep_tables(pairs, window);
let mut res = G::identity();
for b in (0 .. groupings[0].len()).rev() {
if b != (groupings[0].len() - 1) {
for _ in 0 .. window {
res = res.double();
}
}
for s in 0 .. tables.len() {
if groupings[s][b] != 0 {
res += tables[s][usize::from(groupings[s][b])];
}
}
}
res
}

View file

@ -0,0 +1,112 @@
use std::time::Instant;
use rand_core::OsRng;
use ff::{Field, PrimeFieldBits};
use group::Group;
use k256::ProjectivePoint;
use dalek_ff_group::EdwardsPoint;
use crate::{straus, pippenger, multiexp, multiexp_vartime};
#[allow(dead_code)]
fn benchmark_internal<G: Group>(straus_bool: bool) where G::Scalar: PrimeFieldBits {
let runs: usize = 20;
let mut start = 0;
let mut increment: usize = 5;
let mut total: usize = 250;
let mut current = 2;
if !straus_bool {
start = 100;
increment = 25;
total = 1000;
current = 4;
};
let mut pairs = Vec::with_capacity(total);
let mut sum = G::identity();
for _ in 0 .. start {
pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng)));
sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;
}
for _ in 0 .. (total / increment) {
for _ in 0 .. increment {
pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng)));
sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;
}
let now = Instant::now();
for _ in 0 .. runs {
if straus_bool {
assert_eq!(straus(&pairs, current), sum);
} else {
assert_eq!(pippenger(&pairs, current), sum);
}
}
let current_per = now.elapsed().as_micros() / u128::try_from(pairs.len()).unwrap();
let now = Instant::now();
for _ in 0 .. runs {
if straus_bool {
assert_eq!(straus(&pairs, current + 1), sum);
} else {
assert_eq!(pippenger(&pairs, current + 1), sum);
}
}
let next_per = now.elapsed().as_micros() / u128::try_from(pairs.len()).unwrap();
if next_per < current_per {
current += 1;
println!(
"{} {} is more efficient at {} with {}µs per",
if straus_bool { "Straus" } else { "Pippenger" }, current, pairs.len(), next_per
);
if current >= 8 {
return;
}
}
}
}
fn test_multiexp<G: Group>() where G::Scalar: PrimeFieldBits {
let mut pairs = Vec::with_capacity(1000);
let mut sum = G::identity();
for _ in 0 .. 10 {
for _ in 0 .. 100 {
pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng)));
sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;
}
assert_eq!(multiexp(&pairs), sum);
assert_eq!(multiexp_vartime(&pairs), sum);
}
}
#[test]
fn test_secp256k1() {
test_multiexp::<ProjectivePoint>();
}
#[test]
fn test_ed25519() {
test_multiexp::<EdwardsPoint>();
}
#[ignore]
#[test]
fn benchmark() {
// Activate the processor's boost clock
for _ in 0 .. 30 {
test_multiexp::<ProjectivePoint>();
}
benchmark_internal::<ProjectivePoint>(true);
benchmark_internal::<ProjectivePoint>(false);
benchmark_internal::<EdwardsPoint>(true);
benchmark_internal::<EdwardsPoint>(false);
}

View file

@ -1,15 +1,19 @@
[package]
name = "transcript"
version = "0.1.0"
description = "A simple transcript definition"
name = "flexible-transcript"
version = "0.1.2"
description = "A simple transcript trait definition, along with viable options"
license = "MIT"
repository = "https://github.com/serai-dex/serai"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["transcript"]
edition = "2021"
[dependencies]
digest = "0.10"
blake2 = { version = "0.10", optional = true }
merlin = { version = "3", optional = true }
[features]
recommended = ["blake2"]
merlin = ["dep:merlin"]

View file

@ -0,0 +1,27 @@
# Flexible Transcript
Flexible Transcript is a crate offering:
- `Transcript`, a trait offering functions transcripts should implement.
- `DigestTranscript`, a competent transcript format instantiated against a
provided hash function.
- `MerlinTranscript`, a wrapper of `merlin` into the trait (available via the
`merlin` feature).
- `RecommendedTranscript`, a transcript recommended for usage in applications.
Currently, this is `DigestTranscript<Blake2b512>` (available via the
`recommended` feature).
The trait was created while working on an IETF draft which defined an incredibly
simple transcript format. Extensions of the protocol would quickly require a
more competent format, yet implementing the one specified was mandatory to meet
the specification. Accordingly, the library implementing the draft defined an
`IetfTranscript`, dropping labels and not allowing successive challenges, yet
thanks to the trait, allowed protocols building on top to provide their own
transcript format as needed.
`DigestTranscript` takes in any hash function implementing `Digest`, offering a
secure transcript format around it. All items are prefixed by a flag, denoting
their type, and their length.
`MerlinTranscript` was used to justify the API and, should any issues arise with
`DigestTranscript`, to enable a fallback. It was also meant as a way to be
compatible with existing Rust projects using `merlin`.
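
A minimal usage sketch (assuming the `recommended` feature, with the crate
imported as `transcript`, as the rest of this commit does via
`package = "flexible-transcript"`):

```rust
use transcript::{Transcript, RecommendedTranscript};

fn main() {
  // Every party constructs the same transcript...
  let mut transcript = RecommendedTranscript::new(b"Example Protocol");
  transcript.domain_separate(b"round 1");
  transcript.append_message(b"point", b"some serialized group element");
  // ... and accordingly derives an identical challenge from it
  let _challenge = transcript.challenge(b"challenge");
  // Challenges may also be converted to the 32-byte seed format rand_core's SeedableRng expects
  let _seed: [u8; 32] = transcript.rng_seed(b"rng");
}
```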

View file

@ -1,57 +1,101 @@
use core::{marker::PhantomData, fmt::Debug};
#![no_std]
#[cfg(features = "merlin")]
#[cfg(feature = "merlin")]
mod merlin;
#[cfg(features = "merlin")]
pub use merlin::MerlinTranscript;
#[cfg(feature = "merlin")]
pub use crate::merlin::MerlinTranscript;
use digest::Digest;
use digest::{typenum::type_operators::IsGreaterOrEqual, consts::U32, Digest, Output};
pub trait Transcript {
fn domain_separate(&mut self, label: &[u8]);
type Challenge: Clone + Send + Sync + AsRef<[u8]>;
/// Create a new transcript with the specified name
fn new(name: &'static [u8]) -> Self;
/// Apply a domain separator to the transcript
fn domain_separate(&mut self, label: &'static [u8]);
/// Append a message to the transcript
fn append_message(&mut self, label: &'static [u8], message: &[u8]);
fn challenge(&mut self, label: &'static [u8]) -> Vec<u8>;
/// Produce a challenge. This MUST update the transcript as it does so, preventing the same
/// challenge from being generated multiple times
fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge;
/// Produce an RNG seed. Helper function for parties needing to generate random data from an
/// agreed upon state. Internally calls the challenge function for the needed bytes, converting
/// them to the seed format rand_core expects
fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32];
}
enum DigestTranscriptMember {
Name,
Domain,
Label,
Value,
Challenge
}
impl DigestTranscriptMember {
fn as_u8(&self) -> u8 {
match self {
DigestTranscriptMember::Name => 0,
DigestTranscriptMember::Domain => 1,
DigestTranscriptMember::Label => 2,
DigestTranscriptMember::Value => 3,
DigestTranscriptMember::Challenge => 4
}
}
}
/// A trait defining Digests with at least a 256-bit output size, assuming at least a 128-bit
/// level of security accordingly
pub trait SecureDigest: Clone + Digest {}
// Digest's OutputSize is denominated in bytes, so U32 corresponds to a 256-bit output
impl<D: Clone + Digest> SecureDigest for D where D::OutputSize: IsGreaterOrEqual<U32> {}
/// A simple transcript format constructed around the specified hash algorithm
#[derive(Clone, Debug)]
pub struct DigestTranscript<D: Digest>(Vec<u8>, PhantomData<D>);
pub struct DigestTranscript<D: SecureDigest>(D);
impl<D: Digest> PartialEq for DigestTranscript<D> {
fn eq(&self, other: &DigestTranscript<D>) -> bool {
self.0 == other.0
impl<D: SecureDigest> DigestTranscript<D> {
fn append(&mut self, kind: DigestTranscriptMember, value: &[u8]) {
self.0.update(&[kind.as_u8()]);
// Assumes messages don't exceed 16 exabytes
self.0.update(u64::try_from(value.len()).unwrap().to_le_bytes());
self.0.update(value);
}
}
impl<D: Digest> DigestTranscript<D> {
pub fn new(label: Vec<u8>) -> Self {
DigestTranscript(label, PhantomData)
}
}
impl<D: SecureDigest> Transcript for DigestTranscript<D> {
type Challenge = Output<D>;
fn new(name: &'static [u8]) -> Self {
let mut res = DigestTranscript(D::new());
res.append(DigestTranscriptMember::Name, name);
res
}
impl<D: Digest> Transcript for DigestTranscript<D> {
// It may be beneficial for each domain to be a nested transcript which is itself length prefixed
// This would go further than Merlin though, requiring an accurate end_domain function whose
// frustrations aren't worth bothering with when this shouldn't actually be meaningful
fn domain_separate(&mut self, label: &[u8]) {
self.append_message(b"domain", label);
self.append(DigestTranscriptMember::Domain, label);
}
fn append_message(&mut self, label: &'static [u8], message: &[u8]) {
self.0.extend(label);
// Assumes messages don't exceed 16 exabytes
self.0.extend(u64::try_from(message.len()).unwrap().to_le_bytes());
self.0.extend(message);
self.append(DigestTranscriptMember::Label, label);
self.append(DigestTranscriptMember::Value, message);
}
fn challenge(&mut self, label: &'static [u8]) -> Vec<u8> {
self.0.extend(label);
D::new().chain_update(&self.0).finalize().to_vec()
fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge {
self.append(DigestTranscriptMember::Challenge, label);
self.0.clone().finalize()
}
fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] {
let mut seed = [0; 32];
seed.copy_from_slice(&self.challenge(label)[0 .. 32]);
seed.copy_from_slice(&self.challenge(label)[.. 32]);
seed
}
}
#[cfg(feature = "recommended")]
pub type RecommendedTranscript = DigestTranscript<blake2::Blake2b512>;

View file

@ -1,16 +1,27 @@
use core::{marker::PhantomData, fmt::{Debug, Formatter}};
use core::fmt::{Debug, Formatter};
use digest::Digest;
use crate::Transcript;
#[derive(Clone, PartialEq)]
#[derive(Clone)]
pub struct MerlinTranscript(pub merlin::Transcript);
// Merlin doesn't implement Debug so provide a stub which won't panic
impl Debug for MerlinTranscript {
fn fmt(&self, _: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { Ok(()) }
fn fmt(&self, _: &mut Formatter<'_>) -> Result<(), core::fmt::Error> { Ok(()) }
}
impl Transcript for MerlinTranscript {
fn domain_separate(&mut self, label: &[u8]) {
// Uses a challenge length of 64 bytes to support wide reduction on generated scalars
// From a security level standpoint, this should just be 32 bytes
// From a Merlin standpoint, this should be variable per call
// From a practical standpoint, this is a demo file not planned to be used and anything using
// this wrapper should be secure with this setting
type Challenge = [u8; 64];
fn new(name: &'static [u8]) -> Self {
MerlinTranscript(merlin::Transcript::new(name))
}
fn domain_separate(&mut self, label: &'static [u8]) {
self.append_message(b"dom-sep", label);
}
@ -18,21 +29,15 @@ impl Transcript for MerlinTranscript {
self.0.append_message(label, message);
}
fn challenge(&mut self, label: &'static [u8]) -> Vec<u8> {
let mut challenge = vec![];
// Uses a challenge length of 64 bytes to support wide reduction on generated scalars
// From a security level standpoint, this should just be 32 bytes
// From a Merlin standpoint, this should be variable per call
// From a practical standpoint, this is a demo file not planned to be used and anything using
// this wrapper is fine without any settings it uses
challenge.resize(64, 0);
fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge {
let mut challenge = [0; 64];
self.0.challenge_bytes(label, &mut challenge);
challenge
}
fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] {
let mut seed = [0; 32];
transcript.challenge_bytes(label, &mut seed);
seed.copy_from_slice(&self.challenge(label)[.. 32]);
seed
}
}

View file

@ -2,23 +2,34 @@
name = "serai-processor"
version = "0.1.0"
description = "Multichain processor premised on canonicity to reach distributed consensus automatically"
license = "MIT"
license = "AGPL-3.0-only"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
publish = false
[dependencies]
async-trait = "0.1"
rand_core = "0.6"
thiserror = "1"
hex = "0.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
curve25519-dalek = { version = "3", features = ["std"] }
blake2 = "0.10"
group = "0.12"
transcript = { package = "flexible-transcript", path = "../crypto/transcript", features = ["recommended"] }
dalek-ff-group = { path = "../crypto/dalek-ff-group" }
frost = { path = "../crypto/frost" }
frost = { package = "modular-frost", path = "../crypto/frost" }
monero = { version = "0.16", features = ["experimental"] }
monero-serai = { path = "../coins/monero", features = ["multisig"] }
[dev-dependencies]
group = "0.12"
rand = "0.8"
futures = "0.3"
tokio = { version = "1", features = ["full"] }

84
processor/src/coin/mod.rs Normal file
View file

@ -0,0 +1,84 @@
use std::{marker::Send, sync::Arc};
use async_trait::async_trait;
use thiserror::Error;
use transcript::RecommendedTranscript;
use frost::{curve::Curve, FrostKeys, sign::PreprocessMachine};
pub mod monero;
pub use self::monero::Monero;
#[derive(Clone, Error, Debug)]
pub enum CoinError {
#[error("failed to connect to coin daemon")]
ConnectionError
}
pub trait Output: Sized + Clone {
type Id: AsRef<[u8]>;
fn id(&self) -> Self::Id;
fn amount(&self) -> u64;
fn serialize(&self) -> Vec<u8>;
fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self>;
}
#[async_trait]
pub trait Coin {
type Curve: Curve;
type Fee: Copy;
type Transaction;
type Block;
type Output: Output;
type SignableTransaction;
type TransactionMachine: PreprocessMachine<Signature = Self::Transaction>;
type Address: Send;
const ID: &'static [u8];
const CONFIRMATIONS: usize;
const MAX_INPUTS: usize;
const MAX_OUTPUTS: usize; // TODO: Decide if this includes change or not
// Doesn't have to take self, yet doing so enables some level of caching, which is pleasant
fn address(&self, key: <Self::Curve as Curve>::G) -> Self::Address;
async fn get_height(&self) -> Result<usize, CoinError>;
async fn get_block(&self, height: usize) -> Result<Self::Block, CoinError>;
async fn get_outputs(
&self,
block: &Self::Block,
key: <Self::Curve as Curve>::G
) -> Vec<Self::Output>;
async fn prepare_send(
&self,
keys: Arc<FrostKeys<Self::Curve>>,
transcript: RecommendedTranscript,
height: usize,
inputs: Vec<Self::Output>,
payments: &[(Self::Address, u64)],
fee: Self::Fee
) -> Result<Self::SignableTransaction, CoinError>;
async fn attempt_send(
&self,
transaction: Self::SignableTransaction,
included: &[u16]
) -> Result<Self::TransactionMachine, CoinError>;
async fn publish_transaction(
&self,
tx: &Self::Transaction
) -> Result<(Vec<u8>, Vec<<Self::Output as Output>::Id>), CoinError>;
#[cfg(test)]
async fn mine_block(&self);
#[cfg(test)]
async fn test_send(&self, key: Self::Address);
}

View file

@ -0,0 +1,224 @@
use std::sync::Arc;
use async_trait::async_trait;
use curve25519_dalek::scalar::Scalar;
use dalek_ff_group as dfg;
use transcript::RecommendedTranscript;
use frost::{curve::Ed25519, FrostKeys};
use monero_serai::{
transaction::Transaction,
rpc::Rpc,
wallet::{
ViewPair, address::{Network, AddressType, Address},
Fee, SpendableOutput, SignableTransaction as MSignableTransaction, TransactionMachine
}
};
use crate::{coin::{CoinError, Output as OutputTrait, Coin}, view_key};
#[derive(Clone, Debug)]
pub struct Output(SpendableOutput);
impl OutputTrait for Output {
// While we could use (tx, o), using the key ensures we won't be susceptible to the burning bug.
// While the Monero library offers a variant which allows senders to ensure their TXs have unique
// output keys, Serai can still be targeted using the classic burning bug
type Id = [u8; 32];
fn id(&self) -> Self::Id {
self.0.key.compress().to_bytes()
}
fn amount(&self) -> u64 {
self.0.commitment.amount
}
fn serialize(&self) -> Vec<u8> {
self.0.serialize()
}
fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
SpendableOutput::deserialize(reader).map(|o| Output(o))
}
}
impl From<SpendableOutput> for Output {
fn from(output: SpendableOutput) -> Output {
Output(output)
}
}
#[derive(Debug)]
pub struct SignableTransaction(
Arc<FrostKeys<Ed25519>>,
RecommendedTranscript,
usize,
MSignableTransaction
);
#[derive(Clone, Debug)]
pub struct Monero {
pub(crate) rpc: Rpc,
view: Scalar
}
impl Monero {
pub fn new(url: String) -> Monero {
let view = view_key::<Monero>(0).0;
Monero { rpc: Rpc::new(url), view }
}
fn view_pair(&self, spend: dfg::EdwardsPoint) -> ViewPair {
ViewPair { spend: spend.0, view: self.view }
}
#[cfg(test)]
fn empty_view_pair(&self) -> ViewPair {
use group::Group;
self.view_pair(dfg::EdwardsPoint::generator())
}
#[cfg(test)]
fn empty_address(&self) -> Address {
self.empty_view_pair().address(Network::Mainnet, AddressType::Standard, false)
}
}
#[async_trait]
impl Coin for Monero {
type Curve = Ed25519;
type Fee = Fee;
type Transaction = Transaction;
type Block = Vec<Transaction>;
type Output = Output;
type SignableTransaction = SignableTransaction;
type TransactionMachine = TransactionMachine;
type Address = Address;
const ID: &'static [u8] = b"Monero";
const CONFIRMATIONS: usize = 10;
// Testnet TX bb4d188a4c571f2f0de70dca9d475abc19078c10ffa8def26dd4f63ce1bcfd79 uses 146 inputs
// while using less than 100kb of space, albeit with just 2 outputs (though outputs share a BP)
// The TX size limit is half the contextual median block weight, where said weight is >= 300,000
// This means any TX which fits into 150kb will be accepted by Monero
// 128, even with 16 outputs, should fit into 100kb. Raising this to 192 may offer further efficiency
// TODO: Get hard numbers and tune
const MAX_INPUTS: usize = 128;
const MAX_OUTPUTS: usize = 16;
fn address(&self, key: dfg::EdwardsPoint) -> Self::Address {
self.view_pair(key).address(Network::Mainnet, AddressType::Standard, true)
}
async fn get_height(&self) -> Result<usize, CoinError> {
self.rpc.get_height().await.map_err(|_| CoinError::ConnectionError)
}
async fn get_block(&self, height: usize) -> Result<Self::Block, CoinError> {
self.rpc.get_block_transactions_possible(height).await.map_err(|_| CoinError::ConnectionError)
}
async fn get_outputs(&self, block: &Self::Block, key: dfg::EdwardsPoint) -> Vec<Self::Output> {
block
.iter()
.flat_map(|tx| tx.scan(self.view_pair(key), true).not_locked())
.map(Output::from)
.collect()
}
async fn prepare_send(
&self,
keys: Arc<FrostKeys<Ed25519>>,
transcript: RecommendedTranscript,
height: usize,
mut inputs: Vec<Output>,
payments: &[(Address, u64)],
fee: Fee
) -> Result<SignableTransaction, CoinError> {
let spend = keys.group_key();
Ok(
SignableTransaction(
keys,
transcript,
height,
MSignableTransaction::new(
inputs.drain(..).map(|input| input.0).collect(),
payments.to_vec(),
Some(self.address(spend)),
fee
).map_err(|_| CoinError::ConnectionError)?
)
)
}
async fn attempt_send(
&self,
transaction: SignableTransaction,
included: &[u16]
) -> Result<Self::TransactionMachine, CoinError> {
transaction.3.clone().multisig(
&self.rpc,
(*transaction.0).clone(),
transaction.1.clone(),
transaction.2,
included.to_vec()
).await.map_err(|_| CoinError::ConnectionError)
}
async fn publish_transaction(
&self,
tx: &Self::Transaction
) -> Result<(Vec<u8>, Vec<<Self::Output as OutputTrait>::Id>), CoinError> {
self.rpc.publish_transaction(&tx).await.map_err(|_| CoinError::ConnectionError)?;
Ok((
tx.hash().to_vec(),
tx.prefix.outputs.iter().map(|output| output.key.compress().to_bytes()).collect()
))
}
#[cfg(test)]
async fn mine_block(&self) {
#[derive(serde::Deserialize, Debug)]
struct EmptyResponse {}
let _: EmptyResponse = self.rpc.rpc_call("json_rpc", Some(serde_json::json!({
"method": "generateblocks",
"params": {
"wallet_address": self.empty_address().to_string(),
"amount_of_blocks": 10
},
}))).await.unwrap();
}
#[cfg(test)]
async fn test_send(&self, address: Self::Address) {
use rand::rngs::OsRng;
let height = self.get_height().await.unwrap();
self.mine_block().await;
for _ in 0 .. 7 {
self.mine_block().await;
}
let outputs = self.rpc
.get_block_transactions_possible(height).await.unwrap()
.swap_remove(0).scan(self.empty_view_pair(), false).ignore_timelock();
let amount = outputs[0].commitment.amount;
let fee = 1000000000; // TODO
let tx = MSignableTransaction::new(
outputs,
vec![(address, amount - fee)],
Some(self.empty_address()),
self.rpc.get_fee().await.unwrap()
).unwrap().sign(&mut OsRng, &self.rpc, &Scalar::one()).await.unwrap();
self.rpc.publish_transaction(&tx).await.unwrap();
self.mine_block().await;
}
}

View file

@ -1 +0,0 @@
pub mod monero;

View file

@ -1,111 +0,0 @@
use async_trait::async_trait;
use rand_core::{RngCore, CryptoRng};
use curve25519_dalek::scalar::Scalar;
use dalek_ff_group as dfg;
use frost::MultisigKeys;
use monero::util::address::Address;
use monero_serai::{frost::Ed25519, rpc::Rpc, wallet::{SpendableOutput, SignableTransaction}};
use crate::{Output as OutputTrait, CoinError, Coin, view_key};
pub struct Output(SpendableOutput);
impl OutputTrait for Output {
// If Monero ever does support more than 255 outputs at once, which it could, this u8 could be a
// u16 which serializes as little endian, dropping the last byte if empty, without conflict
type Id = ([u8; 32], u8);
fn id(&self) -> Self::Id {
(self.0.tx, self.0.o.try_into().unwrap())
}
fn amount(&self) -> u64 {
self.0.commitment.amount
}
fn serialize(&self) -> Vec<u8> {
self.0.serialize()
}
fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
SpendableOutput::deserialize(reader).map(|o| Output(o))
}
}
impl From<SpendableOutput> for Output {
fn from(output: SpendableOutput) -> Output {
Output(output)
}
}
pub struct Monero {
rpc: Rpc,
view: Scalar
}
impl Monero {
pub fn new(url: String) -> Monero {
Monero {
rpc: Rpc::new(url),
view: dfg::Scalar::from_hash(view_key::<Monero>(0)).0
}
}
}
#[async_trait]
impl Coin for Monero {
type Curve = Ed25519;
type Output = Output;
type SignableTransaction = SignableTransaction;
type Address = Address;
fn id() -> &'static [u8] { b"Monero" }
async fn confirmations() -> usize { 10 }
// Testnet TX bb4d188a4c571f2f0de70dca9d475abc19078c10ffa8def26dd4f63ce1bcfd79 uses 146 inputs
// while using less than 100kb of space, albeit with just 2 outputs (though outputs share a BP)
// The TX size limit is half the contextual median block weight, where said weight is >= 300,000
// This means any TX which fits into 150kb will be accepted by Monero
// 128, even with 16 outputs, should fit into 100kb. Further efficiency by 192 may be viable
// TODO: Get hard numbers and tune
async fn max_inputs() -> usize { 128 }
async fn max_outputs() -> usize { 16 }
async fn get_height(&self) -> Result<usize, CoinError> {
self.rpc.get_height().await.map_err(|_| CoinError::ConnectionError)
}
async fn get_outputs_in_block(
&self,
height: usize,
key: dfg::EdwardsPoint
) -> Result<Vec<Self::Output>, CoinError> {
Ok(
self.rpc.get_block_transactions_possible(height).await.map_err(|_| CoinError::ConnectionError)?
.iter().flat_map(|tx| tx.scan(self.view, key.0)).map(Output::from).collect()
)
}
async fn prepare_send<R: RngCore + CryptoRng>(
&self,
_keys: MultisigKeys<Ed25519>,
_label: Vec<u8>,
_height: usize,
_inputs: Vec<Output>,
_payments: &[(Address, u64)]
) -> Result<SignableTransaction, CoinError> {
todo!()
}
async fn attempt_send<R: RngCore + CryptoRng + std::marker::Send>(
&self,
_rng: &mut R,
_transaction: SignableTransaction,
_included: &[u16]
) -> Result<(Vec<u8>, Vec<<Self::Output as OutputTrait>::Id>), CoinError> {
todo!()
}
}

View file

@ -1,78 +1,38 @@
use std::marker::Send;
use std::{marker::Send, collections::HashMap};
use async_trait::async_trait;
use thiserror::Error;
use rand_core::{RngCore, CryptoRng};
use blake2::{digest::{Digest, Update}, Blake2b512};
use frost::{curve::Curve, FrostError};
use frost::{Curve, MultisigKeys};
mod coins;
mod coin;
use coin::{CoinError, Coin};
mod wallet;
#[cfg(test)]
mod tests;
trait Output: Sized {
type Id;
#[derive(Clone, Error, Debug)]
pub enum NetworkError {}
fn id(&self) -> Self::Id;
fn amount(&self) -> u64;
fn serialize(&self) -> Vec<u8>;
fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self>;
#[async_trait]
pub trait Network: Send {
async fn round(&mut self, data: Vec<u8>) -> Result<HashMap<u16, Vec<u8>>, NetworkError>;
}
#[derive(Clone, Error, Debug)]
enum CoinError {
#[error("failed to connect to coin daemon")]
ConnectionError
pub enum SignError {
#[error("FROST had an error {0}")]
FrostError(FrostError),
#[error("coin had an error {0}")]
CoinError(CoinError),
#[error("network had an error {0}")]
NetworkError(NetworkError)
}
#[async_trait]
trait Coin {
type Curve: Curve;
type Output: Output;
type SignableTransaction;
type Address: Send;
fn id() -> &'static [u8];
async fn confirmations() -> usize;
async fn max_inputs() -> usize;
async fn max_outputs() -> usize;
async fn get_height(&self) -> Result<usize, CoinError>;
async fn get_outputs_in_block(
&self,
height: usize,
key: <Self::Curve as Curve>::G
) -> Result<Vec<Self::Output>, CoinError>;
async fn prepare_send<R: RngCore + CryptoRng>(
&self,
keys: MultisigKeys<Self::Curve>,
label: Vec<u8>,
height: usize,
inputs: Vec<Self::Output>,
payments: &[(Self::Address, u64)]
) -> Result<Self::SignableTransaction, CoinError>;
async fn attempt_send<R: RngCore + CryptoRng + Send>(
&self,
rng: &mut R,
transaction: Self::SignableTransaction,
included: &[u16]
) -> Result<(Vec<u8>, Vec<<Self::Output as Output>::Id>), CoinError>;
}
// Generate a view key for a given chain in a globally consistent manner regardless of the current
// group key
// Generate a static view key for a given chain in a globally consistent manner
// Doesn't consider the current group key to increase the simplicity of verifying Serai's status
// Takes an index, k, for more modern privacy protocols which use multiple view keys
// Doesn't run Curve::hash_to_F, instead returning the hash object, due to hash_to_F being a FROST
// definition instead of a wide reduction from a hash object
fn view_key<C: Coin>(k: u64) -> Blake2b512 {
Blake2b512::new().chain(b"Serai DEX View Key").chain(C::id()).chain(k.to_le_bytes())
pub fn view_key<C: Coin>(k: u64) -> <C::Curve as Curve>::F {
C::Curve::hash_to_F(b"Serai DEX View Key", &[C::ID, &k.to_le_bytes()].concat())
}

View file

@ -1,5 +0,0 @@
struct Scanner<C: Coin> {}
impl Scanner {
}

View file

@ -1,6 +1,110 @@
use crate::{Coin, coins::monero::Monero};
use std::{sync::{Arc, RwLock}, collections::HashMap};
use async_trait::async_trait;
use rand::rngs::OsRng;
use crate::{NetworkError, Network, coin::{Coin, Monero}, wallet::{WalletKeys, MemCoinDb, Wallet}};
#[derive(Clone)]
struct LocalNetwork {
i: u16,
size: u16,
round: usize,
rounds: Arc<RwLock<Vec<HashMap<u16, Vec<u8>>>>>
}
impl LocalNetwork {
fn new(size: u16) -> Vec<LocalNetwork> {
let rounds = Arc::new(RwLock::new(vec![]));
let mut res = vec![];
for i in 1 ..= size {
res.push(LocalNetwork { i, size, round: 0, rounds: rounds.clone() });
}
res
}
}
#[async_trait]
impl Network for LocalNetwork {
async fn round(&mut self, data: Vec<u8>) -> Result<HashMap<u16, Vec<u8>>, NetworkError> {
{
let mut rounds = self.rounds.write().unwrap();
if rounds.len() == self.round {
rounds.push(HashMap::new());
}
rounds[self.round].insert(self.i, data);
}
while {
let read = self.rounds.try_read().unwrap();
read[self.round].len() != usize::from(self.size)
} {
tokio::task::yield_now().await;
}
let res = self.rounds.try_read().unwrap()[self.round].clone();
self.round += 1;
Ok(res)
}
}
async fn test_send<C: Coin + Clone>(coin: C, fee: C::Fee) {
// Mine a block so there's a confirmed height
coin.mine_block().await;
let height = coin.get_height().await.unwrap();
let mut keys = frost::tests::key_gen::<_, C::Curve>(&mut OsRng);
let threshold = keys[&1].params().t();
let mut networks = LocalNetwork::new(threshold);
let mut wallets = vec![];
for i in 1 ..= threshold {
let mut wallet = Wallet::new(MemCoinDb::new(), coin.clone());
wallet.acknowledge_height(0, height);
wallet.add_keys(
&WalletKeys::new(Arc::try_unwrap(keys.remove(&i).take().unwrap()).unwrap(), 0)
);
wallets.push(wallet);
}
// Get the chain to a height where blocks have sufficient confirmations
while (height + C::CONFIRMATIONS) > coin.get_height().await.unwrap() {
coin.mine_block().await;
}
for wallet in wallets.iter_mut() {
// Poll to activate the keys
wallet.poll().await.unwrap();
}
coin.test_send(wallets[0].address()).await;
let mut futures = vec![];
for (network, wallet) in networks.iter_mut().zip(wallets.iter_mut()) {
wallet.poll().await.unwrap();
let height = coin.get_height().await.unwrap();
wallet.acknowledge_height(1, height - 10);
let signable = wallet.prepare_sends(
1,
vec![(wallet.address(), 10000000000)],
fee
).await.unwrap().1.swap_remove(0);
futures.push(
wallet.attempt_send(network, signable, (1 ..= threshold).into_iter().collect::<Vec<_>>())
);
}
println!(
"{:?}",
hex::encode(futures::future::join_all(futures).await.swap_remove(0).unwrap().0)
);
}
#[tokio::test]
async fn test() {
println!("{}", Monero::new("http://127.0.0.1:18081".to_string()).get_height().await.unwrap());
async fn monero() {
let monero = Monero::new("http://127.0.0.1:18081".to_string());
let fee = monero.rpc.get_fee().await.unwrap();
test_send(monero, fee).await;
}

View file

@ -1,30 +1,363 @@
use frost::{Curve, MultisigKeys};
use std::{sync::Arc, collections::HashMap};
use crate::Coin;
use rand_core::OsRng;
struct Wallet<C: Coin> {
keys: MultisigKeys<C::Curve>,
outputs: Vec<C::Output>
use group::GroupEncoding;
use transcript::{Transcript, RecommendedTranscript};
use frost::{curve::Curve, FrostKeys, sign::{PreprocessMachine, SignMachine, SignatureMachine}};
use crate::{coin::{CoinError, Output, Coin}, SignError, Network};
pub struct WalletKeys<C: Curve> {
keys: FrostKeys<C>,
creation_height: usize
}
impl<C: Coin> Wallet<C> {
fn new(keys: &MultisigKeys<C::Curve>) -> Wallet<C> {
Wallet {
keys: keys.offset(
C::Curve::hash_to_F(
// Use distinct keys on each network by applying an additive offset
// While it would be fine to just C::id(), including the group key creates distinct
// offsets instead of static offsets. Under a statically offset system, a BTC key could
// have X subtracted to find the potential group key, and then have Y added to find the
// potential BCH group key. While this shouldn't be an issue, as this isn't a private
// system, there are potentially other benefits to binding this to a specific group key
&[b"Serai Processor Wallet", C::id(), &C::Curve::G_to_bytes(&keys.group_key())].concat()
)
),
impl<C: Curve> WalletKeys<C> {
pub fn new(keys: FrostKeys<C>, creation_height: usize) -> WalletKeys<C> {
WalletKeys { keys, creation_height }
}
outputs: vec![]
// Bind this key to a specific network by applying an additive offset
// While it would be fine to just C::ID, including the group key creates distinct
// offsets instead of static offsets. Under a statically offset system, a BTC key could
// have X subtracted to find the potential group key, and then have Y added to find the
// potential ETH group key. While this shouldn't be an issue, as this isn't a private
// system, there are potentially other benefits to binding this to a specific group key
// It's no longer possible to influence group key gen to key cancel without breaking the hash
// function as well, although that degree of influence means key gen is broken already
fn bind(&self, chain: &[u8]) -> FrostKeys<C> {
const DST: &[u8] = b"Serai Processor Wallet Chain Bind";
let mut transcript = RecommendedTranscript::new(DST);
transcript.append_message(b"chain", chain);
transcript.append_message(b"curve", C::ID);
transcript.append_message(b"group_key", self.keys.group_key().to_bytes().as_ref());
self.keys.offset(C::hash_to_F(DST, &transcript.challenge(b"offset")))
}
}
pub trait CoinDb {
// Set a height as scanned to
fn scanned_to_height(&mut self, height: usize);
// Acknowledge a given coin height for a canonical height
fn acknowledge_height(&mut self, canonical: usize, height: usize);
// Adds an output to the DB. Returns false if the output was already added
fn add_output<O: Output>(&mut self, output: &O) -> bool;
// Height this coin has been scanned to
fn scanned_height(&self) -> usize;
// Acknowledged height for a given canonical height
fn acknowledged_height(&self, canonical: usize) -> usize;
}
pub struct MemCoinDb {
// Height this coin has been scanned to
scanned_height: usize,
// Acknowledged height for a given canonical height
acknowledged_heights: HashMap<usize, usize>,
outputs: HashMap<Vec<u8>, Vec<u8>>
}
impl MemCoinDb {
pub fn new() -> MemCoinDb {
MemCoinDb {
scanned_height: 0,
acknowledged_heights: HashMap::new(),
outputs: HashMap::new()
}
}
}
impl CoinDb for MemCoinDb {
fn scanned_to_height(&mut self, height: usize) {
self.scanned_height = height;
}
fn acknowledge_height(&mut self, canonical: usize, height: usize) {
debug_assert!(!self.acknowledged_heights.contains_key(&canonical));
self.acknowledged_heights.insert(canonical, height);
}
fn add_output<O: Output>(&mut self, output: &O) -> bool {
// This would be insecure as we're indexing by ID and this will replace the output as a whole
// Multiple outputs may have the same ID in edge cases such as Monero, where outputs are ID'd
// by key image, not by hash + index
// self.outputs.insert(output.id(), output).is_some()
let id = output.id().as_ref().to_vec();
if self.outputs.contains_key(&id) {
return false;
}
self.outputs.insert(id, output.serialize());
true
}
fn scanned_height(&self) -> usize {
self.scanned_height
}
fn acknowledged_height(&self, canonical: usize) -> usize {
self.acknowledged_heights[&canonical]
}
}
fn select_inputs<C: Coin>(inputs: &mut Vec<C::Output>) -> (Vec<C::Output>, u64) {
// Sort to ensure determinism. Inefficient, yet produces the most legible code to be optimized
// later
inputs.sort_by(|a, b| a.amount().cmp(&b.amount()));
// Select the maximum amount of outputs possible
let res = inputs.split_off(inputs.len() - C::MAX_INPUTS.min(inputs.len()));
// Calculate their sum value, minus the fee needed to spend them
let sum = res.iter().map(|input| input.amount()).sum();
// sum -= C::MAX_FEE; // TODO
(res, sum)
}
fn select_outputs<C: Coin>(
payments: &mut Vec<(C::Address, u64)>,
value: &mut u64
) -> Vec<(C::Address, u64)> {
// Prioritize large payments which will most efficiently use large inputs
payments.sort_by(|a, b| a.1.cmp(&b.1));
// Grab the payments this will successfully fund
let mut outputs = vec![];
let mut p = payments.len();
while p != 0 {
p -= 1;
if *value >= payments[p].1 {
*value -= payments[p].1;
// Swap remove will either pop the tail or insert an element that wouldn't fit, making it
// always safe to move past
outputs.push(payments.swap_remove(p));
}
// Doesn't break in this else case as a smaller payment may still fit
}
outputs
}
// Optimizes on the expectation selected/inputs are sorted from lowest value to highest
fn refine_inputs<C: Coin>(
selected: &mut Vec<C::Output>,
inputs: &mut Vec<C::Output>,
mut remaining: u64
) {
// Drop unused inputs
let mut s = 0;
while remaining > selected[s].amount() {
remaining -= selected[s].amount();
s += 1;
}
// Add them back to the inputs pool
inputs.extend(selected.drain(.. s));
// Replace large inputs with smaller ones
for s in (0 .. selected.len()).rev() {
for i in 0 .. inputs.len() {
// Doesn't break due to inputs no longer being sorted
// This could be made faster if we prioritized small input usage over transaction size/fees
// TODO: Consider. This would implicitly consolidate inputs which would be advantageous
if selected[s].amount() < inputs[i].amount() {
continue;
}
// If we can successfully replace this input, do so
let diff = selected[s].amount() - inputs[i].amount();
if remaining > diff {
remaining -= diff;
let old = selected[s].clone();
selected[s] = inputs[i].clone();
inputs[i] = old;
}
}
}
}
fn select_inputs_outputs<C: Coin>(
inputs: &mut Vec<C::Output>,
outputs: &mut Vec<(C::Address, u64)>
) -> (Vec<C::Output>, Vec<(C::Address, u64)>) {
if inputs.len() == 0 {
return (vec![], vec![]);
}
let (mut selected, mut value) = select_inputs::<C>(inputs);
let outputs = select_outputs::<C>(outputs, &mut value);
if outputs.len() == 0 {
inputs.extend(selected);
return (vec![], vec![]);
}
refine_inputs::<C>(&mut selected, inputs, value);
(selected, outputs)
}
pub struct Wallet<D: CoinDb, C: Coin> {
db: D,
coin: C,
keys: Vec<(Arc<FrostKeys<C::Curve>>, Vec<C::Output>)>,
pending: Vec<(usize, FrostKeys<C::Curve>)>
}
impl<D: CoinDb, C: Coin> Wallet<D, C> {
pub fn new(db: D, coin: C) -> Wallet<D, C> {
Wallet {
db,
coin,
keys: vec![],
pending: vec![]
}
}
async fn poll() { todo!() }
pub fn scanned_height(&self) -> usize { self.db.scanned_height() }
pub fn acknowledge_height(&mut self, canonical: usize, height: usize) {
self.db.acknowledge_height(canonical, height);
if height > self.db.scanned_height() {
self.db.scanned_to_height(height);
}
}
pub fn acknowledged_height(&self, canonical: usize) -> usize {
self.db.acknowledged_height(canonical)
}
pub fn add_keys(&mut self, keys: &WalletKeys<C::Curve>) {
// Doesn't use +1 as this is height, not block index, and poll moves by block index
self.pending.push((self.acknowledged_height(keys.creation_height), keys.bind(C::ID)));
}
pub fn address(&self) -> C::Address {
self.coin.address(self.keys[self.keys.len() - 1].0.group_key())
}
pub async fn poll(&mut self) -> Result<(), CoinError> {
if self.coin.get_height().await? < C::CONFIRMATIONS {
return Ok(());
}
let confirmed_block = self.coin.get_height().await? - C::CONFIRMATIONS;
for b in self.scanned_height() ..= confirmed_block {
// If any keys activated at this height, shift them over
{
let mut k = 0;
while k < self.pending.len() {
// TODO
//if b < self.pending[k].0 {
//} else if b == self.pending[k].0 {
if b <= self.pending[k].0 {
self.keys.push((Arc::new(self.pending.swap_remove(k).1), vec![]));
} else {
k += 1;
}
}
}
let block = self.coin.get_block(b).await?;
for (keys, outputs) in self.keys.iter_mut() {
outputs.extend(
self.coin.get_outputs(&block, keys.group_key()).await.iter().cloned().filter(
|output| self.db.add_output(output)
)
);
}
// Blocks are zero-indexed while heights aren't
self.db.scanned_to_height(b + 1);
}
Ok(())
}
// This should be called whenever new outputs are received, meaning there was a new block
// If these outputs were received and sent to Substrate, it should be called after they're
// included in a block and we have results to act on
// If these outputs weren't sent to Substrate (change), it should be called immediately
// with all payments still queued from the last call
pub async fn prepare_sends(
&mut self,
canonical: usize,
payments: Vec<(C::Address, u64)>,
fee: C::Fee
) -> Result<(Vec<(C::Address, u64)>, Vec<C::SignableTransaction>), CoinError> {
if payments.len() == 0 {
return Ok((vec![], vec![]));
}
let acknowledged_height = self.acknowledged_height(canonical);
// TODO: Log schedule outputs when MAX_OUTPUTS is lower than payments.len()
// Payments is the first set of TXs in the schedule
// As each payment re-appears, let mut payments = schedule[payment] where the only input is
// the source payment
// let (mut payments, schedule) = schedule(payments);
let mut payments = payments;
let mut txs = vec![];
for (keys, outputs) in self.keys.iter_mut() {
while outputs.len() != 0 {
let (inputs, outputs) = select_inputs_outputs::<C>(outputs, &mut payments);
// If we can no longer process any payments, move to the next set of keys
if outputs.len() == 0 {
debug_assert_eq!(inputs.len(), 0);
break;
}
// Create the transcript for this transaction
let mut transcript = RecommendedTranscript::new(b"Serai Processor Wallet Send");
transcript.append_message(
b"canonical_height",
&u64::try_from(canonical).unwrap().to_le_bytes()
);
transcript.append_message(
b"acknowledged_height",
&u64::try_from(acknowledged_height).unwrap().to_le_bytes()
);
transcript.append_message(
b"index",
&u64::try_from(txs.len()).unwrap().to_le_bytes()
);
let tx = self.coin.prepare_send(
keys.clone(),
transcript,
acknowledged_height,
inputs,
&outputs,
fee
).await?;
// self.db.save_tx(tx) // TODO
txs.push(tx);
}
}
Ok((payments, txs))
}
pub async fn attempt_send<N: Network>(
&mut self,
network: &mut N,
prepared: C::SignableTransaction,
included: Vec<u16>
) -> Result<(Vec<u8>, Vec<<C::Output as Output>::Id>), SignError> {
let attempt = self.coin.attempt_send(
prepared,
&included
).await.map_err(|e| SignError::CoinError(e))?;
let (attempt, commitments) = attempt.preprocess(&mut OsRng);
let commitments = network.round(commitments).await.map_err(|e| SignError::NetworkError(e))?;
let (attempt, share) = attempt.sign(commitments, b"").map_err(|e| SignError::FrostError(e))?;
let shares = network.round(share).await.map_err(|e| SignError::NetworkError(e))?;
let tx = attempt.complete(shares).map_err(|e| SignError::FrostError(e))?;
self.coin.publish_transaction(&tx).await.map_err(|e| SignError::CoinError(e))
}
}