Implement Bulletproofs in Rust (#69)

* Initial attempt at Bulletproofs

I don't know why this doesn't work. The generators and hash_cache line
up without issue. AFAICT, the inner product proof is valid as well, as
are all the included formulas.

* Add yinvpow asserts

* Clean code

* Correct bad imports

* Fix the definition of TWO_N

Bulletproofs work now :D

* Tidy up a bit

* fmt + clippy

* Compile a variety of XMR dependencies with optimizations, even under dev

The Rust Bulletproofs implementation is currently 8% slower than the C
implementation under the release profile. This is acceptable, even if
suboptimal. Under debug, however, proofs take a quarter of a second to two
seconds, depending on the number of outputs, which justifies this move (see
the Cargo sketch after this commit message).

* Remove unnecessary deref in BPs
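
For reference, this relies on Cargo's per-package profile overrides; a minimal sketch of the
pattern follows (the crates listed here are only an illustrative subset, the actual overrides are
in the workspace Cargo.toml diff further down):

# Illustrative subset; see the actual workspace Cargo.toml change below
[profile.dev.package]
curve25519-dalek = { opt-level = 3 }
monero-serai = { opt-level = 3 }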
Luke Parker 2022-07-26 02:05:15 -05:00 committed by GitHub
parent 3711e13009
commit ee29f6d6d8
9 changed files with 382 additions and 45 deletions

Cargo.lock generated

@@ -4581,6 +4581,7 @@ dependencies = [
"modular-frost",
"monero",
"monero-epee-bin-serde",
"multiexp",
"rand 0.8.5",
"rand_chacha 0.3.1",
"rand_core 0.6.3",


@@ -21,6 +21,22 @@ members = [
"contracts/multisig",
]
# Always compile Monero (and a variety of dependencies) with optimizations due
# to the unoptimized performance of Bulletproofs
[profile.dev.package]
subtle = { opt-level = 3 }
curve25519-dalek = { opt-level = 3 }
ff = { opt-level = 3 }
group = { opt-level = 3 }
crypto-bigint = { opt-level = 3 }
dalek-ff-group = { opt-level = 3 }
multiexp = { opt-level = 3 }
monero-serai = { opt-level = 3 }
[profile.release]
panic = "unwind"


@@ -28,6 +28,7 @@ curve25519-dalek = { version = "3", features = ["std"] }
group = { version = "0.12" }
dalek-ff-group = { path = "../../crypto/dalek-ff-group" }
multiexp = { path = "../../crypto/multiexp" }
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", features = ["recommended"], optional = true }
frost = { package = "modular-frost", path = "../../crypto/frost", features = ["ed25519"], optional = true }


@@ -0,0 +1,235 @@
// Required to apply to this entire file, which isn't an issue, as the attribute wouldn't bind to
// the lazy_static statics anyway
#![allow(non_upper_case_globals)]
use lazy_static::lazy_static;
use rand_core::{RngCore, CryptoRng};
use group::{ff::Field, Group};
use dalek_ff_group::{Scalar, EdwardsPoint};
use multiexp::multiexp;
use crate::{
H as DALEK_H, Commitment, random_scalar as dalek_random, hash, hash_to_scalar as dalek_hash,
ringct::{
hash_to_point::raw_hash_to_point,
bulletproofs::{scalar_vector::*, Bulletproofs},
},
serialize::write_varint,
};
pub(crate) const MAX_M: usize = 16;
pub(crate) const MAX_N: usize = 64;
const MAX_MN: usize = MAX_M * MAX_N;
// Wrap random_scalar and hash_to_scalar into dalek_ff_group
fn random_scalar<R: RngCore + CryptoRng>(rng: &mut R) -> Scalar {
Scalar(dalek_random(rng))
}
fn hash_to_scalar(data: &[u8]) -> Scalar {
let scalar = Scalar(dalek_hash(data));
// Monero will explicitly retry if the hash is zero, as a zero scalar breaks the proof.
// This library acknowledges the practical impossibility of that occurring and doesn't bother
// to implement logic to handle it. That said, if it ever does occur, something must happen so
// that we don't generate a proof we believe to be valid when it isn't
assert!(!bool::from(scalar.is_zero()), "ZERO HASH: {:?}", data);
scalar
}
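// Derive the i-th Bulletproofs generator by hashing H || "bulletproof" || varint(i) to a point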
fn generator(i: usize) -> EdwardsPoint {
let mut transcript = (*H).compress().to_bytes().to_vec();
transcript.extend(b"bulletproof");
write_varint(&i.try_into().unwrap(), &mut transcript).unwrap();
EdwardsPoint(raw_hash_to_point(hash(&transcript)))
}
lazy_static! {
static ref INV_EIGHT: Scalar = Scalar::from(8u8).invert().unwrap();
static ref H: EdwardsPoint = EdwardsPoint(*DALEK_H);
pub(crate) static ref ONE_N: ScalarVector = ScalarVector(vec![Scalar::one(); MAX_N]);
pub(crate) static ref TWO_N: ScalarVector = ScalarVector::powers(Scalar::from(2u8), MAX_N);
pub(crate) static ref IP12: Scalar = inner_product(&ONE_N, &TWO_N);
static ref H_i: Vec<EdwardsPoint> = (0 .. MAX_MN).map(|g| generator(g * 2)).collect();
static ref G_i: Vec<EdwardsPoint> = (0 .. MAX_MN).map(|g| generator((g * 2) + 1)).collect();
}
pub(crate) fn vector_exponent(a: &ScalarVector, b: &ScalarVector) -> EdwardsPoint {
assert_eq!(a.len(), b.len());
(a * &G_i[.. a.len()]) + (b * &H_i[.. b.len()])
}
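// Fiat-Shamir transcript helper: hash the current cache value together with the new elements,
// then update the cache to the result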
fn hash_cache(cache: &mut Scalar, mash: &[[u8; 32]]) -> Scalar {
let slice =
&[cache.to_bytes().as_ref(), mash.iter().cloned().flatten().collect::<Vec<_>>().as_ref()]
.concat();
*cache = hash_to_scalar(slice);
*cache
}
pub(crate) fn prove<R: RngCore + CryptoRng>(
rng: &mut R,
commitments: &[Commitment],
) -> Bulletproofs {
let sv = ScalarVector(commitments.iter().cloned().map(|c| Scalar::from(c.amount)).collect());
let gamma = ScalarVector(commitments.iter().cloned().map(|c| Scalar(c.mask)).collect());
let logN = 6;
let N = 1 << logN;
assert_eq!(N, 64);
let mut logM = 0;
let mut M;
while {
M = 1 << logM;
(M <= MAX_M) && (M < sv.len())
} {
logM += 1;
}
let logMN = logM + logN;
let MN = M * N;
let mut aL = ScalarVector::new(MN);
let mut aR = ScalarVector::new(MN);
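// aL holds the bit decomposition of each amount (bit i of amount j lands at index (j * N) + i);
// aR = aL - 1, so positions with an unset bit become -1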
for j in 0 .. M {
for i in (0 .. N).rev() {
if (j < sv.len()) && ((sv[j][i / 8] & (1u8 << (i % 8))) != 0) {
aL.0[(j * N) + i] = Scalar::one();
} else {
aR.0[(j * N) + i] = -Scalar::one();
}
}
}
// Commitments * INV_EIGHT
let V = commitments.iter().map(|c| EdwardsPoint(c.calculate()) * *INV_EIGHT).collect::<Vec<_>>();
let mut cache =
hash_to_scalar(&V.iter().flat_map(|V| V.compress().to_bytes()).collect::<Vec<_>>());
let alpha = random_scalar(&mut *rng);
let A = (vector_exponent(&aL, &aR) + (EdwardsPoint::generator() * alpha)) * *INV_EIGHT;
let (sL, sR) =
ScalarVector((0 .. (MN * 2)).map(|_| random_scalar(&mut *rng)).collect::<Vec<_>>()).split();
let rho = random_scalar(&mut *rng);
let S = (vector_exponent(&sL, &sR) + (EdwardsPoint::generator() * rho)) * *INV_EIGHT;
let y = hash_cache(&mut cache, &[A.compress().to_bytes(), S.compress().to_bytes()]);
let mut cache = hash_to_scalar(&y.to_bytes());
let z = cache;
let l0 = &aL - z;
let l1 = sL;
let mut zero_twos = Vec::with_capacity(MN);
let zpow = ScalarVector::powers(z, M + 2);
for j in 0 .. M {
for i in 0 .. N {
zero_twos.push(zpow[j + 2] * TWO_N[i]);
}
}
let yMN = ScalarVector::powers(y, MN);
let r0 = (&(aR + z) * &yMN) + ScalarVector(zero_twos);
let r1 = yMN * sR;
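// Coefficients of t(x) = <l(x), r(x)>, where l(x) = l0 + (l1 * x) and r(x) = r0 + (r1 * x)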
let t1 = inner_product(&l0, &r1) + inner_product(&l1, &r0);
let t2 = inner_product(&l1, &r1);
let tau1 = random_scalar(&mut *rng);
let tau2 = random_scalar(&mut *rng);
let T1 = multiexp(&[(t1, *H), (tau1, EdwardsPoint::generator())]) * *INV_EIGHT;
let T2 = multiexp(&[(t2, *H), (tau2, EdwardsPoint::generator())]) * *INV_EIGHT;
let x =
hash_cache(&mut cache, &[z.to_bytes(), T1.compress().to_bytes(), T2.compress().to_bytes()]);
let mut taux = (tau2 * (x * x)) + (tau1 * x);
for i in 1 ..= sv.len() {
taux += zpow[i + 1] * gamma[i - 1];
}
let mu = (x * rho) + alpha;
let l = &l0 + &(l1 * x);
let r = &r0 + &(r1 * x);
let t = inner_product(&l, &r);
let x_ip = hash_cache(&mut cache, &[x.to_bytes(), taux.to_bytes(), mu.to_bytes(), t.to_bytes()]);
let mut a = l;
let mut b = r;
let yinv = y.invert().unwrap();
let yinvpow = ScalarVector::powers(yinv, MN);
let mut G_proof = G_i[.. a.len()].to_vec();
let mut H_proof = H_i[.. a.len()].to_vec();
H_proof.iter_mut().zip(yinvpow.0.iter()).for_each(|(this_H, yinvpow)| *this_H *= yinvpow);
let U = *H * x_ip;
let mut L = Vec::with_capacity(logMN);
let mut R = Vec::with_capacity(logMN);
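// Inner-product argument: each round halves a and b, folds the generators by the challenge w,
// and pushes one (L, R) pair, until the vectors are reduced to single elements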
while a.len() != 1 {
let (aL, aR) = a.split();
let (bL, bR) = b.split();
let cL = inner_product(&aL, &bR);
let cR = inner_product(&aR, &bL);
let (G_L, G_R) = G_proof.split_at(aL.len());
let (H_L, H_R) = H_proof.split_at(aL.len());
let mut L_i_s = aL
.0
.iter()
.cloned()
.zip(G_R.iter().cloned())
.chain(bR.0.iter().cloned().zip(H_L.iter().cloned()))
.collect::<Vec<_>>();
L_i_s.push((cL, U));
let L_i = multiexp(&L_i_s) * *INV_EIGHT;
let mut R_i_s = aR
.0
.iter()
.cloned()
.zip(G_L.iter().cloned())
.chain(bL.0.iter().cloned().zip(H_R.iter().cloned()))
.collect::<Vec<_>>();
R_i_s.push((cR, U));
let R_i = multiexp(&R_i_s) * *INV_EIGHT;
L.push(L_i);
R.push(R_i);
let w = hash_cache(&mut cache, &[L_i.compress().to_bytes(), R_i.compress().to_bytes()]);
let winv = w.invert().unwrap();
a = (aL * w) + (aR * winv);
b = (bL * winv) + (bR * w);
if a.len() != 1 {
G_proof = hadamard_fold(G_L, G_R, winv, w);
H_proof = hadamard_fold(H_L, H_R, w, winv);
}
}
Bulletproofs {
A: *A,
S: *S,
T1: *T1,
T2: *T2,
taux: *taux,
mu: *mu,
L: L.drain(..).map(|L| *L).collect(),
R: R.drain(..).map(|R| *R).collect(),
a: *a[0],
b: *b[0],
t: *t,
}
}


@@ -6,7 +6,13 @@ use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};
use crate::{Commitment, wallet::TransactionError, serialize::*};
pub(crate) const MAX_OUTPUTS: usize = 16;
pub(crate) mod scalar_vector;
mod core;
pub(crate) use self::core::MAX_M;
use self::core::prove;
pub(crate) const MAX_OUTPUTS: usize = MAX_M;
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Bulletproofs {
@@ -45,44 +51,7 @@ impl Bulletproofs {
if outputs.len() > MAX_OUTPUTS {
return Err(TransactionError::TooManyOutputs)?;
}
let mut seed = [0; 32];
rng.fill_bytes(&mut seed);
let masks = outputs.iter().map(|commitment| commitment.mask.to_bytes()).collect::<Vec<_>>();
let amounts = outputs.iter().map(|commitment| commitment.amount).collect::<Vec<_>>();
let res;
unsafe {
#[link(name = "wrapper")]
extern "C" {
fn free(ptr: *const u8);
fn c_generate_bp(
seed: *const u8,
len: u8,
amounts: *const u64,
masks: *const [u8; 32],
) -> *const u8;
}
let ptr = c_generate_bp(
seed.as_ptr(),
u8::try_from(outputs.len()).unwrap(),
amounts.as_ptr(),
masks.as_ptr(),
);
let mut len = 6 * 32;
len += (2 * (1 + (usize::from(ptr.add(len).read()) * 32))) + (3 * 32);
res = Bulletproofs::deserialize(
// Wrap in a cursor to provide a mutable Reader
&mut std::io::Cursor::new(std::slice::from_raw_parts(ptr, len)),
)
.expect("Couldn't deserialize Bulletproofs from Monero");
free(ptr);
};
Ok(res)
Ok(prove(rng, outputs))
}
#[must_use]


@@ -0,0 +1,106 @@
use core::ops::{Add, Sub, Mul, Index};
use group::ff::Field;
use dalek_ff_group::{Scalar, EdwardsPoint};
use multiexp::multiexp;
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) struct ScalarVector(pub(crate) Vec<Scalar>);
macro_rules! math_op {
($Op: ident, $op: ident, $f: expr) => {
impl $Op<Scalar> for ScalarVector {
type Output = ScalarVector;
fn $op(self, b: Scalar) -> ScalarVector {
ScalarVector(self.0.iter().map(|a| $f((a, &b))).collect())
}
}
impl $Op<Scalar> for &ScalarVector {
type Output = ScalarVector;
fn $op(self, b: Scalar) -> ScalarVector {
ScalarVector(self.0.iter().map(|a| $f((a, &b))).collect())
}
}
impl $Op<ScalarVector> for ScalarVector {
type Output = ScalarVector;
fn $op(self, b: ScalarVector) -> ScalarVector {
assert_eq!(self.len(), b.len());
ScalarVector(self.0.iter().zip(b.0.iter()).map($f).collect())
}
}
impl $Op<&ScalarVector> for &ScalarVector {
type Output = ScalarVector;
fn $op(self, b: &ScalarVector) -> ScalarVector {
assert_eq!(self.len(), b.len());
ScalarVector(self.0.iter().zip(b.0.iter()).map($f).collect())
}
}
};
}
math_op!(Add, add, |(a, b): (&Scalar, &Scalar)| *a + *b);
math_op!(Sub, sub, |(a, b): (&Scalar, &Scalar)| *a - *b);
math_op!(Mul, mul, |(a, b): (&Scalar, &Scalar)| *a * *b);
impl ScalarVector {
pub(crate) fn new(len: usize) -> ScalarVector {
ScalarVector(vec![Scalar::zero(); len])
}
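// [x^0, x^1, ..., x^(len - 1)]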
pub(crate) fn powers(x: Scalar, len: usize) -> ScalarVector {
let mut res = Vec::with_capacity(len);
if len == 0 {
return ScalarVector(res);
}
res.push(Scalar::one());
for i in 1 .. len {
res.push(res[i - 1] * x);
}
ScalarVector(res)
}
pub(crate) fn len(&self) -> usize {
self.0.len()
}
pub(crate) fn split(self) -> (ScalarVector, ScalarVector) {
let (l, r) = self.0.split_at(self.0.len() / 2);
(ScalarVector(l.to_vec()), ScalarVector(r.to_vec()))
}
}
impl Index<usize> for ScalarVector {
type Output = Scalar;
fn index(&self, index: usize) -> &Scalar {
&self.0[index]
}
}
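// <a, b> = sum(a[i] * b[i])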
pub(crate) fn inner_product(a: &ScalarVector, b: &ScalarVector) -> Scalar {
(a * b).0.drain(..).sum()
}
impl Mul<&[EdwardsPoint]> for &ScalarVector {
type Output = EdwardsPoint;
fn mul(self, b: &[EdwardsPoint]) -> EdwardsPoint {
assert_eq!(self.len(), b.len());
multiexp(&self.0.iter().cloned().zip(b.iter().cloned()).collect::<Vec<_>>())
}
}
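// Fold two halves of a point vector into one: res[i] = (a * l[i]) + (b * r[i])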
pub(crate) fn hadamard_fold(
l: &[EdwardsPoint],
r: &[EdwardsPoint],
a: Scalar,
b: Scalar,
) -> Vec<EdwardsPoint> {
let mut res = Vec::with_capacity(l.len() / 2);
for i in 0 .. l.len() {
res.push(multiexp(&[(a, l[i]), (b, r[i])]));
}
res
}


@@ -7,12 +7,11 @@ use dalek_ff_group::field::FieldElement;
use crate::hash;
pub fn hash_to_point(point: EdwardsPoint) -> EdwardsPoint {
let mut bytes = point.compress().to_bytes();
pub(crate) fn raw_hash_to_point(mut bytes: [u8; 32]) -> EdwardsPoint {
unsafe {
#[link(name = "wrapper")]
extern "C" {
fn c_hash_to_point(point: *const u8);
fn c_hash_to_point(key: *const u8);
}
c_hash_to_point(bytes.as_mut_ptr());
@@ -24,11 +23,11 @@ pub fn hash_to_point(point: EdwardsPoint) -> EdwardsPoint {
// for all branches, there still could be *some* discrepancy somewhere. There's no reason to use it
// unless we're trying to purge that section of the C static library, which we aren't right now
#[allow(dead_code)]
pub(crate) fn rust_hash_to_point(key: EdwardsPoint) -> EdwardsPoint {
pub(crate) fn rust_hash_to_point(bytes: [u8; 32]) -> EdwardsPoint {
#[allow(non_snake_case)]
let A = FieldElement::from(486662u64);
let v = FieldElement::from_square(hash(&key.compress().to_bytes())).double();
let v = FieldElement::from_square(hash(&bytes)).double();
let w = v + FieldElement::one();
let x = w.square() + (-A.square() * v);
@@ -65,3 +64,7 @@ pub(crate) fn rust_hash_to_point(key: EdwardsPoint) -> EdwardsPoint {
CompressedEdwardsY(bytes).decompress().unwrap().mul_by_cofactor()
}
pub fn hash_to_point(key: EdwardsPoint) -> EdwardsPoint {
raw_hash_to_point(key.compress().to_bytes())
}


@@ -11,6 +11,6 @@ use crate::{
fn hash_to_point() {
for _ in 0 .. 50 {
let point = &random_scalar(&mut OsRng) * &ED25519_BASEPOINT_TABLE;
assert_eq!(rust_hash_to_point(point), c_hash_to_point(point));
assert_eq!(rust_hash_to_point(point.compress().to_bytes()), c_hash_to_point(point));
}
}


@@ -264,6 +264,12 @@ impl PrimeFieldBits for Scalar {
}
}
impl Sum<Scalar> for Scalar {
fn sum<I: Iterator<Item = Scalar>>(iter: I) -> Scalar {
Self(DScalar::sum(iter))
}
}
macro_rules! dalek_group {
(
$Point: ident,