use std::{sync::Mutex, collections::HashSet};

use lazy_static::lazy_static;

use rand_core::{RngCore, CryptoRng};
use rand_distr::{Distribution, Gamma};
use zeroize::{Zeroize, ZeroizeOnDrop};

use curve25519_dalek::edwards::EdwardsPoint;

use crate::{
  wallet::SpendableOutput,
  rpc::{RpcError, Rpc},
};
// Outputs aren't spendable until they have 10 confirmations
const LOCK_WINDOW: usize = 10;
// Coinbase outputs are additionally locked for 60 blocks
const MATURITY: u64 = 60;
// Window of recent blocks sampled from uniformly when the gamma-selected age lands within the
// locked tip
const RECENT_WINDOW: usize = 15;
// Monero's target block time, in seconds
const BLOCK_TIME: usize = 120;
const BLOCKS_PER_YEAR: usize = 365 * 24 * 60 * 60 / BLOCK_TIME;
// The lock window expressed in seconds
const TIP_APPLICATION: f64 = (LOCK_WINDOW * BLOCK_TIME) as f64;

lazy_static! {
  static ref GAMMA: Gamma<f64> = Gamma::new(19.28, 1.0 / 1.61).unwrap();
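  // Gamma distribution over the natural log of an output's age in seconds, using the shape
  // (19.28) and rate (1.61) parameters Monero's own wallet uses for decoy selection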
  // TODO: Expose an API to reset this in case a reorg occurs/the RPC fails/returns garbage
  // TODO: This is not currently thread-safe. This needs to be a tokio Mutex held by select until
  // it returns
  // TODO: Update this when scanning a block, as possible.
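  // Cumulative output counts: DISTRIBUTION[h] is the running total of outputs created through
  // block h, as fetched via rpc.get_output_distribution below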
  static ref DISTRIBUTION: Mutex<Vec<u64>> = Mutex::new(Vec::with_capacity(3000000));
}

#[allow(clippy::too_many_arguments)]
async fn select_n<R: RngCore + CryptoRng>(
  rng: &mut R,
  rpc: &Rpc,
  height: usize,
  high: u64,
  per_second: f64,
  real: &[u64],
  used: &mut HashSet<u64>,
  count: usize,
) -> Result<Vec<(u64, [EdwardsPoint; 2])>, RpcError> {
  let mut iters = 0;
  let mut confirmed = Vec::with_capacity(count);
  // Retries on failure. Retries are obvious as decoys, yet should be minimal
  while confirmed.len() != count {
    let remaining = count - confirmed.len();
    let mut candidates = Vec::with_capacity(remaining);
    while candidates.len() != remaining {
      iters += 1;
      // This is cheap, and on fresh chains thousands of rounds may be needed
      if iters == 10000 {
        Err(RpcError::InternalError("not enough decoy candidates"))?;
      }

      // Use a gamma distribution
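      // The sampled value is ln(age in seconds), hence the exp() below; with shape 19.28 and
      // rate 1.61, the mean ln-age is roughly 19.28 / 1.61 ≈ 12, for a typical age around
      // e^12 ≈ 162,754 seconds, just under two days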
      let mut age = GAMMA.sample(rng).exp();
      if age > TIP_APPLICATION {
        age -= TIP_APPLICATION;
      } else {
        // f64 does not have try_from available, which is why these are written with `as`
        age = (rng.next_u64() % u64::try_from(RECENT_WINDOW * BLOCK_TIME).unwrap()) as f64;
      }
      let o = (age * per_second) as u64;
      if o < high {
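        // Worked example with illustrative numbers: given a cumulative DISTRIBUTION of
        // [0, 2, 5, 9], high is 9. Sampling o = 3 outputs back from the tip targets cumulative
        // position high - 1 - o = 5; partition_point returns i = 2 (the first block whose
        // cumulative count reaches 5), prev = 1, and n = 5 - 2 = 3, so the candidate is drawn
        // uniformly from that block's three outputs: 2 + (0 .. 3) = {2, 3, 4}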
        let distribution = DISTRIBUTION.lock().unwrap();
        let i = distribution.partition_point(|s| *s < (high - 1 - o));
        let prev = i.saturating_sub(1);
        let n = distribution[i] - distribution[prev];
        if n != 0 {
          let o = distribution[prev] + (rng.next_u64() % n);
          if !used.contains(&o) {
            // It will either actually be used, or is unusable and this prevents trying it again
            used.insert(o);
            candidates.push(o);
          }
        }
      }
    }
    // If this is the first time we're requesting these outputs, include the real one as well
    // Prevents the node we're connected to from having a list of known decoys and then seeing a
    // TX which uses all of them, with one additional output (the true spend)
    let mut real_indexes = HashSet::with_capacity(real.len());
    if confirmed.is_empty() {
      for real in real {
        candidates.push(*real);
      }
      // Sort candidates so the real spends aren't the ones at the end
      candidates.sort();
      for real in real {
        real_indexes.insert(candidates.binary_search(real).unwrap());
      }
    }
    for (i, output) in rpc.get_unlocked_outputs(&candidates, height).await?.iter_mut().enumerate() {
      // Don't include the real spend as a decoy, despite requesting it
      if real_indexes.contains(&i) {
        continue;
      }

      if let Some(output) = output.take() {
        confirmed.push((candidates[i], output));
      }
    }
  }

  Ok(confirmed)
}

// Convert a sorted list of absolute output indexes into Monero's relative-offset encoding
fn offset(ring: &[u64]) -> Vec<u64> {
  let mut res = vec![ring[0]];
  res.resize(ring.len(), 0);
  for m in (1 .. ring.len()).rev() {
    res[m] = ring[m] - ring[m - 1];
  }
  res
}
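// An illustrative test of the relative-offset encoding above; the module name and the sample
// values are hypothetical, not taken from the original codebase
#[cfg(test)]
mod offset_tests {
  use super::offset;

  #[test]
  fn offsets_are_relative_to_the_prior_member() {
    // The first entry stays absolute; every later entry is the difference from its predecessor
    assert_eq!(offset(&[10, 15, 16, 20]), vec![10, 5, 1, 4]);
    // A single-member ring is just its absolute index
    assert_eq!(offset(&[42]), vec![42]);
  }
}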
/// Decoy data, containing the actual member as well (at index `i`).
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
pub struct Decoys {
  /// Index of the real spend within the (sorted) ring.
  pub i: u8,
  /// Ring member output indexes in Monero's relative-offset encoding.
  pub offsets: Vec<u64>,
  /// The ring itself: a (key, commitment) pair for each member.
  pub ring: Vec<[EdwardsPoint; 2]>,
}
impl Decoys {
  pub fn len(&self) -> usize {
    self.offsets.len()
  }

  /// Select decoys using the same distribution as Monero.
  pub async fn select<R: RngCore + CryptoRng>(
    rng: &mut R,
    rpc: &Rpc,
    ring_len: usize,
    height: usize,
    inputs: &[SpendableOutput],
  ) -> Result<Vec<Decoys>, RpcError> {
    let decoy_count = ring_len - 1;

    // Convert the inputs in question to the raw output data
    let mut real = Vec::with_capacity(inputs.len());
    let mut outputs = Vec::with_capacity(inputs.len());
    for input in inputs {
      real.push(input.global_index);
      outputs.push((real[real.len() - 1], [input.key(), input.commitment().calculate()]));
    }
    // Extend the cached distribution if it doesn't yet cover the requested height
    let distribution_len = {
      let distribution = DISTRIBUTION.lock().unwrap();
      distribution.len()
    };
    if distribution_len <= height {
      let extension = rpc.get_output_distribution(distribution_len, height).await?;
      DISTRIBUTION.lock().unwrap().extend(extension);
    }
    let high;
    let per_second;
    {
      let mut distribution = DISTRIBUTION.lock().unwrap();
      // If asked to use an older height than previously asked, truncate to ensure accuracy
      // Should never happen, yet risks desyncing if it did
      distribution.truncate(height + 1); // height is inclusive, and 0 is a valid height

      high = distribution[distribution.len() - 1];
      per_second = {
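        // Average output creation rate over (at most) the last year of blocks, in outputs per
        // second; passed to select_n to convert a sampled age into an output offset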
        let blocks = distribution.len().min(BLOCKS_PER_YEAR);
        let outputs = high - distribution[distribution.len().saturating_sub(blocks + 1)];
        (outputs as f64) / ((blocks * BLOCK_TIME) as f64)
      };
    };
    let mut used = HashSet::<u64>::new();
    for o in &outputs {
      used.insert(o.0);
    }

    // TODO: Simply create a TX with less than the target amount
    if (high - MATURITY) < u64::try_from(inputs.len() * ring_len).unwrap() {
      Err(RpcError::InternalError("not enough decoy candidates"))?;
    }
    // Select all decoys for this transaction, assuming we generate a sane transaction
    // We should almost never naturally generate an insane transaction, hence why this doesn't
    // bother with an overage
    let mut decoys =
      select_n(rng, rpc, height, high, per_second, &real, &mut used, inputs.len() * decoy_count)
        .await?;
    real.zeroize();
    let mut res = Vec::with_capacity(inputs.len());
    for o in outputs {
      // Grab the decoys for this specific output
      let mut ring = decoys.drain((decoys.len() - decoy_count) ..).collect::<Vec<_>>();
      ring.push(o);
      ring.sort_by(|a, b| a.0.cmp(&b.0));
      // Monero only runs this sanity check when 1000 outputs are available; we run it whenever
      // the highest output index we acknowledge is > 500. This assumes (for presumably test
      // blockchains) that a chain which hasn't yet had 500 outputs isn't a sufficiently mature
      // blockchain. Since Monero's p2p layer doesn't actually check transaction sanity, it
      // should be fine for our rules to not match perfectly, especially since this code would
      // loop forever if it couldn't determine sanity, which is possible with sufficient inputs
      // on sufficiently small chains
      if high > 500 {
        // Make sure the TX passes the sanity check that the median output is within the last 40%
        let target_median = high * 3 / 5;
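        // e.g. if high were 1_000_000, target_median would be 600_000, so the middle ring
        // member must come from the newest 40% of all outputs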
        while ring[ring_len / 2].0 < target_median {
          // If it's not, update the bottom half with new values to ensure the median only moves up
          for removed in ring.drain(0 .. (ring_len / 2)).collect::<Vec<_>>() {
            // If we removed the real spend, add it back
            if removed.0 == o.0 {
              ring.push(o);
            } else {
              // We could leave this output in `used`, saving CPU time and removing low values as
              // candidates, yet that'd increase the amount of decoys required to create this
              // transaction, and some of the removed outputs may be the best options available
              // (as we drop the entire first half, not just the bottom n)
              used.remove(&removed.0);
            }
          }
          // Select new outputs until we have a full sized ring again
          ring.extend(
            select_n(rng, rpc, height, high, per_second, &[], &mut used, ring_len - ring.len())
              .await?,
          );
          ring.sort_by(|a, b| a.0.cmp(&b.0));
        }

        // The other sanity check rule is about duplicates, yet we already enforce unique ring
        // members
      }
      res.push(Decoys {
        // Binary searches for the real spend since we don't know where it sorted to
        i: u8::try_from(ring.partition_point(|x| x.0 < o.0)).unwrap(),
        offsets: offset(&ring.iter().map(|output| output.0).collect::<Vec<_>>()),
        ring: ring.iter().map(|output| output.1).collect(),
      });
    }
    Ok(res)
  }
}
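// Illustrative usage sketch; `rng`, `rpc`, `height`, `inputs`, and the ring length of 16 are all
// hypothetical values, not defined by this module:
//
//   let decoys = Decoys::select(&mut rng, &rpc, 16, height, &inputs).await?;
//   assert_eq!(decoys.len(), inputs.len());
//
// Each returned `Decoys` carries the relative `offsets` and `ring` members for one input, plus
// the index `i` of the real spend within that ring.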