Misc continuances on the Monero processor

This commit is contained in:
Luke Parker 2024-09-13 00:48:57 -04:00
parent 02409c5735
commit 2c4c33e632
9 changed files with 46 additions and 126 deletions

2
Cargo.lock generated
View file

@@ -5105,7 +5105,6 @@ dependencies = [
"hex",
"modular-frost",
"monero-address",
"monero-clsag",
"monero-rpc",
"monero-serai",
"monero-simple-request-rpc",
@@ -8524,6 +8523,7 @@ dependencies = [
"hex",
"log",
"modular-frost",
"monero-simple-request-rpc",
"monero-wallet",
"parity-scale-codec",
"rand_core",

View file

@@ -21,7 +21,9 @@ pub(crate) struct Rpc<D: Db> {
#[async_trait::async_trait]
impl<D: Db> ScannerFeed for Rpc<D> {
const NETWORK: NetworkId = NetworkId::Bitcoin;
// 6 confirmations is widely accepted as secure and shouldn't occur
const CONFIRMATIONS: u64 = 6;
// The window length should be roughly an hour
const WINDOW_LENGTH: u64 = 6;
const TEN_MINUTES: u64 = 1;

View file

@@ -43,7 +43,11 @@ pub trait KeyGenParams {
>;
/// Tweaks keys as necessary/beneficial.
fn tweak_keys(keys: &mut ThresholdKeys<Self::ExternalNetworkCiphersuite>);
///
/// A default implementation which doesn't perform any tweaking is provided.
fn tweak_keys(keys: &mut ThresholdKeys<Self::ExternalNetworkCiphersuite>) {
let _ = keys;
}
/// Encode keys as optimal.
///

View file

@@ -31,6 +31,7 @@ dkg = { path = "../../crypto/dkg", default-features = false, features = ["std",
frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false }
monero-wallet = { path = "../../networks/monero/wallet", default-features = false, features = ["std", "multisig"] }
monero-simple-request-rpc = { path = "../../networks/monero/rpc/simple-request", default-features = false }
serai-client = { path = "../../substrate/client", default-features = false, features = ["monero"] }

View file

@@ -1,11 +1,8 @@
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ed25519};
use frost::ThresholdKeys;
use ciphersuite::Ed25519;
pub(crate) struct KeyGenParams;
impl key_gen::KeyGenParams for KeyGenParams {
const ID: &'static str = "Monero";
type ExternalNetworkCiphersuite = Ed25519;
fn tweak_keys(keys: &mut ThresholdKeys<Self::ExternalNetworkCiphersuite>) {}
}

View file

@@ -11,11 +11,11 @@ use monero_wallet::rpc::Rpc as MRpc;
mod primitives;
pub(crate) use crate::primitives::*;
/*
mod key_gen;
use crate::key_gen::KeyGenParams;
mod rpc;
use rpc::Rpc;
/*
mod scheduler;
use scheduler::Scheduler;

View file

@@ -1,81 +1,43 @@
use bitcoin_serai::rpc::{RpcError, Rpc as BRpc};
use monero_wallet::rpc::{RpcError, Rpc as RpcTrait};
use monero_simple_request_rpc::SimpleRequestRpc;
use serai_client::primitives::{NetworkId, Coin, Amount};
use serai_db::Db;
use scanner::ScannerFeed;
use signers::TransactionPublisher;
use crate::{
db,
transaction::Transaction,
block::{BlockHeader, Block},
};
#[derive(Clone)]
pub(crate) struct Rpc<D: Db> {
pub(crate) db: D,
pub(crate) rpc: BRpc,
pub(crate) struct Rpc {
pub(crate) rpc: SimpleRequestRpc,
}
#[async_trait::async_trait]
impl<D: Db> ScannerFeed for Rpc<D> {
const NETWORK: NetworkId = NetworkId::Bitcoin;
const CONFIRMATIONS: u64 = 6;
const WINDOW_LENGTH: u64 = 6;
impl ScannerFeed for Rpc {
const NETWORK: NetworkId = NetworkId::Monero;
// Outputs aren't spendable until 10 blocks later due to the 10-block lock
// Since we assumed scanned outputs are spendable, that sets a minimum confirmation depth of 10
// A 10-block reorganization hasn't been observed in years and shouldn't occur
const CONFIRMATIONS: u64 = 10;
// The window length should be roughly an hour
const WINDOW_LENGTH: u64 = 30;
const TEN_MINUTES: u64 = 1;
const TEN_MINUTES: u64 = 5;
type Block = Block<D>;
type Block = Block;
type EphemeralError = RpcError;
async fn latest_finalized_block_number(&self) -> Result<u64, Self::EphemeralError> {
db::LatestBlockToYieldAsFinalized::get(&self.db).ok_or(RpcError::ConnectionError)
Ok(self.rpc.get_height().await?.checked_sub(1).expect("connected to an invalid Monero RPC").try_into().unwrap())
}
async fn time_of_block(&self, number: u64) -> Result<u64, Self::EphemeralError> {
let number = usize::try_from(number).unwrap();
/*
The block time isn't guaranteed to be monotonic. It is guaranteed to be greater than the
median time of prior blocks, as detailed in BIP-0113 (a BIP which used that fact to improve
CLTV). This creates a monotonic median time which we use as the block time.
*/
// This implements `GetMedianTimePast`
let median = {
const MEDIAN_TIMESPAN: usize = 11;
let mut timestamps = Vec::with_capacity(MEDIAN_TIMESPAN);
for i in number.saturating_sub(MEDIAN_TIMESPAN) .. number {
timestamps.push(self.rpc.get_block(&self.rpc.get_block_hash(i).await?).await?.header.time);
}
timestamps.sort();
timestamps[timestamps.len() / 2]
};
/*
This block's timestamp is guaranteed to be greater than this median:
https://github.com/bitcoin/bitcoin/blob/0725a374941355349bb4bc8a79dad1affb27d3b9
/src/validation.cpp#L4182-L4184
This does not guarantee the median always increases however. Take the following trivial
example, as the window is initially built:
0 block has time 0 // Prior blocks: []
1 block has time 1 // Prior blocks: [0]
2 block has time 2 // Prior blocks: [0, 1]
3 block has time 2 // Prior blocks: [0, 1, 2]
These two blocks have the same time (both greater than the median of their prior blocks) and
the same median.
The median will never decrease however. The values pushed onto the window will always be
greater than the median. If a value greater than the median is popped, the median will remain
the same (due to the counterbalance of the pushed value). If a value less than the median is
popped, the median will increase (either to another instance of the same value, yet one
closer to the end of the repeating sequence, or to a higher value).
*/
Ok(median.into())
todo!("TODO")
}
async fn unchecked_block_header_by_number(
@@ -83,7 +45,7 @@ impl<D: Db> ScannerFeed for Rpc<D> {
number: u64,
) -> Result<<Self::Block as primitives::Block>::Header, Self::EphemeralError> {
Ok(BlockHeader(
self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?.header,
self.rpc.get_block_by_number(number.try_into().unwrap()).await?
))
}
@@ -91,48 +53,13 @@ impl<D: Db> ScannerFeed for Rpc<D> {
&self,
number: u64,
) -> Result<Self::Block, Self::EphemeralError> {
Ok(Block(
self.db.clone(),
self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?,
))
todo!("TODO")
}
fn dust(coin: Coin) -> Amount {
assert_eq!(coin, Coin::Bitcoin);
assert_eq!(coin, Coin::Monero);
/*
A Taproot input is:
- 36 bytes for the OutPoint
- 0 bytes for the script (+1 byte for the length)
- 4 bytes for the sequence
Per https://developer.bitcoin.org/reference/transactions.html#raw-transaction-format
There's also:
- 1 byte for the witness length
- 1 byte for the signature length
- 64 bytes for the signature
which have the SegWit discount.
(4 * (36 + 1 + 4)) + (1 + 1 + 64) = 164 + 66 = 230 weight units
230 ceil div 4 = 57 vbytes
Bitcoin defines multiple minimum feerate constants *per kilo-vbyte*. Currently, these are:
- 1000 sat/kilo-vbyte for a transaction to be relayed
- Each output's value must exceed the fee of the TX spending it at 3000 sat/kilo-vbyte
The DUST constant needs to be determined by the latter.
Since these are solely relay rules, and may be raised, we require all outputs be spendable
under a 5000 sat/kilo-vbyte fee rate.
5000 sat/kilo-vbyte = 5 sat/vbyte
5 * 57 = 285 sats/spent-output
Even if an output took 100 bytes (it should be just ~29-43), taking 400 weight units, adding
100 vbytes, tripling the transaction size, then the sats/tx would be < 1000.
Increase by an order of magnitude, in order to ensure this is actually worth our time, and we
get 10,000 satoshis. This is $5 if 1 BTC = 50,000 USD.
*/
Amount(10_000)
todo!("TODO")
}
async fn cost_to_aggregate(
@@ -147,10 +74,10 @@ impl<D: Db> ScannerFeed for Rpc<D> {
}
#[async_trait::async_trait]
impl<D: Db> TransactionPublisher<Transaction> for Rpc<D> {
impl TransactionPublisher<Transaction> for Rpc {
type EphemeralError = RpcError;
async fn publish(&self, tx: Transaction) -> Result<(), Self::EphemeralError> {
self.rpc.send_raw_transaction(&tx.0).await.map(|_| ())
self.rpc.publish_transaction(&tx.0).await
}
}

View file

@@ -14,7 +14,6 @@ use serai_db::Db;
use primitives::{OutputType, ReceivedOutput, Payment};
use scanner::{KeyFor, AddressFor, OutputFor, BlockFor};
use utxo_scheduler::{PlannedTransaction, TransactionPlanner};
use transaction_chaining_scheduler::{EffectedReceivedOutputs, Scheduler as GenericScheduler};
use crate::{
scan::{offsets_for_key, scanner},
@@ -40,11 +39,11 @@ fn signable_transaction<D: Db>(
) -> Result<(SignableTransaction, BSignableTransaction), TransactionError> {
assert!(
inputs.len() <
<Planner as TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>>>::MAX_INPUTS
<Planner as TransactionPlanner<Rpc<D>, ()>>::MAX_INPUTS
);
assert!(
(payments.len() + usize::from(u8::from(change.is_some()))) <
<Planner as TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>>>::MAX_OUTPUTS
<Planner as TransactionPlanner<Rpc<D>, ()>>::MAX_OUTPUTS
);
let inputs = inputs.into_iter().map(|input| input.output).collect::<Vec<_>>();
@@ -73,7 +72,7 @@ fn signable_transaction<D: Db>(
));
let change = change
.map(<Planner as TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>>>::change_address);
.map(<Planner as TransactionPlanner<Rpc<D>, ()>>::change_address);
BSignableTransaction::new(
inputs.clone(),
@@ -90,7 +89,7 @@ fn signable_transaction<D: Db>(
}
pub(crate) struct Planner;
impl<D: Db> TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>> for Planner {
impl TransactionPlanner<Rpc, ()> for Planner {
type FeeRate = u64;
type SignableTransaction = SignableTransaction;
@@ -157,7 +156,7 @@ impl<D: Db> TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>> for Plan
inputs: Vec<OutputFor<Rpc<D>>>,
payments: Vec<Payment<AddressFor<Rpc<D>>>>,
change: Option<KeyFor<Rpc<D>>>,
) -> PlannedTransaction<Rpc<D>, Self::SignableTransaction, EffectedReceivedOutputs<Rpc<D>>> {
) -> PlannedTransaction<Rpc<D>, Self::SignableTransaction, ()> {
let key = inputs.first().unwrap().key();
for input in &inputs {
assert_eq!(key, input.key());
@@ -168,23 +167,7 @@ impl<D: Db> TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>> for Plan
Ok(tx) => PlannedTransaction {
signable: tx.0,
eventuality: Eventuality { txid: tx.1.txid(), singular_spent_output },
auxilliary: EffectedReceivedOutputs({
let tx = tx.1.transaction();
let scanner = scanner(key);
let mut res = vec![];
for output in scanner.scan_transaction(tx) {
res.push(Output::new_with_presumed_origin(
key,
tx,
// It shouldn't matter if this is wrong as we should never try to return these
// We still provide an accurate value to ensure a lack of discrepancies
Some(Address::new(inputs[0].output.output().script_pubkey.clone()).unwrap()),
output,
));
}
res
}),
auxilliary: (),
},
Err(
TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment,
@@ -202,4 +185,4 @@ impl<D: Db> TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>> for Plan
}
}
pub(crate) type Scheduler<D> = GenericScheduler<Rpc<D>, Planner>;
pub(crate) type Scheduler = utxo_standard_scheduler::Scheduler<Rpc, Planner>;

View file

@@ -67,6 +67,12 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone {
/// The amount of confirmations a block must have to be considered finalized.
///
/// This value must be at least `1`.
// This is distinct from `WINDOW_LENGTH` as it's only used for determining the lifetime of the
// key. The key switches to various stages of its lifetime depending on when user transactions
// will hit the Serai network (relative to the time they're made) and when outputs created by
// Serai become available again. If we set a long WINDOW_LENGTH, say two hours, that doesn't mean
// we expect user transactions made within a few minutes of a new key being declared to only
// appear in finalized blocks two hours later.
const CONFIRMATIONS: u64;
/// The amount of blocks to process in parallel.