Misc continuances on the Monero processor

parent 02409c5735
commit 2c4c33e632

9 changed files with 46 additions and 126 deletions
Cargo.lock (generated, 2 changes)

@@ -5105,7 +5105,6 @@ dependencies = [
  "hex",
  "modular-frost",
  "monero-address",
- "monero-clsag",
  "monero-rpc",
  "monero-serai",
  "monero-simple-request-rpc",
@@ -8524,6 +8523,7 @@ dependencies = [
  "hex",
  "log",
  "modular-frost",
+ "monero-simple-request-rpc",
  "monero-wallet",
  "parity-scale-codec",
  "rand_core",
@@ -21,7 +21,9 @@ pub(crate) struct Rpc<D: Db> {
 #[async_trait::async_trait]
 impl<D: Db> ScannerFeed for Rpc<D> {
   const NETWORK: NetworkId = NetworkId::Bitcoin;
+  // 6 confirmations is widely accepted as secure and shouldn't occur
   const CONFIRMATIONS: u64 = 6;
+  // The window length should be roughly an hour
   const WINDOW_LENGTH: u64 = 6;

   const TEN_MINUTES: u64 = 1;
@@ -43,7 +43,11 @@ pub trait KeyGenParams {
   >;

   /// Tweaks keys as necessary/beneficial.
-  fn tweak_keys(keys: &mut ThresholdKeys<Self::ExternalNetworkCiphersuite>);
+  ///
+  /// A default implementation which doesn't perform any tweaking is provided.
+  fn tweak_keys(keys: &mut ThresholdKeys<Self::ExternalNetworkCiphersuite>) {
+    let _ = keys;
+  }

   /// Encode keys as optimal.
   ///
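Since the trait now provides a default, implementors which don't tweak their keys can simply omit the method, as the Monero key_gen.rs change later in this commit does. A minimal, self-contained sketch of the pattern, using a hypothetical stand-in type rather than the actual serai trait:

// Sketch only: `KeyGenParamsSketch` and its `Keys` type are illustrative stand-ins for the
// real trait and for ThresholdKeys<Self::ExternalNetworkCiphersuite>.
trait KeyGenParamsSketch {
  type Keys;

  /// Tweaks keys as necessary/beneficial.
  ///
  /// A default implementation which doesn't perform any tweaking is provided.
  fn tweak_keys(keys: &mut Self::Keys) {
    // `let _ = keys;` silences the unused-parameter warning while keeping a descriptive name.
    let _ = keys;
  }
}

// A network without any tweak inherits the default and defines nothing.
struct NoTweaks;
impl KeyGenParamsSketch for NoTweaks {
  type Keys = [u8; 32];
}

fn main() {
  let mut keys = [0u8; 32];
  <NoTweaks as KeyGenParamsSketch>::tweak_keys(&mut keys);
  assert_eq!(keys, [0u8; 32]); // the default leaves the keys untouched
}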
@@ -31,6 +31,7 @@ dkg = { path = "../../crypto/dkg", default-features = false, features = ["std",
 frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false }

 monero-wallet = { path = "../../networks/monero/wallet", default-features = false, features = ["std", "multisig"] }
+monero-simple-request-rpc = { path = "../../networks/monero/rpc/simple-request", default-features = false }

 serai-client = { path = "../../substrate/client", default-features = false, features = ["monero"] }

@@ -1,11 +1,8 @@
-use ciphersuite::{group::GroupEncoding, Ciphersuite, Ed25519};
-use frost::ThresholdKeys;
+use ciphersuite::Ed25519;

 pub(crate) struct KeyGenParams;
 impl key_gen::KeyGenParams for KeyGenParams {
   const ID: &'static str = "Monero";

   type ExternalNetworkCiphersuite = Ed25519;
-
-  fn tweak_keys(keys: &mut ThresholdKeys<Self::ExternalNetworkCiphersuite>) {}
 }
@@ -11,11 +11,11 @@ use monero_wallet::rpc::Rpc as MRpc;
 mod primitives;
 pub(crate) use crate::primitives::*;

-/*
 mod key_gen;
 use crate::key_gen::KeyGenParams;
 mod rpc;
 use rpc::Rpc;
+/*
 mod scheduler;
 use scheduler::Scheduler;

@@ -1,81 +1,43 @@
-use bitcoin_serai::rpc::{RpcError, Rpc as BRpc};
+use monero_wallet::rpc::{RpcError, Rpc as RpcTrait};
+use monero_simple_request_rpc::SimpleRequestRpc;

 use serai_client::primitives::{NetworkId, Coin, Amount};

-use serai_db::Db;
 use scanner::ScannerFeed;
 use signers::TransactionPublisher;

 use crate::{
-  db,
   transaction::Transaction,
   block::{BlockHeader, Block},
 };

 #[derive(Clone)]
-pub(crate) struct Rpc<D: Db> {
-  pub(crate) db: D,
-  pub(crate) rpc: BRpc,
+pub(crate) struct Rpc {
+  pub(crate) rpc: SimpleRequestRpc,
 }

 #[async_trait::async_trait]
-impl<D: Db> ScannerFeed for Rpc<D> {
-  const NETWORK: NetworkId = NetworkId::Bitcoin;
-  const CONFIRMATIONS: u64 = 6;
-  const WINDOW_LENGTH: u64 = 6;
+impl ScannerFeed for Rpc {
+  const NETWORK: NetworkId = NetworkId::Monero;
+  // Outputs aren't spendable until 10 blocks later due to the 10-block lock
+  // Since we assumed scanned outputs are spendable, that sets a minimum confirmation depth of 10
+  // A 10-block reorganization hasn't been observed in years and shouldn't occur
+  const CONFIRMATIONS: u64 = 10;
+  // The window length should be roughly an hour
+  const WINDOW_LENGTH: u64 = 30;

-  const TEN_MINUTES: u64 = 1;
+  const TEN_MINUTES: u64 = 5;

-  type Block = Block<D>;
+  type Block = Block;

   type EphemeralError = RpcError;

   async fn latest_finalized_block_number(&self) -> Result<u64, Self::EphemeralError> {
-    db::LatestBlockToYieldAsFinalized::get(&self.db).ok_or(RpcError::ConnectionError)
+    Ok(self.rpc.get_height().await?.checked_sub(1).expect("connected to an invalid Monero RPC").try_into().unwrap())
   }

   async fn time_of_block(&self, number: u64) -> Result<u64, Self::EphemeralError> {
-    let number = usize::try_from(number).unwrap();
-
-    /*
-      The block time isn't guaranteed to be monotonic. It is guaranteed to be greater than the
-      median time of prior blocks, as detailed in BIP-0113 (a BIP which used that fact to improve
-      CLTV). This creates a monotonic median time which we use as the block time.
-    */
-    // This implements `GetMedianTimePast`
-    let median = {
-      const MEDIAN_TIMESPAN: usize = 11;
-      let mut timestamps = Vec::with_capacity(MEDIAN_TIMESPAN);
-      for i in number.saturating_sub(MEDIAN_TIMESPAN) .. number {
-        timestamps.push(self.rpc.get_block(&self.rpc.get_block_hash(i).await?).await?.header.time);
-      }
-      timestamps.sort();
-      timestamps[timestamps.len() / 2]
-    };
-
-    /*
-      This block's timestamp is guaranteed to be greater than this median:
-      https://github.com/bitcoin/bitcoin/blob/0725a374941355349bb4bc8a79dad1affb27d3b9
-        /src/validation.cpp#L4182-L4184
-
-      This does not guarantee the median always increases however. Take the following trivial
-      example, as the window is initially built:
-
-      0 block has time 0 // Prior blocks: []
-      1 block has time 1 // Prior blocks: [0]
-      2 block has time 2 // Prior blocks: [0, 1]
-      3 block has time 2 // Prior blocks: [0, 1, 2]
-
-      These two blocks have the same time (both greater than the median of their prior blocks) and
-      the same median.
-
-      The median will never decrease however. The values pushed onto the window will always be
-      greater than the median. If a value greater than the median is popped, the median will remain
-      the same (due to the counterbalance of the pushed value). If a value less than the median is
-      popped, the median will increase (either to another instance of the same value, yet one
-      closer to the end of the repeating sequence, or to a higher value).
-    */
-    Ok(median.into())
+    todo!("TODO")
   }

   async fn unchecked_block_header_by_number(
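The comments above carry the reasoning behind the new constants: Monero's 10-block output lock dictates the confirmation depth, and the wall-clock windows are converted into block counts. Assuming Bitcoin's roughly ten-minute and Monero's roughly two-minute block targets (figures assumed here, not stated in the diff), a sketch of the arithmetic behind the old and new values:

// Sketch of the arithmetic behind the constants; the block-time figures are assumptions,
// not values taken from the codebase.
fn main() {
  const BITCOIN_BLOCK_SECONDS: u64 = 600; // ~10-minute block target
  const MONERO_BLOCK_SECONDS: u64 = 120; // ~2-minute block target

  // Bitcoin (old file): TEN_MINUTES = 1 block, WINDOW_LENGTH = 6 blocks (roughly an hour).
  assert_eq!(BITCOIN_BLOCK_SECONDS, 600);
  assert_eq!(6 * BITCOIN_BLOCK_SECONDS, 3600);

  // Monero (new file): TEN_MINUTES = 5 blocks, WINDOW_LENGTH = 30 blocks (roughly an hour).
  assert_eq!(5 * MONERO_BLOCK_SECONDS, 600);
  assert_eq!(30 * MONERO_BLOCK_SECONDS, 3600);

  // Outputs only become spendable 10 blocks after creation, so a scanned (and thus
  // presumed-spendable) output needs at least that depth: CONFIRMATIONS = 10.
  const MONERO_UNLOCK_DEPTH: u64 = 10;
  assert!(MONERO_UNLOCK_DEPTH >= 10);
}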
@@ -83,7 +45,7 @@ impl<D: Db> ScannerFeed for Rpc<D> {
     number: u64,
   ) -> Result<<Self::Block as primitives::Block>::Header, Self::EphemeralError> {
     Ok(BlockHeader(
-      self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?.header,
+      self.rpc.get_block_by_number(number.try_into().unwrap()).await?
     ))
   }

@@ -91,48 +53,13 @@ impl<D: Db> ScannerFeed for Rpc<D> {
     &self,
     number: u64,
   ) -> Result<Self::Block, Self::EphemeralError> {
-    Ok(Block(
-      self.db.clone(),
-      self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?,
-    ))
+    todo!("TODO")
   }

   fn dust(coin: Coin) -> Amount {
-    assert_eq!(coin, Coin::Bitcoin);
+    assert_eq!(coin, Coin::Monero);

-    /*
-      A Taproot input is:
-      - 36 bytes for the OutPoint
-      - 0 bytes for the script (+1 byte for the length)
-      - 4 bytes for the sequence
-      Per https://developer.bitcoin.org/reference/transactions.html#raw-transaction-format
-
-      There's also:
-      - 1 byte for the witness length
-      - 1 byte for the signature length
-      - 64 bytes for the signature
-      which have the SegWit discount.
-
-      (4 * (36 + 1 + 4)) + (1 + 1 + 64) = 164 + 66 = 230 weight units
-      230 ceil div 4 = 57 vbytes
-
-      Bitcoin defines multiple minimum feerate constants *per kilo-vbyte*. Currently, these are:
-      - 1000 sat/kilo-vbyte for a transaction to be relayed
-      - Each output's value must exceed the fee of the TX spending it at 3000 sat/kilo-vbyte
-      The DUST constant needs to be determined by the latter.
-      Since these are solely relay rules, and may be raised, we require all outputs be spendable
-      under a 5000 sat/kilo-vbyte fee rate.
-
-      5000 sat/kilo-vbyte = 5 sat/vbyte
-      5 * 57 = 285 sats/spent-output
-
-      Even if an output took 100 bytes (it should be just ~29-43), taking 400 weight units, adding
-      100 vbytes, tripling the transaction size, then the sats/tx would be < 1000.
-
-      Increase by an order of magnitude, in order to ensure this is actually worth our time, and we
-      get 10,000 satoshis. This is $5 if 1 BTC = 50,000 USD.
-    */
-    Amount(10_000)
+    todo!("TODO")
   }

   async fn cost_to_aggregate(
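The dust derivation removed above was Bitcoin-specific (its Monero replacement is still a todo!), but the arithmetic it quotes is easy to verify. A sketch reproducing the figures from the removed comment; none of these values are taken from the current codebase:

// Reproduces the removed comment's arithmetic for the cost of spending one Taproot input.
fn main() {
  // 36-byte OutPoint + 1-byte (empty) script length + 4-byte sequence, at full weight,
  // plus 1 + 1 + 64 bytes of witness data, which get the SegWit discount.
  let weight = 4 * (36 + 1 + 4) + (1 + 1 + 64);
  assert_eq!(weight, 230);

  // The comment rounds 230 weight units to 57 vbytes and prices them at 5 sat/vbyte
  // (a 5000 sat/kilo-vbyte floor, chosen above the 3000 sat/kilo-vbyte relay rule).
  let sats_per_spent_output = 5 * 57;
  assert_eq!(sats_per_spent_output, 285);

  // The adopted constant, 10,000 sats, sits more than an order of magnitude above that.
  assert!(10_000 > 10 * sats_per_spent_output);
}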
@@ -147,10 +74,10 @@ impl<D: Db> ScannerFeed for Rpc<D> {
 }

 #[async_trait::async_trait]
-impl<D: Db> TransactionPublisher<Transaction> for Rpc<D> {
+impl TransactionPublisher<Transaction> for Rpc {
   type EphemeralError = RpcError;

   async fn publish(&self, tx: Transaction) -> Result<(), Self::EphemeralError> {
-    self.rpc.send_raw_transaction(&tx.0).await.map(|_| ())
+    self.rpc.publish_transaction(&tx.0).await
   }
 }
@@ -14,7 +14,6 @@ use serai_db::Db;
 use primitives::{OutputType, ReceivedOutput, Payment};
 use scanner::{KeyFor, AddressFor, OutputFor, BlockFor};
 use utxo_scheduler::{PlannedTransaction, TransactionPlanner};
-use transaction_chaining_scheduler::{EffectedReceivedOutputs, Scheduler as GenericScheduler};

 use crate::{
   scan::{offsets_for_key, scanner},
@@ -40,11 +39,11 @@ fn signable_transaction<D: Db>(
 ) -> Result<(SignableTransaction, BSignableTransaction), TransactionError> {
   assert!(
     inputs.len() <
-      <Planner as TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>>>::MAX_INPUTS
+      <Planner as TransactionPlanner<Rpc<D>, ()>>::MAX_INPUTS
   );
   assert!(
     (payments.len() + usize::from(u8::from(change.is_some()))) <
-      <Planner as TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>>>::MAX_OUTPUTS
+      <Planner as TransactionPlanner<Rpc<D>, ()>>::MAX_OUTPUTS
   );

   let inputs = inputs.into_iter().map(|input| input.output).collect::<Vec<_>>();
@@ -73,7 +72,7 @@ fn signable_transaction<D: Db>(
   ));

   let change = change
-    .map(<Planner as TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>>>::change_address);
+    .map(<Planner as TransactionPlanner<Rpc<D>, ()>>::change_address);

   BSignableTransaction::new(
     inputs.clone(),
@@ -90,7 +89,7 @@ fn signable_transaction<D: Db>(
 }

 pub(crate) struct Planner;
-impl<D: Db> TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>> for Planner {
+impl TransactionPlanner<Rpc, ()> for Planner {
   type FeeRate = u64;

   type SignableTransaction = SignableTransaction;
@@ -157,7 +156,7 @@ impl<D: Db> TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>> for Plan
     inputs: Vec<OutputFor<Rpc<D>>>,
     payments: Vec<Payment<AddressFor<Rpc<D>>>>,
     change: Option<KeyFor<Rpc<D>>>,
-  ) -> PlannedTransaction<Rpc<D>, Self::SignableTransaction, EffectedReceivedOutputs<Rpc<D>>> {
+  ) -> PlannedTransaction<Rpc<D>, Self::SignableTransaction, ()> {
     let key = inputs.first().unwrap().key();
     for input in &inputs {
       assert_eq!(key, input.key());
@@ -168,23 +167,7 @@ impl<D: Db> TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>> for Plan
       Ok(tx) => PlannedTransaction {
         signable: tx.0,
         eventuality: Eventuality { txid: tx.1.txid(), singular_spent_output },
-        auxilliary: EffectedReceivedOutputs({
-          let tx = tx.1.transaction();
-          let scanner = scanner(key);
-
-          let mut res = vec![];
-          for output in scanner.scan_transaction(tx) {
-            res.push(Output::new_with_presumed_origin(
-              key,
-              tx,
-              // It shouldn't matter if this is wrong as we should never try to return these
-              // We still provide an accurate value to ensure a lack of discrepancies
-              Some(Address::new(inputs[0].output.output().script_pubkey.clone()).unwrap()),
-              output,
-            ));
-          }
-          res
-        }),
+        auxilliary: (),
       },
       Err(
         TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment,
@@ -202,4 +185,4 @@ impl<D: Db> TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>> for Plan
   }
 }

-pub(crate) type Scheduler<D> = GenericScheduler<Rpc<D>, Planner>;
+pub(crate) type Scheduler = utxo_standard_scheduler::Scheduler<Rpc, Planner>;
@@ -67,6 +67,12 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone {
   /// The amount of confirmations a block must have to be considered finalized.
   ///
   /// This value must be at least `1`.
+  // This is distinct from `WINDOW_LENGTH` as it's only used for determining the lifetime of the
+  // key. The key switches to various stages of its lifetime depending on when user transactions
+  // will hit the Serai network (relative to the time they're made) and when outputs created by
+  // Serai become available again. If we set a long WINDOW_LENGTH, say two hours, that doesn't mean
+  // we expect user transactions made within a few minutes of a new key being declared to only
+  // appear in finalized blocks two hours later.
   const CONFIRMATIONS: u64;

   /// The amount of blocks to process in parallel.