mirror of
https://github.com/serai-dex/serai.git
synced 2024-12-23 20:19:24 +00:00
8ca90e7905
* Initial work on an In Inherents pallet * Add an event for when a batch is executed * Add a dummy provider for InInstructions * Add in-instructions to the node * Add the Serai runtime API to the processor * Move processor tests around * Build a subxt Client around Serai * Successfully get Batch events from Serai Renamed processor/substrate to processor/serai. * Much more robust InInstruction pallet * Implement the workaround from https://github.com/paritytech/subxt/issues/602 * Initial prototype of processor generated InInstructions * Correct PendingCoins data flow for InInstructions * Minor lint to in-instructions * Remove the global Serai connection for a partial re-impl * Correct ID handling of the processor test * Workaround the delay in the subscription * Make an unwrap an if let Some, remove old comments * Lint the processor toml * Rebase and update * Move substrate/in-instructions to substrate/in-instructions/pallet * Start an in-instructions primitives lib * Properly update processor to subxt 0.24 Also corrects failures from the rebase. * in-instructions cargo update * Implement IsFatalError * is_inherent -> true * Rename in-instructions crates and misc cleanup * Update documentation * cargo update * Misc update fixes * Replace height with block_number * Update processor src to latest subxt * Correct pipeline for InInstructions testing * Remove runtime::AccountId for serai_primitives::NativeAddress * Rewrite the in-instructions pallet Complete with respect to the currently written docs. Drops the custom serializer for just using SCALE. Makes slight tweaks as relevant. * Move instructions' InherentDataProvider to a client crate * Correct doc gen * Add serde to in-instructions-primitives * Add in-instructions-primitives to pallet * Heights -> BlockNumbers * Get batch pub test loop working * Update in instructions pallet terminology Removes the ambiguous Coin for Update. Removes pending/artificial latency for furture client work. 
Also moves to using serai_primitives::Coin. * Add a BlockNumber primitive * Belated cargo fmt * Further document why DifferentBatch isn't fatal * Correct processor sleeps * Remove metadata at compile time, add test framework for Serai nodes * Remove manual RPC client * Simplify update test * Improve re-exporting behavior of serai-runtime It now re-exports all pallets underneath it. * Add a function to get storage values to the Serai RPC * Update substrate/ to latest substrate * Create a dedicated crate for the Serai RPC * Remove unused dependencies in substrate/ * Remove unused dependencies in coins/ Out of scope for this branch, just minor and path of least resistance. * Use substrate/serai/client for the Serai RPC lib It's a bit out of place, since these client folders are intended for the node to access pallets and so on. This is for end-users to access Serai as a whole. In that sense, it made more sense as a top level folder, yet that also felt out of place. * Move InInstructions test to serai-client for now * Final cleanup * Update deny.toml * Cargo.lock update from merging develop * Update nightly Attempt to work around the current CI failure, which is a Rust ICE. We previously didn't upgrade due to clippy 10134, yet that's been reverted. * clippy * clippy * fmt * NativeAddress -> SeraiAddress * Sec fix on non-provided updates and doc fixes * Add Serai as a Coin Necessary in order to swap to Serai. * Add a BlockHash type, used for batch IDs * Remove origin from InInstruction Makes InInstructionTarget. Adds RefundableInInstruction with origin. * Document storage items in in-instructions * Rename serai/client/tests/serai.rs to updates.rs It only tested publishing updates and their successful acceptance.
281 lines
8.6 KiB
Rust
281 lines
8.6 KiB
Rust
use core::ops::Deref;
|
|
use std::collections::HashSet;
|
|
|
|
use lazy_static::lazy_static;
|
|
|
|
use zeroize::Zeroizing;
|
|
use rand_core::OsRng;
|
|
|
|
use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar};
|
|
|
|
use tokio::sync::Mutex;
|
|
|
|
use monero_serai::{
|
|
Protocol, random_scalar,
|
|
wallet::{
|
|
ViewPair, Scanner,
|
|
address::{Network, AddressType, AddressSpec, AddressMeta, MoneroAddress},
|
|
SpendableOutput,
|
|
},
|
|
rpc::Rpc,
|
|
};
|
|
|
|
pub fn random_address() -> (Scalar, ViewPair, MoneroAddress) {
|
|
let spend = random_scalar(&mut OsRng);
|
|
let spend_pub = &spend * &ED25519_BASEPOINT_TABLE;
|
|
let view = Zeroizing::new(random_scalar(&mut OsRng));
|
|
(
|
|
spend,
|
|
ViewPair::new(spend_pub, view.clone()),
|
|
MoneroAddress {
|
|
meta: AddressMeta::new(Network::Mainnet, AddressType::Standard),
|
|
spend: spend_pub,
|
|
view: view.deref() * &ED25519_BASEPOINT_TABLE,
|
|
},
|
|
)
|
|
}
|
|
|
|
// TODO: Support transactions already on-chain
|
|
// TODO: Don't have a side effect of mining blocks more blocks than needed under race conditions
|
|
// TODO: mine as much as needed instead of default 10 blocks
|
|
pub async fn mine_until_unlocked(rpc: &Rpc, addr: &str, tx_hash: [u8; 32]) {
|
|
// mine until tx is in a block
|
|
let mut height = rpc.get_height().await.unwrap();
|
|
let mut found = false;
|
|
while !found {
|
|
let block = rpc.get_block_by_number(height - 1).await.unwrap();
|
|
found = match block.txs.iter().find(|&&x| x == tx_hash) {
|
|
Some(_) => true,
|
|
None => {
|
|
rpc.generate_blocks(addr, 1).await.unwrap();
|
|
height += 1;
|
|
false
|
|
}
|
|
}
|
|
}
|
|
|
|
// mine 9 more blocks to unlock the tx
|
|
rpc.generate_blocks(addr, 9).await.unwrap();
|
|
}
|
|
|
|
// Mines 60 blocks and returns an unlocked miner TX output.
|
|
#[allow(dead_code)]
|
|
pub async fn get_miner_tx_output(rpc: &Rpc, view: &ViewPair) -> SpendableOutput {
|
|
let mut scanner = Scanner::from_view(view.clone(), Some(HashSet::new()));
|
|
|
|
// Mine 60 blocks to unlock a miner TX
|
|
let start = rpc.get_height().await.unwrap();
|
|
rpc
|
|
.generate_blocks(&view.address(Network::Mainnet, AddressSpec::Standard).to_string(), 60)
|
|
.await
|
|
.unwrap();
|
|
|
|
let block = rpc.get_block_by_number(start).await.unwrap();
|
|
scanner.scan(rpc, &block).await.unwrap().swap_remove(0).ignore_timelock().swap_remove(0)
|
|
}
|
|
|
|
pub async fn rpc() -> Rpc {
|
|
let rpc = Rpc::new("http://127.0.0.1:18081".to_string()).unwrap();
|
|
|
|
// Only run once
|
|
if rpc.get_height().await.unwrap() != 1 {
|
|
return rpc;
|
|
}
|
|
|
|
let addr = MoneroAddress {
|
|
meta: AddressMeta::new(Network::Mainnet, AddressType::Standard),
|
|
spend: &random_scalar(&mut OsRng) * &ED25519_BASEPOINT_TABLE,
|
|
view: &random_scalar(&mut OsRng) * &ED25519_BASEPOINT_TABLE,
|
|
}
|
|
.to_string();
|
|
|
|
// Mine 40 blocks to ensure decoy availability
|
|
rpc.generate_blocks(&addr, 40).await.unwrap();
|
|
assert!(!matches!(rpc.get_protocol().await.unwrap(), Protocol::Unsupported(_)));
|
|
|
|
rpc
|
|
}
|
|
|
|
lazy_static! {
  // Global lock serializing tests against the shared regtest node; taken by
  // the async_sequential! macro below so tests never run concurrently.
  pub static ref SEQUENTIAL: Mutex<()> = Mutex::new(());
}
|
|
|
|
// Expands each `async fn` into a #[tokio::test] which holds the SEQUENTIAL
// lock for its entire duration, so tests sharing the regtest node run one at
// a time. Bodies run on a LocalSet, permitting non-Send futures.
#[macro_export]
macro_rules! async_sequential {
  ($(async fn $name: ident() $body: block)*) => {
    $(
      #[tokio::test]
      async fn $name() {
        let guard = runner::SEQUENTIAL.lock().await;
        let local = tokio::task::LocalSet::new();
        local.run_until(async move {
          if let Err(err) = tokio::task::spawn_local(async move { $body }).await {
            // Explicitly release the lock before propagating the panic so the
            // remaining tests aren't blocked behind a poisoned/held guard.
            drop(guard);
            // Re-raise the task's join error (panics inside the body).
            Err(err).unwrap()
          }
        }).await;
      }
    )*
  }
}
|
|
|
|
// Declares a sequential wallet test sending a chain of transactions. Each
// stage is a `(builder fn, checks fn)` pair: the builder receives the RPC, a
// transaction builder, and an address, returning a SignableTransaction plus
// arbitrary state; the checks fn receives the published transaction and that
// state, and its return value is boxed and carried into the next stage.
// The whole test runs twice: once single-signer, once multisig (when the
// "multisig" feature is enabled).
#[macro_export]
macro_rules! test {
  (
    $name: ident,
    (
      $first_tx: expr,
      $first_checks: expr,
    ),
    $((
      $tx: expr,
      $checks: expr,
    )$(,)?),*
  ) => {
    async_sequential! {
      async fn $name() {
        use core::{ops::Deref, any::Any};
        use std::collections::HashSet;
        #[cfg(feature = "multisig")]
        use std::collections::HashMap;

        use zeroize::Zeroizing;
        use rand_core::OsRng;

        use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE;

        #[cfg(feature = "multisig")]
        use transcript::{Transcript, RecommendedTranscript};
        #[cfg(feature = "multisig")]
        use frost::{
          curve::Ed25519,
          tests::{THRESHOLD, key_gen},
        };

        use monero_serai::{
          random_scalar,
          wallet::{
            address::{Network, AddressSpec}, ViewPair, Scanner, SignableTransaction,
            SignableTransactionBuilder,
          },
        };

        use runner::{random_address, rpc, mine_until_unlocked, get_miner_tx_output};

        // Alias available to the caller-supplied stage closures.
        type Builder = SignableTransactionBuilder;

        // Run each function as both a single signer and as a multisig
        #[allow(clippy::redundant_closure_call)]
        for multisig in [false, true] {
          // Only run the multisig variant if multisig is enabled
          if multisig {
            #[cfg(not(feature = "multisig"))]
            continue;
          }

          let spend = Zeroizing::new(random_scalar(&mut OsRng));
          #[cfg(feature = "multisig")]
          let keys = key_gen::<_, Ed25519>(&mut OsRng);

          // Single signer derives its own key; multisig uses the group key.
          let spend_pub = if !multisig {
            spend.deref() * &ED25519_BASEPOINT_TABLE
          } else {
            #[cfg(not(feature = "multisig"))]
            panic!("Multisig branch called without the multisig feature");
            #[cfg(feature = "multisig")]
            keys[&1].group_key().0
          };

          let rpc = rpc().await;

          let view = ViewPair::new(spend_pub, Zeroizing::new(random_scalar(&mut OsRng)));
          let addr = view.address(Network::Mainnet, AddressSpec::Standard);

          // Fund the wallet with a freshly mined, unlocked miner output.
          let miner_tx = get_miner_tx_output(&rpc, &view).await;

          let builder = SignableTransactionBuilder::new(
            rpc.get_protocol().await.unwrap(),
            rpc.get_fee().await.unwrap(),
            Some(random_address().2),
          );

          // Closure signing a built transaction either solo or via a FROST
          // threshold signing session over all participants.
          let sign = |tx: SignableTransaction| {
            let rpc = rpc.clone();
            let spend = spend.clone();
            #[cfg(feature = "multisig")]
            let keys = keys.clone();
            async move {
              if !multisig {
                tx.sign(&mut OsRng, &rpc, &spend).await.unwrap()
              } else {
                #[cfg(not(feature = "multisig"))]
                panic!("Multisig branch called without the multisig feature");
                #[cfg(feature = "multisig")]
                {
                  let mut machines = HashMap::new();
                  for i in 1 ..= THRESHOLD {
                    machines.insert(
                      i,
                      tx
                        .clone()
                        .multisig(
                          &rpc,
                          keys[&i].clone(),
                          RecommendedTranscript::new(b"Monero Serai Test Transaction"),
                          rpc.get_height().await.unwrap() - 10,
                        )
                        .await
                        .unwrap(),
                    );
                  }

                  frost::tests::sign_without_caching(&mut OsRng, machines, &[])
                }
              }
            }
          };

          // TODO: Generate a distinct wallet for each transaction to prevent overlap
          let next_addr = addr;

          // First stage: build, sign, publish, confirm, and check the TX,
          // boxing the checks' result as the state carried to later stages.
          let temp = Box::new({
            let mut builder = builder.clone();
            builder.add_input(miner_tx);
            let (tx, state) = ($first_tx)(rpc.clone(), builder, next_addr).await;

            let signed = sign(tx).await;
            rpc.publish_transaction(&signed).await.unwrap();
            mine_until_unlocked(&rpc, &random_address().2.to_string(), signed.hash()).await;
            let tx = rpc.get_transaction(signed.hash()).await.unwrap();
            let scanner =
              Scanner::from_view(view.clone(), Some(HashSet::new()));
            ($first_checks)(rpc.clone(), tx, scanner, state).await
          });
          #[allow(unused_variables, unused_mut, unused_assignments)]
          let mut carried_state: Box<dyn Any> = temp;

          // Subsequent stages: each downcasts the carried state to the type
          // its builder expects, then repeats the sign/publish/check cycle.
          $(
            let (tx, state) = ($tx)(
              rpc.clone(),
              builder.clone(),
              next_addr,
              *carried_state.downcast().unwrap()
            ).await;

            let signed = sign(tx).await;
            rpc.publish_transaction(&signed).await.unwrap();
            mine_until_unlocked(&rpc, &random_address().2.to_string(), signed.hash()).await;
            let tx = rpc.get_transaction(signed.hash()).await.unwrap();
            #[allow(unused_assignments)]
            {
              let scanner =
                Scanner::from_view(view.clone(), Some(HashSet::new()));
              carried_state =
                Box::new(($checks)(rpc.clone(), tx, scanner, state).await);
            }
          )*
        }
      }
    }
  }
}
|