Mirror of https://github.com/serai-dex/serai.git
Extend send_test with TX signing

Monero fails with fee_too_low, which this commit is meant to document.

parent 857e3ea72b
commit f988c43f8d
4 changed files with 50 additions and 31 deletions
@@ -84,9 +84,10 @@ On `substrate::CoordinatorMessage::SubstrateBlock`, the processor:
    Serai, as having had their batches signed.
 2) Adds the new outputs from newly finalized blocks to the scheduler, along
    with the necessary payments from `Burn` events on Serai.
 
-The processor also sends a `substrate::ProcessorMessage::SubstrateBlockAck`,
-containing the IDs of all plans now being signed for, to the coordinator.
+3) Sends a `substrate::ProcessorMessage::SubstrateBlockAck`, containing the IDs
+   of all plans now being signed for, to the coordinator.
+4) Sends `sign::ProcessorMessage::Preprocess` for each plan now being signed
+   for.
 
 ## Sign Preprocesses
 
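The two new steps above pair one acknowledgement per block with one preprocess per plan. Below is a self-contained sketch of that relationship; the struct shapes and the `messages_for_block` helper are placeholders inferred from this diff (plan IDs are `[u8; 32]`, preprocesses are opaque byte vectors), not the real definitions from the `messages` crate.

```rust
/// Placeholder for `substrate::ProcessorMessage::SubstrateBlockAck` (step 3):
/// sent once per Substrate block, listing every plan now being signed for.
struct SubstrateBlockAck {
  block: u64,
  plans: Vec<[u8; 32]>,
}

/// Placeholder for `sign::ProcessorMessage::Preprocess` (step 4): sent once per plan.
struct Preprocess {
  plan: [u8; 32],
  preprocess: Vec<u8>,
}

/// The processor acknowledges the block once, then emits exactly one preprocess
/// for each plan listed in that acknowledgement.
fn messages_for_block(block: u64, plans: Vec<[u8; 32]>) -> (SubstrateBlockAck, Vec<Preprocess>) {
  let preprocesses =
    plans.iter().map(|plan| Preprocess { plan: *plan, preprocess: vec![] }).collect();
  (SubstrateBlockAck { block, plans }, preprocesses)
}

fn main() {
  let (ack, preprocesses) = messages_for_block(1, vec![[0; 32], [1; 32]]);
  assert_eq!(ack.block, 1);
  // One Preprocess per acknowledged plan.
  assert_eq!(ack.plans.len(), preprocesses.len());
  assert_eq!(preprocesses[0].plan, [0; 32]);
}
```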
@@ -107,8 +108,7 @@ signed and stop locally attempting to do so.
 
 On `sign::CoordinatorMessage::Reattempt`, the processor will create a new
 a new instance of the transaction signing protocol. The new protocol's
-preprocess is sent to the coordinator in a
-`sign::ProcessorMessage::Preprocess`.
+preprocess is sent to the coordinator in a `sign::ProcessorMessage::Preprocess`.
 
 ## Sign Completed
 
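A reattempt keeps signing the same plan under the same key; only the attempt counter advances, which is what `send_test` below drives by bumping `id.attempt` before each `Reattempt`. A minimal sketch of that bookkeeping, using a simplified stand-in for `SignId` and a hypothetical `reattempt` helper (the real `SignId`, with `key`/`id`/`attempt` fields, lives in the `messages` crate):

```rust
/// Simplified stand-in for `SignId`; the field names match how send_test below
/// constructs it (key, plan ID, attempt), but this is not the real definition.
#[derive(Clone, Debug, PartialEq)]
struct SignId {
  key: Vec<u8>,
  id: [u8; 32],
  attempt: u32,
}

/// Each Reattempt starts a fresh instance of the signing protocol for the same
/// plan, so only the attempt number changes; the key and plan ID stay fixed.
fn reattempt(id: &SignId) -> SignId {
  SignId { attempt: id.attempt + 1, ..id.clone() }
}

fn main() {
  let first = SignId { key: vec![0; 32], id: [1; 32], attempt: 0 };
  let second = reattempt(&first);
  assert_eq!(second, SignId { key: first.key.clone(), id: first.id, attempt: 1 });
}
```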
@@ -554,7 +554,7 @@ impl Coin for Monero {
   async fn get_fee(&self) -> Self::Fee {
     use monero_serai::wallet::FeePriority;
 
-    self.rpc.get_fee(self.rpc.get_protocol().await.unwrap(), FeePriority::Low).await.unwrap()
+    self.rpc.get_fee(self.rpc.get_protocol().await.unwrap(), FeePriority::Highest).await.unwrap()
   }
 
   #[cfg(test)]
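The commit message attributes the test failure to fee_too_low, and this hunk bumps the requested priority from `Low` to `Highest`. A hedged diagnostic sketch follows: `dump_fee_rates` is a hypothetical helper that reuses only the calls visible above (`rpc.get_protocol()`, `rpc.get_fee(protocol, priority)`) to print the daemon's estimate at both priorities; it assumes it sits in the same module as `Monero` (so the `rpc` field is reachable) and that the returned fee type implements `Debug`.

```rust
#[cfg(test)]
async fn dump_fee_rates(monero: &Monero) {
  use monero_serai::wallet::FeePriority;

  // Compare the priority used before this commit against the one used after it,
  // so a fee_too_low rejection can be checked against the rate actually paid.
  for (label, priority) in [("Low", FeePriority::Low), ("Highest", FeePriority::Highest)] {
    let protocol = monero.rpc.get_protocol().await.unwrap();
    let fee = monero.rpc.get_fee(protocol, priority).await.unwrap();
    // Assumes the fee type derives Debug, as other monero-serai types do.
    println!("FeePriority::{label}: {fee:?}");
  }
}
```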
@@ -139,14 +139,14 @@ pub(crate) async fn sign_batch(
 pub(crate) async fn substrate_block(
   coordinator: &mut Coordinator,
   block: messages::substrate::CoordinatorMessage,
-) {
+) -> Vec<[u8; 32]> {
   match block.clone() {
     messages::substrate::CoordinatorMessage::SubstrateBlock {
       context: _,
       network: sent_network,
       block: sent_block,
       key: _,
-      burns,
+      burns: _,
     } => {
       coordinator.send_message(block).await;
       match coordinator.recv_message().await {
@@ -159,8 +159,7 @@ pub(crate) async fn substrate_block(
         ) => {
           assert_eq!(recvd_network, sent_network);
           assert_eq!(recvd_block, sent_block);
-          // TODO: This isn't the correct formula at all
-          assert_eq!(plans.len(), if burns.is_empty() { 0 } else { 1 });
+          plans
         }
         _ => panic!("coordinator didn't respond to SubstrateBlock with SubstrateBlockAck"),
       }
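With `substrate_block` now returning the acknowledged plan IDs, a test can require every coordinator's processor to report the same plans for the same block; `send_test` below does this inline with an empty-`Vec` sentinel, while `batch_test` simply asserts the result is empty. A hypothetical helper sketching the same pattern (same test crate, so `Coordinator`, `messages`, and `substrate_block` are assumed to be in scope):

```rust
/// Hypothetical helper: feed the same SubstrateBlock to every coordinator and
/// require identical plan IDs back, returning them for the signing flow.
pub(crate) async fn assert_consistent_plans(
  coordinators: &mut [Coordinator],
  block: messages::substrate::CoordinatorMessage,
) -> Vec<[u8; 32]> {
  let (first, rest) = coordinators.split_first_mut().expect("no coordinators");
  let plans = substrate_block(first, block.clone()).await;
  for coordinator in rest {
    // Every processor must acknowledge the same plans for the same block.
    assert_eq!(substrate_block(coordinator, block.clone()).await, plans);
  }
  plans
}
```

Comparing against the first coordinator, rather than using an empty-`Vec` sentinel, also works when the expected plan set is legitimately empty.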
@@ -269,7 +268,7 @@ fn batch_test() {
       let serai_time =
         SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
       for coordinator in &mut coordinators {
-        substrate_block(
+        assert!(substrate_block(
           coordinator,
           messages::substrate::CoordinatorMessage::SubstrateBlock {
             context: SubstrateContext {
@@ -283,7 +282,8 @@ fn batch_test() {
             burns: vec![],
           },
         )
-        .await;
+        .await
+        .is_empty());
       }
     }
   });
@@ -17,7 +17,7 @@ use crate::{*, tests::*};
 #[allow(unused)]
 pub(crate) async fn recv_sign_preprocesses(
   coordinators: &mut [Coordinator],
-  key: [u8; 32],
+  key: Vec<u8>,
   attempt: u32,
 ) -> (SignId, HashMap<Participant, Vec<u8>>) {
   let mut id = None;
@@ -202,8 +202,9 @@ fn send_test() {
       let substrate_block_num = (OsRng.next_u64() % 4_000_000_000u64) + 1;
       let serai_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
 
+      let mut plans = vec![];
       for coordinator in &mut coordinators {
-        substrate_block(
+        let these_plans = substrate_block(
           coordinator,
           messages::substrate::CoordinatorMessage::SubstrateBlock {
             context: SubstrateContext {
@@ -221,25 +222,43 @@ fn send_test() {
           },
         )
         .await;
 
+        if plans.is_empty() {
+          plans = these_plans;
+        } else {
+          assert_eq!(plans, these_plans);
+        }
       }
-      /*
-      // Trigger a random amount of re-attempts
-      for attempt in 1 ..= u32::try_from(OsRng.next_u64() % 4).unwrap() {
-        // TODO: Double check how the processor handles this ID field
-        // It should be able to assert its perfectly sequential
-        id.attempt = attempt;
-        for coordinator in coordinators.iter_mut() {
-          coordinator
-            .send_message(messages::sign::CoordinatorMessage::Reattempt {
-              id: id.clone(),
-            })
-            .await;
-        }
-        (id, preprocesses) =
-          recv_batch_preprocesses(&mut coordinators, key_pair.0 .0, attempt).await;
-      }
-      */
+      assert_eq!(plans.len(), 1);
+
+      // Start signing the TX
+      let (mut id, mut preprocesses) =
+        recv_sign_preprocesses(&mut coordinators, key_pair.1.to_vec(), 0).await;
+      // TODO: Should this use the Substrate key?
+      assert_eq!(id, SignId { key: key_pair.1.to_vec(), id: plans[0], attempt: 0 });
+
+      // Trigger a random amount of re-attempts
+      for attempt in 1 ..= u32::try_from(OsRng.next_u64() % 4).unwrap() {
+        // TODO: Double check how the processor handles this ID field
+        // It should be able to assert its perfectly sequential
+        id.attempt = attempt;
+        for coordinator in coordinators.iter_mut() {
+          coordinator
+            .send_message(messages::sign::CoordinatorMessage::Reattempt { id: id.clone() })
+            .await;
+        }
+        (id, preprocesses) =
+          recv_sign_preprocesses(&mut coordinators, key_pair.1.to_vec(), attempt).await;
+      }
+
+      let tx_id = sign_tx(&mut coordinators, id, preprocesses).await;
+
+      // TODO: Test calling Sign again yields tx_id again
+      // TODO: Make sure all participating nodes published the TX
+      // TODO: Send this TX to the left out node and make sure they can complete the Eventuality
+      // TODO: Test the Eventuality from the blockchain, instead of from the coordinator
+
+      let _ = tx_id;
     });
   }
 }