Extend send_test with TX signing

Monero fails with fee_too_low, which this commit is meant to document.
This commit is contained in:
Luke Parker 2023-07-29 08:29:54 -04:00
parent 857e3ea72b
commit f988c43f8d
No known key found for this signature in database
4 changed files with 50 additions and 31 deletions

View file

@ -84,9 +84,10 @@ On `substrate::CoordinatorMessage::SubstrateBlock`, the processor:
Serai, as having had their batches signed. Serai, as having had their batches signed.
2) Adds the new outputs from newly finalized blocks to the scheduler, along 2) Adds the new outputs from newly finalized blocks to the scheduler, along
with the necessary payments from `Burn` events on Serai. with the necessary payments from `Burn` events on Serai.
3) Sends a `substrate::ProcessorMessage::SubstrateBlockAck`, containing the IDs
The processor also sends a `substrate::ProcessorMessage::SubstrateBlockAck`, of all plans now being signed for, to the coordinator.
containing the IDs of all plans now being signed for, to the coordinator. 4) Sends `sign::ProcessorMessage::Preprocess` for each plan now being signed
for.
## Sign Preprocesses ## Sign Preprocesses
@ -107,8 +108,7 @@ signed and stop locally attempting to do so.
On `sign::CoordinatorMessage::Reattempt`, the processor will create a new On `sign::CoordinatorMessage::Reattempt`, the processor will create a new
a new instance of the transaction signing protocol. The new protocol's a new instance of the transaction signing protocol. The new protocol's
preprocess is sent to the coordinator in a preprocess is sent to the coordinator in a `sign::ProcessorMessage::Preprocess`.
`sign::ProcessorMessage::Preprocess`.
## Sign Completed ## Sign Completed

View file

@ -554,7 +554,7 @@ impl Coin for Monero {
async fn get_fee(&self) -> Self::Fee { async fn get_fee(&self) -> Self::Fee {
use monero_serai::wallet::FeePriority; use monero_serai::wallet::FeePriority;
self.rpc.get_fee(self.rpc.get_protocol().await.unwrap(), FeePriority::Low).await.unwrap() self.rpc.get_fee(self.rpc.get_protocol().await.unwrap(), FeePriority::Highest).await.unwrap()
} }
#[cfg(test)] #[cfg(test)]

View file

@ -139,14 +139,14 @@ pub(crate) async fn sign_batch(
pub(crate) async fn substrate_block( pub(crate) async fn substrate_block(
coordinator: &mut Coordinator, coordinator: &mut Coordinator,
block: messages::substrate::CoordinatorMessage, block: messages::substrate::CoordinatorMessage,
) { ) -> Vec<[u8; 32]> {
match block.clone() { match block.clone() {
messages::substrate::CoordinatorMessage::SubstrateBlock { messages::substrate::CoordinatorMessage::SubstrateBlock {
context: _, context: _,
network: sent_network, network: sent_network,
block: sent_block, block: sent_block,
key: _, key: _,
burns, burns: _,
} => { } => {
coordinator.send_message(block).await; coordinator.send_message(block).await;
match coordinator.recv_message().await { match coordinator.recv_message().await {
@ -159,8 +159,7 @@ pub(crate) async fn substrate_block(
) => { ) => {
assert_eq!(recvd_network, sent_network); assert_eq!(recvd_network, sent_network);
assert_eq!(recvd_block, sent_block); assert_eq!(recvd_block, sent_block);
// TODO: This isn't the correct formula at all plans
assert_eq!(plans.len(), if burns.is_empty() { 0 } else { 1 });
} }
_ => panic!("coordinator didn't respond to SubstrateBlock with SubstrateBlockAck"), _ => panic!("coordinator didn't respond to SubstrateBlock with SubstrateBlockAck"),
} }
@ -269,7 +268,7 @@ fn batch_test() {
let serai_time = let serai_time =
SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(); SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
for coordinator in &mut coordinators { for coordinator in &mut coordinators {
substrate_block( assert!(substrate_block(
coordinator, coordinator,
messages::substrate::CoordinatorMessage::SubstrateBlock { messages::substrate::CoordinatorMessage::SubstrateBlock {
context: SubstrateContext { context: SubstrateContext {
@ -283,7 +282,8 @@ fn batch_test() {
burns: vec![], burns: vec![],
}, },
) )
.await; .await
.is_empty());
} }
} }
}); });

View file

@ -17,7 +17,7 @@ use crate::{*, tests::*};
#[allow(unused)] #[allow(unused)]
pub(crate) async fn recv_sign_preprocesses( pub(crate) async fn recv_sign_preprocesses(
coordinators: &mut [Coordinator], coordinators: &mut [Coordinator],
key: [u8; 32], key: Vec<u8>,
attempt: u32, attempt: u32,
) -> (SignId, HashMap<Participant, Vec<u8>>) { ) -> (SignId, HashMap<Participant, Vec<u8>>) {
let mut id = None; let mut id = None;
@ -202,8 +202,9 @@ fn send_test() {
let substrate_block_num = (OsRng.next_u64() % 4_000_000_000u64) + 1; let substrate_block_num = (OsRng.next_u64() % 4_000_000_000u64) + 1;
let serai_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(); let serai_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
let mut plans = vec![];
for coordinator in &mut coordinators { for coordinator in &mut coordinators {
substrate_block( let these_plans = substrate_block(
coordinator, coordinator,
messages::substrate::CoordinatorMessage::SubstrateBlock { messages::substrate::CoordinatorMessage::SubstrateBlock {
context: SubstrateContext { context: SubstrateContext {
@ -221,25 +222,43 @@ fn send_test() {
}, },
) )
.await; .await;
if plans.is_empty() {
plans = these_plans;
} else {
assert_eq!(plans, these_plans);
}
}
assert_eq!(plans.len(), 1);
// Start signing the TX
let (mut id, mut preprocesses) =
recv_sign_preprocesses(&mut coordinators, key_pair.1.to_vec(), 0).await;
// TODO: Should this use the Substrate key?
assert_eq!(id, SignId { key: key_pair.1.to_vec(), id: plans[0], attempt: 0 });
// Trigger a random amount of re-attempts
for attempt in 1 ..= u32::try_from(OsRng.next_u64() % 4).unwrap() {
// TODO: Double check how the processor handles this ID field
// It should be able to assert it's perfectly sequential
id.attempt = attempt;
for coordinator in coordinators.iter_mut() {
coordinator
.send_message(messages::sign::CoordinatorMessage::Reattempt { id: id.clone() })
.await;
}
(id, preprocesses) =
recv_sign_preprocesses(&mut coordinators, key_pair.1.to_vec(), attempt).await;
} }
/* let tx_id = sign_tx(&mut coordinators, id, preprocesses).await;
// Trigger a random amount of re-attempts
for attempt in 1 ..= u32::try_from(OsRng.next_u64() % 4).unwrap() { // TODO: Test calling Sign again yields tx_id again
// TODO: Double check how the processor handles this ID field // TODO: Make sure all participating nodes published the TX
// It should be able to assert it's perfectly sequential // TODO: Send this TX to the left out node and make sure they can complete the Eventuality
id.attempt = attempt; // TODO: Test the Eventuality from the blockchain, instead of from the coordinator
for coordinator in coordinators.iter_mut() {
coordinator let _ = tx_id;
.send_message(messages::sign::CoordinatorMessage::Reattempt {
id: id.clone(),
})
.await;
}
(id, preprocesses) =
recv_batch_preprocesses(&mut coordinators, key_pair.0 .0, attempt).await;
}
*/
}); });
} }
} }