Remove the Signer events pseudo-channel in favor of returned messages

Also replaces SignerEvent with direct usage of ProcessorMessage.
Luke Parker 2023-11-09 01:26:30 -05:00
parent 7d72e224f0
commit 2eb155753a
3 changed files with 127 additions and 127 deletions
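
At a high level, the change replaces the Signer's events queue (a VecDeque<SignerEvent> pseudo-channel the caller had to drain after every call) with handlers that return an Option<ProcessorMessage> for the caller to forward to the coordinator. The following is a minimal sketch of the two patterns, using simplified stand-in types rather than the actual processor crate definitions:

// Sketch only: stand-in types, not the real Signer/ProcessorMessage definitions.
use std::collections::VecDeque;

#[derive(Debug)]
enum ProcessorMessage {
  Preprocess { id: [u8; 32] },
}

// Before: handlers pushed messages onto a pseudo-channel the caller had to drain.
struct OldSigner {
  events: VecDeque<ProcessorMessage>,
}
impl OldSigner {
  fn handle(&mut self, id: [u8; 32]) {
    self.events.push_back(ProcessorMessage::Preprocess { id });
  }
}

// After: handlers return the message (if any) for the caller to forward.
struct NewSigner;
impl NewSigner {
  #[must_use]
  fn handle(&mut self, id: [u8; 32]) -> Option<ProcessorMessage> {
    Some(ProcessorMessage::Preprocess { id })
  }
}

fn main() {
  // Old pattern: call, then drain signer.events.
  let mut old = OldSigner { events: VecDeque::new() };
  old.handle([0; 32]);
  while let Some(msg) = old.events.pop_front() {
    println!("send to coordinator: {msg:?}");
  }

  // New pattern: forward the returned message directly.
  let mut new = NewSigner;
  if let Some(msg) = new.handle([1; 32]) {
    println!("send to coordinator: {msg:?}");
  }
}

Returning the message keeps producing and sending it in one place, and the #[must_use] annotations added in this commit let the compiler flag any call site that silently drops a message, which the queue-based design could not.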


@@ -42,7 +42,7 @@ mod key_gen;
 use key_gen::{KeyConfirmed, KeyGen};
 mod signer;
-use signer::{SignerEvent, Signer};
+use signer::Signer;
 mod substrate_signer;
 use substrate_signer::{SubstrateSignerEvent, SubstrateSigner};
@@ -206,12 +206,15 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
     }
     CoordinatorMessage::Sign(msg) => {
-      tributary_mutable
+      if let Some(msg) = tributary_mutable
         .signers
         .get_mut(msg.key())
         .expect("coordinator told us to sign with a signer we don't have")
         .handle(txn, msg)
-        .await;
+        .await
+      {
+        coordinator.send(msg).await;
+      }
     }
     CoordinatorMessage::Coordinator(msg) => {
@@ -359,7 +362,9 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
       let signers = &mut tributary_mutable.signers;
       for (key, id, tx, eventuality) in to_sign {
         if let Some(signer) = signers.get_mut(key.to_bytes().as_ref()) {
-          signer.sign_transaction(txn, id, tx, eventuality).await;
+          if let Some(msg) = signer.sign_transaction(txn, id, tx, eventuality).await {
+            coordinator.send(msg).await;
+          }
         }
       }
@@ -374,9 +379,10 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
   }
 }
-async fn boot<N: Network, D: Db>(
+async fn boot<N: Network, D: Db, Co: Coordinator>(
   raw_db: &mut D,
   network: &N,
+  coordinator: &mut Co,
 ) -> (MainDb<N, D>, TributaryMutable<N, D>, SubstrateMutable<N, D>) {
   let mut entropy_transcript = {
     let entropy = Zeroizing::new(env::var("ENTROPY").expect("entropy wasn't specified"));
@@ -450,7 +456,11 @@ async fn boot<N: Network, D: Db>(
     for (plan, tx, eventuality) in &actively_signing {
       if plan.key == network_key {
         let mut txn = raw_db.txn();
-        signer.sign_transaction(&mut txn, plan.id(), tx.clone(), eventuality.clone()).await;
+        if let Some(msg) =
+          signer.sign_transaction(&mut txn, plan.id(), tx.clone(), eventuality.clone()).await
+        {
+          coordinator.send(msg).await;
+        }
         // This should only have re-writes of existing data
         drop(txn);
       }
@@ -474,7 +484,8 @@ async fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network: N, mut
   // This check ensures no network which doesn't have a bidirectional mapping is defined
   assert_eq!(<N::Block as Block<N>>::Id::default().as_ref().len(), BlockHash([0u8; 32]).0.len());
-  let (main_db, mut tributary_mutable, mut substrate_mutable) = boot(&mut raw_db, &network).await;
+  let (main_db, mut tributary_mutable, mut substrate_mutable) =
+    boot(&mut raw_db, &network, &mut coordinator).await;
   // We can't load this from the DB as we can't guarantee atomic increments with the ack function
   // TODO: Load with a slight tolerance
@@ -557,7 +568,9 @@ async fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network: N, mut
           },
           MultisigEvent::Completed(key, id, tx) => {
             if let Some(signer) = tributary_mutable.signers.get_mut(&key) {
-              signer.completed(txn, id, tx);
+              if let Some(msg) = signer.completed(txn, id, tx) {
+                coordinator.send(msg).await;
+              }
             }
           }
         }
@@ -568,28 +581,6 @@ async fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network: N, mut
     // The signers will only have events after the above select executes, so having no timeout on
     // the above is fine
     // TODO: Have the Signers return these events, allowing removing these channels?
-    for (key, signer) in tributary_mutable.signers.iter_mut() {
-      while let Some(msg) = signer.events.pop_front() {
-        match msg {
-          SignerEvent::ProcessorMessage(msg) => {
-            coordinator.send(msg).await;
-          }
-          SignerEvent::SignedTransaction { id, tx } => {
-            // It is important ProcessorMessage::Completed is only emitted if a Signer we had
-            // created the TX completed (which having it only emitted after a SignerEvent ensures)
-            coordinator
-              .send(messages::sign::ProcessorMessage::Completed {
-                key: key.clone(),
-                id,
-                tx: tx.as_ref().to_vec(),
-              })
-              .await;
-          }
-        }
-      }
-    }
     if let Some(signer) = tributary_mutable.substrate_signer.as_mut() {
       while let Some(msg) = signer.events.pop_front() {
         match msg {


@@ -1,5 +1,5 @@
 use core::{marker::PhantomData, fmt};
-use std::collections::{VecDeque, HashMap};
+use std::collections::HashMap;
 use rand_core::OsRng;
@@ -18,12 +18,6 @@ use crate::{
   networks::{Transaction, Eventuality, Network},
 };
-#[derive(Debug)]
-pub enum SignerEvent<N: Network> {
-  SignedTransaction { id: [u8; 32], tx: <N::Transaction as Transaction<N>>::Id },
-  ProcessorMessage(ProcessorMessage),
-}
 #[derive(Debug)]
 struct SignerDb<N: Network, D: Db>(D, PhantomData<N>);
 impl<N: Network, D: Db> SignerDb<N, D> {
@@ -162,8 +156,6 @@ pub struct Signer<N: Network, D: Db> {
   preprocessing: HashMap<[u8; 32], (Vec<SignMachineFor<N>>, Vec<PreprocessFor<N>>)>,
   #[allow(clippy::type_complexity)]
   signing: HashMap<[u8; 32], (SignatureMachineFor<N>, Vec<SignatureShareFor<N>>)>,
-  pub events: VecDeque<SignerEvent<N>>,
 }
 impl<N: Network, D: Db> fmt::Debug for Signer<N, D> {
@@ -210,8 +202,6 @@ impl<N: Network, D: Db> Signer<N, D> {
       attempt: HashMap::new(),
       preprocessing: HashMap::new(),
       signing: HashMap::new(),
-      events: VecDeque::new(),
     }
   }
@@ -245,6 +235,7 @@ impl<N: Network, D: Db> Signer<N, D> {
     Ok(())
   }
+  #[must_use]
   fn already_completed(&self, txn: &mut D::Transaction<'_>, id: [u8; 32]) -> bool {
     if !SignerDb::<N, D>::completions(txn, id).is_empty() {
       debug!(
@@ -258,7 +249,12 @@ impl<N: Network, D: Db> Signer<N, D> {
     }
   }
-  fn complete(&mut self, id: [u8; 32], tx_id: <N::Transaction as Transaction<N>>::Id) {
+  #[must_use]
+  fn complete(
+    &mut self,
+    id: [u8; 32],
+    tx_id: <N::Transaction as Transaction<N>>::Id,
+  ) -> ProcessorMessage {
     // Assert we're actively signing for this TX
     assert!(self.signable.remove(&id).is_some(), "completed a TX we weren't signing for");
     assert!(self.attempt.remove(&id).is_some(), "attempt had an ID signable didn't have");
@@ -271,10 +267,20 @@ impl<N: Network, D: Db> Signer<N, D> {
     self.signing.remove(&id);
     // Emit the event for it
-    self.events.push_back(SignerEvent::SignedTransaction { id, tx: tx_id });
+    ProcessorMessage::Completed {
+      key: self.keys[0].group_key().to_bytes().as_ref().to_vec(),
+      id,
+      tx: tx_id.as_ref().to_vec(),
+    }
   }
-  pub fn completed(&mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], tx: N::Transaction) {
+  #[must_use]
+  pub fn completed(
+    &mut self,
+    txn: &mut D::Transaction<'_>,
+    id: [u8; 32],
+    tx: N::Transaction,
+  ) -> Option<ProcessorMessage> {
     let first_completion = !self.already_completed(txn, id);
     // Save this completion to the DB
@@ -282,17 +288,21 @@ impl<N: Network, D: Db> Signer<N, D> {
     SignerDb::<N, D>::complete(txn, id, &tx);
     if first_completion {
-      self.complete(id, tx.id());
+      Some(self.complete(id, tx.id()))
+    } else {
+      None
     }
   }
+  /// Returns Some if the first completion.
   // Doesn't use any loops/retries since we'll eventually get this from the Scanner anyways
+  #[must_use]
   async fn claimed_eventuality_completion(
     &mut self,
     txn: &mut D::Transaction<'_>,
     id: [u8; 32],
     tx_id: &<N::Transaction as Transaction<N>>::Id,
-  ) -> bool {
+  ) -> Option<ProcessorMessage> {
     if let Some(eventuality) = SignerDb::<N, D>::eventuality(txn, id) {
       // Transaction hasn't hit our mempool/was dropped for a different signature
       // The latter can happen given certain latency conditions/a single malicious signer
@@ -305,7 +315,7 @@ impl<N: Network, D: Db> Signer<N, D> {
           hex::encode(id),
           "(or had another connectivity issue)",
         );
-        return false;
+        return None;
       };
       if self.network.confirm_completion(&eventuality, &tx) {
@@ -317,8 +327,7 @@ impl<N: Network, D: Db> Signer<N, D> {
         SignerDb::<N, D>::complete(txn, id, &tx);
         if first_completion {
-          self.complete(id, tx.id());
-          return true;
+          return Some(self.complete(id, tx.id()));
         }
       } else {
         warn!(
@@ -337,12 +346,18 @@ impl<N: Network, D: Db> Signer<N, D> {
        "which we already marked as completed",
      );
    }
-    false
+    None
  }
-  async fn attempt(&mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], attempt: u32) {
+  #[must_use]
+  async fn attempt(
+    &mut self,
+    txn: &mut D::Transaction<'_>,
+    id: [u8; 32],
+    attempt: u32,
+  ) -> Option<ProcessorMessage> {
    if self.already_completed(txn, id) {
-      return;
+      return None;
    }
    // Check if we're already working on this attempt
@@ -354,7 +369,7 @@ impl<N: Network, D: Db> Signer<N, D> {
          attempt,
          curr_attempt
        );
-        return;
+        return None;
      }
    }
@@ -363,7 +378,7 @@ impl<N: Network, D: Db> Signer<N, D> {
    // (also because we do need an owned tx anyways)
    let Some(tx) = self.signable.get(&id).cloned() else {
      warn!("told to attempt a TX we aren't currently signing for");
-      return;
+      return None;
    };
    // Delete any existing machines
@@ -395,7 +410,7 @@ impl<N: Network, D: Db> Signer<N, D> {
        hex::encode(id.id),
        id.attempt
      );
-      return;
+      return None;
    }
    SignerDb::<N, D>::attempt(txn, &id);
@@ -408,7 +423,7 @@ impl<N: Network, D: Db> Signer<N, D> {
    let machine = match self.network.attempt_send(keys.clone(), tx.clone()).await {
      Err(e) => {
        error!("failed to attempt {}, #{}: {:?}", hex::encode(id.id), id.attempt, e);
-        return;
+        return None;
      }
      Ok(machine) => machine,
    };
@@ -426,38 +441,41 @@ impl<N: Network, D: Db> Signer<N, D> {
    self.preprocessing.insert(id.id, (machines, preprocesses));
    // Broadcast our preprocess
-    self.events.push_back(SignerEvent::ProcessorMessage(ProcessorMessage::Preprocess {
-      id,
-      preprocesses: serialized_preprocesses,
-    }));
+    Some(ProcessorMessage::Preprocess { id, preprocesses: serialized_preprocesses })
  }
+  #[must_use]
  pub async fn sign_transaction(
    &mut self,
    txn: &mut D::Transaction<'_>,
    id: [u8; 32],
    tx: N::SignableTransaction,
    eventuality: N::Eventuality,
-  ) {
+  ) -> Option<ProcessorMessage> {
    // The caller is expected to re-issue sign orders on reboot
    // This is solely used by the rebroadcast task
    SignerDb::<N, D>::add_active_sign(txn, &id);
    if self.already_completed(txn, id) {
-      return;
+      return None;
    }
    SignerDb::<N, D>::save_eventuality(txn, id, eventuality);
    self.signable.insert(id, tx);
-    self.attempt(txn, id, 0).await;
+    self.attempt(txn, id, 0).await
  }
-  pub async fn handle(&mut self, txn: &mut D::Transaction<'_>, msg: CoordinatorMessage) {
+  #[must_use]
+  pub async fn handle(
+    &mut self,
+    txn: &mut D::Transaction<'_>,
+    msg: CoordinatorMessage,
+  ) -> Option<ProcessorMessage> {
    match msg {
      CoordinatorMessage::Preprocesses { id, mut preprocesses } => {
        if self.verify_id(&id).is_err() {
-          return;
+          return None;
        }
        let (machines, our_preprocesses) = match self.preprocessing.remove(&id.id) {
@@ -467,7 +485,7 @@ impl<N: Network, D: Db> Signer<N, D> {
              "not preprocessing for {}. this is an error if we didn't reboot",
              hex::encode(id.id)
            );
-            return;
+            return None;
          }
          Some(machine) => machine,
        };
@@ -516,15 +534,12 @@ impl<N: Network, D: Db> Signer<N, D> {
        self.signing.insert(id.id, (signature_machine.unwrap(), shares));
        // Broadcast our shares
-        self.events.push_back(SignerEvent::ProcessorMessage(ProcessorMessage::Share {
-          id,
-          shares: serialized_shares,
-        }));
+        Some(ProcessorMessage::Share { id, shares: serialized_shares })
      }
      CoordinatorMessage::Shares { id, mut shares } => {
        if self.verify_id(&id).is_err() {
-          return;
+          return None;
        }
        let (machine, our_shares) = match self.signing.remove(&id.id) {
@@ -540,7 +555,7 @@ impl<N: Network, D: Db> Signer<N, D> {
              "not preprocessing for {}. this is an error if we didn't reboot",
              hex::encode(id.id)
            );
-            return;
+            return None;
          }
          Some(machine) => machine,
        };
@@ -582,12 +597,10 @@ impl<N: Network, D: Db> Signer<N, D> {
        }
        // Stop trying to sign for this TX
-        self.complete(id.id, tx_id);
+        Some(self.complete(id.id, tx_id))
      }
-      CoordinatorMessage::Reattempt { id } => {
-        self.attempt(txn, id.id, id.attempt).await;
-      }
+      CoordinatorMessage::Reattempt { id } => self.attempt(txn, id.id, id.attempt).await,
      CoordinatorMessage::Completed { key: _, id, tx: mut tx_vec } => {
        let mut tx = <N::Transaction as Transaction<N>>::Id::default();
@@ -601,11 +614,11 @@ impl<N: Network, D: Db> Signer<N, D> {
            hex::encode(id),
            "that's not a valid TX ID",
          );
-          return;
+          return None;
        }
        tx.as_mut().copy_from_slice(&tx_vec);
-        self.claimed_eventuality_completion(txn, id, &tx).await;
+        self.claimed_eventuality_completion(txn, id, &tx).await
      }
    }
  }
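
Every handler that now returns a message is annotated #[must_use], so a call site that ignores the result is flagged at compile time rather than silently losing a coordinator message. A generic illustration of that warning (not code from this crate):

#[must_use]
fn completed() -> Option<String> {
  Some("message for the coordinator".to_string())
}

fn main() {
  // Warns: unused return value that must be used; dropping it silently would mean
  // the coordinator never hears about the completion.
  completed();

  // Correct: forward the message if one was produced.
  if let Some(msg) = completed() {
    println!("send: {msg}");
  }
}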


@@ -16,7 +16,7 @@ use messages::sign::*;
 use crate::{
   Payment, Plan,
   networks::{Output, Transaction, Network},
-  signer::{SignerEvent, Signer},
+  signer::Signer,
 };
 #[allow(clippy::type_complexity)]
@@ -33,9 +33,11 @@ pub async fn sign<N: Network>(
     attempt: 0,
   };
+  let mut group_key = None;
   let mut keys = HashMap::new();
   let mut txs = HashMap::new();
   for (i, (these_keys, this_tx)) in keys_txs.drain() {
+    group_key = Some(these_keys.group_key());
     keys.insert(i, these_keys);
     txs.insert(i, this_tx);
   }
@@ -52,14 +54,6 @@ pub async fn sign<N: Network>(
   }
   drop(keys);
-  for i in 1 ..= signers.len() {
-    let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
-    let (tx, eventuality) = txs.remove(&i).unwrap();
-    let mut txn = dbs.get_mut(&i).unwrap().txn();
-    signers.get_mut(&i).unwrap().sign_transaction(&mut txn, actual_id.id, tx, eventuality).await;
-    txn.commit();
-  }
   let mut signing_set = vec![];
   while signing_set.len() < usize::from(t) {
     let candidate = Participant::new(
@@ -72,29 +66,35 @@ pub async fn sign<N: Network>(
     signing_set.push(candidate);
   }
-  // All participants should emit a preprocess
   let mut preprocesses = HashMap::new();
   for i in 1 ..= signers.len() {
     let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
-    if let SignerEvent::ProcessorMessage(ProcessorMessage::Preprocess {
-      id,
-      preprocesses: mut these_preprocesses,
-    }) = signers.get_mut(&i).unwrap().events.pop_front().unwrap()
+    let (tx, eventuality) = txs.remove(&i).unwrap();
+    let mut txn = dbs.get_mut(&i).unwrap().txn();
+    match signers
+      .get_mut(&i)
+      .unwrap()
+      .sign_transaction(&mut txn, actual_id.id, tx, eventuality)
+      .await
     {
-      assert_eq!(id, actual_id);
-      assert_eq!(these_preprocesses.len(), 1);
-      if signing_set.contains(&i) {
-        preprocesses.insert(i, these_preprocesses.swap_remove(0));
-      }
-    } else {
-      panic!("didn't get preprocess back");
+      // All participants should emit a preprocess
+      Some(ProcessorMessage::Preprocess { id, preprocesses: mut these_preprocesses }) => {
+        assert_eq!(id, actual_id);
+        assert_eq!(these_preprocesses.len(), 1);
+        if signing_set.contains(&i) {
+          preprocesses.insert(i, these_preprocesses.swap_remove(0));
+        }
+      }
+      _ => panic!("didn't get preprocess back"),
     }
+    txn.commit();
   }
   let mut shares = HashMap::new();
   for i in &signing_set {
     let mut txn = dbs.get_mut(i).unwrap().txn();
-    signers
+    match signers
       .get_mut(i)
       .unwrap()
       .handle(
@@ -104,52 +104,48 @@ pub async fn sign<N: Network>(
          preprocesses: clone_without(&preprocesses, i),
        },
      )
-      .await;
-    txn.commit();
-    if let SignerEvent::ProcessorMessage(ProcessorMessage::Share { id, shares: mut these_shares }) =
-      signers.get_mut(i).unwrap().events.pop_front().unwrap()
+      .await
+      .unwrap()
    {
-      assert_eq!(id, actual_id);
-      assert_eq!(these_shares.len(), 1);
-      shares.insert(*i, these_shares.swap_remove(0));
-    } else {
-      panic!("didn't get share back");
+      ProcessorMessage::Share { id, shares: mut these_shares } => {
+        assert_eq!(id, actual_id);
+        assert_eq!(these_shares.len(), 1);
+        shares.insert(*i, these_shares.swap_remove(0));
+      }
+      _ => panic!("didn't get share back"),
    }
+    txn.commit();
  }
  let mut tx_id = None;
  for i in &signing_set {
    let mut txn = dbs.get_mut(i).unwrap().txn();
-    signers
+    match signers
      .get_mut(i)
      .unwrap()
      .handle(
        &mut txn,
        CoordinatorMessage::Shares { id: actual_id.clone(), shares: clone_without(&shares, i) },
      )
-      .await;
-    txn.commit();
-    if let SignerEvent::SignedTransaction { id, tx } =
-      signers.get_mut(i).unwrap().events.pop_front().unwrap()
+      .await
+      .unwrap()
    {
-      assert_eq!(id, actual_id.id);
-      if tx_id.is_none() {
-        tx_id = Some(tx.clone());
-      }
-      assert_eq!(tx_id, Some(tx));
-    } else {
-      panic!("didn't get TX back");
+      ProcessorMessage::Completed { key, id, tx } => {
+        assert_eq!(&key, group_key.unwrap().to_bytes().as_ref());
+        assert_eq!(id, actual_id.id);
+        if tx_id.is_none() {
+          tx_id = Some(tx.clone());
+        }
+        assert_eq!(tx_id, Some(tx));
+      }
+      _ => panic!("didn't get TX back"),
    }
+    txn.commit();
  }
-  // Make sure there's no events left
-  for (_, mut signer) in signers.drain() {
-    assert!(signer.events.pop_front().is_none());
-  }
-  tx_id.unwrap()
+  let mut typed_tx_id = <N::Transaction as Transaction<N>>::Id::default();
+  typed_tx_id.as_mut().copy_from_slice(tx_id.unwrap().as_ref());
+  typed_tx_id
 }
 pub async fn test_signer<N: Network>(network: N) {