use std::collections::HashMap;

use rand_core::{RngCore, OsRng};

use ciphersuite::group::GroupEncoding;
use frost::{
  curve::Ristretto,
  Participant,
  dkg::tests::{key_gen, clone_without},
};

use sp_application_crypto::{RuntimePublic, sr25519::Public};

use serai_db::{DbTxn, Db, MemDb};

use serai_client::{primitives::*, in_instructions::primitives::*};

use messages::{sign::SignId, coordinator::*};
use crate::substrate_signer::{SubstrateSignerEvent, SubstrateSigner};

#[tokio::test]
async fn test_substrate_signer() {
  let keys = key_gen::<_, Ristretto>(&mut OsRng);

  let participant_one = Participant::new(1).unwrap();

  let id: u32 = 5;
  let block = BlockHash([0xaa; 32]);

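  // The expected SignId; its id field isn't known until the signers emit their preprocesses,
  // so it's filled in from the first event below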
  let mut actual_id = SignId {
    key: keys.values().next().unwrap().group_key().to_bytes().to_vec(),
    id: [0; 32],
    attempt: 0,
  };

  let batch = Batch {
    network: NetworkId::Monero,
    id,
    block,
    instructions: vec![
      InInstructionWithBalance {
        instruction: InInstruction::Transfer(SeraiAddress([0xbb; 32])),
        balance: Balance { coin: Coin::Bitcoin, amount: Amount(1000) },
      },
      InInstructionWithBalance {
        instruction: InInstruction::Dex(Data::new(vec![0xcc; 128]).unwrap()),
        balance: Balance { coin: Coin::Monero, amount: Amount(9999999999999999) },
      },
    ],
  };

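  // Spawn a signer per participant and have each queue the batch for signing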
  let mut signers = HashMap::new();
  let mut dbs = HashMap::new();
  let mut t = 0;
  for i in 1 ..= keys.len() {
    let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
    let keys = keys.get(&i).unwrap().clone();
    t = keys.params().t();

    let mut signer = SubstrateSigner::<MemDb>::new(NetworkId::Monero, keys);
    let mut db = MemDb::new();
    let mut txn = db.txn();
    signer.sign(&mut txn, batch.clone()).await;
    txn.commit();

    signers.insert(i, signer);
    dbs.insert(i, db);
  }

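  // Randomly select t distinct participants to form the signing set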
  let mut signing_set = vec![];
  while signing_set.len() < usize::from(t) {
    let candidate = Participant::new(
      u16::try_from((OsRng.next_u64() % u64::try_from(signers.len()).unwrap()) + 1).unwrap(),
    )
    .unwrap();
    if signing_set.contains(&candidate) {
      continue;
    }
    signing_set.push(candidate);
  }

  // All participants should emit a preprocess
  let mut preprocesses = HashMap::new();
  for i in 1 ..= signers.len() {
    let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
    if let SubstrateSignerEvent::ProcessorMessage(ProcessorMessage::BatchPreprocess {
      id,
      block: batch_block,
      preprocess,
    }) = signers.get_mut(&i).unwrap().events.pop_front().unwrap()
    {
      if actual_id.id == [0; 32] {
        actual_id.id = id.id;
      }
      assert_eq!(id, actual_id);
      assert_eq!(batch_block, block);
      // Only collect the preprocesses of the selected signing set
      if signing_set.contains(&i) {
        preprocesses.insert(i, preprocess);
      }
    } else {
      panic!("didn't get preprocess back");
    }
  }

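  // Feed each selected signer the other signers' preprocesses; each should respond with a share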
  let mut shares = HashMap::new();
  for i in &signing_set {
    let mut txn = dbs.get_mut(i).unwrap().txn();
    signers
      .get_mut(i)
      .unwrap()
      .handle(
        &mut txn,
        CoordinatorMessage::BatchPreprocesses {
          id: actual_id.clone(),
          preprocesses: clone_without(&preprocesses, i),
        },
      )
      .await;
    txn.commit();

    if let SubstrateSignerEvent::ProcessorMessage(ProcessorMessage::BatchShare { id, share }) =
      signers.get_mut(i).unwrap().events.pop_front().unwrap()
    {
      assert_eq!(id, actual_id);
      shares.insert(*i, share);
    } else {
      panic!("didn't get share back");
    }
  }

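  // Feed each selected signer the other signers' shares; each should now emit the signed batch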
  for i in &signing_set {
    let mut txn = dbs.get_mut(i).unwrap().txn();
    signers
      .get_mut(i)
      .unwrap()
      .handle(
        &mut txn,
        CoordinatorMessage::BatchShares {
          id: actual_id.clone(),
          shares: clone_without(&shares, i),
        },
      )
      .await;
    txn.commit();

    if let SubstrateSignerEvent::SignedBatch(signed_batch) =
      signers.get_mut(i).unwrap().events.pop_front().unwrap()
    {
      assert_eq!(signed_batch.batch, batch);
      // SubstrateSigner will believe this is the first batch for this set, hence `true`
      assert!(Public::from_raw(keys[&participant_one].group_key().to_bytes())
        .verify(&batch_message(true, &batch), &signed_batch.signature));
    } else {
      panic!("didn't get signed batch back");
    }
  }

  // Make sure there's no events left
  for (_, mut signer) in signers.drain() {
    assert!(signer.events.pop_front().is_none());
  }
}