// serai/processor/src/tests/substrate_signer.rs

use std::collections::HashMap;

use rand_core::{RngCore, OsRng};

use ciphersuite::group::GroupEncoding;
use frost::{
  curve::Ristretto,
  Participant,
  dkg::tests::{key_gen, clone_without},
};

use sp_application_crypto::{RuntimePublic, sr25519::Public};

use serai_db::{DbTxn, Db, MemDb};

use scale::Encode;

use serai_client::{primitives::*, in_instructions::primitives::*};

use messages::coordinator::*;
use crate::substrate_signer::{SubstrateSignerEvent, SubstrateSigner};
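
// End-to-end happy-path test: every signer emits a preprocess for the batch, a
// randomly selected threshold set exchanges preprocesses and shares, and each
// member of that set produces the same SignedBatch.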
#[tokio::test]
async fn test_substrate_signer() {
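  // Generate a random t-of-n Ristretto key set, one ThresholdKeys per participant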
  let keys = key_gen::<_, Ristretto>(&mut OsRng);

  let participant_one = Participant::new(1).unwrap();

  let id: u32 = 5;
  let block = BlockHash([0xaa; 32]);

  let batch = Batch {
    network: NetworkId::Monero,
    id,
    block,
    instructions: vec![
      InInstructionWithBalance {
        instruction: InInstruction::Transfer(SeraiAddress([0xbb; 32])),
        balance: Balance { coin: Coin::Bitcoin, amount: Amount(1000) },
      },
      InInstructionWithBalance {
        instruction: InInstruction::Dex(DexCall::SwapAndAddLiquidity(SeraiAddress([0xbb; 32]))),
        balance: Balance { coin: Coin::Monero, amount: Amount(9999999999999999) },
      },
    ],
  };
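
  // The sign ID every signer should derive for this batch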
  let actual_id = BatchSignId {
    key: keys.values().next().unwrap().group_key().to_bytes(),
    id: (batch.network, batch.id).encode().try_into().unwrap(),
    attempt: 0,
  };
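
  // Instantiate a signer (with its own in-memory DB) per participant and queue
  // the batch for signing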
  let mut signers = HashMap::new();
  let mut dbs = HashMap::new();
  let mut t = 0;
  for i in 1 ..= keys.len() {
    let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
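    // Grab this participant's key shares; the threshold parameter t is the same
    // for every participant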
    let keys = keys.get(&i).unwrap().clone();
    t = keys.params().t();

    let mut signer = SubstrateSigner::<MemDb>::new(NetworkId::Monero, vec![keys]);
    let mut db = MemDb::new();
    let mut txn = db.txn();
    signer.sign(&mut txn, batch.clone()).await;
    txn.commit();

    signers.insert(i, signer);
    dbs.insert(i, db);
  }
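
  // Randomly select t distinct participants to act as the signing set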
  let mut signing_set = vec![];
  while signing_set.len() < usize::from(t) {
    let candidate = Participant::new(
      u16::try_from((OsRng.next_u64() % u64::try_from(signers.len()).unwrap()) + 1).unwrap(),
    )
    .unwrap();
    if signing_set.contains(&candidate) {
      continue;
    }
    signing_set.push(candidate);
  }

  // All participants should emit a preprocess
  let mut preprocesses = HashMap::new();
  for i in 1 ..= signers.len() {
    let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
    if let SubstrateSignerEvent::ProcessorMessage(ProcessorMessage::BatchPreprocess {
      id,
      block: batch_block,
      preprocesses: mut these_preprocesses,
    }) = signers.get_mut(&i).unwrap().events.pop_front().unwrap()
    {
      assert_eq!(id, actual_id);
      assert_eq!(batch_block, block);
      assert_eq!(these_preprocesses.len(), 1);
      if signing_set.contains(&i) {
        preprocesses.insert(i, these_preprocesses.swap_remove(0));
      }
    } else {
      panic!("didn't get preprocess back");
    }
  }
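
  // Forward the other signers' preprocesses to each member of the signing set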
  let mut shares = HashMap::new();
  for i in &signing_set {
    let mut txn = dbs.get_mut(i).unwrap().txn();
    signers
      .get_mut(i)
      .unwrap()
      .handle(
        &mut txn,
        CoordinatorMessage::BatchPreprocesses {
          id: actual_id.clone(),
          preprocesses: clone_without(&preprocesses, i),
        },
      )
      .await;
    txn.commit();
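
    // Handling the preprocesses should cause this signer to emit its share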
    if let SubstrateSignerEvent::ProcessorMessage(ProcessorMessage::BatchShare {
      id,
      shares: mut these_shares,
    }) = signers.get_mut(i).unwrap().events.pop_front().unwrap()
    {
      assert_eq!(id, actual_id);
      assert_eq!(these_shares.len(), 1);
      shares.insert(*i, these_shares.swap_remove(0));
    } else {
      panic!("didn't get share back");
    }
  }
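
  // Forward the shares; each member of the signing set should now complete the
  // signature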
  for i in &signing_set {
    let mut txn = dbs.get_mut(i).unwrap().txn();
    signers
      .get_mut(i)
      .unwrap()
      .handle(
        &mut txn,
        CoordinatorMessage::BatchShares {
          id: actual_id.clone(),
          shares: clone_without(&shares, i),
        },
      )
      .await;
    txn.commit();
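
    // The resulting batch should be the one queued above, signed under the group key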
    if let SubstrateSignerEvent::SignedBatch(signed_batch) =
      signers.get_mut(i).unwrap().events.pop_front().unwrap()
    {
      assert_eq!(signed_batch.batch, batch);
      assert!(Public::from_raw(keys[&participant_one].group_key().to_bytes())
        .verify(&batch_message(&batch), &signed_batch.signature));
    } else {
      panic!("didn't get signed batch back");
    }
  }

  // Make sure there are no events left
  for (_, mut signer) in signers.drain() {
    assert!(signer.events.pop_front().is_none());
  }
}