* Remove NetworkId from processor-messages

Because a message's intent already binds to its sender/receiver, the NetworkId isn't needed within the intent.

The processor knows what the network is.

The coordinator knows which to use because it's sending this message to the
processor for that network.

Also removes the unused zeroize.

* ProcessorMessage::Completed use Session instead of key

* Move SubstrateSignId to Session

* Finish replacing key with session
Luke Parker authored 2023-11-26 12:14:23 -05:00, committed by GitHub · commit 571195bfda (parent b79cf8abde)
31 changed files with 304 additions and 455 deletions
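For context, the net effect on the shared ID types is small but pervasive: signing protocols are now identified by Session rather than by the group key's raw bytes. A minimal, self-contained sketch of the before/after shape (stand-in types, not the crate's literal definitions; Session's inner u32 is an assumption):

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Session(pub u32);

// Before: signing protocols were identified by serialized key bytes
#[allow(dead_code)]
pub struct OldSignId {
  pub key: Vec<u8>,
  pub id: [u8; 32],
  pub attempt: u32,
}

// After: the session suffices. The network is implied by which processor the
// coordinator is talking to, and a session has exactly one key pair
pub struct SignId {
  pub session: Session,
  pub id: [u8; 32],
  pub attempt: u32,
}

fn main() {
  let id = SignId { session: Session(0), id: [0xaa; 32], attempt: 0 };
  // (session, id, attempt) is unique within a given processor's network
  println!("signing for session {:?}, attempt {}", id.session, id.attempt);
}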

Cargo.lock (generated)

@ -7862,7 +7862,6 @@ dependencies = [
"serai-in-instructions-primitives",
"serai-primitives",
"serai-validator-sets-primitives",
"zeroize",
]
[[package]]


@ -51,12 +51,10 @@ macro_rules! create_db {
($($arg),*).encode()
)
}
#[allow(dead_code)]
pub fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) {
let key = $field_name::key($($arg),*);
txn.put(&key, borsh::to_vec(data).unwrap());
}
#[allow(dead_code)]
pub fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> {
getter.get($field_name::key($($arg),*)).map(|data| {
borsh::from_slice(data.as_ref()).unwrap()

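For readers unfamiliar with Serai's create_db! macro, whose generated accessors this hunk touches, here's a hand-expanded sketch of what it produces for one table. A HashMap stands in for serai-db's transaction types, and plain byte concatenation stands in for the real scale/borsh encodings:

use std::collections::HashMap;

struct MemTxn(HashMap<Vec<u8>, Vec<u8>>);

// Hand-expansion of `PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>`
struct PlanIds;
impl PlanIds {
  fn key(genesis: &[u8], block: u64) -> Vec<u8> {
    // Domain-separate by table name, then append the encoded arguments
    let mut key = b"PlanIds".to_vec();
    key.extend(genesis);
    key.extend(block.to_le_bytes());
    key
  }
  fn set(txn: &mut MemTxn, genesis: &[u8], block: u64, data: &Vec<[u8; 32]>) {
    let value = data.iter().flatten().copied().collect::<Vec<u8>>();
    txn.0.insert(Self::key(genesis, block), value);
  }
  fn get(txn: &MemTxn, genesis: &[u8], block: u64) -> Option<Vec<[u8; 32]>> {
    txn.0.get(&Self::key(genesis, block)).map(|bytes| {
      bytes.chunks_exact(32).map(|c| <[u8; 32]>::try_from(c).unwrap()).collect()
    })
  }
}

fn main() {
  let mut txn = MemTxn(HashMap::new());
  PlanIds::set(&mut txn, &[1; 32], 5, &vec![[0xaa; 32]]);
  assert_eq!(PlanIds::get(&txn, &[1; 32], 5).unwrap().len(), 1);
}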

@ -34,18 +34,12 @@ impl<D: Db> MainDb<D> {
getter.get(Self::handled_message_key(network, id)).is_some()
}
fn in_tributary_key(set: ValidatorSet) -> Vec<u8> {
Self::main_key(b"in_tributary", set.encode())
}
fn active_tributaries_key() -> Vec<u8> {
Self::main_key(b"active_tributaries", [])
}
fn retired_tributary_key(set: ValidatorSet) -> Vec<u8> {
Self::main_key(b"retired_tributary", set.encode())
}
pub fn in_tributary<G: Get>(getter: &G, set: ValidatorSet) -> bool {
getter.get(Self::in_tributary_key(set)).is_some()
}
pub fn active_tributaries<G: Get>(getter: &G) -> (Vec<u8>, Vec<TributarySpec>) {
let bytes = getter.get(Self::active_tributaries_key()).unwrap_or(vec![]);
let mut bytes_ref: &[u8] = bytes.as_ref();
@ -58,8 +52,6 @@ impl<D: Db> MainDb<D> {
(bytes, tributaries)
}
pub fn add_participating_in_tributary(txn: &mut D::Transaction<'_>, spec: &TributarySpec) {
txn.put(Self::in_tributary_key(spec.set()), []);
let key = Self::active_tributaries_key();
let (mut existing_bytes, existing) = Self::active_tributaries(txn);
for tributary in &existing {


@ -57,7 +57,7 @@ pub mod processors;
use processors::Processors;
mod substrate;
use substrate::{CosignTransactions, SubstrateDb};
use substrate::CosignTransactions;
mod cosign_evaluator;
use cosign_evaluator::CosignEvaluator;
@ -116,7 +116,7 @@ async fn add_tributary<D: Db, Pro: Processors, P: P2p>(
.send(
set.network,
processor_messages::key_gen::CoordinatorMessage::GenerateKey {
id: processor_messages::key_gen::KeyGenId { set, attempt: 0 },
id: processor_messages::key_gen::KeyGenId { session: set.session, attempt: 0 },
params: frost::ThresholdParams::new(spec.t(), spec.n(), our_i.start).unwrap(),
shares: u16::from(our_i.end) - u16::from(our_i.start),
},
@ -195,66 +195,50 @@ async fn handle_processor_message<D: Db, P: P2p>(
// We'll only receive these if we fired GenerateKey, which we'll only do if we're
// in-set, making the Tributary relevant
ProcessorMessage::KeyGen(inner_msg) => match inner_msg {
key_gen::ProcessorMessage::Commitments { id, .. } => Some(id.set.session),
key_gen::ProcessorMessage::InvalidCommitments { id, .. } => Some(id.set.session),
key_gen::ProcessorMessage::Shares { id, .. } => Some(id.set.session),
key_gen::ProcessorMessage::InvalidShare { id, .. } => Some(id.set.session),
key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => Some(id.set.session),
key_gen::ProcessorMessage::Blame { id, .. } => Some(id.set.session),
key_gen::ProcessorMessage::Commitments { id, .. } => Some(id.session),
key_gen::ProcessorMessage::InvalidCommitments { id, .. } => Some(id.session),
key_gen::ProcessorMessage::Shares { id, .. } => Some(id.session),
key_gen::ProcessorMessage::InvalidShare { id, .. } => Some(id.session),
key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => Some(id.session),
key_gen::ProcessorMessage::Blame { id, .. } => Some(id.session),
},
// TODO: Review replacing key with Session in messages?
ProcessorMessage::Sign(inner_msg) => match inner_msg {
// We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing
sign::ProcessorMessage::InvalidParticipant { id, .. } => {
Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
}
sign::ProcessorMessage::Preprocess { id, .. } => {
Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
}
sign::ProcessorMessage::Share { id, .. } => {
Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
}
sign::ProcessorMessage::InvalidParticipant { id, .. } => Some(id.session),
sign::ProcessorMessage::Preprocess { id, .. } => Some(id.session),
sign::ProcessorMessage::Share { id, .. } => Some(id.session),
// While the Processor's Scanner will always emit Completed, that's routed through the
// Signer and only becomes a ProcessorMessage::Completed if the Signer is present and
// confirms it
sign::ProcessorMessage::Completed { key, .. } => {
Some(SubstrateDb::<D>::session_for_key(&txn, key).unwrap())
}
sign::ProcessorMessage::Completed { session, .. } => Some(*session),
},
ProcessorMessage::Coordinator(inner_msg) => match inner_msg {
// This is a special case as it's relevant to *all* Tributaries for this network
// This is a special case as it's relevant to *all* Tributaries for this network we're
// signing in
// It doesn't return a Tributary to become `relevant_tributary` though
coordinator::ProcessorMessage::SubstrateBlockAck { network, block, plans } => {
assert_eq!(
*network, msg.network,
"processor claimed to be a different network than it was for SubstrateBlockAck",
);
coordinator::ProcessorMessage::SubstrateBlockAck { block, plans } => {
// Get the sessions for these keys
let keys = plans.iter().map(|plan| plan.key.clone()).collect::<HashSet<_>>();
let mut sessions = vec![];
for key in keys {
let session = SubstrateDb::<D>::session_for_key(&txn, &key).unwrap();
// Only keep them if we're in the Tributary AND they haven't been retired
let set = ValidatorSet { network: *network, session };
if MainDb::<D>::in_tributary(&txn, set) && (!MainDb::<D>::is_tributary_retired(&txn, set))
{
sessions.push((session, key));
}
}
let sessions = plans
.iter()
.map(|plan| plan.session)
.filter(|session| {
!MainDb::<D>::is_tributary_retired(&txn, ValidatorSet { network, session: *session })
})
.collect::<HashSet<_>>();
// Ensure we have the Tributaries
for (session, _) in &sessions {
for session in &sessions {
if !tributaries.contains_key(session) {
return false;
}
}
for (session, key) in sessions {
for session in sessions {
let tributary = &tributaries[&session];
let plans = plans
.iter()
.filter_map(|plan| Some(plan.id).filter(|_| plan.key == key))
.filter_map(|plan| Some(plan.id).filter(|_| plan.session == session))
.collect::<Vec<_>>();
PlanIds::set(&mut txn, &tributary.spec.genesis(), *block, &plans);
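The rewritten SubstrateBlockAck handling no longer resolves keys to sessions via the DB: plans carry their Session, so the coordinator dedupes sessions and drops retired Tributaries in one pass. A self-contained sketch (stand-in types; the retired check is a closure here):

use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Session(u32);

struct PlanMeta {
  session: Session,
  #[allow(dead_code)]
  id: [u8; 32],
}

fn sessions_to_ack(plans: &[PlanMeta], is_retired: impl Fn(Session) -> bool) -> HashSet<Session> {
  plans.iter().map(|plan| plan.session).filter(|session| !is_retired(*session)).collect()
}

fn main() {
  let plans = vec![
    PlanMeta { session: Session(0), id: [0; 32] },
    PlanMeta { session: Session(1), id: [1; 32] },
  ];
  // Session 0's Tributary is retired, so only session 1 is acked
  let sessions = sessions_to_ack(&plans, |session| session == Session(0));
  assert_eq!(sessions, HashSet::from([Session(1)]));
}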
@ -286,18 +270,10 @@ async fn handle_processor_message<D: Db, P: P2p>(
None
}
// We'll only fire these if we are the Substrate signer, making the Tributary relevant
coordinator::ProcessorMessage::InvalidParticipant { id, .. } => {
Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
}
coordinator::ProcessorMessage::CosignPreprocess { id, .. } => {
Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
}
coordinator::ProcessorMessage::BatchPreprocess { id, .. } => {
Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
}
coordinator::ProcessorMessage::SubstrateShare { id, .. } => {
Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
}
coordinator::ProcessorMessage::InvalidParticipant { id, .. } => Some(id.session),
coordinator::ProcessorMessage::CosignPreprocess { id, .. } => Some(id.session),
coordinator::ProcessorMessage::BatchPreprocess { id, .. } => Some(id.session),
coordinator::ProcessorMessage::SubstrateShare { id, .. } => Some(id.session),
coordinator::ProcessorMessage::CosignedBlock { block_number, block, signature } => {
let cosigned_block = CosignedBlock {
network,
@ -462,11 +438,6 @@ async fn handle_processor_message<D: Db, P: P2p>(
}]
}
key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => {
assert_eq!(
id.set.network, msg.network,
"processor claimed to be a different network than it was for in InvalidShare",
);
// Check if the MuSig signature had any errors as if so, we need to provide
// RemoveParticipant
// As for the safety of calling error_generating_key_pair, the processor is presumed
@ -490,11 +461,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
txs
}
key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => {
assert_eq!(
id.set.network, msg.network,
"processor claimed to be a different network than it was for in GeneratedKeyPair",
);
// TODO2: Also check the other KeyGenId fields
// TODO2: Check the KeyGenId fields
// Tell the Tributary the key pair, get back the share for the MuSig signature
let share = crate::tributary::generated_key_pair::<D>(
@ -514,11 +481,9 @@ async fn handle_processor_message<D: Db, P: P2p>(
}
}
}
key_gen::ProcessorMessage::Blame { id, participant } => {
assert_eq!(
id.set.network, msg.network,
"processor claimed to be a different network than it was for in Blame",
);
// This is a response to the ordered VerifyBlame, which is why this satisfies the provided
// transaction's need to be perfectly ordered
key_gen::ProcessorMessage::Blame { id: _, participant } => {
vec![Transaction::RemoveParticipant(participant)]
}
},
@ -556,7 +521,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
signed: Transaction::empty_signed(),
})]
}
sign::ProcessorMessage::Completed { key: _, id, tx } => {
sign::ProcessorMessage::Completed { session: _, id, tx } => {
let r = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
#[allow(non_snake_case)]
let R = <Ristretto as Ciphersuite>::generator() * r.deref();


@ -1,12 +1,12 @@
use scale::{Encode, Decode};
pub use serai_db::*;
use scale::Encode;
use serai_client::{
primitives::NetworkId,
validator_sets::primitives::{Session, ValidatorSet, KeyPair},
validator_sets::primitives::{Session, ValidatorSet},
};
pub use serai_db::*;
create_db! {
SubstrateDb {
CosignTriggered: () -> (),
@ -86,24 +86,6 @@ impl<D: Db> SubstrateDb<D> {
txn.put(Self::event_key(&id, index), []);
}
fn session_key(key: &[u8]) -> Vec<u8> {
Self::substrate_key(b"session", key)
}
pub fn session_for_key<G: Get>(getter: &G, key: &[u8]) -> Option<Session> {
getter.get(Self::session_key(key)).map(|bytes| Session::decode(&mut bytes.as_ref()).unwrap())
}
pub fn save_session_for_keys(txn: &mut D::Transaction<'_>, key_pair: &KeyPair, session: Session) {
let session = session.encode();
let key_0 = Self::session_key(&key_pair.0);
let existing = txn.get(&key_0);
// This may trigger if 100% of a DKG are malicious, and they create a key equivalent to a prior
// key. Since it requires 100% maliciousness, not just 67% maliciousness, this will only assert
// in a modified-to-be-malicious stack, making it safe
assert!(existing.is_none() || (existing.as_ref() == Some(&session)));
txn.put(key_0, session.clone());
txn.put(Self::session_key(&key_pair.1), session);
}
fn batch_instructions_key(network: NetworkId, id: u32) -> Vec<u8> {
Self::substrate_key(b"batch", (network, id).encode())
}

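The deleted session_for_key/save_session_for_keys pair existed solely to map key bytes back to a Session when handling later messages. In spirit it was the following index (a HashMap sketch, not the real DB-backed code); with Session now carried in the messages themselves, both the index and its assert become unnecessary:

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Session(u32);

struct SessionForKey(HashMap<Vec<u8>, Session>);

impl SessionForKey {
  fn save(&mut self, key: &[u8], session: Session) {
    let existing = self.0.get(key);
    // Only triggers if a DKG reproduces a prior key, which requires 100%
    // malicious participants, hence the original comment calling this safe
    assert!(existing.is_none() || (existing == Some(&session)));
    self.0.insert(key.to_vec(), session);
  }
}

fn main() {
  let mut index = SessionForKey(HashMap::new());
  index.save(b"substrate key bytes", Session(0));
  index.save(b"substrate key bytes", Session(0)); // Idempotent, doesn't assert
}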

@ -30,7 +30,7 @@ use tokio::{sync::mpsc, time::sleep};
use crate::{
Db,
processors::Processors,
tributary::{TributarySpec, SeraiBlockNumber, KeyPairDb},
tributary::{TributarySpec, SeraiBlockNumber},
};
mod db;
@ -116,19 +116,13 @@ async fn handle_new_set<D: Db>(
Ok(())
}
async fn handle_key_gen<D: Db, Pro: Processors>(
db: &mut D,
async fn handle_key_gen<Pro: Processors>(
processors: &Pro,
serai: &Serai,
block: &Block,
set: ValidatorSet,
key_pair: KeyPair,
) -> Result<(), SeraiError> {
// This has to be saved *before* we send ConfirmKeyPair
let mut txn = db.txn();
SubstrateDb::<D>::save_session_for_keys(&mut txn, &key_pair, set.session);
txn.commit();
processors
.send(
set.network,
@ -144,7 +138,7 @@ async fn handle_key_gen<D: Db, Pro: Processors>(
// block which has a time greater than or equal to the Serai time
.unwrap_or(BlockHash([0; 32])),
},
set,
session: set.session,
key_pair,
},
)
@ -232,7 +226,6 @@ async fn handle_batch_and_burns<D: Db, Pro: Processors>(
serai_time: block.time().unwrap() / 1000,
network_latest_finalized_block,
},
network,
block: block.number(),
burns: burns.remove(&network).unwrap(),
batches: batches.remove(&network).unwrap(),
@ -291,13 +284,7 @@ async fn handle_block<D: Db, Pro: Processors>(
if !SubstrateDb::<D>::handled_event(&db.0, hash, event_id) {
log::info!("found fresh key gen event {:?}", key_gen);
if let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen {
// Immediately ensure this key pair is accessible to the tributary, before we fire any
// events off of it
let mut txn = db.0.txn();
KeyPairDb::set(&mut txn, set, &key_pair);
txn.commit();
handle_key_gen(&mut db.0, processors, serai, &block, set, key_pair).await?;
handle_key_gen(processors, serai, &block, set, key_pair).await?;
} else {
panic!("KeyGen event wasn't KeyGen: {key_gen:?}");
}


@ -132,7 +132,7 @@ async fn dkg_test() {
assert_eq!(
msgs.pop_front().unwrap(),
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
id: KeyGenId { set: spec.set(), attempt: 0 },
id: KeyGenId { session: spec.set().session, attempt: 0 },
commitments: expected_commitments
})
);
@ -150,7 +150,7 @@ async fn dkg_test() {
assert_eq!(
msgs.pop_front().unwrap(),
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
id: KeyGenId { set: spec.set(), attempt: 0 },
id: KeyGenId { session: spec.set().session, attempt: 0 },
commitments: expected_commitments
})
);
@ -214,7 +214,7 @@ async fn dkg_test() {
// Each scanner should emit a distinct shares message
let shares_for = |i: usize| {
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
id: KeyGenId { set: spec.set(), attempt: 0 },
id: KeyGenId { session: spec.set().session, attempt: 0 },
shares: vec![txs
.iter()
.enumerate()
@ -267,7 +267,7 @@ async fn dkg_test() {
assert_eq!(
msgs.pop_front().unwrap(),
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
id: KeyGenId { set: spec.set(), attempt: 0 },
id: KeyGenId { session: spec.set().session, attempt: 0 },
commitments: expected_commitments
})
);


@ -5,7 +5,7 @@ use zeroize::Zeroizing;
use ciphersuite::{Ciphersuite, Ristretto, group::GroupEncoding};
use frost::Participant;
use serai_client::validator_sets::primitives::{ValidatorSet, KeyPair};
use serai_client::validator_sets::primitives::KeyPair;
use processor_messages::coordinator::SubstrateSignableId;
@ -50,7 +50,6 @@ create_db!(
PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,
ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
CurrentlyCompletingKeyPair: (genesis: [u8; 32]) -> KeyPair,
KeyPairDb: (set: ValidatorSet) -> KeyPair,
AttemptDb: (genesis: [u8; 32], topic: &Topic) -> u32,
DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16,
DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec<u8>,


@ -30,7 +30,7 @@ use crate::{
nonce_decider::NonceDecider,
dkg_confirmer::DkgConfirmer,
scanner::{RecognizedIdType, RIDTrait},
FatallySlashed, DkgShare, PlanIds, ConfirmationNonces, KeyPairDb, AttemptDb, DataDb,
FatallySlashed, DkgShare, PlanIds, ConfirmationNonces, AttemptDb, DataDb,
},
};
@ -260,7 +260,7 @@ pub(crate) async fn handle_application_tx<
.send(
spec.set().network,
key_gen::CoordinatorMessage::Commitments {
id: KeyGenId { set: spec.set(), attempt },
id: KeyGenId { session: spec.set().session, attempt },
commitments,
},
)
@ -397,7 +397,7 @@ pub(crate) async fn handle_application_tx<
.send(
spec.set().network,
key_gen::CoordinatorMessage::Shares {
id: KeyGenId { set: spec.set(), attempt },
id: KeyGenId { session: spec.set().session, attempt },
shares: expanded_shares,
},
)
@ -443,7 +443,7 @@ pub(crate) async fn handle_application_tx<
.send(
spec.set().network,
key_gen::CoordinatorMessage::VerifyBlame {
id: KeyGenId { set: spec.set(), attempt },
id: KeyGenId { session: spec.set().session, attempt },
accuser,
accused: faulty,
share,
@ -504,15 +504,6 @@ pub(crate) async fn handle_application_tx<
SubstrateSignableId::CosigningSubstrateBlock(hash),
);
let key = loop {
let Some(key_pair) = KeyPairDb::get(txn, spec.set()) else {
// This can happen based on a timing condition
log::warn!("CosignSubstrateBlock yet keys weren't set yet");
tokio::time::sleep(core::time::Duration::from_secs(1)).await;
continue;
};
break key_pair.0.into();
};
let block_number = SeraiBlockNumber::get(txn, hash)
.expect("CosignSubstrateBlock yet didn't save Serai block number");
processors
@ -520,7 +511,7 @@ pub(crate) async fn handle_application_tx<
spec.set().network,
coordinator::CoordinatorMessage::CosignSubstrateBlock {
id: SubstrateSignId {
key,
session: spec.set().session,
id: SubstrateSignableId::CosigningSubstrateBlock(hash),
attempt: 0,
},
@ -579,12 +570,15 @@ pub(crate) async fn handle_application_tx<
Accumulation::Ready(DataSet::Participating(mut preprocesses)) => {
unflatten(spec, &mut preprocesses);
NonceDecider::selected_for_signing_substrate(txn, genesis, data.plan);
let key = KeyPairDb::get(txn, spec.set()).unwrap().0 .0;
processors
.send(
spec.set().network,
coordinator::CoordinatorMessage::SubstratePreprocesses {
id: SubstrateSignId { key, id: data.plan, attempt: data.attempt },
id: SubstrateSignId {
session: spec.set().session,
id: data.plan,
attempt: data.attempt,
},
preprocesses: preprocesses
.into_iter()
.map(|(k, v)| (k, v.try_into().unwrap()))
@ -613,12 +607,15 @@ pub(crate) async fn handle_application_tx<
) {
Accumulation::Ready(DataSet::Participating(mut shares)) => {
unflatten(spec, &mut shares);
let key = KeyPairDb::get(txn, spec.set()).unwrap().0 .0;
processors
.send(
spec.set().network,
coordinator::CoordinatorMessage::SubstrateShares {
id: SubstrateSignId { key, id: data.plan, attempt: data.attempt },
id: SubstrateSignId {
session: spec.set().session,
id: data.plan,
attempt: data.attempt,
},
shares: shares
.into_iter()
.map(|(validator, share)| (validator, share.try_into().unwrap()))
@ -636,7 +633,6 @@ pub(crate) async fn handle_application_tx<
let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
return;
};
let key_pair = KeyPairDb::get(txn, spec.set());
match handle(
txn,
&DataSpecification {
@ -654,14 +650,7 @@ pub(crate) async fn handle_application_tx<
.send(
spec.set().network,
sign::CoordinatorMessage::Preprocesses {
id: SignId {
key: key_pair
.expect("completed SignPreprocess despite not setting the key pair")
.1
.into(),
id: data.plan,
attempt: data.attempt,
},
id: SignId { session: spec.set().session, id: data.plan, attempt: data.attempt },
preprocesses,
},
)
@ -675,7 +664,6 @@ pub(crate) async fn handle_application_tx<
let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
return;
};
let key_pair = KeyPairDb::get(txn, spec.set());
match handle(
txn,
&DataSpecification {
@ -692,14 +680,7 @@ pub(crate) async fn handle_application_tx<
.send(
spec.set().network,
sign::CoordinatorMessage::Shares {
id: SignId {
key: key_pair
.expect("completed SignShares despite not setting the key pair")
.1
.into(),
id: data.plan,
attempt: data.attempt,
},
id: SignId { session: spec.set().session, id: data.plan, attempt: data.attempt },
shares,
},
)
@ -727,13 +708,15 @@ pub(crate) async fn handle_application_tx<
};
// TODO: Confirm this signer hasn't prior published a completion
let Some(key_pair) = KeyPairDb::get(txn, spec.set()) else {
panic!("SignCompleted for recognized plan ID despite not having a key pair for this set")
};
processors
.send(
spec.set().network,
sign::CoordinatorMessage::Completed { key: key_pair.1.to_vec(), id: plan, tx: tx_hash },
sign::CoordinatorMessage::Completed {
session: spec.set().session,
id: plan,
tx: tx_hash,
},
)
.await;
}


@ -69,12 +69,7 @@ mod binaries {
// Assert one, and only one of these, is the coordinator
assert!(matches!(meta.from, Service::Coordinator) ^ matches!(meta.to, Service::Coordinator));
// Verify (from, intent) hasn't been prior seen
// At the time of writing, intents should be unique even across `from`. There's a DoS where
// a service sends another service's intent, causing the other service to have their message
// dropped though.
// Including from prevents that DoS, and allows simplifying intents to solely unique within
// a service (not within all of Serai).
// Verify (from, to, intent) hasn't been prior seen
fn key(domain: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
[&[u8::try_from(domain.len()).unwrap()], domain, key.as_ref()].concat()
}

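The replay check now keys over (from, to, intent). A runnable sketch using the diff's length-prefixing key helper, with a stand-in Service enum and a HashSet in place of the DB:

use std::collections::HashSet;

#[derive(Clone, Copy, Debug)]
enum Service {
  Coordinator,
  Processor,
}

fn key(domain: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
  [&[u8::try_from(domain.len()).unwrap()], domain, key.as_ref()].concat()
}

fn seen_before(db: &mut HashSet<Vec<u8>>, from: Service, to: Service, intent: &[u8]) -> bool {
  // Scoping by (from, to) prevents one service from censoring another by
  // replaying its intents
  let mut scoped = vec![from as u8, to as u8];
  scoped.extend(intent);
  !db.insert(key(b"intent", scoped))
}

fn main() {
  let mut db = HashSet::new();
  assert!(!seen_before(&mut db, Service::Coordinator, Service::Processor, b"intent"));
  // The same intent from the same (from, to) pair is dropped as a replay
  assert!(seen_before(&mut db, Service::Coordinator, Service::Processor, b"intent"));
}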

@ -54,6 +54,7 @@ tokio = { version = "1", default-features = false, features = ["rt-multi-thread"
serai-db = { path = "../common/db", features = ["rocksdb"], optional = true }
serai-env = { path = "../common/env", optional = true }
# TODO: Replace with direct usage of primitives
serai-client = { path = "../substrate/client", default-features = false }
messages = { package = "serai-processor-messages", path = "./messages", optional = true }


@ -14,8 +14,6 @@ all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
zeroize = { version = "1", default-features = false, features = ["std", "derive"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }


@ -1,18 +1,16 @@
use std::collections::HashMap;
use zeroize::Zeroize;
use scale::{Encode, Decode};
use borsh::{BorshSerialize, BorshDeserialize};
use dkg::{Participant, ThresholdParams};
use serai_primitives::{BlockHash, NetworkId};
use serai_primitives::BlockHash;
use in_instructions_primitives::{Batch, SignedBatch};
use coins_primitives::OutInstructionWithBalance;
use validator_sets_primitives::{ValidatorSet, KeyPair};
use validator_sets_primitives::{Session, KeyPair};
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct SubstrateContext {
pub serai_time: u64,
pub network_latest_finalized_block: BlockHash,
@ -22,20 +20,10 @@ pub mod key_gen {
use super::*;
#[derive(
Clone,
Copy,
PartialEq,
Eq,
Hash,
Debug,
Zeroize,
Encode,
Decode,
BorshSerialize,
BorshDeserialize,
Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize,
)]
pub struct KeyGenId {
pub set: ValidatorSet,
pub session: Session,
pub attempt: u32,
}
@ -116,11 +104,9 @@ pub mod key_gen {
pub mod sign {
use super::*;
#[derive(
Clone, PartialEq, Eq, Hash, Debug, Zeroize, Encode, Decode, BorshSerialize, BorshDeserialize,
)]
#[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)]
pub struct SignId {
pub key: Vec<u8>,
pub session: Session,
pub id: [u8; 32],
pub attempt: u32,
}
@ -134,7 +120,7 @@ pub mod sign {
// Re-attempt a signing protocol.
Reattempt { id: SignId },
// Completed a signing protocol already.
Completed { key: Vec<u8>, id: [u8; 32], tx: Vec<u8> },
Completed { session: Session, id: [u8; 32], tx: Vec<u8> },
}
impl CoordinatorMessage {
@ -142,17 +128,17 @@ pub mod sign {
None
}
pub fn key(&self) -> &[u8] {
pub fn session(&self) -> Session {
match self {
CoordinatorMessage::Preprocesses { id, .. } => &id.key,
CoordinatorMessage::Shares { id, .. } => &id.key,
CoordinatorMessage::Reattempt { id } => &id.key,
CoordinatorMessage::Completed { key, .. } => key,
CoordinatorMessage::Preprocesses { id, .. } => id.session,
CoordinatorMessage::Shares { id, .. } => id.session,
CoordinatorMessage::Reattempt { id } => id.session,
CoordinatorMessage::Completed { session, .. } => *session,
}
}
}
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub enum ProcessorMessage {
// Participant sent an invalid message during the sign protocol.
InvalidParticipant { id: SignId, participant: Participant },
@ -161,7 +147,7 @@ pub mod sign {
// Signed share for the specified signing protocol.
Share { id: SignId, shares: Vec<Vec<u8>> },
// Completed a signing protocol already.
Completed { key: Vec<u8>, id: [u8; 32], tx: Vec<u8> },
Completed { session: Session, id: [u8; 32], tx: Vec<u8> },
}
}
@ -178,28 +164,16 @@ pub mod coordinator {
}
#[derive(
Clone,
Copy,
PartialEq,
Eq,
Hash,
Debug,
Zeroize,
Encode,
Decode,
BorshSerialize,
BorshDeserialize,
Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize,
)]
pub enum SubstrateSignableId {
CosigningSubstrateBlock([u8; 32]),
Batch([u8; 5]),
}
#[derive(
Clone, PartialEq, Eq, Hash, Debug, Zeroize, Encode, Decode, BorshSerialize, BorshDeserialize,
)]
#[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)]
pub struct SubstrateSignId {
pub key: [u8; 32],
pub session: Session,
pub id: SubstrateSignableId,
pub attempt: u32,
}
@ -226,26 +200,17 @@ pub mod coordinator {
CoordinatorMessage::BatchReattempt { .. } => None,
}
}
pub fn key(&self) -> &[u8] {
match self {
CoordinatorMessage::CosignSubstrateBlock { id, .. } => &id.key,
CoordinatorMessage::SubstratePreprocesses { id, .. } => &id.key,
CoordinatorMessage::SubstrateShares { id, .. } => &id.key,
CoordinatorMessage::BatchReattempt { id } => &id.key,
}
}
}
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct PlanMeta {
pub key: Vec<u8>,
pub session: Session,
pub id: [u8; 32],
}
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub enum ProcessorMessage {
SubstrateBlockAck { network: NetworkId, block: u64, plans: Vec<PlanMeta> },
SubstrateBlockAck { block: u64, plans: Vec<PlanMeta> },
InvalidParticipant { id: SubstrateSignId, participant: Participant },
CosignPreprocess { id: SubstrateSignId, preprocesses: Vec<[u8; 64]> },
BatchPreprocess { id: SubstrateSignId, block: BlockHash, preprocesses: Vec<[u8; 64]> },
@ -261,12 +226,11 @@ pub mod substrate {
pub enum CoordinatorMessage {
ConfirmKeyPair {
context: SubstrateContext,
set: ValidatorSet,
session: Session,
key_pair: KeyPair,
},
SubstrateBlock {
context: SubstrateContext,
network: NetworkId,
block: u64,
burns: Vec<OutInstructionWithBalance>,
batches: Vec<u32>,
@ -283,7 +247,7 @@ pub mod substrate {
}
}
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub enum ProcessorMessage {
Batch { batch: Batch },
SignedBatch { batch: SignedBatch },
@ -364,7 +328,7 @@ impl CoordinatorMessage {
pub fn intent(&self) -> Vec<u8> {
match self {
CoordinatorMessage::KeyGen(msg) => {
// Unique since key gen ID embeds the validator set and attempt
// Unique since key gen ID embeds the session and attempt
let (sub, id) = match msg {
key_gen::CoordinatorMessage::GenerateKey { id, .. } => (0, id),
key_gen::CoordinatorMessage::Commitments { id, .. } => (1, id),
@ -409,11 +373,9 @@ impl CoordinatorMessage {
}
CoordinatorMessage::Substrate(msg) => {
let (sub, id) = match msg {
// Unique since there's only one key pair for a set
substrate::CoordinatorMessage::ConfirmKeyPair { set, .. } => (0, set.encode()),
substrate::CoordinatorMessage::SubstrateBlock { network, block, .. } => {
(1, (network, block).encode())
}
// Unique since there's only one key pair for a session
substrate::CoordinatorMessage::ConfirmKeyPair { session, .. } => (0, session.encode()),
substrate::CoordinatorMessage::SubstrateBlock { block, .. } => (1, block.encode()),
};
let mut res = vec![COORDINATOR_UID, TYPE_SUBSTRATE_UID, sub];
@ -464,9 +426,7 @@ impl ProcessorMessage {
}
ProcessorMessage::Coordinator(msg) => {
let (sub, id) = match msg {
coordinator::ProcessorMessage::SubstrateBlockAck { network, block, .. } => {
(0, (network, block).encode())
}
coordinator::ProcessorMessage::SubstrateBlockAck { block, .. } => (0, block.encode()),
// Unique since SubstrateSignId
coordinator::ProcessorMessage::InvalidParticipant { id, .. } => (1, id.encode()),
coordinator::ProcessorMessage::CosignPreprocess { id, .. } => (2, id.encode()),

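Intent uniqueness survives the removal of NetworkId because the queue deduplicates per (from, to) pair, and the connection to a given network's processor already fixes the network; the intent itself only needs the message-type UID plus the session-scoped ID. An illustrative sketch of the new ConfirmKeyPair intent (the UID values and little-endian encoding are assumptions, not the crate's actual constants):

#[derive(Clone, Copy)]
struct Session(u32);

const COORDINATOR_UID: u8 = 0;
const TYPE_SUBSTRATE_UID: u8 = 2;

fn confirm_key_pair_intent(session: Session) -> Vec<u8> {
  // Unique since there's only one key pair for a session
  let mut res = vec![COORDINATOR_UID, TYPE_SUBSTRATE_UID, 0];
  res.extend(session.0.to_le_bytes());
  res
}

fn main() {
  assert_ne!(confirm_key_pair_intent(Session(0)), confirm_key_pair_intent(Session(1)));
}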

@ -3,7 +3,6 @@ use std::collections::HashMap;
use rand_core::OsRng;
use ciphersuite::group::GroupEncoding;
use frost::{
curve::Ristretto,
ThresholdKeys, FrostError,
@ -21,6 +20,7 @@ use scale::Encode;
use serai_client::{
primitives::{NetworkId, BlockHash},
in_instructions::primitives::{Batch, SignedBatch, batch_message},
validator_sets::primitives::Session,
};
use messages::coordinator::*;
@ -48,6 +48,7 @@ pub struct BatchSigner<D: Db> {
db: PhantomData<D>,
network: NetworkId,
session: Session,
keys: Vec<ThresholdKeys<Ristretto>>,
signable: HashMap<[u8; 5], Batch>,
@ -71,12 +72,17 @@ impl<D: Db> fmt::Debug for BatchSigner<D> {
}
impl<D: Db> BatchSigner<D> {
pub fn new(network: NetworkId, keys: Vec<ThresholdKeys<Ristretto>>) -> BatchSigner<D> {
pub fn new(
network: NetworkId,
session: Session,
keys: Vec<ThresholdKeys<Ristretto>>,
) -> BatchSigner<D> {
assert!(!keys.is_empty());
BatchSigner {
db: PhantomData,
network,
session,
keys,
signable: HashMap::new(),
@ -86,11 +92,11 @@ impl<D: Db> BatchSigner<D> {
}
}
fn verify_id(&self, id: &SubstrateSignId) -> Result<([u8; 32], [u8; 5], u32), ()> {
let SubstrateSignId { key, id, attempt } = id;
fn verify_id(&self, id: &SubstrateSignId) -> Result<(Session, [u8; 5], u32), ()> {
let SubstrateSignId { session, id, attempt } = id;
let SubstrateSignableId::Batch(id) = id else { panic!("BatchSigner handed non-Batch") };
assert_eq!(key, &self.keys[0].group_key().to_bytes());
assert_eq!(session, &self.session);
// Check the attempt lines up
match self.attempt.get(id) {
@ -114,7 +120,7 @@ impl<D: Db> BatchSigner<D> {
}
}
Ok((*key, *id, *attempt))
Ok((*session, *id, *attempt))
}
#[must_use]
@ -196,11 +202,7 @@ impl<D: Db> BatchSigner<D> {
}
self.preprocessing.insert(id, (machines, preprocesses));
let id = SubstrateSignId {
key: self.keys[0].group_key().to_bytes(),
id: SubstrateSignableId::Batch(id),
attempt,
};
let id = SubstrateSignId { session: self.session, id: SubstrateSignableId::Batch(id), attempt };
// Broadcast our preprocesses
Some(ProcessorMessage::BatchPreprocess { id, block, preprocesses: serialized_preprocesses })
@ -236,10 +238,10 @@ impl<D: Db> BatchSigner<D> {
}
CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => {
let (key, id, attempt) = self.verify_id(&id).ok()?;
let (session, id, attempt) = self.verify_id(&id).ok()?;
let substrate_sign_id =
SubstrateSignId { key, id: SubstrateSignableId::Batch(id), attempt };
SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt };
let (machines, our_preprocesses) = match self.preprocessing.remove(&id) {
// Either rebooted or RPC error, or some invariant
@ -328,10 +330,10 @@ impl<D: Db> BatchSigner<D> {
}
CoordinatorMessage::SubstrateShares { id, shares } => {
let (key, id, attempt) = self.verify_id(&id).ok()?;
let (session, id, attempt) = self.verify_id(&id).ok()?;
let substrate_sign_id =
SubstrateSignId { key, id: SubstrateSignableId::Batch(id), attempt };
SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt };
let (machine, our_shares) = match self.signing.remove(&id) {
// Rebooted, RPC error, or some invariant

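BatchSigner's verify_id now checks the incoming Session against its own, where it previously compared raw group-key bytes. A sketch of that check plus the attempt bookkeeping (stand-in types):

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Session(u32);

struct BatchSigner {
  session: Session,
  attempt: HashMap<[u8; 5], u32>,
}

impl BatchSigner {
  fn verify_id(&self, session: Session, id: [u8; 5], attempt: u32) -> Result<(), ()> {
    // Previously: assert_eq!(key, &self.keys[0].group_key().to_bytes())
    assert_eq!(session, self.session);
    // Check the attempt lines up
    match self.attempt.get(&id) {
      Some(our_attempt) if *our_attempt == attempt => Ok(()),
      _ => Err(()),
    }
  }
}

fn main() {
  let mut attempt = HashMap::new();
  attempt.insert([0; 5], 0);
  let signer = BatchSigner { session: Session(0), attempt };
  assert!(signer.verify_id(Session(0), [0; 5], 0).is_ok());
  assert!(signer.verify_id(Session(0), [0; 5], 1).is_err());
}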

@ -3,7 +3,6 @@ use std::collections::HashMap;
use rand_core::OsRng;
use ciphersuite::group::GroupEncoding;
use frost::{
curve::Ristretto,
ThresholdKeys, FrostError,
@ -18,6 +17,7 @@ use frost_schnorrkel::Schnorrkel;
use log::{info, warn};
use scale::Encode;
use serai_client::validator_sets::primitives::Session;
use messages::coordinator::*;
use crate::{Get, DbTxn, create_db};
@ -35,7 +35,7 @@ type SignatureShare = <AlgorithmSignMachine<Ristretto, Schnorrkel> as SignMachin
>>::SignatureShare;
pub struct Cosigner {
#[allow(dead_code)] // False positive
session: Session,
keys: Vec<ThresholdKeys<Ristretto>>,
block_number: u64,
@ -51,6 +51,7 @@ impl fmt::Debug for Cosigner {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("Cosigner")
.field("session", &self.session)
.field("block_number", &self.block_number)
.field("id", &self.id)
.field("attempt", &self.attempt)
@ -63,6 +64,7 @@ impl fmt::Debug for Cosigner {
impl Cosigner {
pub fn new(
txn: &mut impl DbTxn,
session: Session,
keys: Vec<ThresholdKeys<Ristretto>>,
block_number: u64,
id: [u8; 32],
@ -100,14 +102,11 @@ impl Cosigner {
}
let preprocessing = Some((machines, preprocesses));
let substrate_sign_id = SubstrateSignId {
key: keys[0].group_key().to_bytes(),
id: SubstrateSignableId::CosigningSubstrateBlock(id),
attempt,
};
let substrate_sign_id =
SubstrateSignId { session, id: SubstrateSignableId::CosigningSubstrateBlock(id), attempt };
Some((
Cosigner { keys, block_number, id, attempt, preprocessing, signing: None },
Cosigner { session, keys, block_number, id, attempt, preprocessing, signing: None },
ProcessorMessage::CosignPreprocess {
id: substrate_sign_id,
preprocesses: serialized_preprocesses,
@ -127,7 +126,7 @@ impl Cosigner {
}
CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => {
assert_eq!(id.key, self.keys[0].group_key().to_bytes());
assert_eq!(id.session, self.session);
let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else {
panic!("cosigner passed Batch")
};
@ -212,7 +211,7 @@ impl Cosigner {
}
CoordinatorMessage::SubstrateShares { id, shares } => {
assert_eq!(id.key, self.keys[0].group_key().to_bytes());
assert_eq!(id.session, self.session);
let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else {
panic!("cosigner passed Batch")
};


@ -1,7 +1,7 @@
use std::io::Read;
use scale::{Encode, Decode};
use serai_client::validator_sets::primitives::{ValidatorSet, KeyPair};
use serai_client::validator_sets::primitives::{Session, KeyPair};
pub use serai_db::*;
@ -17,15 +17,15 @@ create_db!(
impl PendingActivationsDb {
pub fn pending_activation<N: Network>(
getter: &impl Get,
) -> Option<(<N::Block as Block<N>>::Id, ValidatorSet, KeyPair)> {
) -> Option<(<N::Block as Block<N>>::Id, Session, KeyPair)> {
if let Some(bytes) = Self::get(getter) {
if !bytes.is_empty() {
let mut slice = bytes.as_slice();
let (set, key_pair) = <(ValidatorSet, KeyPair)>::decode(&mut slice).unwrap();
let (session, key_pair) = <(Session, KeyPair)>::decode(&mut slice).unwrap();
let mut block_before_queue_block = <N::Block as Block<N>>::Id::default();
slice.read_exact(block_before_queue_block.as_mut()).unwrap();
assert!(slice.is_empty());
return Some((block_before_queue_block, set, key_pair));
return Some((block_before_queue_block, session, key_pair));
}
}
None
@ -33,10 +33,10 @@ impl PendingActivationsDb {
pub fn set_pending_activation<N: Network>(
txn: &mut impl DbTxn,
block_before_queue_block: <N::Block as Block<N>>::Id,
set: ValidatorSet,
session: Session,
key_pair: KeyPair,
) {
let mut buf = (set, key_pair).encode();
let mut buf = (session, key_pair).encode();
buf.extend(block_before_queue_block.as_ref());
Self::set(txn, &buf);
}

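The pending-activation record is the SCALE-encoded (session, key_pair) tuple with the raw block ID appended, decoded by reading the tuple and then read_exact-ing the remainder. A sketch of that concatenation, with fixed-size little-endian fields standing in for SCALE:

use std::io::Read;

fn encode(session: u32, key_pair: &[u8; 32], block: &[u8; 32]) -> Vec<u8> {
  let mut buf = session.to_le_bytes().to_vec();
  buf.extend(key_pair);
  buf.extend(block);
  buf
}

fn decode(bytes: &[u8]) -> (u32, [u8; 32], [u8; 32]) {
  let mut slice = bytes;
  let mut session = [0; 4];
  slice.read_exact(&mut session).unwrap();
  let mut key_pair = [0; 32];
  slice.read_exact(&mut key_pair).unwrap();
  let mut block = [0; 32];
  slice.read_exact(&mut block).unwrap();
  assert!(slice.is_empty());
  (u32::from_le_bytes(session), key_pair, block)
}

fn main() {
  let buf = encode(1, &[0xbb; 32], &[0xcc; 32]);
  assert_eq!(decode(&buf), (1, [0xbb; 32], [0xcc; 32]));
}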

@ -17,7 +17,7 @@ use frost::{
use log::info;
use scale::Encode;
use serai_client::validator_sets::primitives::{ValidatorSet, KeyPair};
use serai_client::validator_sets::primitives::{Session, KeyPair};
use messages::key_gen::*;
use crate::{Get, DbTxn, Db, create_db, networks::Network};
@ -30,16 +30,17 @@ pub struct KeyConfirmed<C: Ciphersuite> {
create_db!(
KeyGenDb {
ParamsDb: (set: &ValidatorSet) -> (ThresholdParams, u16),
ParamsDb: (session: &Session) -> (ThresholdParams, u16),
// Not scoped to the set since that'd have later attempts overwrite former ones
// A former attempt may become the finalized attempt, even if it doesn't do so in a timely manner
// Overwriting its commitments would be accordingly poor
CommitmentsDb: (key: &KeyGenId) -> HashMap<Participant, Vec<u8>>,
GeneratedKeysDb: (set: &ValidatorSet, substrate_key: &[u8; 32], network_key: &[u8]) -> Vec<u8>,
GeneratedKeysDb: (session: &Session, substrate_key: &[u8; 32], network_key: &[u8]) -> Vec<u8>,
// These do assume a key is only used once across sets, which holds true so long as a single
// participant is honest in their execution of the protocol
KeysDb: (network_key: &[u8]) -> Vec<u8>,
NetworkKey: (substrate_key: [u8; 32]) -> Vec<u8>,
SessionDb: (network_key: &[u8]) -> Session,
NetworkKeyDb: (session: Session) -> Vec<u8>,
}
);
@ -76,7 +77,7 @@ impl GeneratedKeysDb {
}
txn.put(
Self::key(
&id.set,
&id.session,
&substrate_keys[0].group_key().to_bytes(),
network_keys[0].group_key().to_bytes().as_ref(),
),
@ -88,12 +89,12 @@ impl GeneratedKeysDb {
impl KeysDb {
fn confirm_keys<N: Network>(
txn: &mut impl DbTxn,
set: ValidatorSet,
session: Session,
key_pair: KeyPair,
) -> (Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>) {
let (keys_vec, keys) = GeneratedKeysDb::read_keys::<N>(
txn,
&GeneratedKeysDb::key(&set, &key_pair.0 .0, key_pair.1.as_ref()),
&GeneratedKeysDb::key(&session, &key_pair.0 .0, key_pair.1.as_ref()),
)
.unwrap();
assert_eq!(key_pair.0 .0, keys.0[0].group_key().to_bytes());
@ -104,8 +105,9 @@ impl KeysDb {
},
keys.1[0].group_key().to_bytes().as_ref(),
);
txn.put(KeysDb::key(keys.1[0].group_key().to_bytes().as_ref()), keys_vec);
NetworkKey::set(txn, key_pair.0.into(), &key_pair.1.clone().into_inner());
txn.put(Self::key(key_pair.1.as_ref()), keys_vec);
NetworkKeyDb::set(txn, session, &key_pair.1.clone().into_inner());
SessionDb::set(txn, key_pair.1.as_ref(), &session);
keys
}
@ -113,21 +115,19 @@ impl KeysDb {
fn keys<N: Network>(
getter: &impl Get,
network_key: &<N::Curve as Ciphersuite>::G,
) -> Option<(Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>)> {
) -> Option<(Session, (Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>))> {
let res =
GeneratedKeysDb::read_keys::<N>(getter, &Self::key(network_key.to_bytes().as_ref()))?.1;
assert_eq!(&res.1[0].group_key(), network_key);
Some(res)
Some((SessionDb::get(getter, network_key.to_bytes().as_ref()).unwrap(), res))
}
pub fn substrate_keys_by_substrate_key<N: Network>(
pub fn substrate_keys_by_session<N: Network>(
getter: &impl Get,
substrate_key: &[u8; 32],
session: Session,
) -> Option<Vec<ThresholdKeys<Ristretto>>> {
let network_key = NetworkKey::get(getter, *substrate_key)?;
let res = GeneratedKeysDb::read_keys::<N>(getter, &Self::key(&network_key))?.1;
assert_eq!(&res.0[0].group_key().to_bytes(), substrate_key);
Some(res.0)
let network_key = NetworkKeyDb::get(getter, session)?;
Some(GeneratedKeysDb::read_keys::<N>(getter, &Self::key(&network_key))?.1 .0)
}
}
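SessionDb and NetworkKeyDb replace the old substrate-key-indexed lookups with a two-way Session ↔ network-key index, written together when keys are confirmed. A HashMap sketch of that shape:

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Session(u32);

#[derive(Default)]
struct KeyGenDb {
  // Session -> network key (NetworkKeyDb in the diff)
  network_key: HashMap<Session, Vec<u8>>,
  // network key -> Session (SessionDb in the diff)
  session: HashMap<Vec<u8>, Session>,
}

impl KeyGenDb {
  // Mirrors confirm_keys writing both directions in one transaction
  fn confirm(&mut self, session: Session, network_key: Vec<u8>) {
    self.network_key.insert(session, network_key.clone());
    self.session.insert(network_key, session);
  }
  fn session_for(&self, network_key: &[u8]) -> Option<Session> {
    self.session.get(network_key).copied()
  }
  fn network_key_for(&self, session: Session) -> Option<&Vec<u8>> {
    self.network_key.get(&session)
  }
}

fn main() {
  let mut db = KeyGenDb::default();
  db.confirm(Session(0), vec![0xee; 32]);
  assert_eq!(db.session_for(&[0xee; 32]), Some(Session(0)));
  assert_eq!(db.network_key_for(Session(0)), Some(&vec![0xee; 32]));
}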
@ -140,9 +140,9 @@ pub struct KeyGen<N: Network, D: Db> {
db: D,
entropy: Zeroizing<[u8; 32]>,
active_commit: HashMap<ValidatorSet, (SecretShareMachines<N>, Vec<Vec<u8>>)>,
active_commit: HashMap<Session, (SecretShareMachines<N>, Vec<Vec<u8>>)>,
#[allow(clippy::type_complexity)]
active_share: HashMap<ValidatorSet, (KeyMachines<N>, Vec<HashMap<Participant, Vec<u8>>>)>,
active_share: HashMap<Session, (KeyMachines<N>, Vec<HashMap<Participant, Vec<u8>>>)>,
}
impl<N: Network, D: Db> KeyGen<N, D> {
@ -151,26 +151,26 @@ impl<N: Network, D: Db> KeyGen<N, D> {
KeyGen { db, entropy, active_commit: HashMap::new(), active_share: HashMap::new() }
}
pub fn in_set(&self, set: &ValidatorSet) -> bool {
// We determine if we're in set using if we have the parameters for a set's key generation
ParamsDb::get(&self.db, set).is_some()
pub fn in_set(&self, session: &Session) -> bool {
// We determine whether we're in-set by checking whether we have the parameters for a session's
// key generation
ParamsDb::get(&self.db, session).is_some()
}
#[allow(clippy::type_complexity)]
pub fn keys(
&self,
key: &<N::Curve as Ciphersuite>::G,
) -> Option<(Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>)> {
) -> Option<(Session, (Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>))> {
// This is safe, despite not having a txn, since it's a static value
// It doesn't change over time/in relation to other operations
KeysDb::keys::<N>(&self.db, key)
}
pub fn substrate_keys_by_substrate_key(
pub fn substrate_keys_by_session(
&self,
substrate_key: &[u8; 32],
session: Session,
) -> Option<Vec<ThresholdKeys<Ristretto>>> {
KeysDb::substrate_keys_by_substrate_key::<N>(&self.db, substrate_key)
KeysDb::substrate_keys_by_session::<N>(&self.db, session)
}
pub async fn handle(
@ -184,7 +184,10 @@ impl<N: Network, D: Db> KeyGen<N, D> {
// TODO2: Also embed the chain ID/genesis block
format!(
"Serai Key Gen. Session: {:?}, Network: {:?}, Attempt: {}, Key: {}",
id.set.session, id.set.network, id.attempt, key,
id.session,
N::NETWORK,
id.attempt,
key,
)
};
@ -313,15 +316,15 @@ impl<N: Network, D: Db> KeyGen<N, D> {
info!("Generating new key. ID: {id:?} Params: {params:?} Shares: {shares}");
// Remove old attempts
if self.active_commit.remove(&id.set).is_none() &&
self.active_share.remove(&id.set).is_none()
if self.active_commit.remove(&id.session).is_none() &&
self.active_share.remove(&id.session).is_none()
{
// If we haven't handled this set before, save the params
ParamsDb::set(txn, &id.set, &(params, shares));
// If we haven't handled this session before, save the params
ParamsDb::set(txn, &id.session, &(params, shares));
}
let (machines, commitments) = key_gen_machines(id, params, shares);
self.active_commit.insert(id.set, (machines, commitments.clone()));
self.active_commit.insert(id.session, (machines, commitments.clone()));
ProcessorMessage::Commitments { id, commitments }
}
@ -329,14 +332,14 @@ impl<N: Network, D: Db> KeyGen<N, D> {
CoordinatorMessage::Commitments { id, mut commitments } => {
info!("Received commitments for {:?}", id);
if self.active_share.contains_key(&id.set) {
if self.active_share.contains_key(&id.session) {
// We should've been told of a new attempt before receiving commitments again
// The coordinator is either missing messages or repeating itself
// Either way, it's faulty
panic!("commitments when already handled commitments");
}
let (params, share_quantity) = ParamsDb::get(txn, &id.set).unwrap();
let (params, share_quantity) = ParamsDb::get(txn, &id.session).unwrap();
// Unwrap the machines, rebuilding them if we didn't have them in our cache
// We won't if the processor rebooted
@ -345,7 +348,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
// The coordinator is trusted to be proper in this regard
let (prior, our_commitments) = self
.active_commit
.remove(&id.set)
.remove(&id.session)
.unwrap_or_else(|| key_gen_machines(id, params, share_quantity));
for (i, our_commitments) in our_commitments.into_iter().enumerate() {
@ -361,7 +364,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
match secret_share_machines(id, params, prior, commitments) {
Ok((machines, shares)) => {
self.active_share.insert(id.set, (machines, shares.clone()));
self.active_share.insert(id.session, (machines, shares.clone()));
ProcessorMessage::Shares { id, shares }
}
Err(e) => e,
@ -371,10 +374,10 @@ impl<N: Network, D: Db> KeyGen<N, D> {
CoordinatorMessage::Shares { id, shares } => {
info!("Received shares for {:?}", id);
let (params, share_quantity) = ParamsDb::get(txn, &id.set).unwrap();
let (params, share_quantity) = ParamsDb::get(txn, &id.session).unwrap();
// Same commentary on inconsistency as above exists
let (machines, our_shares) = self.active_share.remove(&id.set).unwrap_or_else(|| {
let (machines, our_shares) = self.active_share.remove(&id.session).unwrap_or_else(|| {
let prior = key_gen_machines(id, params, share_quantity).0;
let (machines, shares) =
secret_share_machines(id, params, prior, CommitmentsDb::get(txn, &id).unwrap())
@ -512,7 +515,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
}
CoordinatorMessage::VerifyBlame { id, accuser, accused, share, blame } => {
let params = ParamsDb::get(txn, &id.set).unwrap().0;
let params = ParamsDb::get(txn, &id.session).unwrap().0;
let mut share_ref = share.as_slice();
let Ok(substrate_share) = EncryptedMessage::<
@ -580,17 +583,17 @@ impl<N: Network, D: Db> KeyGen<N, D> {
pub async fn confirm(
&mut self,
txn: &mut D::Transaction<'_>,
set: ValidatorSet,
session: Session,
key_pair: KeyPair,
) -> KeyConfirmed<N::Curve> {
info!(
"Confirmed key pair {} {} for set {:?}",
"Confirmed key pair {} {} for {:?}",
hex::encode(key_pair.0),
hex::encode(&key_pair.1),
set,
session,
);
let (substrate_keys, network_keys) = KeysDb::confirm_keys::<N>(txn, set, key_pair);
let (substrate_keys, network_keys) = KeysDb::confirm_keys::<N>(txn, session, key_pair);
KeyConfirmed { substrate_keys, network_keys }
}


@ -10,7 +10,7 @@ use tokio::time::sleep;
use serai_client::{
primitives::{BlockHash, NetworkId},
validator_sets::primitives::{ValidatorSet, KeyPair},
validator_sets::primitives::{Session, KeyPair},
};
use messages::{
@ -44,7 +44,7 @@ mod coordinator;
pub use coordinator::*;
mod key_gen;
use key_gen::{KeyConfirmed, KeyGen};
use key_gen::{SessionDb, KeyConfirmed, KeyGen};
mod signer;
use signer::Signer;
@ -84,7 +84,7 @@ struct TributaryMutable<N: Network, D: Db> {
// invalidating the Tributary's mutable borrow. The signer is coded to allow for attempted usage
// of a dropped task.
key_gen: KeyGen<N, D>,
signers: HashMap<Vec<u8>, Signer<N, D>>,
signers: HashMap<Session, Signer<N, D>>,
// This is also mutably borrowed by the Scanner.
// The Scanner starts new sign tasks.
@ -187,25 +187,26 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
substrate_mutable: &mut SubstrateMutable<N, D>,
tributary_mutable: &mut TributaryMutable<N, D>,
txn: &mut D::Transaction<'_>,
set: ValidatorSet,
session: Session,
key_pair: KeyPair,
activation_number: usize,
) {
info!("activating {set:?}'s keys at {activation_number}");
info!("activating {session:?}'s keys at {activation_number}");
let network_key = <N as Network>::Curve::read_G::<&[u8]>(&mut key_pair.1.as_ref())
.expect("Substrate finalized invalid point as a network's key");
if tributary_mutable.key_gen.in_set(&set) {
if tributary_mutable.key_gen.in_set(&session) {
// See TributaryMutable's struct definition for why this block is safe
let KeyConfirmed { substrate_keys, network_keys } =
tributary_mutable.key_gen.confirm(txn, set, key_pair.clone()).await;
if set.session.0 == 0 {
tributary_mutable.batch_signer = Some(BatchSigner::new(N::NETWORK, substrate_keys));
tributary_mutable.key_gen.confirm(txn, session, key_pair.clone()).await;
if session.0 == 0 {
tributary_mutable.batch_signer =
Some(BatchSigner::new(N::NETWORK, session, substrate_keys));
}
tributary_mutable
.signers
.insert(key_pair.1.into(), Signer::new(network.clone(), network_keys));
.insert(session, Signer::new(network.clone(), session, network_keys));
}
substrate_mutable.add_key(txn, activation_number, network_key).await;
@ -219,7 +220,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
CoordinatorMessage::Sign(msg) => {
if let Some(msg) = tributary_mutable
.signers
.get_mut(msg.key())
.get_mut(&msg.session())
.expect("coordinator told us to sign with a signer we don't have")
.handle(txn, msg)
.await
@ -257,11 +258,11 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else {
panic!("CosignSubstrateBlock id didn't have a CosigningSubstrateBlock")
};
let Some(keys) = tributary_mutable.key_gen.substrate_keys_by_substrate_key(&id.key)
else {
let Some(keys) = tributary_mutable.key_gen.substrate_keys_by_session(id.session) else {
panic!("didn't have key shares for the key we were told to cosign with");
};
if let Some((cosigner, msg)) = Cosigner::new(txn, keys, block_number, block, id.attempt)
if let Some((cosigner, msg)) =
Cosigner::new(txn, id.session, keys, block_number, block, id.attempt)
{
tributary_mutable.cosigner = Some(cosigner);
coordinator.send(msg).await;
@ -287,7 +288,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
CoordinatorMessage::Substrate(msg) => {
match msg {
messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, set, key_pair } => {
messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, session, key_pair } => {
// This is the first key pair for this network so no block has been finalized yet
// TODO: Write documentation for this in docs/
// TODO: Use an Option instead of a magic?
@ -339,7 +340,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
substrate_mutable,
tributary_mutable,
txn,
set,
session,
key_pair,
activation_number,
)
@ -355,7 +356,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
PendingActivationsDb::set_pending_activation::<N>(
txn,
block_before_queue_block,
set,
session,
key_pair,
);
}
@ -363,14 +364,13 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
messages::substrate::CoordinatorMessage::SubstrateBlock {
context,
network: network_id,
block: substrate_block,
burns,
batches,
} => {
assert_eq!(network_id, N::NETWORK, "coordinator sent us data for another network");
if let Some((block, set, key_pair)) = PendingActivationsDb::pending_activation::<N>(txn) {
if let Some((block, session, key_pair)) =
PendingActivationsDb::pending_activation::<N>(txn)
{
// Only run if this is a Batch belonging to a distinct block
if context.network_latest_finalized_block.as_ref() != block.as_ref() {
let mut queue_block = <N::Block as Block<N>>::Id::default();
@ -387,7 +387,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
substrate_mutable,
tributary_mutable,
txn,
set,
session,
key_pair,
activation_number,
)
@ -412,13 +412,12 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
if !tributary_mutable.signers.is_empty() {
coordinator
.send(messages::coordinator::ProcessorMessage::SubstrateBlockAck {
network: N::NETWORK,
block: substrate_block,
plans: to_sign
.iter()
.map(|signable| PlanMeta {
key: signable.0.to_bytes().as_ref().to_vec(),
id: signable.1,
.filter_map(|signable| {
SessionDb::get(txn, signable.0.to_bytes().as_ref())
.map(|session| PlanMeta { session, id: signable.1 })
})
.collect(),
})
@ -428,7 +427,8 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
// See commentary in TributaryMutable for why this is safe
let signers = &mut tributary_mutable.signers;
for (key, id, tx, eventuality) in to_sign {
if let Some(signer) = signers.get_mut(key.to_bytes().as_ref()) {
if let Some(session) = SessionDb::get(txn, key.to_bytes().as_ref()) {
let signer = signers.get_mut(&session).unwrap();
if let Some(msg) = signer.sign_transaction(txn, id, tx, eventuality).await {
coordinator.send(msg).await;
}
@ -494,7 +494,7 @@ async fn boot<N: Network, D: Db, Co: Coordinator>(
let mut signers = HashMap::new();
for (i, key) in current_keys.iter().enumerate() {
let Some((substrate_keys, network_keys)) = key_gen.keys(key) else { continue };
let Some((session, (substrate_keys, network_keys))) = key_gen.keys(key) else { continue };
let network_key = network_keys[0].group_key();
// If this is the oldest key, load the BatchSigner for it as the active BatchSigner
@ -503,7 +503,7 @@ async fn boot<N: Network, D: Db, Co: Coordinator>(
// We don't have to load any state for this since the Scanner will re-fire any events
// necessary, only no longer scanning old blocks once Substrate acks them
if i == 0 {
batch_signer = Some(BatchSigner::new(N::NETWORK, substrate_keys));
batch_signer = Some(BatchSigner::new(N::NETWORK, session, substrate_keys));
}
// The Scanner re-fires events as needed for batch_signer yet not signer
@ -514,10 +514,9 @@ async fn boot<N: Network, D: Db, Co: Coordinator>(
// 2) Cause re-emission of Batch events, which we'd need to check the safety of
// (TODO: Do anyways?)
// 3) Violate the attempt counter (TODO: Is this already being violated?)
let mut signer = Signer::new(network.clone(), network_keys);
let mut signer = Signer::new(network.clone(), session, network_keys);
// Sign any TXs being actively signed
let key = key.to_bytes();
for (plan, tx, eventuality) in &actively_signing {
if plan.key == network_key {
let mut txn = raw_db.txn();
@ -531,7 +530,7 @@ async fn boot<N: Network, D: Db, Co: Coordinator>(
}
}
signers.insert(key.as_ref().to_vec(), signer);
signers.insert(session, signer);
}
// Spawn a task to rebroadcast signed TXs yet to be mined into a finalized block
@ -630,16 +629,20 @@ async fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network: N, mut
if let Some((retired_key, new_key)) = retired_key_new_key {
// Safe to mutate since all signing operations are done and no more will be added
tributary_mutable.signers.remove(retired_key.to_bytes().as_ref());
if let Some(retired_session) = SessionDb::get(&txn, retired_key.to_bytes().as_ref()) {
tributary_mutable.signers.remove(&retired_session);
}
tributary_mutable.batch_signer.take();
if let Some((substrate_keys, _)) = tributary_mutable.key_gen.keys(&new_key) {
let keys = tributary_mutable.key_gen.keys(&new_key);
if let Some((session, (substrate_keys, _))) = keys {
tributary_mutable.batch_signer =
Some(BatchSigner::new(N::NETWORK, substrate_keys));
Some(BatchSigner::new(N::NETWORK, session, substrate_keys));
}
}
},
MultisigEvent::Completed(key, id, tx) => {
if let Some(signer) = tributary_mutable.signers.get_mut(&key) {
if let Some(session) = SessionDb::get(&txn, &key) {
let signer = tributary_mutable.signers.get_mut(&session).unwrap();
if let Some(msg) = signer.completed(&mut txn, id, tx) {
coordinator.send(msg).await;
}

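With signers now keyed by Session, routing a coordinator message is a plain map lookup on msg.session(). A minimal sketch (stand-in types):

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Session(u32);

struct Signer {
  session: Session,
}

enum CoordinatorMessage {
  Completed { session: Session, id: [u8; 32] },
}

impl CoordinatorMessage {
  fn session(&self) -> Session {
    match self {
      CoordinatorMessage::Completed { session, .. } => *session,
    }
  }
}

fn main() {
  let mut signers = HashMap::new();
  signers.insert(Session(0), Signer { session: Session(0) });
  let msg = CoordinatorMessage::Completed { session: Session(0), id: [0; 32] };
  let signer = signers
    .get_mut(&msg.session())
    .expect("coordinator told us to sign with a signer we don't have");
  assert_eq!(signer.session, msg.session());
}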

@ -11,6 +11,7 @@ use frost::{
use log::{info, debug, warn, error};
use scale::Encode;
use serai_client::validator_sets::primitives::Session;
use messages::sign::*;
pub use serai_db::*;
@ -131,6 +132,7 @@ pub struct Signer<N: Network, D: Db> {
network: N,
session: Session,
keys: Vec<ThresholdKeys<N::Curve>>,
signable: HashMap<[u8; 32], N::SignableTransaction>,
@ -172,13 +174,14 @@ impl<N: Network, D: Db> Signer<N, D> {
tokio::time::sleep(core::time::Duration::from_secs(5 * 60)).await;
}
}
pub fn new(network: N, keys: Vec<ThresholdKeys<N::Curve>>) -> Signer<N, D> {
pub fn new(network: N, session: Session, keys: Vec<ThresholdKeys<N::Curve>>) -> Signer<N, D> {
assert!(!keys.is_empty());
Signer {
db: PhantomData,
network,
session,
keys,
signable: HashMap::new(),
@ -250,11 +253,7 @@ impl<N: Network, D: Db> Signer<N, D> {
self.signing.remove(&id);
// Emit the event for it
ProcessorMessage::Completed {
key: self.keys[0].group_key().to_bytes().as_ref().to_vec(),
id,
tx: tx_id.as_ref().to_vec(),
}
ProcessorMessage::Completed { session: self.session, id, tx: tx_id.as_ref().to_vec() }
}
#[must_use]
@ -371,7 +370,7 @@ impl<N: Network, D: Db> Signer<N, D> {
// Update the attempt number
self.attempt.insert(id, attempt);
let id = SignId { key: self.keys[0].group_key().to_bytes().as_ref().to_vec(), id, attempt };
let id = SignId { session: self.session, id, attempt };
info!("signing for {} #{}", hex::encode(id.id), id.attempt);
@ -603,7 +602,7 @@ impl<N: Network, D: Db> Signer<N, D> {
CoordinatorMessage::Reattempt { id } => self.attempt(txn, id.id, id.attempt).await,
CoordinatorMessage::Completed { key: _, id, tx: mut tx_vec } => {
CoordinatorMessage::Completed { session: _, id, tx: mut tx_vec } => {
let mut tx = <N::Transaction as Transaction<N>>::Id::default();
if tx.as_ref().len() != tx_vec.len() {
let true_len = tx_vec.len();


@ -7,6 +7,8 @@ use frost::{Participant, ThresholdKeys};
use tokio::time::timeout;
use serai_client::validator_sets::primitives::Session;
use serai_db::{DbTxn, MemDb};
use crate::{
@ -50,7 +52,7 @@ async fn spend<N: Network, D: Db>(
),
);
}
sign(network.clone(), keys_txs).await;
sign(network.clone(), Session(0), keys_txs).await;
for _ in 0 .. N::CONFIRMATIONS {
network.mine_block().await;


@ -14,7 +14,8 @@ use sp_application_crypto::{RuntimePublic, sr25519::Public};
use serai_db::{DbTxn, Db, MemDb};
use scale::Encode;
use serai_client::{primitives::*, in_instructions::primitives::*};
#[rustfmt::skip]
use serai_client::{primitives::*, in_instructions::primitives::*, validator_sets::primitives::Session};
use messages::{
substrate,
@ -49,7 +50,7 @@ async fn test_batch_signer() {
};
let actual_id = SubstrateSignId {
key: keys.values().next().unwrap().group_key().to_bytes(),
session: Session(0),
id: SubstrateSignableId::Batch((batch.network, batch.id).encode().try_into().unwrap()),
attempt: 0,
};
@ -73,7 +74,7 @@ async fn test_batch_signer() {
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
let keys = keys.get(&i).unwrap().clone();
let mut signer = BatchSigner::<MemDb>::new(NetworkId::Monero, vec![keys]);
let mut signer = BatchSigner::<MemDb>::new(NetworkId::Monero, Session(0), vec![keys]);
let mut db = MemDb::new();
let mut txn = db.txn();

View file

@ -13,7 +13,7 @@ use sp_application_crypto::{RuntimePublic, sr25519::Public};
use serai_db::{DbTxn, Db, MemDb};
use serai_client::primitives::*;
use serai_client::{primitives::*, validator_sets::primitives::Session};
use messages::coordinator::*;
use crate::cosigner::Cosigner;
@ -28,7 +28,7 @@ async fn test_cosigner() {
let block = [0xaa; 32];
let actual_id = SubstrateSignId {
key: keys.values().next().unwrap().group_key().to_bytes(),
session: Session(0),
id: SubstrateSignableId::CosigningSubstrateBlock(block),
attempt: (OsRng.next_u64() >> 32).try_into().unwrap(),
};
@ -55,7 +55,8 @@ async fn test_cosigner() {
let mut db = MemDb::new();
let mut txn = db.txn();
let (signer, preprocess) =
Cosigner::new(&mut txn, vec![keys], block_number, block, actual_id.attempt).unwrap();
Cosigner::new(&mut txn, Session(0), vec![keys], block_number, block, actual_id.attempt)
.unwrap();
match preprocess {
// All participants should emit a preprocess

View file

@ -10,10 +10,7 @@ use frost::{Participant, ThresholdParams, tests::clone_without};
use serai_db::{DbTxn, Db, MemDb};
use sp_application_crypto::sr25519;
use serai_client::{
primitives::NetworkId,
validator_sets::primitives::{Session, ValidatorSet, KeyPair},
};
use serai_client::validator_sets::primitives::{Session, KeyPair};
use messages::key_gen::*;
use crate::{
@ -21,8 +18,7 @@ use crate::{
key_gen::{KeyConfirmed, KeyGen},
};
const ID: KeyGenId =
KeyGenId { set: ValidatorSet { session: Session(1), network: NetworkId::Monero }, attempt: 3 };
const ID: KeyGenId = KeyGenId { session: Session(1), attempt: 3 };
pub async fn test_key_gen<N: Network>() {
let mut entropies = HashMap::new();
@ -139,7 +135,11 @@ pub async fn test_key_gen<N: Network>() {
let key_gen = key_gens.get_mut(&i).unwrap();
let mut txn = dbs.get_mut(&i).unwrap().txn();
let KeyConfirmed { mut substrate_keys, mut network_keys } = key_gen
.confirm(&mut txn, ID.set, KeyPair(sr25519::Public(res.0), res.1.clone().try_into().unwrap()))
.confirm(
&mut txn,
ID.session,
KeyPair(sr25519::Public(res.0), res.1.clone().try_into().unwrap()),
)
.await;
txn.commit();
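
With the validator set dropped from `KeyGenId`, the constant in this test shrinks to a session and an attempt. A hedged sketch (simplified stand-ins, assumed for illustration) of why that suffices:

// Simplified stand-ins (assumed for illustration).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Session(u32);

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct KeyGenId {
  session: Session, // was: set: ValidatorSet { session, network }
  attempt: u32,
}

const ID: KeyGenId = KeyGenId { session: Session(1), attempt: 3 };

fn main() {
  // The network is implicit: a processor serves exactly one network, so the
  // session alone disambiguates which key gen is being confirmed.
  assert_eq!(ID.session, Session(1));
}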

View file

@ -2,7 +2,6 @@ use std::collections::HashMap;
use rand_core::{RngCore, OsRng};
use ciphersuite::group::GroupEncoding;
use frost::{
Participant, ThresholdKeys,
dkg::tests::{key_gen, clone_without},
@ -10,7 +9,10 @@ use frost::{
use serai_db::{DbTxn, Db, MemDb};
use serai_client::primitives::{NetworkId, Coin, Amount, Balance};
use serai_client::{
primitives::{NetworkId, Coin, Amount, Balance},
validator_sets::primitives::Session,
};
use messages::sign::*;
use crate::{
@ -22,22 +24,17 @@ use crate::{
#[allow(clippy::type_complexity)]
pub async fn sign<N: Network>(
network: N,
session: Session,
mut keys_txs: HashMap<
Participant,
(ThresholdKeys<N::Curve>, (N::SignableTransaction, N::Eventuality)),
>,
) -> <N::Transaction as Transaction<N>>::Id {
let actual_id = SignId {
key: keys_txs[&Participant::new(1).unwrap()].0.group_key().to_bytes().as_ref().to_vec(),
id: [0xaa; 32],
attempt: 0,
};
let actual_id = SignId { session, id: [0xaa; 32], attempt: 0 };
let mut group_key = None;
let mut keys = HashMap::new();
let mut txs = HashMap::new();
for (i, (these_keys, this_tx)) in keys_txs.drain() {
group_key = Some(these_keys.group_key());
keys.insert(i, these_keys);
txs.insert(i, this_tx);
}
@ -49,7 +46,7 @@ pub async fn sign<N: Network>(
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
let keys = keys.remove(&i).unwrap();
t = keys.params().t();
signers.insert(i, Signer::<_, MemDb>::new(network.clone(), vec![keys]));
signers.insert(i, Signer::<_, MemDb>::new(network.clone(), Session(0), vec![keys]));
dbs.insert(i, MemDb::new());
}
drop(keys);
@ -130,8 +127,8 @@ pub async fn sign<N: Network>(
.await
.unwrap()
{
ProcessorMessage::Completed { key, id, tx } => {
assert_eq!(&key, group_key.unwrap().to_bytes().as_ref());
ProcessorMessage::Completed { session, id, tx } => {
assert_eq!(session, Session(0));
assert_eq!(id, actual_id.id);
if tx_id.is_none() {
tx_id = Some(tx.clone());
@ -196,7 +193,7 @@ pub async fn test_signer<N: Network>(network: N) {
// The signer may not publish the TX if it has a connection error
// It doesn't fail in this case
let txid = sign(network.clone(), keys_txs).await;
let txid = sign(network.clone(), Session(0), keys_txs).await;
let tx = network.get_transaction(&txid).await.unwrap();
assert_eq!(tx.id(), txid);
// Mine a block, and scan it, to ensure that the TX actually made it on chain

View file

@ -8,7 +8,10 @@ use tokio::time::timeout;
use serai_db::{DbTxn, Db, MemDb};
use serai_client::primitives::{NetworkId, Coin, Amount, Balance};
use serai_client::{
primitives::{NetworkId, Coin, Amount, Balance},
validator_sets::primitives::Session,
};
use crate::{
Payment, Plan,
@ -140,7 +143,7 @@ pub async fn test_wallet<N: Network>(network: N) {
keys_txs.insert(i, (keys, (signable, eventuality)));
}
let txid = sign(network.clone(), keys_txs).await;
let txid = sign(network.clone(), Session(0), keys_txs).await;
let tx = network.get_transaction(&txid).await.unwrap();
network.mine_block().await;
let block_number = network.get_latest_block_number().await.unwrap();

View file

@ -22,6 +22,7 @@ use serai_client::{
primitives::{Batch, SignedBatch, batch_message},
InInstructionsEvent,
},
validator_sets::primitives::Session,
};
use messages::{
coordinator::{SubstrateSignableId, SubstrateSignId},
@ -33,16 +34,13 @@ use crate::{*, tests::*};
pub async fn batch(
processors: &mut [Processor],
processor_is: &[u8],
session: Session,
substrate_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
batch: Batch,
) -> u64 {
let mut id = [0; 5];
OsRng.fill_bytes(&mut id);
let id = SubstrateSignId {
key: (<Ristretto as Ciphersuite>::generator() * **substrate_key).to_bytes(),
id: SubstrateSignableId::Batch(id),
attempt: 0,
};
let id = SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt: 0 };
for processor in processors.iter_mut() {
processor
@ -232,7 +230,6 @@ pub async fn batch(
serai_time: last_block.time().unwrap() / 1000,
network_latest_finalized_block: batch.batch.block,
},
network: batch.batch.network,
block: last_serai_block,
burns: vec![],
batches: vec![batch.batch.id],
@ -244,7 +241,6 @@ pub async fn batch(
processor
.send_message(messages::ProcessorMessage::Coordinator(
messages::coordinator::ProcessorMessage::SubstrateBlockAck {
network: batch.batch.network,
block: last_serai_block,
plans: vec![],
},
@ -283,6 +279,7 @@ async fn batch_test() {
batch(
&mut processors,
&processor_is,
Session(0),
&substrate_key,
Batch {
network: NetworkId::Bitcoin,

View file

@ -28,7 +28,7 @@ pub async fn key_gen<C: Ciphersuite>(
let mut participant_is = vec![];
let set = ValidatorSet { session: Session(0), network: NetworkId::Bitcoin };
let id = KeyGenId { set, attempt: 0 };
let id = KeyGenId { session: set.session, attempt: 0 };
for (i, processor) in processors.iter_mut().enumerate() {
let msg = processor.recv_message().await;
@ -173,7 +173,7 @@ pub async fn key_gen<C: Ciphersuite>(
CoordinatorMessage::Substrate(
messages::substrate::CoordinatorMessage::ConfirmKeyPair {
context,
set: this_set,
session,
ref key_pair,
},
) => {
@ -186,7 +186,7 @@ pub async fn key_gen<C: Ciphersuite>(
70
);
assert_eq!(context.network_latest_finalized_block.0, [0; 32]);
assert_eq!(set, this_set);
assert_eq!(set.session, session);
assert_eq!(key_pair.0 .0, substrate_key);
assert_eq!(&key_pair.1, &network_key);
}

View file

@ -4,10 +4,9 @@ use std::{
collections::{HashSet, HashMap},
};
use zeroize::Zeroizing;
use rand_core::{RngCore, OsRng};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1};
use ciphersuite::Secp256k1;
use dkg::Participant;
@ -22,23 +21,20 @@ use serai_client::{
CoinsEvent,
},
in_instructions::primitives::{InInstruction, InInstructionWithBalance, Batch},
validator_sets::primitives::Session,
SeraiCoins,
};
use messages::{coordinator::PlanMeta, sign::SignId, SubstrateContext, CoordinatorMessage};
use crate::tests::*;
pub async fn sign<C: Ciphersuite>(
pub async fn sign(
processors: &mut [Processor],
processor_is: &[u8],
network_key: &Zeroizing<C::F>,
session: Session,
plan_id: [u8; 32],
) {
let id = SignId {
key: (C::generator() * **network_key).to_bytes().as_ref().to_vec(),
id: plan_id,
attempt: 0,
};
let id = SignId { session, id: plan_id, attempt: 0 };
// Select a random participant to exclude, so we know for sure who *is* participating
assert_eq!(COORDINATORS - THRESHOLD, 1);
@ -150,7 +146,7 @@ pub async fn sign<C: Ciphersuite>(
&mut processors[processor_is.iter().position(|p_i| u16::from(*p_i) == u16::from(i)).unwrap()];
processor
.send_message(messages::sign::ProcessorMessage::Completed {
key: id.key.clone(),
session,
id: id.id,
tx: b"signed_tx".to_vec(),
})
@ -163,7 +159,7 @@ pub async fn sign<C: Ciphersuite>(
assert_eq!(
processor.recv_message().await,
CoordinatorMessage::Sign(messages::sign::CoordinatorMessage::Completed {
key: id.key.clone(),
session,
id: id.id,
tx: b"signed_tx".to_vec()
})
@ -196,8 +192,7 @@ async fn sign_test() {
}
let mut processors = new_processors;
let (participant_is, substrate_key, network_key) =
key_gen::<Secp256k1>(&mut processors).await;
let (participant_is, substrate_key, _) = key_gen::<Secp256k1>(&mut processors).await;
// 'Send' external coins into Serai
let serai = processors[0].serai().await;
@ -230,6 +225,7 @@ async fn sign_test() {
let block_included_in = batch(
&mut processors,
&participant_is,
Session(0),
&substrate_key,
Batch {
network: NetworkId::Bitcoin,
@ -331,7 +327,6 @@ async fn sign_test() {
serai_time: last_serai_block.time().unwrap() / 1000,
network_latest_finalized_block: coin_block,
},
network: NetworkId::Bitcoin,
block: last_serai_block.number(),
burns: vec![out_instruction.clone()],
batches: vec![],
@ -343,18 +338,14 @@ async fn sign_test() {
processor
.send_message(messages::ProcessorMessage::Coordinator(
messages::coordinator::ProcessorMessage::SubstrateBlockAck {
network: NetworkId::Bitcoin,
block: last_serai_block.number(),
plans: vec![PlanMeta {
key: (Secp256k1::generator() * *network_key).to_bytes().to_vec(),
id: plan_id,
}],
plans: vec![PlanMeta { session: Session(0), id: plan_id }],
},
))
.await;
}
sign::<Secp256k1>(&mut processors, &participant_is, &network_key, plan_id).await;
sign(&mut processors, &participant_is, Session(0), plan_id).await;
})
.await;
}

View file

@ -15,6 +15,7 @@ use serai_client::{
in_instructions::primitives::{
InInstruction, InInstructionWithBalance, Batch, SignedBatch, batch_message,
},
validator_sets::primitives::Session,
};
use processor::networks::{Network, Bitcoin, Monero};
@ -23,12 +24,12 @@ use crate::{*, tests::*};
pub(crate) async fn recv_batch_preprocesses(
coordinators: &mut [Coordinator],
substrate_key: &[u8; 32],
session: Session,
batch: &Batch,
attempt: u32,
) -> (SubstrateSignId, HashMap<Participant, [u8; 64]>) {
let id = SubstrateSignId {
key: *substrate_key,
session,
id: SubstrateSignableId::Batch((batch.network, batch.id).encode().try_into().unwrap()),
attempt,
};
@ -171,7 +172,6 @@ pub(crate) async fn substrate_block(
match block.clone() {
messages::substrate::CoordinatorMessage::SubstrateBlock {
context: _,
network: sent_network,
block: sent_block,
burns: _,
batches: _,
@ -179,13 +179,8 @@ pub(crate) async fn substrate_block(
coordinator.send_message(block).await;
match coordinator.recv_message().await {
messages::ProcessorMessage::Coordinator(
messages::coordinator::ProcessorMessage::SubstrateBlockAck {
network: recvd_network,
block: recvd_block,
plans,
},
messages::coordinator::ProcessorMessage::SubstrateBlockAck { block: recvd_block, plans },
) => {
assert_eq!(recvd_network, sent_network);
assert_eq!(recvd_block, sent_block);
plans
}
@ -214,7 +209,7 @@ fn batch_test() {
coordinators[0].sync(&ops, &coordinators[1 ..]).await;
// Generate keys
let key_pair = key_gen(&mut coordinators, network).await;
let key_pair = key_gen(&mut coordinators).await;
// Now we have to mine blocks to activate the key
// (the first key is activated when the network's time as of a block exceeds the Serai time
@ -284,7 +279,7 @@ fn batch_test() {
// Make sure the processors picked it up by checking they're trying to sign a batch for it
let (mut id, mut preprocesses) =
recv_batch_preprocesses(&mut coordinators, &key_pair.0 .0, &expected_batch, 0).await;
recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, 0).await;
// Trigger a random amount of re-attempts
for attempt in 1 ..= u32::try_from(OsRng.next_u64() % 4).unwrap() {
// TODO: Double check how the processor handles this ID field
@ -298,8 +293,7 @@ fn batch_test() {
.await;
}
(id, preprocesses) =
recv_batch_preprocesses(&mut coordinators, &key_pair.0 .0, &expected_batch, attempt)
.await;
recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, attempt).await;
}
// Continue with signing the batch
@ -319,7 +313,6 @@ fn batch_test() {
serai_time,
network_latest_finalized_block: batch.batch.block,
},
network,
block: substrate_block_num + u64::from(i),
burns: vec![],
batches: vec![batch.batch.id],

View file

@ -4,14 +4,14 @@ use dkg::{Participant, ThresholdParams, tests::clone_without};
use serai_client::{
primitives::{NetworkId, BlockHash, PublicKey},
validator_sets::primitives::{Session, ValidatorSet, KeyPair},
validator_sets::primitives::{Session, KeyPair},
};
use messages::{SubstrateContext, key_gen::KeyGenId, CoordinatorMessage, ProcessorMessage};
use crate::{*, tests::*};
pub(crate) async fn key_gen(coordinators: &mut [Coordinator], network: NetworkId) -> KeyPair {
pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair {
// Perform an interaction with all processors via their coordinators
async fn interact_with_all<
FS: Fn(Participant) -> messages::key_gen::CoordinatorMessage,
@ -33,7 +33,7 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator], network: NetworkId
}
// Order a key gen
let id = KeyGenId { set: ValidatorSet { session: Session(0), network }, attempt: 0 };
let id = KeyGenId { session: Session(0), attempt: 0 };
let mut commitments = HashMap::new();
interact_with_all(
@ -132,7 +132,7 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator], network: NetworkId
.send_message(CoordinatorMessage::Substrate(
messages::substrate::CoordinatorMessage::ConfirmKeyPair {
context,
set: id.set,
session: id.session,
key_pair: key_pair.clone(),
},
))
@ -158,7 +158,7 @@ fn key_gen_test() {
.map(|(handles, key)| Coordinator::new(network, &ops, handles, key))
.collect::<Vec<_>>();
key_gen(&mut coordinators, network).await;
key_gen(&mut coordinators).await;
});
}
}

View file

@ -9,8 +9,9 @@ use messages::{sign::SignId, SubstrateContext};
use serai_client::{
primitives::{BlockHash, NetworkId},
in_instructions::primitives::Batch,
coins::primitives::{OutInstruction, OutInstructionWithBalance},
in_instructions::primitives::Batch,
validator_sets::primitives::Session,
};
use crate::{*, tests::*};
@ -18,7 +19,7 @@ use crate::{*, tests::*};
#[allow(unused)]
pub(crate) async fn recv_sign_preprocesses(
coordinators: &mut [Coordinator],
key: Vec<u8>,
session: Session,
attempt: u32,
) -> (SignId, HashMap<Participant, Vec<u8>>) {
let mut id = None;
@ -33,7 +34,7 @@ pub(crate) async fn recv_sign_preprocesses(
preprocesses: mut these_preprocesses,
}) => {
if id.is_none() {
assert_eq!(&this_id.key, &key);
assert_eq!(&this_id.session, &session);
assert_eq!(this_id.attempt, attempt);
id = Some(this_id.clone());
}
@ -62,6 +63,7 @@ pub(crate) async fn recv_sign_preprocesses(
#[allow(unused)]
pub(crate) async fn sign_tx(
coordinators: &mut [Coordinator],
session: Session,
id: SignId,
preprocesses: HashMap<Participant, Vec<u8>>,
) -> Vec<u8> {
@ -120,11 +122,11 @@ pub(crate) async fn sign_tx(
if preprocesses.contains_key(&i) {
match coordinator.recv_message().await {
messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Completed {
key,
session: this_session,
id: this_id,
tx: this_tx,
}) => {
assert_eq!(&key, &id.key);
assert_eq!(session, this_session);
assert_eq!(&this_id, &id.id);
if tx.is_none() {
@ -158,7 +160,7 @@ fn send_test() {
coordinators[0].sync(&ops, &coordinators[1 ..]).await;
// Generate keys
let key_pair = key_gen(&mut coordinators, network).await;
let key_pair = key_gen(&mut coordinators).await;
// Now we have to mine blocks to activate the key
// (the first key is activated when the network's time as of a block exceeds the Serai time
@ -195,7 +197,7 @@ fn send_test() {
// Make sure the processors picked it up by checking they're trying to sign a batch for it
let (id, preprocesses) =
recv_batch_preprocesses(&mut coordinators, &key_pair.0 .0, &expected_batch, 0).await;
recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, 0).await;
// Continue with signing the batch
let batch = sign_batch(&mut coordinators, key_pair.0 .0, id, preprocesses).await;
@ -216,7 +218,6 @@ fn send_test() {
serai_time,
network_latest_finalized_block: batch.batch.block,
},
network,
block: substrate_block_num,
burns: vec![OutInstructionWithBalance {
instruction: OutInstruction { address: wallet.address(), data: None },
@ -237,9 +238,8 @@ fn send_test() {
// Start signing the TX
let (mut id, mut preprocesses) =
recv_sign_preprocesses(&mut coordinators, key_pair.1.to_vec(), 0).await;
// TODO: Should this use the Substrate key?
assert_eq!(id, SignId { key: key_pair.1.to_vec(), id: plans[0].id, attempt: 0 });
recv_sign_preprocesses(&mut coordinators, Session(0), 0).await;
assert_eq!(id, SignId { session: Session(0), id: plans[0].id, attempt: 0 });
// Trigger a random amount of re-attempts
for attempt in 1 ..= u32::try_from(OsRng.next_u64() % 4).unwrap() {
@ -251,12 +251,11 @@ fn send_test() {
.send_message(messages::sign::CoordinatorMessage::Reattempt { id: id.clone() })
.await;
}
(id, preprocesses) =
recv_sign_preprocesses(&mut coordinators, key_pair.1.to_vec(), attempt).await;
(id, preprocesses) = recv_sign_preprocesses(&mut coordinators, Session(0), attempt).await;
}
let participating = preprocesses.keys().cloned().collect::<Vec<_>>();
let tx_id = sign_tx(&mut coordinators, id.clone(), preprocesses).await;
let tx_id = sign_tx(&mut coordinators, Session(0), id.clone(), preprocesses).await;
// Make sure all participating nodes published the TX
let participating =
@ -276,7 +275,7 @@ fn send_test() {
// Tell them of it as a completion by the relevant signing nodes
coordinator
.send_message(messages::sign::CoordinatorMessage::Completed {
key: key_pair.1.to_vec(),
session: Session(0),
id: id.id,
tx: tx_id.clone(),
})
@ -284,11 +283,11 @@ fn send_test() {
// Verify they send Completed back
match coordinator.recv_message().await {
messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Completed {
key,
session,
id: this_id,
tx: this_tx,
}) => {
assert_eq!(&key, &id.key);
assert_eq!(session, Session(0));
assert_eq!(&this_id, &id.id);
assert_eq!(this_tx, tx_id);
}
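
The same substitution runs through the Substrate-signing IDs. A final sketch (simplified stand-ins, assumed for illustration) of `SubstrateSignId` as the updated tests construct it:

// Simplified stand-ins (assumed) for the post-change Substrate sign ID.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Session(u32);

#[derive(Clone, Debug, PartialEq, Eq)]
enum SubstrateSignableId {
  // 5 bytes: the SCALE encoding of (network, batch id), per the tests' [0; 5].
  Batch([u8; 5]),
  CosigningSubstrateBlock([u8; 32]),
}

#[derive(Clone, Debug, PartialEq, Eq)]
struct SubstrateSignId {
  session: Session, // was: key: [u8; 32]
  id: SubstrateSignableId,
  attempt: u32,
}

fn main() {
  // As in the batch tests: the session stands in for the serialized key.
  let id = SubstrateSignId { session: Session(0), id: SubstrateSignableId::Batch([0; 5]), attempt: 0 };
  assert_eq!(id.session, Session(0));
}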