Remove offline participants from future DKG protocols so long as the threshold is met

Makes RemoveParticipantDueToDkg a voted-on event instead of a Provided
transaction. This removes the requirement that offline parties be able to
fully validate blame, yet unfortunately lets a dishonest supermajority have
an honest node label any arbitrary node as dishonest.

Corrects a variety of `.i(...)` calls which panicked when they shouldn't have.

Cleans up a couple of no-longer-used storage values.
Luke Parker 2023-12-18 09:13:09 -05:00
parent 008da698bc
commit c8747e23c5
10 changed files with 438 additions and 267 deletions
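
A sketch of the mechanism, for orientation: each validator signs a
RemoveParticipantDueToDkg transaction naming the key to remove, votes are
weighted by the signer's share count, and the target is fatally slashed the
moment the accumulated weight crosses the threshold t. A minimal standalone
model of that tally (simplified types; the actual handler works against the
tributary DB via the VotedToRemove/VotesToRemove keys and TributarySpec):

use std::collections::{HashMap, HashSet};

struct RemovalVotes {
  threshold: u16,                       // t: weighted votes needed to remove
  voted: HashSet<([u8; 32], [u8; 32])>, // (voter, target) pairs already seen
  tally: HashMap<[u8; 32], u16>,        // target -> accumulated vote weight
}

impl RemovalVotes {
  // Record a vote; returns true exactly once, when the tally crosses the
  // threshold (mirroring the `(prior_votes + 1) ..= new_votes` check below)
  fn vote(&mut self, voter: [u8; 32], weight: u16, target: [u8; 32]) -> bool {
    // Each (voter, target) pair may vote once (a single nonce is allocated)
    if !self.voted.insert((voter, target)) {
      return false;
    }
    let prior = self.tally.get(&target).copied().unwrap_or(0);
    let new = prior + weight;
    self.tally.insert(target, new);
    ((prior + 1) ..= new).contains(&self.threshold)
  }
}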

View file

@@ -371,14 +371,22 @@ async fn handle_processor_message<D: Db, P: P2p>(
}]
}
key_gen::ProcessorMessage::InvalidCommitments { id, faulty } => {
// This doesn't need the ID since it's a Provided transaction which everyone will provide
// With this provision comes explicit ordering (with regards to other
// RemoveParticipantDueToDkg transactions) and group consensus
// Accordingly, this can't be replayed
// It could be included on-chain early/late with regards to the chain's active attempt,
// which attempt scheduling is written to make a non-issue by auto re-attempting once a
// fatal slash occurs, regardless of timing
vec![Transaction::RemoveParticipantDueToDkg { attempt: id.attempt, participant: faulty }]
// This doesn't have guaranteed timing
//
// While the party *should* be fatally slashed and not included in future attempts,
// they'll actually be fatally slashed (assuming liveness before the Tributary retires)
// and not included in future attempts *which begin after the latency window completes*
let participant = spec
.reverse_lookup_i(
&crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
.expect("participating in DKG attempt yet we didn't save who was removed"),
faulty,
)
.unwrap();
vec![Transaction::RemoveParticipantDueToDkg {
participant,
signed: Transaction::empty_signed(),
}]
}
key_gen::ProcessorMessage::Shares { id, mut shares } => {
// Create a MuSig-based machine to inform Substrate of this key generation
@@ -388,7 +396,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
.expect("participating in a DKG attempt yet we didn't track who was removed yet?");
let our_i = spec
.i(&removed, pub_key)
.expect("processor message to DKG for a session we aren't a validator in");
.expect("processor message to DKG for an attempt we aren't a validator in");
// `tx_shares` needs to be done here as while it can be serialized from the HashMap
// without further context, it can't be deserialized without context
@@ -417,30 +425,13 @@ async fn handle_processor_message<D: Db, P: P2p>(
}]
}
key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => {
// Check if the MuSig signature had any errors as if so, we need to provide
// RemoveParticipantDueToDkg
// As for the safety of calling error_generating_key_pair, the processor is presumed
// to only send InvalidShare or GeneratedKeyPair for a given attempt
let mut txs = if let Some(faulty) =
crate::tributary::error_generating_key_pair(&mut txn, key, spec, id.attempt)
{
vec![Transaction::RemoveParticipantDueToDkg {
attempt: id.attempt,
participant: faulty,
}]
} else {
vec![]
};
txs.push(Transaction::InvalidDkgShare {
vec![Transaction::InvalidDkgShare {
attempt: id.attempt,
accuser,
faulty,
blame,
signed: Transaction::empty_signed(),
});
txs
}]
}
key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => {
// TODO2: Check the KeyGenId fields
@@ -454,6 +445,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
id.attempt,
);
// TODO: Move this into generated_key_pair?
match share {
Ok(share) => {
vec![Transaction::DkgConfirmed {
@@ -463,14 +455,32 @@ async fn handle_processor_message<D: Db, P: P2p>(
}]
}
Err(p) => {
vec![Transaction::RemoveParticipantDueToDkg { attempt: id.attempt, participant: p }]
let participant = spec
.reverse_lookup_i(
&crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
.expect("participating in DKG attempt yet we didn't save who was removed"),
p,
)
.unwrap();
vec![Transaction::RemoveParticipantDueToDkg {
participant,
signed: Transaction::empty_signed(),
}]
}
}
}
// This is a response to the ordered VerifyBlame, which is why this satisfies the provided
// transaction's needs to be perfectly ordered
key_gen::ProcessorMessage::Blame { id, participant } => {
vec![Transaction::RemoveParticipantDueToDkg { attempt: id.attempt, participant }]
let participant = spec
.reverse_lookup_i(
&crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
.expect("participating in DKG attempt yet we didn't save who was removed"),
participant,
)
.unwrap();
vec![Transaction::RemoveParticipantDueToDkg {
participant,
signed: Transaction::empty_signed(),
}]
}
},
ProcessorMessage::Sign(msg) => match msg {
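
The attempt-keyed lookup repeated above (for InvalidCommitments,
GeneratedKeyPair, and Blame) follows a single pattern; a hypothetical helper
capturing it, not part of this commit:

fn removal_vote_for(
  getter: &impl Get,
  spec: &TributarySpec,
  attempt: u32,
  i: frost::Participant,
) -> Transaction {
  // Load who was removed as of this attempt, map the share index back to the
  // validator key it belongs to, then vote to remove that validator
  let removed = crate::tributary::removed_as_of_dkg_attempt(getter, spec.genesis(), attempt)
    .expect("participating in DKG attempt yet we didn't save who was removed");
  Transaction::RemoveParticipantDueToDkg {
    participant: spec.reverse_lookup_i(&removed, i).unwrap(),
    signed: Transaction::empty_signed(),
  }
}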

View file

@@ -276,6 +276,9 @@ async fn handle_block<D: Db, Pro: Processors>(
},
)
.await;
// TODO: If we were in the set, yet were removed, drop the tributary
let mut txn = db.txn();
SeraiDkgCompleted::set(&mut txn, set, &substrate_key);
HandledEvent::handle_event(&mut txn, hash, event_id);

View file

@@ -2,6 +2,8 @@ use core::fmt::Debug;
use rand_core::{RngCore, OsRng};
use ciphersuite::{group::Group, Ciphersuite, Ristretto};
use scale::{Encode, Decode};
use serai_client::{
primitives::{SeraiAddress, Signature},
@@ -141,11 +143,8 @@ fn serialize_sign_data() {
#[test]
fn serialize_transaction() {
test_read_write(&Transaction::RemoveParticipantDueToDkg {
attempt: u32::try_from(OsRng.next_u64() >> 32).unwrap(),
participant: frost::Participant::new(
u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
)
.unwrap(),
participant: <Ristretto as Ciphersuite>::G::random(&mut OsRng),
signed: random_signed_with_nonce(&mut OsRng, 0),
});
{

View file

@@ -3,6 +3,7 @@ use std::collections::HashMap;
use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::Participant;
use serai_client::validator_sets::primitives::{KeyPair, ValidatorSet};
@@ -49,27 +50,42 @@ create_db!(
TributaryBlockNumber: (block: [u8; 32]) -> u32,
LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32],
FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
FatalSlashesAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>,
RemovedAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>,
OfflineDuringDkg: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (),
DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,
PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,
ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
RemovalNonces:
(genesis: [u8; 32], removing: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair,
KeyToDkgAttempt: (key: [u8; 32]) -> u32,
DkgCompleted: (genesis: [u8; 32]) -> (),
LocallyDkgRemoved: (genesis: [u8; 32], validator: [u8; 32]) -> (),
VotedToRemove: (genesis: [u8; 32], voter: [u8; 32], to_remove: [u8; 32]) -> (),
VotesToRemove: (genesis: [u8; 32], to_remove: [u8; 32]) -> u16,
AttemptDb: (genesis: [u8; 32], topic: &Topic) -> u32,
ReattemptDb: (genesis: [u8; 32], block: u32) -> Vec<Topic>,
DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16,
DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec<u8>,
DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,
ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair,
KeyToDkgAttempt: (key: [u8; 32]) -> u32,
DkgLocallyCompleted: (genesis: [u8; 32]) -> (),
PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,
SignedTransactionDb: (order: &[u8], nonce: u32) -> Vec<u8>,
}
);
impl FatalSlashes {
pub fn get_as_keys(getter: &impl Get, genesis: [u8; 32]) -> Vec<<Ristretto as Ciphersuite>::G> {
FatalSlashes::get(getter, genesis)
.unwrap_or(vec![])
.iter()
.map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap())
.collect::<Vec<_>>()
}
}
impl FatallySlashed {
pub fn set_fatally_slashed(txn: &mut impl DbTxn, genesis: [u8; 32], account: [u8; 32]) {
Self::set(txn, genesis, account, &());

View file

@@ -1,13 +1,14 @@
use core::ops::Deref;
use std::collections::HashMap;
use zeroize::{Zeroize, Zeroizing};
use zeroize::Zeroizing;
use rand_core::OsRng;
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::dkg::Participant;
use scale::{Encode, Decode};
use serai_client::{Public, Signature, validator_sets::primitives::KeyPair};
use serai_client::{Signature, validator_sets::primitives::KeyPair};
use tributary::{Signed, TransactionKind, TransactionTrait};
@@ -42,35 +43,6 @@ pub fn dkg_confirmation_nonces(
.preprocess()
}
// If there's an error generating a key pair, return any errors which would've occurred when
// executing the DkgConfirmer in order to stay in sync with those who did.
//
// The caller must ensure only error_generating_key_pair or generated_key_pair is called for a
// given attempt.
pub fn error_generating_key_pair(
txn: &mut impl DbTxn,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
spec: &TributarySpec,
attempt: u32,
) -> Option<Participant> {
let preprocesses = ConfirmationNonces::get(txn, spec.genesis(), attempt).unwrap();
// Sign a key pair which can't be valid
// (0xff used as 0 would be the Ristretto identity point, 0-length for the network key)
let key_pair = KeyPair(Public([0xff; 32]), vec![0xffu8; 0].try_into().unwrap());
match DkgConfirmer::new(key, spec, txn, attempt)
.expect("reporting an error during DKG for an unrecognized attempt")
.share(preprocesses, &key_pair)
{
Ok(mut share) => {
// Zeroize the share to ensure it's not accessed
share.zeroize();
None
}
Err(p) => Some(p),
}
}
pub fn generated_key_pair<D: Db>(
txn: &mut D::Transaction<'_>,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
@@ -92,7 +64,7 @@ fn unflatten(
data: &mut HashMap<Participant, Vec<u8>>,
) {
for (validator, _) in spec.validators() {
let range = spec.i(removed, validator).unwrap();
let Some(range) = spec.i(removed, validator) else { continue };
let Some(all_segments) = data.remove(&range.start) else {
continue;
};
@@ -126,10 +98,10 @@ impl<
panic!("accumulating data for a participant multiple times");
}
let signer_shares = {
let signer_i = self
.spec
.i(removed, signer)
.expect("transaction signed by a non-validator for this tributary");
let Some(signer_i) = self.spec.i(removed, signer) else {
log::warn!("accumulating data from {} who was removed", hex::encode(signer.to_bytes()));
return Accumulation::NotReady;
};
u16::from(signer_i.end) - u16::from(signer_i.start)
};
@@ -153,10 +125,6 @@ impl<
// place
assert_eq!(AttemptDb::attempt(self.txn, genesis, data_spec.topic), Some(data_spec.attempt));
ReattemptDb::schedule_reattempt(self.txn, genesis, self.block_number, data_spec.topic);
// Because this attempt was participated in, it was justified
// The question becomes why did the prior attempt fail?
// TODO: Slash people who failed to participate as expected in the prior attempt
}
// If we have all the needed commitments/preprocesses/shares, tell the processor
@@ -169,37 +137,29 @@ impl<
&data_spec.topic,
&data_spec.attempt
);
return Accumulation::Ready({
let mut data = HashMap::new();
for validator in self.spec.validators().iter().map(|validator| validator.0) {
data.insert(
self.spec.i(removed, validator).unwrap().start,
if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) {
data
} else {
continue;
},
);
}
assert_eq!(data.len(), usize::from(needed));
let mut data = HashMap::new();
for validator in self.spec.validators().iter().map(|validator| validator.0) {
let Some(i) = self.spec.i(removed, validator) else { continue };
data.insert(
i.start,
if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) {
data
} else {
continue;
},
);
}
// Remove our own piece of data, if we were involved
if data
.remove(
&self
.spec
.i(removed, Ristretto::generator() * self.our_key.deref())
.expect("handling a message for a Tributary we aren't part of")
.start,
)
.is_some()
{
DataSet::Participating(data)
} else {
DataSet::NotParticipating
assert_eq!(data.len(), usize::from(needed));
// Remove our own piece of data, if we were involved
if let Some(i) = self.spec.i(removed, Ristretto::generator() * self.our_key.deref()) {
if data.remove(&i.start).is_some() {
return Accumulation::Ready(DataSet::Participating(data));
}
});
}
return Accumulation::Ready(DataSet::NotParticipating);
}
Accumulation::NotReady
}
@@ -261,7 +221,12 @@ impl<
signer: <Ristretto as Ciphersuite>::G,
len: usize,
) -> Result<(), ()> {
let signer_i = self.spec.i(removed, signer).unwrap();
let Some(signer_i) = self.spec.i(removed, signer) else {
// TODO: Ensure processor doesn't so participate/check how it handles removals for being
// offline
self.fatal_slash(signer.to_bytes(), "signer participated despite being removed");
Err(())?
};
if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) {
self.fatal_slash(
signer.to_bytes(),
@@ -288,17 +253,33 @@ impl<
}
match tx {
Transaction::RemoveParticipantDueToDkg { attempt, participant } => self
.fatal_slash_with_participant_index(
&removed_as_of_dkg_attempt(self.txn, genesis, attempt).unwrap_or_else(|| {
panic!(
"removed a participant due to a provided transaction with an attempt not {}",
"locally handled?"
)
}),
participant,
"RemoveParticipantDueToDkg Provided TX",
),
Transaction::RemoveParticipantDueToDkg { participant, signed } => {
if self.spec.i(&[], participant).is_none() {
self.fatal_slash(
participant.to_bytes(),
"RemoveParticipantDueToDkg vote for non-validator",
);
return;
}
let participant = participant.to_bytes();
let signer = signed.signer.to_bytes();
assert!(
VotedToRemove::get(self.txn, genesis, signer, participant).is_none(),
"VotedToRemove multiple times despite a single nonce being allocated",
);
VotedToRemove::set(self.txn, genesis, signer, participant, &());
let prior_votes = VotesToRemove::get(self.txn, genesis, participant).unwrap_or(0);
let signer_votes =
self.spec.i(&[], signed.signer).expect("signer wasn't a validator for this network?");
let new_votes = prior_votes + u16::from(signer_votes.end) - u16::from(signer_votes.start);
VotesToRemove::set(self.txn, genesis, participant, &new_votes);
if ((prior_votes + 1) ..= new_votes).contains(&self.spec.t()) {
self.fatal_slash(participant, "RemoveParticipantDueToDkg vote")
}
}
Transaction::DkgCommitments { attempt, commitments, signed } => {
let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
@@ -325,7 +306,10 @@ impl<
.await;
}
Accumulation::Ready(DataSet::NotParticipating) => {
panic!("wasn't a participant in DKG commitments")
assert!(
removed.contains(&(Ristretto::generator() * self.our_key.deref())),
"NotParticipating in a DkgCommitments we weren't removed for"
);
}
Accumulation::NotReady => {}
}
@@ -336,14 +320,19 @@ impl<
self.fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt");
return;
};
let not_participating = removed.contains(&(Ristretto::generator() * self.our_key.deref()));
let Ok(()) = self.check_sign_data_len(&removed, signed.signer, shares.len()) else {
return;
};
let sender_i = self
.spec
.i(&removed, signed.signer)
.expect("transaction added to tributary by signer who isn't a participant");
let Some(sender_i) = self.spec.i(&removed, signed.signer) else {
self.fatal_slash(
signed.signer.to_bytes(),
"DkgShares for a DKG they aren't participating in",
);
return;
};
let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start);
for shares in &shares {
if shares.len() != (usize::from(self.spec.n(&removed) - sender_is_len)) {
@@ -353,56 +342,59 @@ impl<
}
// Save each share as needed for blame
{
let from_range = self.spec.i(&removed, signed.signer).unwrap();
for (from_offset, shares) in shares.iter().enumerate() {
let from =
Participant::new(u16::from(from_range.start) + u16::try_from(from_offset).unwrap())
.unwrap();
for (from_offset, shares) in shares.iter().enumerate() {
let from =
Participant::new(u16::from(sender_i.start) + u16::try_from(from_offset).unwrap())
.unwrap();
for (to_offset, share) in shares.iter().enumerate() {
// 0-indexed (the enumeration) to 1-indexed (Participant)
let mut to = u16::try_from(to_offset).unwrap() + 1;
// Adjust for the omission of the sender's own shares
if to >= u16::from(from_range.start) {
to += u16::from(from_range.end) - u16::from(from_range.start);
}
let to = Participant::new(to).unwrap();
DkgShare::set(self.txn, genesis, from.into(), to.into(), share);
for (to_offset, share) in shares.iter().enumerate() {
// 0-indexed (the enumeration) to 1-indexed (Participant)
let mut to = u16::try_from(to_offset).unwrap() + 1;
// Adjust for the omission of the sender's own shares
if to >= u16::from(sender_i.start) {
to += u16::from(sender_i.end) - u16::from(sender_i.start);
}
let to = Participant::new(to).unwrap();
DkgShare::set(self.txn, genesis, from.into(), to.into(), share);
}
}
// Filter down to only our share's bytes for handle
let our_i = self
.spec
.i(&removed, Ristretto::generator() * self.our_key.deref())
.expect("in a tributary we're not a validator for");
let our_shares = if sender_i == our_i {
vec![]
} else {
// 1-indexed to 0-indexed
let mut our_i_pos = u16::from(our_i.start) - 1;
// Handle the omission of the sender's own data
if u16::from(our_i.start) > u16::from(sender_i.start) {
our_i_pos -= sender_is_len;
let our_shares = if let Some(our_i) =
self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())
{
if sender_i == our_i {
vec![]
} else {
// 1-indexed to 0-indexed
let mut our_i_pos = u16::from(our_i.start) - 1;
// Handle the omission of the sender's own data
if u16::from(our_i.start) > u16::from(sender_i.start) {
our_i_pos -= sender_is_len;
}
let our_i_pos = usize::from(our_i_pos);
shares
.iter_mut()
.map(|shares| {
shares
.drain(
our_i_pos ..
(our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))),
)
.collect::<Vec<_>>()
})
.collect()
}
let our_i_pos = usize::from(our_i_pos);
shares
.iter_mut()
.map(|shares| {
shares
.drain(
our_i_pos ..
(our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))),
)
.collect::<Vec<_>>()
})
.collect()
} else {
assert!(
not_participating,
"we didn't have an i while handling DkgShares we weren't removed for"
);
// Since we're not participating, simply save vec![] for our shares
vec![]
};
// Drop shares as it's been mutated into invalidity
// Drop shares as it's presumably been mutated into invalidity
drop(shares);
let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt };
@@ -458,7 +450,7 @@ impl<
.await;
}
Accumulation::Ready(DataSet::NotParticipating) => {
panic!("wasn't a participant in DKG shares")
assert!(not_participating, "NotParticipating in a DkgShares we weren't removed for");
}
Accumulation::NotReady => {}
}
@@ -470,7 +462,13 @@ impl<
.fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized attempt");
return;
};
let range = self.spec.i(&removed, signed.signer).unwrap();
let Some(range) = self.spec.i(&removed, signed.signer) else {
self.fatal_slash(
signed.signer.to_bytes(),
"InvalidDkgShare for a DKG they aren't participating in",
);
return;
};
if !range.contains(&accuser) {
self.fatal_slash(
signed.signer.to_bytes(),
@@ -524,8 +522,8 @@ impl<
};
let preprocesses = ConfirmationNonces::get(self.txn, genesis, attempt).unwrap();
// TODO: This can technically happen under very very very specific timing as the txn put
// happens before DkgConfirmed, yet the txn commit isn't guaranteed to
// TODO: This can technically happen under very very very specific timing as the txn
// put happens before DkgConfirmed, yet the txn commit isn't guaranteed to
let key_pair = DkgKeyPair::get(self.txn, genesis, attempt).expect(
"in DkgConfirmed handling, which happens after everyone \
(including us) fires DkgConfirmed, yet no confirming key pair",
@@ -535,12 +533,17 @@ impl<
let sig = match confirmer.complete(preprocesses, &key_pair, shares) {
Ok(sig) => sig,
Err(p) => {
self.fatal_slash_with_participant_index(&removed, p, "invalid DkgConfirmer share");
let mut tx = Transaction::RemoveParticipantDueToDkg {
participant: self.spec.reverse_lookup_i(&removed, p).unwrap(),
signed: Transaction::empty_signed(),
};
tx.sign(&mut OsRng, genesis, self.our_key);
self.publish_tributary_tx.publish_tributary_tx(tx).await;
return;
}
};
DkgCompleted::set(self.txn, genesis, &());
DkgLocallyCompleted::set(self.txn, genesis, &());
self
.publish_serai_tx
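
The `to` adjustment in DkgShares above compensates for each sender omitting
shares to themselves; a standalone model of just that index math (u16
stand-ins for the Participant indices):

// Map a 0-indexed position within a sender's serialized shares to the
// 1-indexed recipient, skipping over the sender's own range
fn to_index(sender_start: u16, sender_end: u16, to_offset: u16) -> u16 {
  // 0-indexed (the enumeration) to 1-indexed (Participant)
  let mut to = to_offset + 1;
  // Adjust for the omission of the sender's own shares
  if to >= sender_start {
    to += sender_end - sender_start;
  }
  to
}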

View file

@@ -32,20 +32,12 @@ pub fn removed_as_of_dkg_attempt(
if attempt == 0 {
Some(vec![])
} else {
FatalSlashesAsOfDkgAttempt::get(getter, genesis, attempt).map(|keys| {
RemovedAsOfDkgAttempt::get(getter, genesis, attempt).map(|keys| {
keys.iter().map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap()).collect()
})
}
}
pub fn latest_removed(getter: &impl Get, genesis: [u8; 32]) -> Vec<<Ristretto as Ciphersuite>::G> {
FatalSlashes::get(getter, genesis)
.unwrap_or(vec![])
.iter()
.map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap())
.collect()
}
pub fn removed_as_of_set_keys(
getter: &impl Get,
set: ValidatorSet,

View file

@@ -1,10 +1,9 @@
use core::{marker::PhantomData, ops::Deref, future::Future, time::Duration};
use std::sync::Arc;
use std::{sync::Arc, collections::HashSet};
use zeroize::Zeroizing;
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::Participant;
use tokio::sync::broadcast;
@@ -193,43 +192,18 @@
> TributaryBlockHandler<'_, T, Pro, PST, PTT, RID, P>
{
pub fn fatal_slash(&mut self, slashing: [u8; 32], reason: &str) {
// TODO: If this fatal slash puts the remaining set below the threshold, spin
let genesis = self.spec.genesis();
log::warn!("fatally slashing {}. reason: {}", hex::encode(slashing), reason);
FatallySlashed::set_fatally_slashed(self.txn, genesis, slashing);
// TODO: disconnect the node from network/ban from further participation in all Tributaries
// TODO: If during DKG, trigger a re-attempt
// Despite triggering a re-attempt, this DKG may still complete and may become in-use
// TODO: disconnect the node from network/ban from further participation in all Tributaries
}
// TODO: Once Substrate confirms a key, we need to rotate our validator set OR form a second
// Tributary post-DKG
// https://github.com/serai-dex/serai/issues/426
pub fn fatal_slash_with_participant_index(
&mut self,
removed: &[<Ristretto as Ciphersuite>::G],
i: Participant,
reason: &str,
) {
// Resolve from Participant to <Ristretto as Ciphersuite>::G
let i = u16::from(i);
let mut validator = None;
for (potential, _) in self.spec.validators() {
let v_i = self.spec.i(removed, potential).unwrap();
if (u16::from(v_i.start) <= i) && (i < u16::from(v_i.end)) {
validator = Some(potential);
break;
}
}
let validator = validator.unwrap();
self.fatal_slash(validator.to_bytes(), reason);
}
async fn handle<D: Db>(mut self) {
log::info!("found block for Tributary {:?}", self.spec.set());
@@ -267,10 +241,151 @@ impl<
}
let genesis = self.spec.genesis();
let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis);
// Calculate the shares still present, spinning if not enough are
// still_present_shares is used by a below branch, yet it's a natural byproduct of checking if
// we should spin, hence storing it in a variable here
let still_present_shares = {
// Start with the original n value
let mut present_shares = self.spec.n(&[]);
// Remove everyone fatally slashed
for removed in &current_fatal_slashes {
let original_i_for_removed =
self.spec.i(&[], *removed).expect("removed party was never present");
let removed_shares =
u16::from(original_i_for_removed.end) - u16::from(original_i_for_removed.start);
present_shares -= removed_shares;
}
// Spin if the present shares don't satisfy the required threshold
if present_shares < self.spec.t() {
loop {
log::error!(
"fatally slashed so many participants for {:?} we no longer meet the threshold",
self.spec.set()
);
tokio::time::sleep(core::time::Duration::from_secs(60)).await;
}
}
present_shares
};
for topic in ReattemptDb::take(self.txn, genesis, self.block_number) {
let attempt = AttemptDb::start_next_attempt(self.txn, genesis, topic);
log::info!("re-attempting {topic:?} with attempt {attempt}");
// Slash people who failed to participate as expected in the prior attempt
{
let prior_attempt = attempt - 1;
let (removed, expected_participants) = match topic {
Topic::Dkg => {
// Every validator who wasn't removed is expected to have participated
let removed =
crate::tributary::removed_as_of_dkg_attempt(self.txn, genesis, prior_attempt)
.expect("prior attempt didn't have its removed saved to disk");
let removed_set = removed.iter().copied().collect::<HashSet<_>>();
(
removed,
self
.spec
.validators()
.into_iter()
.filter_map(|(validator, _)| {
Some(validator).filter(|validator| !removed_set.contains(validator))
})
.collect(),
)
}
Topic::DkgConfirmation => {
panic!("TODO: re-attempting DkgConfirmation when we should be re-attempting the Dkg")
}
Topic::SubstrateSign(_) | Topic::Sign(_) => {
let removed =
crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
.expect("SubstrateSign/Sign yet have yet to set keys");
// TODO: If 67% sent preprocesses, this should be them. Else, this should be vec![]
let expected_participants = vec![];
(removed, expected_participants)
}
};
let (expected_topic, expected_label) = match topic {
Topic::Dkg => {
let n = self.spec.n(&removed);
// If we got all the DKG shares, we should be on DKG confirmation
let share_spec =
DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt: prior_attempt };
if DataReceived::get(self.txn, genesis, &share_spec).unwrap_or(0) == n {
// Label::Share since there is no Label::Preprocess for DkgConfirmation since the
// preprocess is part of Topic::Dkg Label::Share
(Topic::DkgConfirmation, Label::Share)
} else {
let preprocess_spec = DataSpecification {
topic: Topic::Dkg,
label: Label::Preprocess,
attempt: prior_attempt,
};
// If we got all the DKG preprocesses, DKG shares
if DataReceived::get(self.txn, genesis, &preprocess_spec).unwrap_or(0) == n {
// Label::Share since there is no Label::Preprocess for DkgConfirmation since the
// preprocess is part of Topic::Dkg Label::Share
(Topic::Dkg, Label::Share)
} else {
(Topic::Dkg, Label::Preprocess)
}
}
}
Topic::DkgConfirmation => unreachable!(),
// If we got enough participants to move forward, then we expect shares from them all
Topic::SubstrateSign(_) | Topic::Sign(_) => (topic, Label::Share),
};
let mut did_not_participate = vec![];
for expected_participant in expected_participants {
if DataDb::get(
self.txn,
genesis,
&DataSpecification {
topic: expected_topic,
label: expected_label,
attempt: prior_attempt,
},
&expected_participant.to_bytes(),
)
.is_none()
{
did_not_participate.push(expected_participant);
}
}
// If a supermajority didn't participate as expected, the protocol was likely aborted due
// to detection of a completion or some larger networking error
// Accordingly, clear did_not_participate
// TODO
// If during the DKG, explicitly mark these people as having been offline
// TODO: If they were offline sufficiently long ago, don't strike them off
if topic == Topic::Dkg {
let mut existing = OfflineDuringDkg::get(self.txn, genesis).unwrap_or(vec![]);
for did_not_participate in did_not_participate {
existing.push(did_not_participate.to_bytes());
}
OfflineDuringDkg::set(self.txn, genesis, &existing);
}
// Slash everyone who didn't participate as expected
// This may be overzealous as if a minority detects a completion, they'll abort yet the
// supermajority will cause the above allowance to not trigger, causing an honest minority
// to be slashed
// At the end of the protocol, the accumulated slashes are reduced by the amount obtained
// by the worst-performing member of the supermajority, and this is expected to
// sufficiently compensate for slashes which occur under normal operation
// TODO
}
/*
All of these have the same common flow:
@@ -290,33 +405,62 @@
*/
match topic {
Topic::Dkg => {
FatalSlashesAsOfDkgAttempt::set(
let mut removed = current_fatal_slashes.clone();
let t = self.spec.t();
{
let mut present_shares = still_present_shares;
// Load the parties marked as offline across the various attempts
let mut offline = OfflineDuringDkg::get(self.txn, genesis)
.unwrap_or(vec![])
.iter()
.map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap())
.collect::<Vec<_>>();
// Pop from the list to prioritize the removal of those recently offline
while let Some(offline) = offline.pop() {
// Make sure they weren't removed already (such as due to being fatally slashed)
// This also may trigger if they were offline across multiple attempts
if removed.contains(&offline) {
continue;
}
// If we can remove them and still meet the threshold, do so
let original_i_for_offline =
self.spec.i(&[], offline).expect("offline was never present?");
let offline_shares =
u16::from(original_i_for_offline.end) - u16::from(original_i_for_offline.start);
if (present_shares - offline_shares) >= t {
present_shares -= offline_shares;
removed.push(offline);
}
// If we've removed as many people as we can, break
if present_shares == t {
break;
}
}
}
RemovedAsOfDkgAttempt::set(
self.txn,
genesis,
attempt,
&FatalSlashes::get(self.txn, genesis).unwrap_or(vec![]),
&removed.iter().map(<Ristretto as Ciphersuite>::G::to_bytes).collect(),
);
if DkgCompleted::get(self.txn, genesis).is_none() {
if DkgLocallyCompleted::get(self.txn, genesis).is_none() {
let Some(our_i) = self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())
else {
continue;
};
// Since it wasn't completed, instruct the processor to start the next attempt
let id =
processor_messages::key_gen::KeyGenId { session: self.spec.set().session, attempt };
let removed = crate::tributary::latest_removed(self.txn, genesis);
let our_i =
self.spec.i(&removed, Ristretto::generator() * self.our_key.deref()).unwrap();
// TODO: Don't fatal slash, yet don't include, parties who have been offline so long as
// we still meet the needed threshold. This will have to have any parties removed for
// being offline, who aren't participating in the confirmed key, drop the Tributary and
// notify their processor.
// TODO: Instead of DKG confirmations taking a n-of-n MuSig, have it take a t-of-n with
// a specification of those explicitly removed and those removed due to being offline.
let params =
frost::ThresholdParams::new(self.spec.t(), self.spec.n(&removed), our_i.start)
.unwrap();
frost::ThresholdParams::new(t, self.spec.n(&removed), our_i.start).unwrap();
let shares = u16::from(our_i.end) - u16::from(our_i.start);
self
@ -328,9 +472,7 @@ impl<
.await;
}
}
Topic::DkgConfirmation => {
panic!("re-attempting DkgConfirmation when we should be re-attempting the Dkg")
}
Topic::DkgConfirmation => unreachable!(),
Topic::SubstrateSign(inner_id) => {
let id = processor_messages::coordinator::SubstrateSignId {
session: self.spec.set().session,
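
The offline-party selection above (the commit's headline change) reduces, in
isolation, to the following standalone sketch, assuming simplified types
(explicit weights stand in for the `spec.i(..)` share ranges):

// Exclude recently-offline parties from the next DKG attempt, but only while
// the remaining weighted shares stay at or above the threshold t
fn select_removed(
  t: u16,
  still_present_shares: u16,         // n minus the fatally slashed parties' shares
  fatally_slashed: &[[u8; 32]],
  mut offline: Vec<([u8; 32], u16)>, // (key, weighted shares), oldest first
) -> Vec<[u8; 32]> {
  let mut removed = fatally_slashed.to_vec();
  let mut present_shares = still_present_shares;
  // Pop from the list to prioritize the removal of those recently offline
  while let Some((key, shares)) = offline.pop() {
    // Skip anyone already removed (fatally slashed, or offline repeatedly)
    if removed.contains(&key) {
      continue;
    }
    // A still-present party's shares are counted in present_shares, so this
    // subtraction can't underflow
    if (present_shares - shares) >= t {
      present_shares -= shares;
      removed.push(key);
    }
    // If we've removed as many people as we can, stop
    if present_shares == t {
      break;
    }
  }
  removed
}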

View file

@@ -215,14 +215,16 @@ fn threshold_i_map_to_keys_and_musig_i_map(
mut map: HashMap<Participant, Vec<u8>>,
) -> (Vec<<Ristretto as Ciphersuite>::G>, HashMap<Participant, Vec<u8>>) {
// Insert our own index so calculations aren't offset
let our_threshold_i =
spec.i(removed, <Ristretto as Ciphersuite>::generator() * our_key.deref()).unwrap().start;
let our_threshold_i = spec
.i(removed, <Ristretto as Ciphersuite>::generator() * our_key.deref())
.expect("MuSig t-of-n signing a for a protocol we were removed from")
.start;
assert!(map.insert(our_threshold_i, vec![]).is_none());
let spec_validators = spec.validators();
let key_from_threshold_i = |threshold_i| {
for (key, _) in &spec_validators {
if threshold_i == spec.i(removed, *key).unwrap().start {
if threshold_i == spec.i(removed, *key).expect("MuSig t-of-n participant was removed").start {
return *key;
}
}
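
For orientation: DKG data arrives keyed by each validator's first threshold
share index, while the MuSig confirmation numbers its participants
contiguously from 1. A simplified model of that re-keying, assuming it's
positional over the sorted start indices (u16 stand-ins for the keys and
Participants the real function uses):

use std::collections::HashMap;

fn to_musig_indices(mut map: HashMap<u16, Vec<u8>>) -> (Vec<u16>, HashMap<u16, Vec<u8>>) {
  // Sort the threshold start indices so every party derives the same ordering
  let mut starts = map.keys().copied().collect::<Vec<_>>();
  starts.sort();
  let mut musig = HashMap::new();
  for (pos, start) in starts.iter().enumerate() {
    // MuSig participants are 1-indexed by position in the sorted list
    musig.insert(u16::try_from(pos + 1).unwrap(), map.remove(start).unwrap());
  }
  (starts, musig)
}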

View file

@@ -137,6 +137,19 @@ impl TributarySpec {
Some(result_i)
}
pub fn reverse_lookup_i(
&self,
removed_validators: &[<Ristretto as Ciphersuite>::G],
i: Participant,
) -> Option<<Ristretto as Ciphersuite>::G> {
for (validator, _) in &self.validators {
if self.i(removed_validators, *validator).map_or(false, |range| range.contains(&i)) {
return Some(*validator);
}
}
None
}
pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {
self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect()
}
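
A property sketch of the new method, assuming a `spec` and `removed` are in
scope: reverse_lookup_i inverts i for every validator present in the attempt.

for (validator, _) in spec.validators() {
  if let Some(range) = spec.i(&removed, validator) {
    for i in u16::from(range.start) .. u16::from(range.end) {
      // Every share index in the validator's range maps back to the validator
      let i = Participant::new(i).unwrap();
      assert_eq!(spec.reverse_lookup_i(&removed, i), Some(validator));
    }
  }
}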

View file

@@ -131,8 +131,8 @@ impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> SignData<Id> {
#[derive(Clone, PartialEq, Eq)]
pub enum Transaction {
RemoveParticipantDueToDkg {
attempt: u32,
participant: Participant,
participant: <Ristretto as Ciphersuite>::G,
signed: Signed,
},
DkgCommitments {
@@ -195,11 +195,11 @@ pub enum Transaction {
impl Debug for Transaction {
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
match self {
Transaction::RemoveParticipantDueToDkg { attempt, participant } => fmt
Transaction::RemoveParticipantDueToDkg { participant, signed } => fmt
.debug_struct("Transaction::RemoveParticipantDueToDkg")
.field("participant", participant)
.field("attempt", attempt)
.finish(),
.field("participant", &hex::encode(participant.to_bytes()))
.field("signer", &hex::encode(signed.signer.to_bytes()))
.finish_non_exhaustive(),
Transaction::DkgCommitments { attempt, commitments: _, signed } => fmt
.debug_struct("Transaction::DkgCommitments")
.field("attempt", attempt)
@@ -255,17 +255,8 @@ impl ReadWrite for Transaction {
match kind[0] {
0 => Ok(Transaction::RemoveParticipantDueToDkg {
attempt: {
let mut attempt = [0; 4];
reader.read_exact(&mut attempt)?;
u32::from_le_bytes(attempt)
},
participant: {
let mut participant = [0; 2];
reader.read_exact(&mut participant)?;
Participant::new(u16::from_le_bytes(participant))
.ok_or_else(|| io::Error::other("invalid participant in RemoveParticipantDueToDkg"))?
},
participant: Ristretto::read_G(reader)?,
signed: Signed::read_without_nonce(reader, 0)?,
}),
1 => {
@@ -428,10 +419,10 @@ impl ReadWrite for Transaction {
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self {
Transaction::RemoveParticipantDueToDkg { attempt, participant } => {
Transaction::RemoveParticipantDueToDkg { participant, signed } => {
writer.write_all(&[0])?;
writer.write_all(&attempt.to_le_bytes())?;
writer.write_all(&u16::from(*participant).to_le_bytes())
writer.write_all(&participant.to_bytes())?;
signed.write_without_nonce(writer)
}
Transaction::DkgCommitments { attempt, commitments, signed } => {
@@ -457,7 +448,7 @@ impl ReadWrite for Transaction {
writer.write_all(&[2])?;
writer.write_all(&attempt.to_le_bytes())?;
// `shares` is a Vec which is supposed to map to a HashMap<Pariticpant, Vec<u8>>. Since we
// `shares` is a Vec which is supposed to map to a HashMap<Participant, Vec<u8>>. Since we
// bound participants to 150, this conversion is safe if a valid in-memory transaction.
writer.write_all(&[u8::try_from(shares.len()).unwrap()])?;
// This assumes at least one share is being sent to another party
@@ -545,7 +536,9 @@ impl ReadWrite for Transaction {
impl TransactionTrait for Transaction {
fn kind(&self) -> TransactionKind<'_> {
match self {
Transaction::RemoveParticipantDueToDkg { .. } => TransactionKind::Provided("remove"),
Transaction::RemoveParticipantDueToDkg { participant, signed } => {
TransactionKind::Signed((b"remove", participant.to_bytes()).encode(), signed)
}
Transaction::DkgCommitments { attempt, commitments: _, signed } |
Transaction::DkgShares { attempt, signed, .. } |
@@ -612,10 +605,9 @@ impl Transaction {
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
) {
fn signed(tx: &mut Transaction) -> (u32, &mut Signed) {
#[allow(clippy::match_same_arms)] // Doesn't make semantic sense here
let nonce = match tx {
Transaction::RemoveParticipantDueToDkg { .. } => {
panic!("signing RemoveParticipantDueToDkg")
}
Transaction::RemoveParticipantDueToDkg { .. } => 0,
Transaction::DkgCommitments { .. } => 0,
Transaction::DkgShares { .. } => 1,
@@ -635,8 +627,7 @@
(
nonce,
match tx {
Transaction::RemoveParticipantDueToDkg { .. } => panic!("signing RemoveParticipant"),
Transaction::RemoveParticipantDueToDkg { ref mut signed, .. } |
Transaction::DkgCommitments { ref mut signed, .. } |
Transaction::DkgShares { ref mut signed, .. } |
Transaction::InvalidDkgShare { ref mut signed, .. } |