Mirror of https://github.com/serai-dex/serai.git (synced 2025-01-03 09:29:46 +00:00)
Handle the combination of DKG removals with re-attempts

With a DKG removal comes a reduction in the number of participants, which re-attempts previously ignored. Now, we determine n/i based on the parties removed, and deterministically obtain the context of who was removed.
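As a rough, hedged illustration of the new shape of the API (simplified stand-ins for TributarySpec::n and TributarySpec::i; the validator identifiers, weights, and exact index arithmetic here are assumptions for the sketch, not the actual coordinator code), removing a validator now shrinks both the total share count and later validators' index ranges:

    use std::ops::Range;

    // Simplified stand-in: validators are identified by a byte and carry a key-share weight.
    struct Spec {
      validators: Vec<(u8, u16)>,
    }

    impl Spec {
      // n = sum of weights, treating removed validators as weight 0 (mirrors the patched `n`)
      fn n(&self, removed: &[u8]) -> u16 {
        self
          .validators
          .iter()
          .map(|(v, weight)| if removed.contains(v) { 0 } else { *weight })
          .sum()
      }

      // i = this validator's contiguous range of share indices, skipping removed validators
      fn i(&self, removed: &[u8], key: u8) -> Option<Range<u16>> {
        let mut start = 1;
        for (v, weight) in &self.validators {
          if removed.contains(v) {
            // A removed validator no longer occupies any indices
            if *v == key {
              return None;
            }
            continue;
          }
          if *v == key {
            return Some(start .. start + weight);
          }
          start += weight;
        }
        None
      }
    }

    fn main() {
      let spec = Spec { validators: vec![(b'A', 2), (b'B', 1), (b'C', 1)] };
      assert_eq!(spec.n(&[]), 4);
      assert_eq!(spec.n(&[b'B']), 3);
      // With B removed, C's indices shift down to fill the gap
      assert_eq!(spec.i(&[b'B'], b'C'), Some(3 .. 4));
      assert_eq!(spec.i(&[b'B'], b'B'), None);
    }

The commit threads a list of removed validators through every call that previously assumed the full validator set, as the diff below shows.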
This commit is contained in:
Parent: 884b6a6fec
Commit: 77edd00725
15 changed files with 410 additions and 132 deletions
@@ -108,14 +108,14 @@ async fn add_tributary<D: Db, Pro: Processors, P: P2p>(
   // This is safe due to the message-queue deduplicating based off the intent system
   let set = spec.set();
   let our_i = spec
-    .i(Ristretto::generator() * key.deref())
+    .i(&[], Ristretto::generator() * key.deref())
     .expect("adding a tributary for a set we aren't in set for");
   processors
     .send(
       set.network,
       processor_messages::key_gen::CoordinatorMessage::GenerateKey {
         id: processor_messages::key_gen::KeyGenId { session: set.session, attempt: 0 },
-        params: frost::ThresholdParams::new(spec.t(), spec.n(), our_i.start).unwrap(),
+        params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(),
         shares: u16::from(our_i.end) - u16::from(our_i.start),
       },
     )
@@ -370,21 +370,24 @@ async fn handle_processor_message<D: Db, P: P2p>(
           signed: Transaction::empty_signed(),
         }]
       }
-      key_gen::ProcessorMessage::InvalidCommitments { id: _, faulty } => {
+      key_gen::ProcessorMessage::InvalidCommitments { id, faulty } => {
         // This doesn't need the ID since it's a Provided transaction which everyone will provide
-        // With this provision comes explicit ordering (with regards to other RemoveParticipant
-        // transactions) and group consensus
+        // With this provision comes explicit ordering (with regards to other
+        // RemoveParticipantDueToDkg transactions) and group consensus
         // Accordingly, this can't be replayed
         // It could be included on-chain early/late with regards to the chain's active attempt,
-        // which attempt scheduling is written to avoid
-        vec![Transaction::RemoveParticipant(faulty)]
+        // which attempt scheduling is written to make a non-issue by auto re-attempting once a
+        // fatal slash occurs, regardless of timing
+        vec![Transaction::RemoveParticipantDueToDkg { attempt: id.attempt, participant: faulty }]
       }
       key_gen::ProcessorMessage::Shares { id, mut shares } => {
         // Create a MuSig-based machine to inform Substrate of this key generation
         let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, id.attempt);

+        let removed = crate::tributary::removed_as_of_dkg_attempt(&txn, genesis, id.attempt)
+          .expect("participating in a DKG attempt yet we didn't track who was removed yet?");
         let our_i = spec
-          .i(pub_key)
+          .i(&removed, pub_key)
           .expect("processor message to DKG for a session we aren't a validator in");

         // `tx_shares` needs to be done here as while it can be serialized from the HashMap
@@ -392,7 +395,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
         let mut tx_shares = Vec::with_capacity(shares.len());
         for shares in &mut shares {
           tx_shares.push(vec![]);
-          for i in 1 ..= spec.n() {
+          for i in 1 ..= spec.n(&removed) {
             let i = Participant::new(i).unwrap();
             if our_i.contains(&i) {
               if shares.contains_key(&i) {
@@ -415,13 +418,16 @@ async fn handle_processor_message<D: Db, P: P2p>(
       }
       key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => {
         // Check if the MuSig signature had any errors as if so, we need to provide
-        // RemoveParticipant
+        // RemoveParticipantDueToDkg
         // As for the safety of calling error_generating_key_pair, the processor is presumed
         // to only send InvalidShare or GeneratedKeyPair for a given attempt
         let mut txs = if let Some(faulty) =
           crate::tributary::error_generating_key_pair(&mut txn, key, spec, id.attempt)
         {
-          vec![Transaction::RemoveParticipant(faulty)]
+          vec![Transaction::RemoveParticipantDueToDkg {
+            attempt: id.attempt,
+            participant: faulty,
+          }]
         } else {
           vec![]
         };
@@ -457,14 +463,14 @@ async fn handle_processor_message<D: Db, P: P2p>(
             }]
           }
           Err(p) => {
-            vec![Transaction::RemoveParticipant(p)]
+            vec![Transaction::RemoveParticipantDueToDkg { attempt: id.attempt, participant: p }]
           }
         }
       }
       // This is a response to the ordered VerifyBlame, which is why this satisfies the provided
       // transaction's needs to be perfectly ordered
-      key_gen::ProcessorMessage::Blame { id: _, participant } => {
-        vec![Transaction::RemoveParticipant(participant)]
+      key_gen::ProcessorMessage::Blame { id, participant } => {
+        vec![Transaction::RemoveParticipantDueToDkg { attempt: id.attempt, participant }]
       }
     },
     ProcessorMessage::Sign(msg) => match msg {
@@ -268,6 +268,7 @@ async fn handle_block<D: Db, Pro: Processors>(
       let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen else {
         panic!("KeyGen event wasn't KeyGen: {key_gen:?}");
       };
+      let substrate_key = key_pair.0 .0;
       processors
         .send(
           set.network,
@@ -289,7 +290,7 @@ async fn handle_block<D: Db, Pro: Processors>(
         )
         .await;
       let mut txn = db.txn();
-      SeraiDkgCompleted::set(&mut txn, set, &());
+      SeraiDkgCompleted::set(&mut txn, set, &substrate_key);
       HandledEvent::handle_event(&mut txn, hash, event_id);
       txn.commit();
     }
@@ -123,10 +123,13 @@ fn serialize_sign_data() {

 #[test]
 fn serialize_transaction() {
-  test_read_write(Transaction::RemoveParticipant(
-    frost::Participant::new(u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1))
-      .unwrap(),
-  ));
+  test_read_write(Transaction::RemoveParticipantDueToDkg {
+    attempt: u32::try_from(OsRng.next_u64() >> 32).unwrap(),
+    participant: frost::Participant::new(
+      u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
+    )
+    .unwrap(),
+  });

   {
     let mut commitments = vec![random_vec(&mut OsRng, 512)];
@@ -29,7 +29,7 @@ async fn sync_test() {
   let mut keys = new_keys(&mut OsRng);
   let spec = new_spec(&mut OsRng, &keys);
   // Ensure this can have a node fail
-  assert!(spec.n() > spec.t());
+  assert!(spec.n(&[]) > spec.t());

   let mut tributaries = new_tributaries(&keys, &spec)
     .await
@@ -142,7 +142,7 @@ async fn sync_test() {
   // Because only `t` validators are used in a commit, take n - t nodes offline
   // leaving only `t` nodes. Which should force it to participate in the consensus
   // of next blocks.
-  let spares = usize::from(spec.n() - spec.t());
+  let spares = usize::from(spec.n(&[]) - spec.t());
   for thread in p2p_threads.iter().take(spares) {
     thread.abort();
   }
@@ -42,15 +42,18 @@ pub enum Accumulation {
   NotReady,
 }

+// TODO: Move from genesis to set for indexing
 create_db!(
   Tributary {
     SeraiBlockNumber: (hash: [u8; 32]) -> u64,
     SeraiDkgRemoval: (spec: ValidatorSet, removing: [u8; 32]) -> (),
-    SeraiDkgCompleted: (spec: ValidatorSet) -> (),
+    SeraiDkgCompleted: (spec: ValidatorSet) -> [u8; 32],

     TributaryBlockNumber: (block: [u8; 32]) -> u32,
     LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32],
     FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
+    FatalSlashesAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>,
+    FatalSlashesAsOfFatalSlash: (genesis: [u8; 32], fatally_slashed: [u8; 32]) -> Vec<[u8; 32]>,
     FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (),
     DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,
     PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,
@@ -58,6 +61,7 @@ create_db!(
     RemovalNonces:
       (genesis: [u8; 32], removing: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
     DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair,
+    KeyToDkgAttempt: (key: [u8; 32]) -> u32,
     DkgCompleted: (genesis: [u8; 32]) -> (),
     LocallyDkgRemoved: (genesis: [u8; 32], validator: [u8; 32]) -> (),
     AttemptDb: (genesis: [u8; 32], topic: &Topic) -> u32,
@@ -74,13 +78,14 @@ impl FatallySlashed {
     Self::set(txn, genesis, account, &());
     let mut existing = FatalSlashes::get(txn, genesis).unwrap_or_default();

-    // Don't append if we already have it
+    // Don't append if we already have it, which can occur upon multiple faults
     if existing.iter().any(|existing| existing == &account) {
       return;
     }

     existing.push(account);
     FatalSlashes::set(txn, genesis, &existing);
+    FatalSlashesAsOfFatalSlash::set(txn, genesis, account, &existing);
   }
 }

@@ -39,7 +39,9 @@ pub fn dkg_confirmation_nonces(
   txn: &mut impl DbTxn,
   attempt: u32,
 ) -> [u8; 64] {
-  (DkgConfirmer { key, spec, txn, attempt }).preprocess()
+  DkgConfirmer::new(key, spec, txn, attempt)
+    .expect("getting DKG confirmation nonces for unknown attempt")
+    .preprocess()
 }

 // If there's an error generating a key pair, return any errors which would've occured when
@@ -58,7 +60,10 @@ pub fn error_generating_key_pair(
   // Sign a key pair which can't be valid
   // (0xff used as 0 would be the Ristretto identity point, 0-length for the network key)
   let key_pair = KeyPair(Public([0xff; 32]), vec![0xffu8; 0].try_into().unwrap());
-  match (DkgConfirmer { key, spec, txn, attempt }).share(preprocesses, &key_pair) {
+  match DkgConfirmer::new(key, spec, txn, attempt)
+    .expect("reporting an error during DKG for an unrecognized attempt")
+    .share(preprocesses, &key_pair)
+  {
     Ok(mut share) => {
       // Zeroize the share to ensure it's not accessed
       share.zeroize();
@@ -76,13 +81,20 @@ pub fn generated_key_pair<D: Db>(
   attempt: u32,
 ) -> Result<[u8; 32], Participant> {
   DkgKeyPair::set(txn, spec.genesis(), attempt, key_pair);
+  KeyToDkgAttempt::set(txn, key_pair.0 .0, &attempt);
   let preprocesses = ConfirmationNonces::get(txn, spec.genesis(), attempt).unwrap();
-  (DkgConfirmer { key, spec, txn, attempt }).share(preprocesses, key_pair)
+  DkgConfirmer::new(key, spec, txn, attempt)
+    .expect("claiming to have generated a key pair for an unrecognized attempt")
+    .share(preprocesses, key_pair)
 }

-fn unflatten(spec: &TributarySpec, data: &mut HashMap<Participant, Vec<u8>>) {
+fn unflatten(
+  spec: &TributarySpec,
+  removed: &[<Ristretto as Ciphersuite>::G],
+  data: &mut HashMap<Participant, Vec<u8>>,
+) {
   for (validator, _) in spec.validators() {
-    let range = spec.i(validator).unwrap();
+    let range = spec.i(removed, validator).unwrap();
     let Some(all_segments) = data.remove(&range.start) else {
       continue;
     };
@@ -99,6 +111,7 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
 {
   fn accumulate(
     &mut self,
+    removed: &[<Ristretto as Ciphersuite>::G],
     data_spec: &DataSpecification,
     signer: <Ristretto as Ciphersuite>::G,
     data: &Vec<u8>,
@@ -109,8 +122,10 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
       panic!("accumulating data for a participant multiple times");
     }
     let signer_shares = {
-      let signer_i =
-        self.spec.i(signer).expect("transaction signed by a non-validator for this tributary");
+      let signer_i = self
+        .spec
+        .i(removed, signer)
+        .expect("transaction signed by a non-validator for this tributary");
       u16::from(signer_i.end) - u16::from(signer_i.start)
     };

@@ -143,7 +158,7 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
     // If we have all the needed commitments/preprocesses/shares, tell the processor
     let needs_everyone =
       (data_spec.topic == Topic::Dkg) || (data_spec.topic == Topic::DkgConfirmation);
-    let needed = if needs_everyone { self.spec.n() } else { self.spec.t() };
+    let needed = if needs_everyone { self.spec.n(removed) } else { self.spec.t() };
     if received_range.contains(&needed) {
       log::debug!(
         "accumulation for entry {:?} attempt #{} is ready",
@@ -154,7 +169,7 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
       let mut data = HashMap::new();
       for validator in self.spec.validators().iter().map(|validator| validator.0) {
         data.insert(
-          self.spec.i(validator).unwrap().start,
+          self.spec.i(removed, validator).unwrap().start,
           if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) {
             data
           } else {
|
@ -170,7 +185,7 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P:
|
||||||
.remove(
|
.remove(
|
||||||
&self
|
&self
|
||||||
.spec
|
.spec
|
||||||
.i(Ristretto::generator() * self.our_key.deref())
|
.i(removed, Ristretto::generator() * self.our_key.deref())
|
||||||
.expect("handling a message for a Tributary we aren't part of")
|
.expect("handling a message for a Tributary we aren't part of")
|
||||||
.start,
|
.start,
|
||||||
)
|
)
|
||||||
|
@@ -187,6 +202,7 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>

   async fn handle_data(
     &mut self,
+    removed: &[<Ristretto as Ciphersuite>::G],
     data_spec: &DataSpecification,
     bytes: Vec<u8>,
     signed: &Signed,
|
@ -234,15 +250,16 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P:
|
||||||
// TODO: If this is shares, we need to check they are part of the selected signing set
|
// TODO: If this is shares, we need to check they are part of the selected signing set
|
||||||
|
|
||||||
// Accumulate this data
|
// Accumulate this data
|
||||||
self.accumulate(data_spec, signed.signer, &bytes)
|
self.accumulate(removed, data_spec, signed.signer, &bytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn check_sign_data_len(
|
async fn check_sign_data_len(
|
||||||
&mut self,
|
&mut self,
|
||||||
|
removed: &[<Ristretto as Ciphersuite>::G],
|
||||||
signer: <Ristretto as Ciphersuite>::G,
|
signer: <Ristretto as Ciphersuite>::G,
|
||||||
len: usize,
|
len: usize,
|
||||||
) -> Result<(), ()> {
|
) -> Result<(), ()> {
|
||||||
let signer_i = self.spec.i(signer).unwrap();
|
let signer_i = self.spec.i(removed, signer).unwrap();
|
||||||
if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) {
|
if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) {
|
||||||
self
|
self
|
||||||
.fatal_slash(
|
.fatal_slash(
|
||||||
|
@@ -255,16 +272,23 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
     Ok(())
   }

-  fn dkg_removal(&mut self, data: &SignData<[u8; 32]>) -> DkgRemoval<'_, T> {
+  fn dkg_removal<'a>(
+    &'a mut self,
+    removed: &'a [<Ristretto as Ciphersuite>::G],
+    data: &'a SignData<[u8; 32]>,
+  ) -> DkgRemoval<'a, T> {
     DkgRemoval {
-      spec: self.spec,
       key: self.our_key,
+      spec: self.spec,
       txn: self.txn,
+      removed,
       removing: data.plan,
       attempt: data.attempt,
     }
   }

+  // TODO: Don't call fatal_slash in here, return the party to fatal_slash to ensure no further
+  // execution occurs
   pub(crate) async fn handle_application_tx(&mut self, tx: Transaction) {
     let genesis = self.spec.genesis();

@@ -279,19 +303,37 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
     }

     match tx {
-      Transaction::RemoveParticipant(i) => {
-        self.fatal_slash_with_participant_index(i, "RemoveParticipant Provided TX").await
+      Transaction::RemoveParticipantDueToDkg { attempt, participant } => {
+        self
+          .fatal_slash_with_participant_index(
+            &removed_as_of_dkg_attempt(self.txn, genesis, attempt).unwrap_or_else(|| {
+              panic!(
+                "removed a participant due to a provided transaction with an attempt not {}",
+                "locally handled?"
+              )
+            }),
+            participant,
+            "RemoveParticipantDueToDkg Provided TX",
+          )
+          .await
       }

       Transaction::DkgCommitments { attempt, commitments, signed } => {
-        let Ok(_) = self.check_sign_data_len(signed.signer, commitments.len()).await else {
+        let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
+          self
+            .fatal_slash(signed.signer.to_bytes(), "DkgCommitments with an unrecognized attempt")
+            .await;
+          return;
+        };
+        let Ok(_) = self.check_sign_data_len(&removed, signed.signer, commitments.len()).await
+        else {
           return;
         };
         let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Preprocess, attempt };
-        match self.handle_data(&data_spec, commitments.encode(), &signed).await {
+        match self.handle_data(&removed, &data_spec, commitments.encode(), &signed).await {
           Accumulation::Ready(DataSet::Participating(mut commitments)) => {
             log::info!("got all DkgCommitments for {}", hex::encode(genesis));
-            unflatten(self.spec, &mut commitments);
+            unflatten(self.spec, &removed, &mut commitments);
             self
               .processors
               .send(
|
@ -311,17 +353,23 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P:
|
||||||
}
|
}
|
||||||
|
|
||||||
Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => {
|
Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => {
|
||||||
let Ok(_) = self.check_sign_data_len(signed.signer, shares.len()).await else {
|
let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
|
||||||
|
self
|
||||||
|
.fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt")
|
||||||
|
.await;
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
let Ok(_) = self.check_sign_data_len(&removed, signed.signer, shares.len()).await else {
|
||||||
return;
|
return;
|
||||||
};
|
};
|
||||||
|
|
||||||
let sender_i = self
|
let sender_i = self
|
||||||
.spec
|
.spec
|
||||||
.i(signed.signer)
|
.i(&removed, signed.signer)
|
||||||
.expect("transaction added to tributary by signer who isn't a participant");
|
.expect("transaction added to tributary by signer who isn't a participant");
|
||||||
let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start);
|
let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start);
|
||||||
for shares in &shares {
|
for shares in &shares {
|
||||||
if shares.len() != (usize::from(self.spec.n() - sender_is_len)) {
|
if shares.len() != (usize::from(self.spec.n(&removed) - sender_is_len)) {
|
||||||
self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares").await;
|
self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares").await;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@@ -329,7 +377,7 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>

         // Save each share as needed for blame
         {
-          let from_range = self.spec.i(signed.signer).unwrap();
+          let from_range = self.spec.i(&removed, signed.signer).unwrap();
           for (from_offset, shares) in shares.iter().enumerate() {
             let from =
               Participant::new(u16::from(from_range.start) + u16::try_from(from_offset).unwrap())
|
@ -352,7 +400,7 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P:
|
||||||
// Filter down to only our share's bytes for handle
|
// Filter down to only our share's bytes for handle
|
||||||
let our_i = self
|
let our_i = self
|
||||||
.spec
|
.spec
|
||||||
.i(Ristretto::generator() * self.our_key.deref())
|
.i(&removed, Ristretto::generator() * self.our_key.deref())
|
||||||
.expect("in a tributary we're not a validator for");
|
.expect("in a tributary we're not a validator for");
|
||||||
|
|
||||||
let our_shares = if sender_i == our_i {
|
let our_shares = if sender_i == our_i {
|
||||||
|
@@ -382,7 +430,7 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>

         let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt };
         let encoded_data = (confirmation_nonces.to_vec(), our_shares.encode()).encode();
-        match self.handle_data(&data_spec, encoded_data, &signed).await {
+        match self.handle_data(&removed, &data_spec, encoded_data, &signed).await {
           Accumulation::Ready(DataSet::Participating(confirmation_nonces_and_shares)) => {
             log::info!("got all DkgShares for {}", hex::encode(genesis));

@@ -440,7 +488,13 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
       }

       Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
-        let range = self.spec.i(signed.signer).unwrap();
+        let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
+          self
+            .fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized attempt")
+            .await;
+          return;
+        };
+        let range = self.spec.i(&removed, signed.signer).unwrap();
         if !range.contains(&accuser) {
           self
             .fatal_slash(
|
@ -457,7 +511,15 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P:
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
let share = DkgShare::get(self.txn, genesis, accuser.into(), faulty.into()).unwrap();
|
let Some(share) = DkgShare::get(self.txn, genesis, accuser.into(), faulty.into()) else {
|
||||||
|
self
|
||||||
|
.fatal_slash(
|
||||||
|
signed.signer.to_bytes(),
|
||||||
|
"InvalidDkgShare had a non-existent faulty participant",
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
return;
|
||||||
|
};
|
||||||
self
|
self
|
||||||
.processors
|
.processors
|
||||||
.send(
|
.send(
|
||||||
|
@ -474,12 +536,25 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P:
|
||||||
}
|
}
|
||||||
|
|
||||||
Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {
|
Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {
|
||||||
|
let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
|
||||||
|
self
|
||||||
|
.fatal_slash(signed.signer.to_bytes(), "DkgConfirmed with an unrecognized attempt")
|
||||||
|
.await;
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
|
||||||
let data_spec =
|
let data_spec =
|
||||||
DataSpecification { topic: Topic::DkgConfirmation, label: Label::Share, attempt };
|
DataSpecification { topic: Topic::DkgConfirmation, label: Label::Share, attempt };
|
||||||
match self.handle_data(&data_spec, confirmation_share.to_vec(), &signed).await {
|
match self.handle_data(&removed, &data_spec, confirmation_share.to_vec(), &signed).await {
|
||||||
Accumulation::Ready(DataSet::Participating(shares)) => {
|
Accumulation::Ready(DataSet::Participating(shares)) => {
|
||||||
log::info!("got all DkgConfirmed for {}", hex::encode(genesis));
|
log::info!("got all DkgConfirmed for {}", hex::encode(genesis));
|
||||||
|
|
||||||
|
let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
|
||||||
|
panic!(
|
||||||
|
"DkgConfirmed for everyone yet didn't have the removed parties for this attempt",
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
let preprocesses = ConfirmationNonces::get(self.txn, genesis, attempt).unwrap();
|
let preprocesses = ConfirmationNonces::get(self.txn, genesis, attempt).unwrap();
|
||||||
// TODO: This can technically happen under very very very specific timing as the txn put
|
// TODO: This can technically happen under very very very specific timing as the txn put
|
||||||
// happens before DkgConfirmed, yet the txn commit isn't guaranteed to
|
// happens before DkgConfirmed, yet the txn commit isn't guaranteed to
|
||||||
|
@@ -487,16 +562,17 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
               "in DkgConfirmed handling, which happens after everyone \
                (including us) fires DkgConfirmed, yet no confirming key pair",
             );
-            let sig =
-              match (DkgConfirmer { spec: self.spec, key: self.our_key, txn: self.txn, attempt })
-                .complete(preprocesses, &key_pair, shares)
-              {
-                Ok(sig) => sig,
-                Err(p) => {
-                  self.fatal_slash_with_participant_index(p, "invalid DkgConfirmer share").await;
-                  return;
-                }
-              };
+            let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt)
+              .expect("confirming DKG for unrecognized attempt");
+            let sig = match confirmer.complete(preprocesses, &key_pair, shares) {
+              Ok(sig) => sig,
+              Err(p) => {
+                self
+                  .fatal_slash_with_participant_index(&removed, p, "invalid DkgConfirmer share")
+                  .await;
+                return;
+              }
+            };

             DkgCompleted::set(self.txn, genesis, &());

@@ -527,13 +603,20 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
           return;
         }

+        let Some(removed) =
+          crate::tributary::removed_as_of_fatal_slash(self.txn, genesis, data.plan)
+        else {
+          self.fatal_slash(signer.to_bytes(), "removing someone who wasn't fatally slashed").await;
+          return;
+        };
+
         let data_spec = DataSpecification {
           topic: Topic::DkgRemoval(data.plan),
           label: data.label,
           attempt: data.attempt,
         };
         let Accumulation::Ready(DataSet::Participating(results)) =
-          self.handle_data(&data_spec, data.data.encode(), &data.signed).await
+          self.handle_data(&removed, &data_spec, data.data.encode(), &data.signed).await
         else {
           return;
         };
@@ -542,7 +625,7 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
           Label::Preprocess => {
             RemovalNonces::set(self.txn, genesis, data.plan, data.attempt, &results);

-            let Ok(share) = self.dkg_removal(&data).share(results) else {
+            let Ok(share) = self.dkg_removal(&removed, &data).share(results) else {
               // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal
               // slash) and censor transactions (yet don't explicitly ban)
               return;
@@ -562,7 +645,8 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
             let preprocesses =
               RemovalNonces::get(self.txn, genesis, data.plan, data.attempt).unwrap();

-            let Ok((signers, signature)) = self.dkg_removal(&data).complete(preprocesses, results)
+            let Ok((signers, signature)) =
+              self.dkg_removal(&removed, &data).complete(preprocesses, results)
             else {
               // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal
               // slash) and censor transactions (yet don't explicitly ban)
@@ -656,8 +740,21 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
       }

       Transaction::SubstrateSign(data) => {
+        // Provided transactions ensure synchrony on any signing protocol, and we won't start
+        // signing with threshold keys before we've confirmed them on-chain
+        let Some(removed) =
+          crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
+        else {
+          self
+            .fatal_slash(
+              data.signed.signer.to_bytes(),
+              "signing despite not having set keys on substrate",
+            )
+            .await;
+          return;
+        };
         let signer = data.signed.signer;
-        let Ok(_) = self.check_sign_data_len(signer, data.data.len()).await else {
+        let Ok(_) = self.check_sign_data_len(&removed, signer, data.data.len()).await else {
           return;
         };
         let expected_len = match data.label {
@@ -672,6 +769,7 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
               "unexpected length data for substrate signing protocol",
             )
             .await;
+          return;
         }
       }

@@ -681,11 +779,11 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
           attempt: data.attempt,
         };
         let Accumulation::Ready(DataSet::Participating(mut results)) =
-          self.handle_data(&data_spec, data.data.encode(), &data.signed).await
+          self.handle_data(&removed, &data_spec, data.data.encode(), &data.signed).await
         else {
           return;
         };
-        unflatten(self.spec, &mut results);
+        unflatten(self.spec, &removed, &mut results);

         let id = SubstrateSignId {
           session: self.spec.set().session,
@@ -706,7 +804,19 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
       }

       Transaction::Sign(data) => {
-        let Ok(_) = self.check_sign_data_len(data.signed.signer, data.data.len()).await else {
+        let Some(removed) =
+          crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
+        else {
+          self
+            .fatal_slash(
+              data.signed.signer.to_bytes(),
+              "signing despite not having set keys on substrate",
+            )
+            .await;
+          return;
+        };
+        let Ok(_) = self.check_sign_data_len(&removed, data.signed.signer, data.data.len()).await
+        else {
           return;
         };

@@ -716,9 +826,9 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
           attempt: data.attempt,
         };
         if let Accumulation::Ready(DataSet::Participating(mut results)) =
-          self.handle_data(&data_spec, data.data.encode(), &data.signed).await
+          self.handle_data(&removed, &data_spec, data.data.encode(), &data.signed).await
         {
-          unflatten(self.spec, &mut results);
+          unflatten(self.spec, &removed, &mut results);
           let id =
             SignId { session: self.spec.set().session, id: data.plan, attempt: data.attempt };
           self
@@ -1,3 +1,7 @@
+use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
+
+use serai_client::validator_sets::primitives::ValidatorSet;
+
 use tributary::{
   ReadWrite,
   transaction::{TransactionError, TransactionKind, Transaction as TransactionTrait},
@@ -20,6 +24,58 @@ pub use handle::*;

 pub mod scanner;

+pub fn removed_as_of_dkg_attempt(
+  getter: &impl Get,
+  genesis: [u8; 32],
+  attempt: u32,
+) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {
+  if attempt == 0 {
+    Some(vec![])
+  } else {
+    FatalSlashesAsOfDkgAttempt::get(getter, genesis, attempt).map(|keys| {
+      keys.iter().map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap()).collect()
+    })
+  }
+}
+
+pub fn latest_removed(getter: &impl Get, genesis: [u8; 32]) -> Vec<<Ristretto as Ciphersuite>::G> {
+  #[allow(clippy::unwrap_or_default)]
+  FatalSlashes::get(getter, genesis)
+    .unwrap_or(vec![])
+    .iter()
+    .map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap())
+    .collect()
+}
+
+pub fn removed_as_of_set_keys(
+  getter: &impl Get,
+  set: ValidatorSet,
+  genesis: [u8; 32],
+) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {
+  // SeraiDkgCompleted has the key placed on-chain.
+  // This key can be uniquely mapped to an attempt so long as one participant was honest, which we
+  // assume as a presumably honest participant.
+  // Resolve from generated key to attempt to fatally slashed as of attempt.
+
+  // This expect will trigger if this is prematurely called and Substrate has tracked the keys yet
+  // we haven't locally synced and handled the Tributary
+  // All callers of this, at the time of writing, ensure the Tributary has sufficiently synced
+  // making the panic with context more desirable than the None
+  let attempt = KeyToDkgAttempt::get(getter, SeraiDkgCompleted::get(getter, set)?)
+    .expect("key completed on-chain didn't have an attempt related");
+  removed_as_of_dkg_attempt(getter, genesis, attempt)
+}
+
+pub fn removed_as_of_fatal_slash(
+  getter: &impl Get,
+  genesis: [u8; 32],
+  fatally_slashed: [u8; 32],
+) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {
+  FatalSlashesAsOfFatalSlash::get(getter, genesis, fatally_slashed).map(|keys| {
+    keys.iter().map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap()).collect()
+  })
+}
+
 pub async fn publish_signed_transaction<D: Db, P: crate::P2p>(
   txn: &mut D::Transaction<'_>,
   tributary: &Tributary<D, Transaction, P>,
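The helpers added above chain three lookups: the key set on-chain (SeraiDkgCompleted), the attempt that produced it (KeyToDkgAttempt), and the validators fatally slashed as of that attempt (FatalSlashesAsOfDkgAttempt). The following standalone sketch shows the resolution order only; the HashMap "tables", the u8 set identifier, and the byte-array keys are stand-ins for the actual database schema:

    use std::collections::HashMap;

    // Stand-ins for the DB tables; keys are simplified to byte arrays.
    struct Tables {
      serai_dkg_completed: HashMap<u8, [u8; 32]>, // set -> key confirmed on-chain
      key_to_dkg_attempt: HashMap<[u8; 32], u32>, // key -> attempt which produced it
      fatal_slashes_as_of_attempt: HashMap<u32, Vec<[u8; 32]>>, // attempt -> removed validators
    }

    // Mirrors removed_as_of_set_keys: on-chain key -> attempt -> removed validators.
    fn removed_as_of_set_keys(t: &Tables, set: u8) -> Option<Vec<[u8; 32]>> {
      let key = t.serai_dkg_completed.get(&set)?;
      let attempt = *t.key_to_dkg_attempt.get(key).expect("completed key with no attempt");
      if attempt == 0 {
        // Attempt 0 always starts with nobody removed
        Some(vec![])
      } else {
        t.fatal_slashes_as_of_attempt.get(&attempt).cloned()
      }
    }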
@@ -133,10 +133,19 @@ pub struct TributaryBlockHandler<
 impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
   TributaryBlockHandler<'_, T, Pro, PST, PTT, RID, P>
 {
-  async fn dkg_removal_attempt(&mut self, removing: [u8; 32], attempt: u32) {
-    let preprocess =
-      (DkgRemoval { spec: self.spec, key: self.our_key, txn: self.txn, removing, attempt })
-        .preprocess();
+  async fn attempt_dkg_removal(&mut self, removing: [u8; 32], attempt: u32) {
+    let genesis = self.spec.genesis();
+    let removed = crate::tributary::removed_as_of_fatal_slash(self.txn, genesis, removing)
+      .expect("attempting DKG removal to remove someone who wasn't removed");
+    let preprocess = (DkgRemoval {
+      key: self.our_key,
+      spec: self.spec,
+      txn: self.txn,
+      removed: &removed,
+      removing,
+      attempt,
+    })
+    .preprocess();
     let mut tx = Transaction::DkgRemoval(SignData {
       plan: removing,
       attempt,
@@ -144,7 +153,7 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
       data: vec![preprocess.to_vec()],
       signed: Transaction::empty_signed(),
     });
-    tx.sign(&mut OsRng, self.spec.genesis(), self.our_key);
+    tx.sign(&mut OsRng, genesis, self.our_key);
     self.publish_tributary_tx.publish_tributary_tx(tx).await;
   }

@@ -163,7 +172,7 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
     // If during a DKG, remove the participant
     if DkgCompleted::get(self.txn, genesis).is_none() {
       AttemptDb::recognize_topic(self.txn, genesis, Topic::DkgRemoval(slashing));
-      self.dkg_removal_attempt(slashing, 0).await;
+      self.attempt_dkg_removal(slashing, 0).await;
     }
   }

@@ -171,12 +180,17 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
   // Tributary post-DKG
   // https://github.com/serai-dex/serai/issues/426

-  pub async fn fatal_slash_with_participant_index(&mut self, i: Participant, reason: &str) {
+  pub async fn fatal_slash_with_participant_index(
+    &mut self,
+    removed: &[<Ristretto as Ciphersuite>::G],
+    i: Participant,
+    reason: &str,
+  ) {
     // Resolve from Participant to <Ristretto as Ciphersuite>::G
     let i = u16::from(i);
     let mut validator = None;
     for (potential, _) in self.spec.validators() {
-      let v_i = self.spec.i(potential).unwrap();
+      let v_i = self.spec.i(removed, potential).unwrap();
       if (u16::from(v_i.start) <= i) && (i < u16::from(v_i.end)) {
         validator = Some(potential);
         break;
@@ -250,19 +264,34 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
         */
         match topic {
           Topic::Dkg => {
+            #[allow(clippy::unwrap_or_default)]
+            FatalSlashesAsOfDkgAttempt::set(
+              self.txn,
+              genesis,
+              attempt,
+              &FatalSlashes::get(self.txn, genesis).unwrap_or(vec![]),
+            );
+
             if DkgCompleted::get(self.txn, genesis).is_none() {
               // Since it wasn't completed, instruct the processor to start the next attempt
               let id =
                 processor_messages::key_gen::KeyGenId { session: self.spec.set().session, attempt };
-              let our_i = self.spec.i(Ristretto::generator() * self.our_key.deref()).unwrap();

-              // TODO: Handle removed parties (modify n/i to accept list of removed)
+              let removed = crate::tributary::latest_removed(self.txn, genesis);
+              let our_i =
+                self.spec.i(&removed, Ristretto::generator() * self.our_key.deref()).unwrap();
+
               // TODO: Don't fatal slash, yet don't include, parties who have been offline so long as
-              // we still meet the needed threshold. We'd need a complete DKG protocol we then remove
-              // the offline participants from. publishing the DKG protocol completed without them.
+              // we still meet the needed threshold. This will have to have any parties removed for
+              // being offline, who aren't participating in the confirmed key, drop the Tributary and
+              // notify their processor.
+
+              // TODO: Instead of DKG confirmations taking a n-of-n MuSig, have it take a t-of-n with
+              // a specification of those explicitly removed and those removed due to being offline.
+
               let params =
-                frost::ThresholdParams::new(self.spec.t(), self.spec.n(), our_i.start).unwrap();
+                frost::ThresholdParams::new(self.spec.t(), self.spec.n(&removed), our_i.start)
+                  .unwrap();
               let shares = u16::from(our_i.end) - u16::from(our_i.start);

               self
@@ -284,7 +313,7 @@ impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
               SeraiDkgRemoval::get(self.txn, self.spec.set(), removing).is_none()
             {
               // Since it wasn't completed, attempt a new DkgRemoval
-              self.dkg_removal_attempt(removing, attempt).await;
+              self.attempt_dkg_removal(removing, attempt).await;
             }
           }
           Topic::SubstrateSign(inner_id) => {
@@ -217,19 +217,20 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
 // index the validators in the order they've been defined.
 fn threshold_i_map_to_keys_and_musig_i_map(
   spec: &TributarySpec,
+  removed: &[<Ristretto as Ciphersuite>::G],
   our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   mut map: HashMap<Participant, Vec<u8>>,
   sort_by_keys: bool,
 ) -> (Vec<<Ristretto as Ciphersuite>::G>, HashMap<Participant, Vec<u8>>) {
   // Insert our own index so calculations aren't offset
   let our_threshold_i =
-    spec.i(<Ristretto as Ciphersuite>::generator() * our_key.deref()).unwrap().start;
+    spec.i(removed, <Ristretto as Ciphersuite>::generator() * our_key.deref()).unwrap().start;
   assert!(map.insert(our_threshold_i, vec![]).is_none());

   let spec_validators = spec.validators();
   let key_from_threshold_i = |threshold_i| {
     for (key, _) in &spec_validators {
-      if threshold_i == spec.i(*key).unwrap().start {
+      if threshold_i == spec.i(removed, *key).unwrap().start {
         return *key;
       }
     }
@@ -262,13 +263,25 @@ fn threshold_i_map_to_keys_and_musig_i_map(
 }

 pub(crate) struct DkgConfirmer<'a, T: DbTxn> {
-  pub(crate) key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
-  pub(crate) spec: &'a TributarySpec,
-  pub(crate) txn: &'a mut T,
-  pub(crate) attempt: u32,
+  key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
+  spec: &'a TributarySpec,
+  removed: Vec<<Ristretto as Ciphersuite>::G>,
+  txn: &'a mut T,
+  attempt: u32,
 }

 impl<T: DbTxn> DkgConfirmer<'_, T> {
+  pub(crate) fn new<'a>(
+    key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
+    spec: &'a TributarySpec,
+    txn: &'a mut T,
+    attempt: u32,
+  ) -> Option<DkgConfirmer<'a, T>> {
+    // This relies on how confirmations are inlined into the DKG protocol and they accordingly
+    // share attempts
+    let removed = crate::tributary::removed_as_of_dkg_attempt(txn, spec.genesis(), attempt)?;
+    Some(DkgConfirmer { key, spec, removed, txn, attempt })
+  }
   fn signing_protocol(&mut self) -> SigningProtocol<'_, T, (&'static [u8; 12], u32)> {
     let context = (b"DkgConfirmer", self.attempt);
     SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context }
@@ -289,8 +302,14 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
     key_pair: &KeyPair,
   ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
     let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
-    let preprocesses =
-      threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses, false).1;
+    let preprocesses = threshold_i_map_to_keys_and_musig_i_map(
+      self.spec,
+      &self.removed,
+      self.key,
+      preprocesses,
+      false,
+    )
+    .1;
     let msg = set_keys_message(&self.spec.set(), key_pair);
     self.signing_protocol().share_internal(&participants, preprocesses, &msg)
   }
@@ -309,7 +328,8 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
     key_pair: &KeyPair,
     shares: HashMap<Participant, Vec<u8>>,
   ) -> Result<[u8; 64], Participant> {
-    let shares = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, shares, false).1;
+    let shares =
+      threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, shares, false).1;

     let machine = self
       .share_internal(preprocesses, key_pair)
@@ -323,6 +343,7 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
 pub(crate) struct DkgRemoval<'a, T: DbTxn> {
   pub(crate) key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
   pub(crate) spec: &'a TributarySpec,
+  pub(crate) removed: &'a [<Ristretto as Ciphersuite>::G],
   pub(crate) txn: &'a mut T,
   pub(crate) removing: [u8; 32],
   pub(crate) attempt: u32,
@@ -362,8 +383,13 @@ impl<T: DbTxn> DkgRemoval<'_, T> {
     &mut self,
     preprocesses: HashMap<Participant, Vec<u8>>,
   ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
-    let (participants, preprocesses) =
-      threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses, true);
+    let (participants, preprocesses) = threshold_i_map_to_keys_and_musig_i_map(
+      self.spec,
+      self.removed,
+      self.key,
+      preprocesses,
+      true,
+    );
     let msg = remove_participant_message(&self.spec.set(), Public(self.removing));
     self.signing_protocol().share_internal(&participants, preprocesses, &msg)
   }
@@ -381,7 +407,7 @@ impl<T: DbTxn> DkgRemoval<'_, T> {
     shares: HashMap<Participant, Vec<u8>>,
   ) -> Result<(Vec<SeraiAddress>, [u8; 64]), Participant> {
     let (participants, shares) =
-      threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, shares, true);
+      threshold_i_map_to_keys_and_musig_i_map(self.spec, self.removed, self.key, shares, true);
     let signers = participants.iter().map(|key| SeraiAddress(key.to_bytes())).collect::<Vec<_>>();
 
     let machine = self
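Both DkgConfirmer and DkgRemoval now thread the removed validators into threshold_i_map_to_keys_and_musig_i_map, so a removed validator is no longer counted among the MuSig signers. For intuition, here is a minimal standalone sketch (not the crate's function; u16 "validator keys" and raw byte-vector submissions stand in for Ristretto points and the actual preprocess/share types) of the kind of filtering and re-indexing this implies: removed validators are dropped and the remaining signers receive contiguous 1-indexed positions.

use std::collections::BTreeMap;

// Hypothetical stand-ins for validator keys and their submitted preprocesses/shares.
fn filter_and_reindex(
  validators: &[u16],
  removed: &[u16],
  submissions: &BTreeMap<u16, Vec<u8>>,
) -> (Vec<u16>, BTreeMap<u16, Vec<u8>>) {
  // Keep only validators which weren't removed; their order defines the signing positions.
  let participants: Vec<u16> =
    validators.iter().copied().filter(|v| !removed.contains(v)).collect();

  // Re-key each submission by its signer's 1-indexed position in the filtered list.
  let mut reindexed = BTreeMap::new();
  for (position, validator) in participants.iter().enumerate() {
    if let Some(submission) = submissions.get(validator) {
      reindexed.insert(u16::try_from(position + 1).unwrap(), submission.clone());
    }
  }
  (participants, reindexed)
}

fn main() {
  let validators = [10, 20, 30];
  let removed = [20];
  let mut submissions = BTreeMap::new();
  submissions.insert(10u16, vec![1u8]);
  submissions.insert(30, vec![3]);

  let (participants, reindexed) = filter_and_reindex(&validators, &removed, &submissions);
  assert_eq!(participants, vec![10, 30]);
  // Validator 30 now signs from position 2, not 3.
  assert_eq!(reindexed.get(&2), Some(&vec![3u8]));
}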
@@ -1,5 +1,5 @@
 use core::{ops::Range, fmt::Debug};
-use std::io;
+use std::{io, collections::HashMap};
 
 use transcript::{Transcript, RecommendedTranscript};
 
@@ -88,26 +88,53 @@ impl TributarySpec {
     self.start_time
   }
 
-  pub fn n(&self) -> u16 {
-    self.validators.iter().map(|(_, weight)| weight).sum()
+  pub fn n(&self, removed_validators: &[<Ristretto as Ciphersuite>::G]) -> u16 {
+    self
+      .validators
+      .iter()
+      .map(|(validator, weight)| if removed_validators.contains(validator) { 0 } else { *weight })
+      .sum()
   }
 
   pub fn t(&self) -> u16 {
-    ((2 * self.n()) / 3) + 1
+    // t doesn't change with regards to the amount of removed validators
+    ((2 * self.n(&[])) / 3) + 1
   }
 
-  pub fn i(&self, key: <Ristretto as Ciphersuite>::G) -> Option<Range<Participant>> {
+  pub fn i(
+    &self,
+    removed_validators: &[<Ristretto as Ciphersuite>::G],
+    key: <Ristretto as Ciphersuite>::G,
+  ) -> Option<Range<Participant>> {
+    let mut all_is = HashMap::new();
     let mut i = 1;
     for (validator, weight) in &self.validators {
-      if validator == &key {
-        return Some(Range {
-          start: Participant::new(i).unwrap(),
-          end: Participant::new(i + weight).unwrap(),
-        });
-      }
+      all_is.insert(
+        *validator,
+        Range { start: Participant::new(i).unwrap(), end: Participant::new(i + weight).unwrap() },
+      );
       i += weight;
     }
-    None
+
+    let original_i = all_is.get(&key)?.clone();
+    let mut result_i = original_i.clone();
+    for removed_validator in removed_validators {
+      let removed_i = all_is
+        .get(removed_validator)
+        .expect("removed validator wasn't present in set to begin with");
+      // If the queried key was removed, return None
+      if &original_i == removed_i {
+        return None;
+      }
+
+      // If the removed was before the queried, shift the queried down accordingly
+      if removed_i.start < original_i.start {
+        let removed_shares = u16::from(removed_i.end) - u16::from(removed_i.start);
+        result_i.start = Participant::new(u16::from(original_i.start) - removed_shares).unwrap();
+        result_i.end = Participant::new(u16::from(original_i.end) - removed_shares).unwrap();
+      }
+    }
+    Some(result_i)
   }
 
   pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {
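To make the re-indexing concrete: with weights [2, 1, 3] the original share ranges are 1..3, 3..4, and 4..7. Removing the weight-1 validator shifts the last range down to 3..6, leaves the first untouched, and maps the removed validator itself to None, while t stays ((2 * 6) / 3) + 1 = 5 since it is computed over the unmodified set. Below is a minimal standalone sketch of that shifting over plain u16 bounds and u8 validator ids (hypothetical stand-ins for Ristretto keys and the crate's Participant type, and accumulating the total shift rather than porting the method line for line).

use std::collections::HashMap;

// (start, end) share bounds, end-exclusive and 1-indexed, mirroring the ranges built above.
type Bounds = (u16, u16);

// Hypothetical stand-in for the new TributarySpec::i.
fn shifted_range(validators: &[(u8, u16)], removed: &[u8], key: u8) -> Option<Bounds> {
  // Assign every validator its original contiguous range of shares.
  let mut all_is: HashMap<u8, Bounds> = HashMap::new();
  let mut i = 1;
  for (validator, weight) in validators {
    all_is.insert(*validator, (i, i + weight));
    i += weight;
  }

  // A removed validator no longer has an index at all.
  if removed.contains(&key) {
    return None;
  }
  let original = *all_is.get(&key)?;
  // Every removed validator seated before the queried one shifts it down by its share count.
  let shift: u16 =
    removed.iter().map(|r| all_is[r]).filter(|r| r.0 < original.0).map(|r| r.1 - r.0).sum();
  Some((original.0 - shift, original.1 - shift))
}

fn main() {
  let validators = [(b'A', 2u16), (b'B', 1), (b'C', 3)];
  // With nobody removed, C covers shares 4..7 of n = 6.
  assert_eq!(shifted_range(&validators, &[], b'C'), Some((4, 7)));
  // Removing B (weight 1) shifts C down to 3..6; A, seated before B, is untouched.
  assert_eq!(shifted_range(&validators, &[b'B'], b'C'), Some((3, 6)));
  assert_eq!(shifted_range(&validators, &[b'B'], b'A'), Some((1, 3)));
  // The removed validator itself no longer has a range.
  assert_eq!(shifted_range(&validators, &[b'B'], b'B'), None);
}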
@@ -130,7 +130,10 @@ impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> SignData<Id> {
 
 #[derive(Clone, PartialEq, Eq)]
 pub enum Transaction {
-  RemoveParticipant(Participant),
+  RemoveParticipantDueToDkg {
+    attempt: u32,
+    participant: Participant,
+  },
 
   DkgCommitments {
     attempt: u32,
@@ -194,9 +197,10 @@ pub enum Transaction {
 impl Debug for Transaction {
   fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
     match self {
-      Transaction::RemoveParticipant(participant) => fmt
-        .debug_struct("Transaction::RemoveParticipant")
+      Transaction::RemoveParticipantDueToDkg { attempt, participant } => fmt
+        .debug_struct("Transaction::RemoveParticipantDueToDkg")
         .field("participant", participant)
+        .field("attempt", attempt)
         .finish(),
       Transaction::DkgCommitments { attempt, commitments: _, signed } => fmt
         .debug_struct("Transaction::DkgCommitments")
@@ -255,12 +259,19 @@ impl ReadWrite for Transaction {
     reader.read_exact(&mut kind)?;
 
     match kind[0] {
-      0 => Ok(Transaction::RemoveParticipant({
-        let mut participant = [0; 2];
-        reader.read_exact(&mut participant)?;
-        Participant::new(u16::from_le_bytes(participant))
-          .ok_or_else(|| io::Error::other("invalid participant in RemoveParticipant"))?
-      })),
+      0 => Ok(Transaction::RemoveParticipantDueToDkg {
+        attempt: {
+          let mut attempt = [0; 4];
+          reader.read_exact(&mut attempt)?;
+          u32::from_le_bytes(attempt)
+        },
+        participant: {
+          let mut participant = [0; 2];
+          reader.read_exact(&mut participant)?;
+          Participant::new(u16::from_le_bytes(participant))
+            .ok_or_else(|| io::Error::other("invalid participant in RemoveParticipantDueToDkg"))?
+        },
+      }),
 
       1 => {
         let mut attempt = [0; 4];
@@ -424,9 +435,10 @@ impl ReadWrite for Transaction {
 
   fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
     match self {
-      Transaction::RemoveParticipant(i) => {
+      Transaction::RemoveParticipantDueToDkg { attempt, participant } => {
         writer.write_all(&[0])?;
-        writer.write_all(&u16::from(*i).to_le_bytes())
+        writer.write_all(&attempt.to_le_bytes())?;
+        writer.write_all(&u16::from(*participant).to_le_bytes())
       }
 
       Transaction::DkgCommitments { attempt, commitments, signed } => {
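The serialized form of the new variant is therefore a single kind byte of 0, followed by the attempt as a little-endian u32 and the participant index as a little-endian u16, seven bytes in total versus the prior three. A standalone round-trip sketch of that layout over plain integers (hypothetical helpers, not the crate's ReadWrite impl, and eliding the Participant non-zero check):

use std::io::{self, Read, Write};

// Mirrors the wire format in the diff above: kind 0, LE u32 attempt, LE u16 participant.
fn write_remove_participant(w: &mut impl Write, attempt: u32, participant: u16) -> io::Result<()> {
  w.write_all(&[0])?;
  w.write_all(&attempt.to_le_bytes())?;
  w.write_all(&participant.to_le_bytes())
}

fn read_remove_participant(r: &mut impl Read) -> io::Result<(u32, u16)> {
  let mut kind = [0; 1];
  r.read_exact(&mut kind)?;
  if kind[0] != 0 {
    return Err(io::Error::other("not a RemoveParticipantDueToDkg transaction"));
  }
  let mut attempt = [0; 4];
  r.read_exact(&mut attempt)?;
  let mut participant = [0; 2];
  r.read_exact(&mut participant)?;
  Ok((u32::from_le_bytes(attempt), u16::from_le_bytes(participant)))
}

fn main() -> io::Result<()> {
  let mut buf = vec![];
  write_remove_participant(&mut buf, 3, 2)?;
  // 1 kind byte + 4 attempt bytes + 2 participant bytes
  assert_eq!(buf.len(), 7);
  assert_eq!(read_remove_participant(&mut buf.as_slice())?, (3, 2));
  Ok(())
}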
@@ -545,7 +557,7 @@ impl ReadWrite for Transaction {
 impl TransactionTrait for Transaction {
   fn kind(&self) -> TransactionKind<'_> {
     match self {
-      Transaction::RemoveParticipant(_) => TransactionKind::Provided("remove"),
+      Transaction::RemoveParticipantDueToDkg { .. } => TransactionKind::Provided("remove"),
 
       Transaction::DkgCommitments { attempt, commitments: _, signed } => {
         TransactionKind::Signed((b"dkg", attempt).encode(), signed)
@@ -623,7 +635,9 @@ impl Transaction {
   ) {
     fn signed(tx: &mut Transaction) -> (u32, &mut Signed) {
       let nonce = match tx {
-        Transaction::RemoveParticipant(_) => panic!("signing RemoveParticipant"),
+        Transaction::RemoveParticipantDueToDkg { .. } => {
+          panic!("signing RemoveParticipantDueToDkg")
+        }
 
         Transaction::DkgCommitments { .. } => 0,
         Transaction::DkgShares { .. } => 1,
@@ -645,7 +659,7 @@ impl Transaction {
       (
         nonce,
         match tx {
-          Transaction::RemoveParticipant(_) => panic!("signing RemoveParticipant"),
+          Transaction::RemoveParticipantDueToDkg { .. } => panic!("signing RemoveParticipant"),
 
           Transaction::DkgCommitments { ref mut signed, .. } => signed,
           Transaction::DkgShares { ref mut signed, .. } => signed,
@@ -249,8 +249,6 @@ impl<T: TransactionTrait> Block<T> {
       }
       last_tx_order = current_tx_order;
 
-      // TODO: should we modify the verify_transaction to take `Transaction<T>` or
-      // use this pattern of verifying tendermint Txs and app txs differently?
       match tx {
         Transaction::Tendermint(tx) => {
           match verify_tendermint_tx::<N>(tx, schema.clone(), &commit) {
@@ -272,6 +272,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
       provided_in_chain,
       allow_non_local_provided,
     );
+    // Drop this TXN's changes as we're solely verifying the block
    drop(txn);
     res
   }
@@ -179,7 +179,6 @@ pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite {
 pub trait GAIN: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u32> {}
 impl<F: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u32>> GAIN for F {}
 
-// This will only cause mutations when the transaction is valid
 pub(crate) fn verify_transaction<F: GAIN, T: Transaction>(
   tx: &T,
   genesis: [u8; 32],
@@ -204,7 +203,7 @@ pub(crate) fn verify_transaction<F: GAIN, T: Transaction>(
         Err(TransactionError::InvalidSigner)?;
       }
 
-      // TODO: Use Schnorr half-aggregation and a batch verification here
+      // TODO: Use a batch verification here
       if !signature.verify(*signer, tx.sig_hash(genesis)) {
         Err(TransactionError::InvalidSignature)?;
       }
@@ -29,7 +29,7 @@ pub struct KeyConfirmed<C: Ciphersuite> {
 
 create_db!(
   KeyGenDb {
-    ParamsDb: (session: &Session) -> (ThresholdParams, u16),
+    ParamsDb: (session: &Session, attempt: u32) -> (ThresholdParams, u16),
     // Not scoped to the set since that'd have latter attempts overwrite former
     // A former attempt may become the finalized attempt, even if it doesn't in a timely manner
     // Overwriting its commitments would be accordingly poor
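Scoping ParamsDb by (session, attempt) means a re-attempt, which may run with a different share count after a removal, records its parameters alongside rather than over the earlier attempt's. A minimal standalone sketch of that keying with a plain BTreeMap (hypothetical Session and Params stand-ins, not the crate's create_db! macro or ThresholdParams):

use std::collections::BTreeMap;

// Hypothetical stand-ins for the crate's Session and threshold parameters.
type Session = u32;
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Params {
  t: u16,
  n: u16,
}

fn main() {
  // Keyed by (session, attempt): attempt 1 does not clobber attempt 0's params.
  let mut params_db: BTreeMap<(Session, u32), Params> = BTreeMap::new();
  params_db.insert((0, 0), Params { t: 3, n: 4 });
  // A re-attempt after a removal runs with one fewer share, while t stays at ((2 * 4) / 3) + 1.
  params_db.insert((0, 1), Params { t: 3, n: 3 });

  assert_eq!(params_db.get(&(0, 0)), Some(&Params { t: 3, n: 4 }));
  assert_eq!(params_db.get(&(0, 1)), Some(&Params { t: 3, n: 3 }));
  // A session-only key would have left just the latest attempt retrievable.
}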
@@ -152,7 +152,10 @@ impl<N: Network, D: Db> KeyGen<N, D> {
 
   pub fn in_set(&self, session: &Session) -> bool {
     // We determine if we're in set using if we have the parameters for a session's key generation
-    ParamsDb::get(&self.db, session).is_some()
+    // The usage of 0 for the attempt is valid so long as we aren't malicious and accordingly
+    // aren't fatally slashed
+    // TODO: Revisit once we do DKG removals for being offline
+    ParamsDb::get(&self.db, session, 0).is_some()
   }
 
   #[allow(clippy::type_complexity)]
@@ -319,7 +322,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
       self.active_share.remove(&id.session).is_none()
     {
       // If we haven't handled this session before, save the params
-      ParamsDb::set(txn, &id.session, &(params, shares));
+      ParamsDb::set(txn, &id.session, id.attempt, &(params, shares));
     }
 
     let (machines, commitments) = key_gen_machines(id, params, shares);
@@ -338,7 +341,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
          panic!("commitments when already handled commitments");
        }
 
-        let (params, share_quantity) = ParamsDb::get(txn, &id.session).unwrap();
+        let (params, share_quantity) = ParamsDb::get(txn, &id.session, id.attempt).unwrap();
 
        // Unwrap the machines, rebuilding them if we didn't have them in our cache
        // We won't if the processor rebooted
@@ -373,7 +376,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
       CoordinatorMessage::Shares { id, shares } => {
         info!("Received shares for {:?}", id);
 
-        let (params, share_quantity) = ParamsDb::get(txn, &id.session).unwrap();
+        let (params, share_quantity) = ParamsDb::get(txn, &id.session, id.attempt).unwrap();
 
         // Same commentary on inconsistency as above exists
         let (machines, our_shares) = self.active_share.remove(&id.session).unwrap_or_else(|| {
@@ -514,7 +517,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
       }
 
       CoordinatorMessage::VerifyBlame { id, accuser, accused, share, blame } => {
-        let params = ParamsDb::get(txn, &id.session).unwrap().0;
+        let params = ParamsDb::get(txn, &id.session, id.attempt).unwrap().0;
 
         let mut share_ref = share.as_slice();
         let Ok(substrate_share) = EncryptedMessage::<